Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

General cleanup #32980

Merged
merged 5 commits on Aug 24, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion accounts-db/src/account_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ impl AccountStorage {
pub fn initialize(&mut self, all_storages: AccountStorageMap) {
assert!(self.map.is_empty());
assert!(self.no_shrink_in_progress());
self.map.extend(all_storages.into_iter())
self.map.extend(all_storages)
}

/// remove the append vec at 'slot'
Expand Down
7 changes: 3 additions & 4 deletions accounts-db/src/accounts_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3487,8 +3487,7 @@ impl AccountsDb {

let (reclaims, pubkeys_removed_from_accounts_index2) =
self.purge_keys_exact(pubkey_to_slot_set.iter());
pubkeys_removed_from_accounts_index
.extend(pubkeys_removed_from_accounts_index2.into_iter());
pubkeys_removed_from_accounts_index.extend(pubkeys_removed_from_accounts_index2);

// Don't reset from clean, since the pubkeys in those stores may need to be unref'ed
// and those stores may be used for background hashing.
Expand Down Expand Up @@ -7358,7 +7357,7 @@ impl AccountsDb {
let mut sort_time = Measure::start("sort_storages");
let min_root = self.accounts_index.min_alive_root();
let storages = SortedStorages::new_with_slots(
combined_maps.iter().zip(slots.into_iter()),
combined_maps.iter().zip(slots),
min_root,
Some(slot),
);
Expand Down Expand Up @@ -7824,7 +7823,7 @@ impl AccountsDb {
let (storages, slots) =
self.get_snapshot_storages(base_slot.checked_add(1).unwrap()..=slot);
let sorted_storages =
SortedStorages::new_with_slots(storages.iter().zip(slots.into_iter()), None, None);
SortedStorages::new_with_slots(storages.iter().zip(slots), None, None);
let calculated_incremental_accounts_hash = self.calculate_incremental_accounts_hash(
&calc_config,
&sorted_storages,
Expand Down
4 changes: 2 additions & 2 deletions accounts-db/src/append_vec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -427,7 +427,7 @@ impl AppendVec {
Some((
//UNSAFE: This unsafe creates a slice that represents a chunk of self.map memory
//The lifetime of this slice is tied to &self, since it points to self.map memory
unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, size) },
unsafe { std::slice::from_raw_parts(data.as_ptr(), size) },
next,
))
}
Expand Down Expand Up @@ -615,7 +615,7 @@ impl AppendVec {
let ptrs = [
(meta_ptr as *const u8, mem::size_of::<StoredMeta>()),
(account_meta_ptr as *const u8, mem::size_of::<AccountMeta>()),
(hash_ptr as *const u8, mem::size_of::<Hash>()),
(hash_ptr, mem::size_of::<Hash>()),
(data_ptr, data_len),
];
if let Some(res) = self.append_ptrs_locked(&mut offset, &ptrs) {
Expand Down
2 changes: 1 addition & 1 deletion accounts-db/src/sorted_storages.rs
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ impl<'a> SortedStorages<'a> {
let slots = source.iter().map(|storage| {
storage.slot() // this must be unique. Will be enforced in new_with_slots
});
Self::new_with_slots(source.iter().zip(slots.into_iter()), None, None)
Self::new_with_slots(source.iter().zip(slots), None, None)
}

/// create [`SortedStorages`] from `source` iterator.
Expand Down
2 changes: 1 addition & 1 deletion accounts-db/src/tiered_storage/mmap_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ pub fn get_slice(map: &Mmap, offset: usize, size: usize) -> std::io::Result<(&[u
}
let data = &map[offset..next];
let next = u64_align!(next);
let ptr = data.as_ptr() as *const u8;
let ptr = data.as_ptr();

Ok((unsafe { std::slice::from_raw_parts(ptr, size) }, next))
}
2 changes: 1 addition & 1 deletion bucket_map/src/bucket_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -409,7 +409,7 @@ impl<O: BucketOccupied> BucketStorage<O> {

unsafe {
let dst = dst_slice.as_ptr() as *mut u8;
let src = src_slice.as_ptr() as *const u8;
let src = src_slice.as_ptr();
std::ptr::copy_nonoverlapping(src, dst, old_bucket.cell_size as usize);
};
}
Expand Down
2 changes: 1 addition & 1 deletion cli-output/src/cli_output.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2385,7 +2385,7 @@ pub fn return_signers_data(tx: &Transaction, config: &ReturnSignersConfig) -> Cl
tx.signatures
.iter()
.zip(tx.message.account_keys.iter())
.zip(verify_results.into_iter())
.zip(verify_results)
.for_each(|((sig, key), res)| {
if res {
signers.push(format!("{key}={sig}"))
Expand Down
2 changes: 1 addition & 1 deletion cli/src/cluster_query.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1980,7 +1980,7 @@ pub fn process_show_validators(

let validators: Vec<_> = current_validators
.into_iter()
.chain(delinquent_validators.into_iter())
.chain(delinquent_validators)
.collect();

let (average_skip_rate, average_stake_weighted_skip_rate) = {
Expand Down
2 changes: 1 addition & 1 deletion cli/src/vote.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1385,7 +1385,7 @@ pub fn process_close_vote_account(
if let Some(vote_account) = vote_account_status
.current
.into_iter()
.chain(vote_account_status.delinquent.into_iter())
.chain(vote_account_status.delinquent)
.next()
{
if vote_account.activated_stake != 0 {
Expand Down
2 changes: 1 addition & 1 deletion core/src/shred_fetch_stage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,7 @@ impl ShredFetchStage {
turbine_disabled.clone(),
);

tvu_threads.extend(repair_receiver.into_iter());
tvu_threads.extend(repair_receiver);
tvu_threads.push(tvu_filter);
tvu_threads.push(repair_handler);

Expand Down
2 changes: 1 addition & 1 deletion gossip/src/cluster_info_metrics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -676,7 +676,7 @@ pub(crate) fn submit_gossip_stats(
.pull
.votes
.into_iter()
.chain(crds_stats.push.votes.into_iter())
.chain(crds_stats.push.votes)
.into_grouping_map()
.aggregate(|acc, _slot, num_votes| Some(acc.unwrap_or_default() + num_votes));
submit_vote_stats("cluster_info_crds_stats_votes", &votes);
Expand Down
4 changes: 1 addition & 3 deletions gossip/src/contact_info.rs
Original file line number Diff line number Diff line change
Expand Up @@ -527,9 +527,7 @@ fn sanitize_entries(addrs: &[IpAddr], sockets: &[SocketEntry]) -> Result<(), Err
// Verify that port offsets don't overflow.
if sockets
.iter()
.fold(Some(0u16), |offset, entry| {
offset?.checked_add(entry.offset)
})
.try_fold(0u16, |offset, entry| offset.checked_add(entry.offset))
.is_none()
{
return Err(Error::PortOffsetsOverflow);
Expand Down
4 changes: 2 additions & 2 deletions ledger-tool/src/program.rs
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,7 @@ impl ProgramSubCommand for App<'_, '_> {
.arg(
Arg::with_name("input")
.help(
r##"Input for the program to run on, where FILE is a name of a JSON file
r#"Input for the program to run on, where FILE is a name of a JSON file
with input data, or BYTES is the number of 0-valued bytes to allocate for program parameters"

The input data for a program execution have to be in JSON format
Expand All @@ -196,7 +196,7 @@ and the following fields are required
],
"instruction_data": [31, 32, 23, 24]
}
"##,
"#,
)
.short("i")
.long("input")
Expand Down
2 changes: 1 addition & 1 deletion local-cluster/src/cluster_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -516,7 +516,7 @@ pub fn start_gossip_voter(
let (labels, votes) = cluster_info.get_votes_with_labels(&mut cursor);
let mut parsed_vote_iter: Vec<_> = labels
.into_iter()
.zip(votes.into_iter())
.zip(votes)
.filter_map(&vote_filter)
.collect();

Expand Down
2 changes: 1 addition & 1 deletion merkle-tree/src/merkle_tree.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ impl<'a> Proof<'a> {
None
}
});
matches!(result, Some(_))
result.is_some()
}
}

Expand Down
5 changes: 1 addition & 4 deletions metrics/src/metrics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -527,10 +527,7 @@ pub mod test_mocks {
assert!(!points.is_empty());

let new_points = points.len();
self.points_written
.lock()
.unwrap()
.extend(points.into_iter());
self.points_written.lock().unwrap().extend(points);

info!(
"Writing {} points ({} total)",
Expand Down
2 changes: 1 addition & 1 deletion programs/bpf_loader/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -332,7 +332,7 @@ fn create_memory_mapping<'a, 'b, C: ContextObject>(
MemoryRegion::new_writable(heap.as_slice_mut(), MM_HEAP_START),
]
.into_iter()
.chain(additional_regions.into_iter())
.chain(additional_regions)
.collect();

Ok(if let Some(cow_cb) = cow_cb {
Expand Down
12 changes: 3 additions & 9 deletions programs/bpf_loader/src/syscalls/mem_ops.rs
Original file line number Diff line number Diff line change
Expand Up @@ -196,13 +196,7 @@ fn memmove_non_contiguous(
memory_mapping,
reverse,
|src_host_addr, dst_host_addr, chunk_len| {
unsafe {
std::ptr::copy(
src_host_addr as *const u8,
dst_host_addr as *mut u8,
chunk_len,
)
};
unsafe { std::ptr::copy(src_host_addr, dst_host_addr as *mut u8, chunk_len) };
Ok(0)
},
)
Expand Down Expand Up @@ -237,8 +231,8 @@ fn memcmp_non_contiguous(
false,
|s1_addr, s2_addr, chunk_len| {
let res = unsafe {
let s1 = slice::from_raw_parts(s1_addr as *const u8, chunk_len);
let s2 = slice::from_raw_parts(s2_addr as *const u8, chunk_len);
let s1 = slice::from_raw_parts(s1_addr, chunk_len);
let s2 = slice::from_raw_parts(s2_addr, chunk_len);
// Safety:
// memcmp is marked unsafe since it assumes that s1 and s2 are exactly chunk_len
// long. The whole point of iter_memory_pair_chunks is to find same length chunks
Expand Down
6 changes: 3 additions & 3 deletions programs/bpf_loader/src/syscalls/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1738,23 +1738,23 @@ declare_syscall!(

let base = translate_slice::<u8>(
memory_mapping,
params.base as *const _ as *const u8 as u64,
params.base as *const _ as u64,
params.base_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;

let exponent = translate_slice::<u8>(
memory_mapping,
params.exponent as *const _ as *const u8 as u64,
params.exponent as *const _ as u64,
params.exponent_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
)?;

let modulus = translate_slice::<u8>(
memory_mapping,
params.modulus as *const _ as *const u8 as u64,
params.modulus as *const _ as u64,
params.modulus_len,
invoke_context.get_check_aligned(),
invoke_context.get_check_size(),
Expand Down
4 changes: 2 additions & 2 deletions programs/stake/src/config.rs
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
//! config for staking
//! carries variables that the stake program cares about
#[allow(deprecated)]
use solana_sdk::stake::config;
#[deprecated(
since = "1.8.0",
note = "Please use `solana_sdk::stake::config` or `solana_program::stake::config` instead"
)]
pub use solana_sdk::stake::config::*;
#[allow(deprecated)]
use solana_sdk::stake::config::{self, Config};
use {
bincode::deserialize,
solana_config_program::{create_config_account, get_config_data},
Expand Down
2 changes: 1 addition & 1 deletion programs/vote/src/vote_state/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use {
log::*,
serde_derive::{Deserialize, Serialize},
solana_metrics::datapoint_debug,
solana_program::vote::{error::VoteError, program::id, state::serde_compact_vote_state_update},
solana_program::vote::{error::VoteError, program::id},
solana_sdk::{
account::{AccountSharedData, ReadableAccount, WritableAccount},
clock::{Epoch, Slot, UnixTimestamp},
Expand Down
2 changes: 1 addition & 1 deletion runtime/src/serde_snapshot.rs
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@ impl<T> SnapshotAccountsDbFields<T> {
})?;

let mut combined_storages = full_snapshot_storages;
combined_storages.extend(incremental_snapshot_storages.into_iter());
combined_storages.extend(incremental_snapshot_storages);

Ok(AccountsDbFields(
combined_storages,
Expand Down
2 changes: 1 addition & 1 deletion runtime/src/snapshot_bank_utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -300,7 +300,7 @@ pub fn bank_from_snapshot_archives(
if let Some(ref mut unarchive_preparation_result) = unarchived_incremental_snapshot {
let incremental_snapshot_storages =
std::mem::take(&mut unarchive_preparation_result.storage);
storage.extend(incremental_snapshot_storages.into_iter());
storage.extend(incremental_snapshot_storages);
}

let storage_and_next_append_vec_id = StorageAndNextAppendVecId {
Expand Down
2 changes: 1 addition & 1 deletion stake-accounts/src/stake_accounts.rs
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ fn move_stake_account(
new_withdraw_authority_pubkey,
);

instructions.extend(authorize_instructions.into_iter());
instructions.extend(authorize_instructions);
let message = Message::new(&instructions, Some(fee_payer_pubkey));
Some(message)
}
Expand Down
38 changes: 17 additions & 21 deletions storage-bigtable/src/compression.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,28 +48,24 @@ pub fn decompress(data: &[u8]) -> Result<Vec<u8>, io::Error> {

pub fn compress(method: CompressionMethod, data: &[u8]) -> Result<Vec<u8>, io::Error> {
let mut compressed_data = bincode::serialize(&method).unwrap();
compressed_data.extend(
match method {
CompressionMethod::Bzip2 => {
let mut e = bzip2::write::BzEncoder::new(Vec::new(), bzip2::Compression::best());
e.write_all(data)?;
e.finish()?
}
CompressionMethod::Gzip => {
let mut e =
flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default());
e.write_all(data)?;
e.finish()?
}
CompressionMethod::Zstd => {
let mut e = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
e.write_all(data)?;
e.finish()?
}
CompressionMethod::NoCompression => data.to_vec(),
compressed_data.extend(match method {
CompressionMethod::Bzip2 => {
let mut e = bzip2::write::BzEncoder::new(Vec::new(), bzip2::Compression::best());
e.write_all(data)?;
e.finish()?
}
.into_iter(),
);
CompressionMethod::Gzip => {
let mut e = flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default());
e.write_all(data)?;
e.finish()?
}
CompressionMethod::Zstd => {
let mut e = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap();
e.write_all(data)?;
e.finish()?
}
CompressionMethod::NoCompression => data.to_vec(),
});

Ok(compressed_data)
}
Expand Down
2 changes: 1 addition & 1 deletion storage-proto/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ fn main() -> Result<(), std::io::Error> {
let mut protos = Vec::new();
for proto_file in &proto_files {
let proto = proto_base_path.join(proto_file);
println!("cargo::rerun-if-changed={}", proto.display());
println!("cargo:rerun-if-changed={}", proto.display());
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nice!

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Well credit on this one goes to @t-nelson: t-nelson@4e17f71

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

all credits king clippy!

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

oh i guess this'll fix broken docs build in solana-core 🚀

https://docs.rs/crate/solana-core/1.16.8/builds/893154

protos.push(proto);
}

Expand Down
2 changes: 1 addition & 1 deletion validator/src/admin_rpc_service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -485,7 +485,7 @@ impl AdminRpc for AdminRpcImpl {
.staked_map_id;
let mut write_staked_nodes = meta.staked_nodes_overrides.write().unwrap();
write_staked_nodes.clear();
write_staked_nodes.extend(loaded_config.into_iter());
write_staked_nodes.extend(loaded_config);
info!("Staked nodes overrides loaded from {}", path);
debug!("overrides map: {:?}", write_staked_nodes);
Ok(())
Expand Down
2 changes: 1 addition & 1 deletion validator/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1450,7 +1450,7 @@ pub fn main() {
if let Some(account_shrink_snapshot_paths) = account_shrink_snapshot_paths {
account_snapshot_paths
.into_iter()
.chain(account_shrink_snapshot_paths.into_iter())
.chain(account_shrink_snapshot_paths)
.collect()
} else {
account_snapshot_paths
Expand Down