Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: use IntoIterator more #6998

Merged
merged 5 commits into from
Mar 6, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion crates/rpc/rpc/src/eth/api/fee_history.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ impl FeeHistoryCache {
/// Insert block data into the cache.
async fn insert_blocks<I>(&self, blocks: I)
where
I: Iterator<Item = (SealedBlock, Arc<Vec<Receipt>>)>,
I: IntoIterator<Item = (SealedBlock, Arc<Vec<Receipt>>)>,
{
let mut entries = self.inner.entries.write().await;

Expand Down
8 changes: 4 additions & 4 deletions crates/stages/src/test_utils/test_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -152,7 +152,7 @@ impl TestStageDB {

fn insert_headers_inner<'a, I, const TD: bool>(&self, headers: I) -> ProviderResult<()>
where
I: Iterator<Item = &'a SealedHeader>,
I: IntoIterator<Item = &'a SealedHeader>,
{
let provider = self.factory.static_file_provider();
let mut writer = provider.latest_writer(reth_primitives::StaticFileSegment::Headers)?;
Expand All @@ -176,7 +176,7 @@ impl TestStageDB {
/// that are supposed to be populated by the headers stage.
pub fn insert_headers<'a, I>(&self, headers: I) -> ProviderResult<()>
where
I: Iterator<Item = &'a SealedHeader>,
I: IntoIterator<Item = &'a SealedHeader>,
{
self.insert_headers_inner::<I, false>(headers)
}
Expand All @@ -186,7 +186,7 @@ impl TestStageDB {
/// Superset functionality of [TestStageDB::insert_headers].
pub fn insert_headers_with_td<'a, I>(&self, headers: I) -> ProviderResult<()>
where
I: Iterator<Item = &'a SealedHeader>,
I: IntoIterator<Item = &'a SealedHeader>,
{
self.insert_headers_inner::<I, true>(headers)
}
Expand All @@ -200,7 +200,7 @@ impl TestStageDB {
/// Assumes that there's a single transition for each transaction (i.e. no block rewards).
pub fn insert_blocks<'a, I>(&self, blocks: I, storage_kind: StorageKind) -> ProviderResult<()>
where
I: Iterator<Item = &'a SealedBlock>,
I: IntoIterator<Item = &'a SealedBlock>,
{
let provider = self.factory.static_file_provider();

Expand Down
2 changes: 1 addition & 1 deletion crates/storage/db/src/static_file/generation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ macro_rules! generate_static_file_func {
/// * `tx`: Database transaction.
/// * `range`: Data range for columns in tables.
/// * `additional`: Additional columns which can't be straightforwardly walked on.
/// * `keys`: Iterator of keys (e.g. `TxHash` or `BlockHash`) with length equal to `row_count` and ordered by future column insertion from `range`.
/// * `keys`: IntoIterator of keys (e.g. `TxHash` or `BlockHash`) with length equal to `row_count` and ordered by future column insertion from `range`.
/// * `dict_compression_set`: Sets of column data for compression dictionaries. Max size is 2GB. Row count is independent.
/// * `row_count`: Total rows to add to `NippyJar`. Must match row count in `range`.
/// * `nippy_jar`: Static File object responsible for file generation.
Expand Down
3 changes: 2 additions & 1 deletion crates/transaction-pool/src/maintain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -525,10 +525,11 @@ fn load_accounts<Client, I>(
addresses: I,
) -> Result<LoadedAccounts, Box<(HashSet<Address>, ProviderError)>>
where
I: Iterator<Item = Address>,
I: IntoIterator<Item = Address>,

Client: StateProviderFactory,
{
let addresses = addresses.into_iter();
let mut res = LoadedAccounts::default();
let state = match client.history_by_block_hash(at) {
Ok(state) => state,
Expand Down
33 changes: 15 additions & 18 deletions crates/trie/src/test_utils.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use alloy_rlp::{encode_fixed_size, Encodable};
use alloy_rlp::encode_fixed_size;
use reth_primitives::{
proofs::triehash::KeccakHasher, trie::TrieAccount, Account, Address, B256, U256,
};
Expand All @@ -9,44 +9,41 @@ pub use triehash;
/// Compute the state root of a given set of accounts using [triehash::sec_trie_root].
pub fn state_root<I, S>(accounts: I) -> B256
where
I: Iterator<Item = (Address, (Account, S))>,
I: IntoIterator<Item = (Address, (Account, S))>,
S: IntoIterator<Item = (B256, U256)>,
{
let encoded_accounts = accounts.map(|(address, (account, storage))| {
let storage_root = storage_root(storage.into_iter());
let mut out = Vec::new();
TrieAccount::from((account, storage_root)).encode(&mut out);
(address, out)
let encoded_accounts = accounts.into_iter().map(|(address, (account, storage))| {
let storage_root = storage_root(storage);
let account = TrieAccount::from((account, storage_root));
(address, alloy_rlp::encode(account))
});

triehash::sec_trie_root::<KeccakHasher, _, _, _>(encoded_accounts)
}

/// Compute the storage root for a given account using [triehash::sec_trie_root].
pub fn storage_root<I: Iterator<Item = (B256, U256)>>(storage: I) -> B256 {
let encoded_storage = storage.map(|(k, v)| (k, encode_fixed_size(&v).to_vec()));
pub fn storage_root<I: IntoIterator<Item = (B256, U256)>>(storage: I) -> B256 {
let encoded_storage = storage.into_iter().map(|(k, v)| (k, encode_fixed_size(&v)));
triehash::sec_trie_root::<KeccakHasher, _, _, _>(encoded_storage)
}

/// Compute the state root of a given set of accounts with prehashed keys using
/// [triehash::trie_root].
pub fn state_root_prehashed<I, S>(accounts: I) -> B256
where
I: Iterator<Item = (B256, (Account, S))>,
I: IntoIterator<Item = (B256, (Account, S))>,
S: IntoIterator<Item = (B256, U256)>,
{
let encoded_accounts = accounts.map(|(address, (account, storage))| {
let storage_root = storage_root_prehashed(storage.into_iter());
let mut out = Vec::new();
TrieAccount::from((account, storage_root)).encode(&mut out);
(address, out)
let encoded_accounts = accounts.into_iter().map(|(address, (account, storage))| {
let storage_root = storage_root_prehashed(storage);
let account = TrieAccount::from((account, storage_root));
(address, alloy_rlp::encode(account))
});

triehash::trie_root::<KeccakHasher, _, _, _>(encoded_accounts)
}

/// Compute the storage root for a given account with prehashed slots using [triehash::trie_root].
pub fn storage_root_prehashed<I: Iterator<Item = (B256, U256)>>(storage: I) -> B256 {
let encoded_storage = storage.map(|(k, v)| (k, encode_fixed_size(&v).to_vec()));
pub fn storage_root_prehashed<I: IntoIterator<Item = (B256, U256)>>(storage: I) -> B256 {
let encoded_storage = storage.into_iter().map(|(k, v)| (k, encode_fixed_size(&v)));
triehash::trie_root::<KeccakHasher, _, _, _>(encoded_storage)
}
4 changes: 2 additions & 2 deletions crates/trie/src/trie.rs
Original file line number Diff line number Diff line change
Expand Up @@ -824,7 +824,7 @@ mod tests {
insert_account(tx.tx_ref(), *address, *account, storage)
}
tx.commit().unwrap();
let expected = state_root(state.into_iter());
let expected = state_root(state);

let tx = factory.provider_rw().unwrap();
let got = StateRoot::from_tx(tx.tx_ref()).root().unwrap();
Expand Down Expand Up @@ -867,7 +867,7 @@ mod tests {
let tx = factory.provider_rw().unwrap();

let account3_storage_root = StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap();
let expected_root = storage_root_prehashed(storage.into_iter());
let expected_root = storage_root_prehashed(storage);
assert_eq!(expected_root, account3_storage_root);
}

Expand Down
Loading