This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Fix some clippy errors
mrcnski committed Jan 29, 2023
1 parent 3ae0f24 · commit d2e6081
Showing 22 changed files with 66 additions and 78 deletions.
4 changes: 2 additions & 2 deletions node/core/approval-voting/src/criteria.rs
@@ -274,7 +274,7 @@ pub(crate) fn compute_assignments(
// Ignore any cores where the assigned group is our own.
let leaving_cores = leaving_cores
.into_iter()
- .filter(|&(_, _, ref g)| !is_in_backing_group(&config.validator_groups, index, *g))
+ .filter(|(_, _, g)| !is_in_backing_group(&config.validator_groups, index, *g))
.map(|(c_hash, core, _)| (c_hash, core))
.collect::<Vec<_>>();

@@ -496,7 +496,7 @@ pub(crate) fn check_assignment_cert(
return Err(InvalidAssignment(Reason::IsInBackingGroup))
}

- let &(ref vrf_output, ref vrf_proof) = &assignment.vrf;
+ let (vrf_output, vrf_proof) = &assignment.vrf;
match assignment.kind {
AssignmentCertKind::RelayVRFModulo { sample } => {
if sample >= config.relay_vrf_modulo_samples {
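
For context (not part of the commit): both hunks above fix the same clippy pattern, which appears to be needless_borrowed_reference -- a reference is destructured with `&(..., ref x)` only to re-borrow each field, when a plain pattern already binds the fields by reference via match ergonomics. A minimal standalone sketch with illustrative names:

    fn main() {
        let pairs = vec![(1u32, "a"), (2, "b")];

        // Old style: explicitly peel the `&(u32, &str)` reference apart and
        // re-borrow the second field with `ref` -- clippy flags this as redundant.
        let old: Vec<&str> = pairs.iter().map(|&(_, ref s)| *s).collect();

        // New style, as in the hunks above: a plain tuple pattern binds the
        // fields by reference (match ergonomics), no `&`/`ref` needed.
        let new: Vec<&str> = pairs.iter().map(|(_, s)| *s).collect();

        assert_eq!(old, new);
    }
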
2 changes: 1 addition & 1 deletion node/core/approval-voting/src/lib.rs
@@ -486,7 +486,7 @@ impl Wakeups {
.collect();

let mut pruned_wakeups = BTreeMap::new();
- self.reverse_wakeups.retain(|&(ref h, ref c_h), tick| {
+ self.reverse_wakeups.retain(|(h, c_h), tick| {
let live = !pruned_blocks.contains(h);
if !live {
pruned_wakeups.entry(*tick).or_insert_with(HashSet::new).insert((*h, *c_h));
4 changes: 2 additions & 2 deletions node/core/approval-voting/src/ops.rs
@@ -62,7 +62,7 @@ fn visit_and_remove_block_entry(
};

overlayed_db.delete_block_entry(&block_hash);
- for &(_, ref candidate_hash) in block_entry.candidates() {
+ for (_, candidate_hash) in block_entry.candidates() {
let candidate = match visited_candidates.entry(*candidate_hash) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => {
@@ -227,7 +227,7 @@ pub fn add_block_entry(

// read and write all updated entries.
{
- for &(_, ref candidate_hash) in entry.candidates() {
+ for (_, candidate_hash) in entry.candidates() {
let NewCandidateInfo { candidate, backing_group, our_assignment } =
match candidate_info(candidate_hash) {
None => return Ok(Vec::new()),
2 changes: 1 addition & 1 deletion node/core/bitfield-signing/src/lib.rs
@@ -83,7 +83,7 @@ async fn get_core_availability(
sender: &Mutex<&mut impl SubsystemSender<overseer::BitfieldSigningOutgoingMessages>>,
span: &jaeger::Span,
) -> Result<bool, Error> {
- if let &CoreState::Occupied(ref core) = core {
+ if let CoreState::Occupied(core) = core {
let _span = span.child("query-chunk-availability");

let (tx, rx) = oneshot::channel();
2 changes: 1 addition & 1 deletion node/core/dispute-coordinator/src/import.rs
@@ -97,7 +97,7 @@ pub enum OwnVoteState {
}

impl OwnVoteState {
- fn new<'a>(votes: &CandidateVotes, env: &CandidateEnvironment<'a>) -> Self {
+ fn new(votes: &CandidateVotes, env: &CandidateEnvironment) -> Self {
let controlled_indices = env.controlled_indices();
if controlled_indices.is_empty() {
return Self::CannotVote
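
For context (not part of the commit): dropping the `<'a>` here is the usual fix for clippy's needless_lifetimes warning -- when a lifetime parameter appears only in positions the compiler can elide, naming it adds noise. A small sketch with made-up types:

    struct Votes(u32);

    struct Environment<'a> {
        threshold: &'a u32,
    }

    // Before (flagged by clippy): `'a` is only used where elision would supply it.
    // fn enough_votes<'a>(votes: &Votes, env: &Environment<'a>) -> bool { ... }

    // After, mirroring the `OwnVoteState::new` change above: let elision handle it.
    fn enough_votes(votes: &Votes, env: &Environment) -> bool {
        votes.0 >= *env.threshold
    }

    fn main() {
        let threshold = 2;
        let env = Environment { threshold: &threshold };
        assert!(enough_votes(&Votes(3), &env));
    }
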
14 changes: 5 additions & 9 deletions node/core/prospective-parachains/src/fragment_tree.rs
@@ -468,7 +468,7 @@ impl FragmentTree {

/// Returns an O(n) iterator over the hashes of candidates contained in the
/// tree.
- pub(crate) fn candidates<'a>(&'a self) -> impl Iterator<Item = CandidateHash> + 'a {
+ pub(crate) fn candidates(&self) -> impl Iterator<Item = CandidateHash> + '_ {
self.candidates.keys().cloned()
}

@@ -514,10 +514,10 @@ impl FragmentTree {
///
/// If the candidate is already known, this returns the actual depths where this
/// candidate is part of the tree.
- pub(crate) fn hypothetical_depths<'a>(
+ pub(crate) fn hypothetical_depths(
&self,
hash: CandidateHash,
- candidate: HypotheticalCandidate<'a>,
+ candidate: HypotheticalCandidate,
) -> Vec<usize> {
// if known.
if let Some(depths) = self.candidates.get(&hash) {
@@ -656,11 +656,7 @@ impl FragmentTree {
}
}

- fn populate_from_bases<'a>(
- &mut self,
- storage: &'a CandidateStorage,
- initial_bases: Vec<NodePointer>,
- ) {
+ fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec<NodePointer>) {
// Populate the tree breadth-first.
let mut last_sweep_start = None;

@@ -766,7 +762,7 @@ impl FragmentTree {
let node = FragmentNode {
parent: parent_pointer,
fragment,
- candidate_hash: candidate.candidate_hash.clone(),
+ candidate_hash: candidate.candidate_hash,
depth: child_depth,
cumulative_modifications,
children: Vec::new(),
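
For context (not part of the commit): the `candidates` change above is the same lifetime-elision cleanup applied to a returned iterator -- the anonymous `'_` lifetime ties the `impl Iterator` to the `&self` borrow without naming a lifetime parameter. An illustrative sketch (the types are invented, not the real FragmentTree):

    use std::collections::HashMap;

    struct Tree {
        candidates: HashMap<u64, Vec<usize>>,
    }

    impl Tree {
        // Before: fn candidates<'a>(&'a self) -> impl Iterator<Item = u64> + 'a
        // After: `'_` borrows from `&self` without an explicit lifetime parameter.
        fn candidates(&self) -> impl Iterator<Item = u64> + '_ {
            self.candidates.keys().cloned()
        }
    }

    fn main() {
        let mut tree = Tree { candidates: HashMap::new() };
        tree.candidates.insert(7, vec![0]);
        assert_eq!(tree.candidates().collect::<Vec<_>>(), vec![7]);
    }
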
4 changes: 2 additions & 2 deletions node/core/prospective-parachains/src/lib.rs
@@ -470,8 +470,8 @@ fn answer_hypothetical_frontier_request(
} => (
*candidate_hash,
fragment_tree::HypotheticalCandidate::Complete {
- receipt: Cow::Borrowed(&*receipt),
- persisted_validation_data: Cow::Borrowed(&*persisted_validation_data),
+ receipt: Cow::Borrowed(receipt),
+ persisted_validation_data: Cow::Borrowed(persisted_validation_data),
},
),
HypotheticalCandidate::Incomplete {
2 changes: 1 addition & 1 deletion node/network/approval-distribution/src/lib.rs
@@ -1256,7 +1256,7 @@ impl State {
.iter()
.filter_map(|(p, k)| peer_data.get(&p).map(|pd| (p, k, pd.version)))
.filter(|(p, k, _)| peer_filter(p, k))
- .map(|(p, _, v)| (p.clone(), v))
+ .map(|(p, _, v)| (*p, v))
.collect::<Vec<_>>();

// Add the metadata of the assignment to the knowledge of each peer.
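
For context (not part of the commit): replacing `p.clone()` with `*p` looks like clippy's clone_on_copy fix -- the change only compiles because `PeerId` is a `Copy` type in this codebase, so dereferencing the shared reference is the idiomatic way to get an owned value. A standalone sketch with a stand-in `PeerId`:

    // Stand-in for the real peer-id type; the only property that matters here
    // is that it implements `Copy`.
    #[derive(Clone, Copy, PartialEq, Debug)]
    struct PeerId([u8; 4]);

    fn main() {
        let peers = vec![(PeerId([1; 4]), 1u32), (PeerId([9; 4]), 2)];

        // Before: `.clone()` on a `Copy` value -- clippy's clone_on_copy
        // suggests copying it out with `*` instead.
        let old: Vec<(PeerId, u32)> = peers.iter().map(|(p, v)| (p.clone(), *v)).collect();

        // After, as in the hunk above.
        let new: Vec<(PeerId, u32)> = peers.iter().map(|(p, v)| (*p, *v)).collect();

        assert_eq!(old, new);
    }
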
10 changes: 3 additions & 7 deletions node/network/bridge/src/rx/mod.rs
@@ -772,22 +772,18 @@ fn update_our_view<Net, Context>(
shared
.validation_peers
.iter()
- .map(|(peer_id, data)| (peer_id.clone(), data.version))
+ .map(|(peer_id, data)| (*peer_id, data.version))
.collect::<Vec<_>>(),
shared
.collation_peers
.iter()
- .map(|(peer_id, data)| (peer_id.clone(), data.version))
+ .map(|(peer_id, data)| (*peer_id, data.version))
.collect::<Vec<_>>(),
)
};

let filter_by_version = |peers: &[(PeerId, ProtocolVersion)], version| {
- peers
- .iter()
- .filter(|(_, v)| v == &version)
- .map(|(p, _)| p.clone())
- .collect::<Vec<_>>()
+ peers.iter().filter(|(_, v)| v == &version).map(|(p, _)| *p).collect::<Vec<_>>()
};

let v1_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V1.into());
4 changes: 2 additions & 2 deletions node/network/collator-protocol/src/collator_side/mod.rs
@@ -713,7 +713,7 @@ async fn advertise_collation<Context>(
};

ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage(
- vec![peer.clone()],
+ vec![*peer],
collation_message,
))
.await;
@@ -1180,7 +1180,7 @@
{
let current_leaves = state.active_leaves.clone();

- let removed = current_leaves.iter().filter(|(h, _)| !view.contains(*h));
+ let removed = current_leaves.iter().filter(|(h, _)| !view.contains(h));
let added = view.iter().filter(|h| !current_leaves.contains_key(h));

for leaf in added {
(file name for the next change set was not captured in this view)
@@ -105,7 +105,7 @@ impl PendingCollation {
Self {
relay_parent,
para_id,
- peer_id: peer_id.clone(),
+ peer_id: *peer_id,
prospective_candidate,
commitments_hash: None,
}
12 changes: 6 additions & 6 deletions node/network/collator-protocol/src/validator_side/mod.rs
@@ -287,7 +287,7 @@ impl PeerData {
}
let candidates = state.advertisements.entry(on_relay_parent).or_default();

- if candidates.len() >= max_candidate_depth + 1 {
+ if candidates.len() > max_candidate_depth {
return Err(InsertAdvertisementError::PeerLimitReached)
}
candidates.insert(candidate_hash);
@@ -867,7 +867,7 @@ async fn process_incoming_peer_message<Context>(
);

if let Some(rep) = err.reputation_changes() {
- modify_reputation(ctx.sender(), origin.clone(), rep).await;
+ modify_reputation(ctx.sender(), origin, rep).await;
}
},
Versioned::VStaging(VStaging::AdvertiseCollation {
@@ -894,7 +894,7 @@ async fn process_incoming_peer_message<Context>(
);

if let Some(rep) = err.reputation_changes() {
- modify_reputation(ctx.sender(), origin.clone(), rep).await;
+ modify_reputation(ctx.sender(), origin, rep).await;
}
},
Versioned::V1(V1::CollationSeconded(..)) |
@@ -1241,7 +1241,7 @@
{
let current_leaves = state.active_leaves.clone();

- let removed = current_leaves.iter().filter(|(h, _)| !view.contains(*h));
+ let removed = current_leaves.iter().filter(|(h, _)| !view.contains(h));
let added = view.iter().filter(|h| !current_leaves.contains_key(h));

for leaf in added {
@@ -1352,7 +1352,7 @@
?para_id,
"Disconnecting peer on view change (not current parachain id)"
);
- disconnect_peer(sender, peer_id.clone()).await;
+ disconnect_peer(sender, *peer_id).await;
}
}
}
@@ -1645,7 +1645,7 @@ async fn poll_requests(
.await;

if !result.is_ready() {
- retained_requested.insert(pending_collation.clone());
+ retained_requested.insert(*pending_collation);
}
if let CollationFetchResult::Error(Some(rep)) = result {
reputation_changes.push((pending_collation.peer_id, rep));
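
For context (not part of the commit): the advertisement-limit change at the top of this file's diff is the classic int_plus_one rewrite -- for unsigned integers, `len >= max + 1` and `len > max` accept exactly the same values, and the latter drops a `+ 1` that could overflow if `max` were usize::MAX. A tiny sketch:

    fn over_limit(len: usize, max_candidate_depth: usize) -> bool {
        // Equivalent to `len >= max_candidate_depth + 1`, minus the needless `+ 1`.
        len > max_candidate_depth
    }

    fn main() {
        assert!(!over_limit(3, 3)); // at the limit: still allowed
        assert!(over_limit(4, 3)); // one past the limit: rejected
    }
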
44 changes: 23 additions & 21 deletions node/network/statement-distribution/src/legacy_v1/mod.rs
@@ -265,10 +265,10 @@ impl PeerRelayParentKnowledge {

let new_known = match fingerprint.0 {
CompactStatement::Seconded(ref h) => {
- self.seconded_counts.entry(fingerprint.1).or_default().note_local(h.clone());
+ self.seconded_counts.entry(fingerprint.1).or_default().note_local(*h);

let was_known = self.is_known_candidate(h);
- self.sent_candidates.insert(h.clone());
+ self.sent_candidates.insert(*h);
!was_known
},
CompactStatement::Valid(_) => false,
@@ -332,7 +332,7 @@ impl PeerRelayParentKnowledge {
.seconded_counts
.entry(fingerprint.1)
.or_insert_with(Default::default)
- .note_remote(h.clone());
+ .note_remote(*h);

if !allowed_remote {
return Err(COST_UNEXPECTED_STATEMENT_REMOTE)
@@ -361,7 +361,7 @@ impl PeerRelayParentKnowledge {
}

self.received_statements.insert(fingerprint.clone());
- self.received_candidates.insert(candidate_hash.clone());
+ self.received_candidates.insert(*candidate_hash);
Ok(fresh)
}

@@ -1016,13 +1016,15 @@ async fn circulate_statement<'a, Context>(

let mut peers_to_send: Vec<PeerId> = peers
.iter()
- .filter_map(|(peer, data)| {
- if data.can_send(&relay_parent, &fingerprint) {
- Some(peer.clone())
- } else {
- None
- }
- })
+ .filter_map(
+ |(peer, data)| {
+ if data.can_send(&relay_parent, &fingerprint) {
+ Some(*peer)
+ } else {
+ None
+ }
+ },
+ )
.collect();

let good_peers: HashSet<&PeerId> = peers_to_send.iter().collect();
@@ -1060,13 +1062,13 @@ async fn circulate_statement<'a, Context>(

let (v1_peers_to_send, vstaging_peers_to_send) = peers_to_send
.into_iter()
- .filter_map(|peer_id| {
+ .map(|peer_id| {
let peer_data =
peers.get_mut(&peer_id).expect("a subset is taken above, so it exists; qed");

let new = peer_data.send(&relay_parent, &fingerprint);

- Some((peer_id, new, peer_data.protocol_version))
+ (peer_id, new, peer_data.protocol_version)
})
.partition::<Vec<_>, _>(|(_, _, version)| match version {
ValidationVersion::V1 => true,
@@ -1085,7 +1087,7 @@
"Sending statement to v1 peers",
);
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
- v1_peers_to_send.iter().map(|(p, _, _)| p.clone()).collect(),
+ v1_peers_to_send.iter().map(|(p, _, _)| *p).collect(),
compatible_v1_message(ValidationVersion::V1, payload.clone()).into(),
))
.await;
@@ -1099,7 +1101,7 @@
"Sending statement to vstaging peers",
);
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
- vstaging_peers_to_send.iter().map(|(p, _, _)| p.clone()).collect(),
+ vstaging_peers_to_send.iter().map(|(p, _, _)| *p).collect(),
compatible_v1_message(ValidationVersion::VStaging, payload.clone()).into(),
))
.await;
@@ -1140,7 +1142,7 @@ async fn send_statements_about<Context>(
"Sending statement",
);
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
- vec![peer.clone()],
+ vec![peer],
compatible_v1_message(peer_data.protocol_version, payload).into(),
))
.await;
@@ -1175,7 +1177,7 @@ async fn send_statements<Context>(
"Sending statement"
);
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
- vec![peer.clone()],
+ vec![peer],
compatible_v1_message(peer_data.protocol_version, payload).into(),
))
.await;
@@ -1467,7 +1469,7 @@ async fn handle_incoming_message<'a, Context>(
}

let fingerprint = message.get_fingerprint();
- let candidate_hash = fingerprint.0.candidate_hash().clone();
+ let candidate_hash = *fingerprint.0.candidate_hash();
let handle_incoming_span = active_head
.span
.child("handle-incoming")
@@ -1594,7 +1596,7 @@ async fn handle_incoming_message<'a, Context>(
// Send the peer all statements concerning the candidate that we have,
// since it appears to have just learned about the candidate.
send_statements_about(
- peer.clone(),
+ peer,
peer_data,
ctx,
relay_parent,
@@ -1704,7 +1706,7 @@ async fn update_peer_view_and_maybe_send_unlocked<Context, R>(
continue
}
if let Some(active_head) = active_heads.get(&new) {
- send_statements(peer.clone(), peer_data, ctx, new, active_head, metrics).await;
+ send_statements(peer, peer_data, ctx, new, active_head, metrics).await;
}
}
}
@@ -1805,7 +1807,7 @@ pub(crate) async fn handle_network_update<Context, R>(
topology_storage,
peers,
active_heads,
- &*recent_outdated_heads,
+ recent_outdated_heads,
ctx,
message,
req_sender,
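
For context (not part of the commit): the `filter_map` to `map` change in `circulate_statement` above is clippy's unnecessary_filter_map fix -- a closure that always returns `Some(..)` never filters anything, so it is just a `map`. A standalone sketch:

    fn main() {
        let ids = vec![1u32, 2, 3];

        // Before: a `filter_map` whose closure can never yield `None`.
        let old: Vec<(u32, bool)> =
            ids.iter().filter_map(|&n| Some((n, n % 2 == 0))).collect();

        // After, as in the hunk above: the same thing, stated as a plain `map`.
        let new: Vec<(u32, bool)> = ids.iter().map(|&n| (n, n % 2 == 0)).collect();

        assert_eq!(old, new);
    }
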
2 changes: 1 addition & 1 deletion node/network/statement-distribution/src/lib.rs
@@ -245,7 +245,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
// pass to legacy, but not if the message isn't
// v1.
let legacy = match &event {
- &NetworkBridgeEvent::PeerMessage(_, ref message) => match message {
+ NetworkBridgeEvent::PeerMessage(_, message) => match message {
Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(_)) => true,
Versioned::V1(_) => true,
// TODO [now]: _ => false,
(Diffs for the remaining changed files were not loaded in this view.)
