new layout: fix clippy lints

This commit is contained in:
Alex Auvolat 2023-09-18 12:17:07 +02:00
parent 749b4865d0
commit 0088599f52
3 changed files with 17 additions and 17 deletions

View File

@@ -189,7 +189,7 @@ impl Graph<FlowEdge> {
let mut fifo = VecDeque::new(); let mut fifo = VecDeque::new();
fifo.push_back((idsource, 0)); fifo.push_back((idsource, 0));
while let Some((id, lvl)) = fifo.pop_front() { while let Some((id, lvl)) = fifo.pop_front() {
if level[id] == None { if level[id].is_none() {
// it means id has not yet been reached // it means id has not yet been reached
level[id] = Some(lvl); level[id] = Some(lvl);
for edge in self.graph[id].iter() { for edge in self.graph[id].iter() {
@@ -199,7 +199,7 @@ impl Graph<FlowEdge> {
} }
} }
} }
if level[idsink] == None { if level[idsink].is_none() {
// There is no residual flow // There is no residual flow
break; break;
} }
@@ -383,7 +383,7 @@ fn cycles_of_1_forest(forest: &[Option<usize>]) -> Vec<Vec<usize>> {
for t in 0..forest.len() { for t in 0..forest.len() {
let mut id = t; let mut id = t;
// while we are on a valid undiscovered node // while we are on a valid undiscovered node
while time_of_discovery[id] == None { while time_of_discovery[id].is_none() {
time_of_discovery[id] = Some(t); time_of_discovery[id] = Some(t);
if let Some(i) = forest[id] { if let Some(i) = forest[id] {
id = i; id = i;
@@ -391,7 +391,7 @@ fn cycles_of_1_forest(forest: &[Option<usize>]) -> Vec<Vec<usize>> {
break; break;
} }
} }
if forest[id] != None && time_of_discovery[id] == Some(t) { if forest[id].is_some() && time_of_discovery[id] == Some(t) {
// We discovered an id that we explored at this iteration t. // We discovered an id that we explored at this iteration t.
// It means we are on a cycle // It means we are on a cycle
let mut cy = vec![id; 1]; let mut cy = vec![id; 1];

View File

@@ -195,7 +195,7 @@ mod v09 {
.. ..
})) = role })) = role
{ {
*cap = *cap * mul; *cap *= mul;
} }
new_roles.merge_raw(node, *ts, &role); new_roles.merge_raw(node, *ts, &role);
} }
@@ -258,7 +258,7 @@ impl ClusterLayout {
let parameters = LayoutParameters { let parameters = LayoutParameters {
zone_redundancy: ZoneRedundancy::Maximum, zone_redundancy: ZoneRedundancy::Maximum,
}; };
let staging_parameters = Lww::<LayoutParameters>::new(parameters.clone()); let staging_parameters = Lww::<LayoutParameters>::new(parameters);
let empty_lwwmap = LwwMap::new(); let empty_lwwmap = LwwMap::new();
@@ -322,7 +322,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
self.roles.merge(&self.staging_roles); self.roles.merge(&self.staging_roles);
self.roles.retain(|(_, _, v)| v.0.is_some()); self.roles.retain(|(_, _, v)| v.0.is_some());
self.parameters = self.staging_parameters.get().clone(); self.parameters = *self.staging_parameters.get();
self.staging_roles.clear(); self.staging_roles.clear();
self.staging_hash = self.calculate_staging_hash(); self.staging_hash = self.calculate_staging_hash();
@@ -351,7 +351,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
} }
self.staging_roles.clear(); self.staging_roles.clear();
self.staging_parameters.update(self.parameters.clone()); self.staging_parameters.update(self.parameters);
self.staging_hash = self.calculate_staging_hash(); self.staging_hash = self.calculate_staging_hash();
self.version += 1; self.version += 1;
@@ -382,7 +382,7 @@ To know the correct value of the new layout version, invoke `garage layout show`
let mut result = Vec::<Uuid>::new(); let mut result = Vec::<Uuid>::new();
for uuid in self.node_id_vec.iter() { for uuid in self.node_id_vec.iter() {
match self.node_role(uuid) { match self.node_role(uuid) {
Some(role) if role.capacity != None => result.push(*uuid), Some(role) if role.capacity.is_some() => result.push(*uuid),
_ => (), _ => (),
} }
} }
@@ -633,7 +633,7 @@ impl ClusterLayout {
let partition_size = self.compute_optimal_partition_size(&zone_to_id, zone_redundancy)?; let partition_size = self.compute_optimal_partition_size(&zone_to_id, zone_redundancy)?;
msg.push("".into()); msg.push("".into());
if old_assignment_opt != None { if old_assignment_opt.is_some() {
msg.push(format!( msg.push(format!(
"Optimal partition size: {} ({} in previous layout)", "Optimal partition size: {} ({} in previous layout)",
ByteSize::b(partition_size).to_string_as(false), ByteSize::b(partition_size).to_string_as(false),
@@ -692,7 +692,7 @@ impl ClusterLayout {
.roles .roles
.items() .items()
.iter() .iter()
.filter(|(_, _, v)| matches!(&v.0, Some(r) if r.capacity != None)) .filter(|(_, _, v)| matches!(&v.0, Some(r) if r.capacity.is_some()))
.map(|(k, _, _)| *k) .map(|(k, _, _)| *k)
.collect(); .collect();
@@ -708,7 +708,7 @@ impl ClusterLayout {
.roles .roles
.items() .items()
.iter() .iter()
.filter(|(_, _, v)| matches!(v, NodeRoleV(Some(r)) if r.capacity == None)) .filter(|(_, _, v)| matches!(v, NodeRoleV(Some(r)) if r.capacity.is_none()))
.map(|(k, _, _)| *k) .map(|(k, _, _)| *k)
.collect(); .collect();
@@ -770,7 +770,7 @@ impl ClusterLayout {
for uuid in self.nongateway_nodes().iter() { for uuid in self.nongateway_nodes().iter() {
let r = self.node_role(uuid).unwrap(); let r = self.node_role(uuid).unwrap();
if !zone_to_id.contains_key(&r.zone) && r.capacity != None { if !zone_to_id.contains_key(&r.zone) && r.capacity.is_some() {
zone_to_id.insert(r.zone.clone(), id_to_zone.len()); zone_to_id.insert(r.zone.clone(), id_to_zone.len());
id_to_zone.push(r.zone.clone()); id_to_zone.push(r.zone.clone());
} }
@@ -1055,7 +1055,7 @@ impl ClusterLayout {
} }
} }
if *prev_assign_opt == None { if prev_assign_opt.is_none() {
new_partitions = stored_partitions.clone(); new_partitions = stored_partitions.clone();
//new_partitions_zone = stored_partitions_zone.clone(); //new_partitions_zone = stored_partitions_zone.clone();
} }
@@ -1063,7 +1063,7 @@ impl ClusterLayout {
// We display the statistics // We display the statistics
msg.push("".into()); msg.push("".into());
if *prev_assign_opt != None { if prev_assign_opt.is_some() {
let total_new_partitions: usize = new_partitions.iter().sum(); let total_new_partitions: usize = new_partitions.iter().sum();
msg.push(format!( msg.push(format!(
"A total of {} new copies of partitions need to be \ "A total of {} new copies of partitions need to be \

View File

@@ -668,7 +668,7 @@ impl System {
let prev_layout_check = layout.check().is_ok(); let prev_layout_check = layout.check().is_ok();
if layout.merge(adv) { if layout.merge(adv) {
if prev_layout_check && !layout.check().is_ok() { if prev_layout_check && layout.check().is_err() {
error!("New cluster layout is invalid, discarding."); error!("New cluster layout is invalid, discarding.");
return Err(Error::Message( return Err(Error::Message(
"New cluster layout is invalid, discarding.".into(), "New cluster layout is invalid, discarding.".into(),
@@ -724,7 +724,7 @@ impl System {
async fn discovery_loop(self: &Arc<Self>, mut stop_signal: watch::Receiver<bool>) { async fn discovery_loop(self: &Arc<Self>, mut stop_signal: watch::Receiver<bool>) {
while !*stop_signal.borrow() { while !*stop_signal.borrow() {
let not_configured = !self.ring.borrow().layout.check().is_ok(); let not_configured = self.ring.borrow().layout.check().is_err();
let no_peers = self.fullmesh.get_peer_list().len() < self.replication_factor; let no_peers = self.fullmesh.get_peer_list().len() < self.replication_factor;
let expected_n_nodes = self.ring.borrow().layout.num_nodes(); let expected_n_nodes = self.ring.borrow().layout.num_nodes();
let bad_peers = self let bad_peers = self