lifecycle worker: use queue_insert and process objects in batches
commit adbf5925de
parent 1cfcc61de8
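The hunks below make two changes: the worker's scan loop now handles a batch of up to 100 objects per invocation before yielding back to the background task scheduler, and the writes for expired objects and aborted multipart uploads go through queue_insert inside a local database transaction instead of an awaited insert. For orientation, here is a minimal, self-contained sketch of the batch-then-yield pattern; Scanner, step and BATCH_SIZE are illustrative names, not Garage APIs.

// Sketch only: a toy worker that processes a bounded batch per scheduler step.
const BATCH_SIZE: usize = 100;

#[derive(Debug, PartialEq)]
enum WorkerState {
	Busy,
	Idle,
}

struct Scanner {
	items: Vec<u32>,
	pos: usize,
}

impl Scanner {
	/// One scheduler step: handle up to BATCH_SIZE items, then yield.
	fn step(&mut self) -> WorkerState {
		for _ in 0..BATCH_SIZE {
			match self.items.get(self.pos) {
				None => return WorkerState::Idle, // scan finished
				Some(_item) => {
					// per-item processing (expiration checks, etc.) would go here
					self.pos += 1;
				}
			}
		}
		WorkerState::Busy // more items remain; the scheduler will call step() again
	}
}

fn main() {
	let mut scanner = Scanner { items: (0..250).collect(), pos: 0 };
	let mut calls = 1;
	while scanner.step() == WorkerState::Busy {
		calls += 1;
	}
	println!("scan finished after {calls} scheduler steps"); // 3 steps for 250 items
}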
@@ -152,41 +152,44 @@ impl Worker for LifecycleWorker {
 				pos,
 				last_bucket,
 			} => {
-				let (object_bytes, next_pos) = match self
-					.garage
-					.object_table
-					.data
-					.store
-					.get_gt(&pos)?
-				{
-					None => {
-						info!("Lifecycle worker finished for {}, objects expired: {}, mpu aborted: {}", date, *objects_expired, *mpu_aborted);
-						self.persister
-							.set_with(|x| x.last_completed = Some(date.to_string()))?;
-						self.state = State::Completed(*date);
-						return Ok(WorkerState::Idle);
-					}
-					Some((k, v)) => (v, k),
-				};
+				// Process a batch of 100 items before yielding to bg task scheduler
+				for _ in 0..100 {
+					let (object_bytes, next_pos) = match self
+						.garage
+						.object_table
+						.data
+						.store
+						.get_gt(&pos)?
+					{
+						None => {
+							info!("Lifecycle worker finished for {}, objects expired: {}, mpu aborted: {}", date, *objects_expired, *mpu_aborted);
+							self.persister
+								.set_with(|x| x.last_completed = Some(date.to_string()))?;
+							self.state = State::Completed(*date);
+							return Ok(WorkerState::Idle);
+						}
+						Some((k, v)) => (v, k),
+					};
 
-				let object = self.garage.object_table.data.decode_entry(&object_bytes)?;
-				let skip = process_object(
-					&self.garage,
-					*date,
-					&object,
-					objects_expired,
-					mpu_aborted,
-					last_bucket,
-				)
-				.await?;
+					let object = self.garage.object_table.data.decode_entry(&object_bytes)?;
+					let skip = process_object(
+						&self.garage,
+						*date,
+						&object,
+						objects_expired,
+						mpu_aborted,
+						last_bucket,
+					)
+					.await?;
 
-				*counter += 1;
-				if skip == Skip::SkipBucket {
-					let bucket_id_len = object.bucket_id.as_slice().len();
-					assert_eq!(pos.get(..bucket_id_len), Some(object.bucket_id.as_slice()));
-					*pos = [&pos[..bucket_id_len], &[0xFFu8][..]].concat();
-				} else {
-					*pos = next_pos;
-				}
+					*counter += 1;
+					if skip == Skip::SkipBucket {
+						let bucket_id_len = object.bucket_id.as_slice().len();
+						assert_eq!(pos.get(..bucket_id_len), Some(object.bucket_id.as_slice()));
+						*pos = [&pos[..bucket_id_len], &[0xFFu8][..]].concat();
+					} else {
+						*pos = next_pos;
+					}
+				}
 
 				Ok(WorkerState::Busy)
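For readers unfamiliar with the cursor handling in the loop above, the following standalone sketch shows the same resumable-scan idea against a plain BTreeMap: keep a byte-string position, fetch the first entry strictly greater than it, and, to skip the rest of a bucket, jump to the bucket prefix followed by 0xFF (valid UTF-8 object keys never contain that byte). Everything here, the get_gt helper, the 4-byte bucket prefix and the key layout, is a toy stand-in, not Garage's storage layer.

use std::collections::BTreeMap;
use std::ops::Bound::{Excluded, Unbounded};

// Mimics the store lookup in the diff: first entry whose key is strictly
// greater than `pos`. The BTreeMap stands in for the on-disk object table.
fn get_gt<'a>(
	store: &'a BTreeMap<Vec<u8>, String>,
	pos: &[u8],
) -> Option<(&'a Vec<u8>, &'a String)> {
	store.range::<[u8], _>((Excluded(pos), Unbounded)).next()
}

fn main() {
	// Keys are bucket_id ++ object_key, so one ordered scan walks bucket by bucket.
	let mut store = BTreeMap::new();
	for (bucket, key) in [(b"bkt1", "a"), (b"bkt1", "b"), (b"bkt2", "x")] {
		let mut k = bucket.to_vec();
		k.extend_from_slice(key.as_bytes());
		store.insert(k, format!("{key} in {}", String::from_utf8_lossy(bucket)));
	}

	let mut pos: Vec<u8> = Vec::new();
	while let Some((k, v)) = get_gt(&store, &pos) {
		println!("visiting {v}");
		let skip_bucket = v.ends_with("bkt1"); // pretend SkipBucket applies to bkt1
		if skip_bucket {
			// Jump past the whole bucket: 4 bytes of bucket prefix + 0xFF.
			// This works because the object-key bytes here never reach 0xFF.
			pos = [&k[..4], &[0xFFu8][..]].concat();
		} else {
			pos = k.clone();
		}
	}
	// Output: "visiting a in bkt1", then "visiting x in bkt2" ("b in bkt1" was skipped).
}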
@@ -260,6 +263,8 @@ async fn process_object(
 		return Ok(Skip::SkipBucket);
 	}
 
+	let db = garage.object_table.data.store.db();
+
 	for rule in lifecycle_policy.iter() {
 		if !rule.enabled {
 			continue;
@@ -310,7 +315,9 @@ async fn process_object(
 					"Lifecycle: expiring 1 object in bucket {:?}",
 					object.bucket_id
 				);
-				garage.object_table.insert(&deleted_object).await?;
+				db.transaction(|mut tx| {
+					garage.object_table.queue_insert(&mut tx, &deleted_object)
+				})?;
 				*objects_expired += 1;
 			}
 		}
@@ -343,7 +350,9 @@ async fn process_object(
 				);
 				let aborted_object =
 					Object::new(object.bucket_id, object.key.clone(), aborted_versions);
-				garage.object_table.insert(&aborted_object).await?;
+				db.transaction(|mut tx| {
+					garage.object_table.queue_insert(&mut tx, &aborted_object)
+				})?;
 				*mpu_aborted += n_aborted;
 			}
 		}
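The last two hunks replace an awaited per-object insert with queue_insert inside a db.transaction, so the per-object work only touches local storage and the write is propagated later. The sketch below illustrates that queue-now-flush-later pattern with toy Db and queue types, under the assumption that queued entries are applied afterwards by a background job; it is not a description of Garage's actual table or transaction API.

use std::collections::VecDeque;
use std::sync::Mutex;

struct Db {
	queue: Mutex<VecDeque<String>>, // stand-in for an on-disk insert queue
}

impl Db {
	/// Run `f` as one unit against the queue; a real implementation would
	/// be a proper transaction with rollback on error.
	fn transaction<T, E>(
		&self,
		f: impl FnOnce(&mut VecDeque<String>) -> Result<T, E>,
	) -> Result<T, E> {
		let mut q = self.queue.lock().unwrap();
		f(&mut *q)
	}

	/// What a background flusher would do: drain the queue and apply each entry.
	fn flush(&self) -> usize {
		let mut q = self.queue.lock().unwrap();
		let n = q.len();
		for entry in q.drain(..) {
			// here the real system would perform the (networked) table insert
			println!("applying queued insert: {entry}");
		}
		n
	}
}

fn main() {
	let db = Db { queue: Mutex::new(VecDeque::new()) };

	// Per-object work only appends to the local queue, so a scan loop never
	// blocks on the network for each expired object.
	for key in ["a", "b", "c"] {
		db.transaction(|q: &mut VecDeque<String>| -> Result<(), ()> {
			q.push_back(format!("deleted_object for key {key}"));
			Ok(())
		})
		.unwrap();
	}

	assert_eq!(db.flush(), 3);
}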