Merge branch 'main' into next

Alex Auvolat 2023-01-04 11:34:43 +01:00
commit 570e5e5bbb
No known key found for this signature in database
GPG Key ID: 0E496D15096376BE
80 changed files with 2387 additions and 1889 deletions

Cargo.lock (generated)

@ -1048,7 +1048,7 @@ dependencies = [
[[package]]
name = "garage"
version = "0.8.0"
version = "0.8.1"
dependencies = [
"assert-json-diff",
"async-trait",
@ -1080,7 +1080,6 @@ dependencies = [
"parse_duration",
"prometheus",
"rand 0.8.5",
"rmp-serde",
"serde",
"serde_bytes",
"serde_json",
@ -1096,7 +1095,7 @@ dependencies = [
[[package]]
name = "garage_api"
version = "0.8.0"
version = "0.8.1"
dependencies = [
"async-trait",
"base64",
@ -1141,7 +1140,7 @@ dependencies = [
[[package]]
name = "garage_block"
version = "0.8.0"
version = "0.8.1"
dependencies = [
"arc-swap",
"async-compression",
@ -1156,7 +1155,6 @@ dependencies = [
"hex",
"opentelemetry",
"rand 0.8.5",
"rmp-serde",
"serde",
"serde_bytes",
"tokio",
@ -1167,7 +1165,7 @@ dependencies = [
[[package]]
name = "garage_db"
version = "0.8.0"
version = "0.8.1"
dependencies = [
"clap 3.1.18",
"err-derive",
@ -1182,7 +1180,7 @@ dependencies = [
[[package]]
name = "garage_model"
version = "0.8.0"
version = "0.8.1"
dependencies = [
"arc-swap",
"async-trait",
@ -1200,7 +1198,6 @@ dependencies = [
"netapp",
"opentelemetry",
"rand 0.8.5",
"rmp-serde",
"serde",
"serde_bytes",
"tokio",
@ -1210,7 +1207,7 @@ dependencies = [
[[package]]
name = "garage_rpc"
version = "0.8.0"
version = "0.8.1"
dependencies = [
"arc-swap",
"async-trait",
@ -1231,7 +1228,6 @@ dependencies = [
"pnet_datalink",
"rand 0.8.5",
"reqwest",
"rmp-serde",
"schemars",
"serde",
"serde_bytes",
@ -1243,8 +1239,9 @@ dependencies = [
[[package]]
name = "garage_table"
version = "0.8.0"
version = "0.8.1"
dependencies = [
"arc-swap",
"async-trait",
"bytes",
"futures",
@ -1256,7 +1253,6 @@ dependencies = [
"hexdump",
"opentelemetry",
"rand 0.8.5",
"rmp-serde",
"serde",
"serde_bytes",
"tokio",
@ -1265,7 +1261,7 @@ dependencies = [
[[package]]
name = "garage_util"
version = "0.8.0"
version = "0.8.1"
dependencies = [
"arc-swap",
"async-trait",
@ -1278,6 +1274,7 @@ dependencies = [
"garage_db",
"git-version",
"hex",
"hexdump",
"http",
"hyper",
"lazy_static",
@ -1296,7 +1293,7 @@ dependencies = [
[[package]]
name = "garage_web"
version = "0.8.0"
version = "0.8.1"
dependencies = [
"err-derive",
"futures",

Cargo.nix

@ -32,7 +32,7 @@ args@{
ignoreLockHash,
}:
let
nixifiedLockHash = "a68c589851ec1990d29cdc20e8b922b27c1a6b402b682f7b0d9a9e6258f25828";
nixifiedLockHash = "cc1f7d987fc14a2f087cb5d39365361d6f1fb1a7cf5aae7cbf628ff5dbc880c8";
workspaceSrc = if args.workspaceSrc == null then ./. else args.workspaceSrc;
currentLockHash = builtins.hashFile "sha256" (workspaceSrc + /Cargo.lock);
lockHashIgnored = if ignoreLockHash
@ -56,15 +56,15 @@ in
{
cargo2nixVersion = "0.11.0";
workspace = {
garage_db = rustPackages.unknown.garage_db."0.8.0";
garage_util = rustPackages.unknown.garage_util."0.8.0";
garage_rpc = rustPackages.unknown.garage_rpc."0.8.0";
garage_table = rustPackages.unknown.garage_table."0.8.0";
garage_block = rustPackages.unknown.garage_block."0.8.0";
garage_model = rustPackages.unknown.garage_model."0.8.0";
garage_api = rustPackages.unknown.garage_api."0.8.0";
garage_web = rustPackages.unknown.garage_web."0.8.0";
garage = rustPackages.unknown.garage."0.8.0";
garage_db = rustPackages.unknown.garage_db."0.8.1";
garage_util = rustPackages.unknown.garage_util."0.8.1";
garage_rpc = rustPackages.unknown.garage_rpc."0.8.1";
garage_table = rustPackages.unknown.garage_table."0.8.1";
garage_block = rustPackages.unknown.garage_block."0.8.1";
garage_model = rustPackages.unknown.garage_model."0.8.1";
garage_api = rustPackages.unknown.garage_api."0.8.1";
garage_web = rustPackages.unknown.garage_web."0.8.1";
garage = rustPackages.unknown.garage."0.8.1";
k2v-client = rustPackages.unknown.k2v-client."0.0.1";
};
"registry+https://github.com/rust-lang/crates.io-index".addr2line."0.17.0" = overridableMkRustCrate (profileName: rec {
@ -1494,16 +1494,16 @@ in
};
});
"unknown".garage."0.8.0" = overridableMkRustCrate (profileName: rec {
"unknown".garage."0.8.1" = overridableMkRustCrate (profileName: rec {
name = "garage";
version = "0.8.0";
version = "0.8.1";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/garage");
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/bundled-libs" || rootFeatures' ? "garage/default") "bundled-libs")
(lib.optional (rootFeatures' ? "garage/consul-discovery") "consul-discovery")
(lib.optional (rootFeatures' ? "garage/default") "default")
(lib.optional (rootFeatures' ? "garage/k2v") "k2v")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v") "k2v")
(lib.optional (rootFeatures' ? "garage/kubernetes-discovery") "kubernetes-discovery")
(lib.optional (rootFeatures' ? "garage/lmdb") "lmdb")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics") "metrics")
@ -1522,14 +1522,14 @@ in
bytesize = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytesize."1.1.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
garage_api = (rustPackages."unknown".garage_api."0.8.0" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
garage_web = (rustPackages."unknown".garage_web."0.8.0" { inherit profileName; }).out;
garage_api = (rustPackages."unknown".garage_api."0.8.1" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
garage_web = (rustPackages."unknown".garage_web."0.8.1" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
sodiumoxide = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".kuska-sodiumoxide."0.2.5-0" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
@ -1539,7 +1539,6 @@ in
parse_duration = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".parse_duration."2.1.1" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage/prometheus" then "prometheus" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".prometheus."0.13.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
structopt = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".structopt."0.3.26" { inherit profileName; }).out;
@ -1563,13 +1562,13 @@ in
};
});
"unknown".garage_api."0.8.0" = overridableMkRustCrate (profileName: rec {
"unknown".garage_api."0.8.1" = overridableMkRustCrate (profileName: rec {
name = "garage_api";
version = "0.8.0";
version = "0.8.1";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/api");
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v") "k2v")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v") "k2v")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics") "metrics")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/opentelemetry-prometheus") "opentelemetry-prometheus")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/metrics" || rootFeatures' ? "garage_api/metrics" || rootFeatures' ? "garage_api/prometheus") "prometheus")
@ -1584,11 +1583,11 @@ in
form_urlencoded = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".form_urlencoded."1.0.1" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hmac = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hmac."0.12.1" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
@ -1617,9 +1616,9 @@ in
};
});
"unknown".garage_block."0.8.0" = overridableMkRustCrate (profileName: rec {
"unknown".garage_block."0.8.1" = overridableMkRustCrate (profileName: rec {
name = "garage_block";
version = "0.8.0";
version = "0.8.1";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/block");
features = builtins.concatLists [
@ -1632,14 +1631,13 @@ in
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
@ -1649,9 +1647,9 @@ in
};
});
"unknown".garage_db."0.8.0" = overridableMkRustCrate (profileName: rec {
"unknown".garage_db."0.8.1" = overridableMkRustCrate (profileName: rec {
name = "garage_db";
version = "0.8.0";
version = "0.8.1";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/db");
features = builtins.concatLists [
@ -1681,14 +1679,14 @@ in
};
});
"unknown".garage_model."0.8.0" = overridableMkRustCrate (profileName: rec {
"unknown".garage_model."0.8.1" = overridableMkRustCrate (profileName: rec {
name = "garage_model";
version = "0.8.0";
version = "0.8.1";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/model");
features = builtins.concatLists [
[ "default" ]
(lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v") "k2v")
(lib.optional (rootFeatures' ? "garage/lmdb" || rootFeatures' ? "garage_model/lmdb") "lmdb")
[ "sled" ]
(lib.optional (rootFeatures' ? "garage/sqlite" || rootFeatures' ? "garage_model/sqlite") "sqlite")
@ -1701,16 +1699,15 @@ in
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.8.0" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
garage_block = (rustPackages."unknown".garage_block."0.8.1" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
netapp = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".netapp."0.5.2" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
@ -1719,9 +1716,9 @@ in
};
});
"unknown".garage_rpc."0.8.0" = overridableMkRustCrate (profileName: rec {
"unknown".garage_rpc."0.8.1" = overridableMkRustCrate (profileName: rec {
name = "garage_rpc";
version = "0.8.0";
version = "0.8.1";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/rpc");
features = builtins.concatLists [
@ -1742,7 +1739,7 @@ in
${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/err-derive" then "err_derive" else null } = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
gethostname = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".gethostname."0.2.3" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
itertools = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".itertools."0.10.3" { inherit profileName; }).out;
@ -1754,7 +1751,6 @@ in
pnet_datalink = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".pnet_datalink."0.28.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/consul-discovery" || rootFeatures' ? "garage_rpc/consul-discovery" || rootFeatures' ? "garage_rpc/reqwest" then "reqwest" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".reqwest."0.11.12" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
${ if rootFeatures' ? "garage/kubernetes-discovery" || rootFeatures' ? "garage_rpc/kubernetes-discovery" || rootFeatures' ? "garage_rpc/schemars" then "schemars" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".schemars."0.8.8" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
@ -1765,24 +1761,24 @@ in
};
});
"unknown".garage_table."0.8.0" = overridableMkRustCrate (profileName: rec {
"unknown".garage_table."0.8.1" = overridableMkRustCrate (profileName: rec {
name = "garage_table";
version = "0.8.0";
version = "0.8.1";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/table");
dependencies = {
arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }).out;
async_trait = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".async-trait."0.1.52" { profileName = "__noProfile"; }).out;
bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".bytes."1.2.0" { inherit profileName; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
futures_util = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures-util."0.3.21" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
garage_rpc = (rustPackages."unknown".garage_rpc."0.8.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
rand = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rand."0.8.5" { inherit profileName; }).out;
rmp_serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rmp-serde."0.15.5" { inherit profileName; }).out;
serde = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde."1.0.137" { inherit profileName; }).out;
serde_bytes = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".serde_bytes."0.11.5" { inherit profileName; }).out;
tokio = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".tokio."1.17.0" { inherit profileName; }).out;
@ -1790,13 +1786,13 @@ in
};
});
"unknown".garage_util."0.8.0" = overridableMkRustCrate (profileName: rec {
"unknown".garage_util."0.8.1" = overridableMkRustCrate (profileName: rec {
name = "garage_util";
version = "0.8.0";
version = "0.8.1";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/util");
features = builtins.concatLists [
(lib.optional (rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v" || rootFeatures' ? "garage_util/k2v") "k2v")
(lib.optional (rootFeatures' ? "garage/default" || rootFeatures' ? "garage/k2v" || rootFeatures' ? "garage_api/k2v" || rootFeatures' ? "garage_model/k2v" || rootFeatures' ? "garage_util/k2v") "k2v")
];
dependencies = {
arc_swap = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".arc-swap."1.5.0" { inherit profileName; }).out;
@ -1807,9 +1803,10 @@ in
digest = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".digest."0.10.3" { inherit profileName; }).out;
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.0" { inherit profileName; }).out;
garage_db = (rustPackages."unknown".garage_db."0.8.1" { inherit profileName; }).out;
git_version = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".git-version."0.3.5" { inherit profileName; }).out;
hex = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hex."0.4.3" { inherit profileName; }).out;
hexdump = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hexdump."0.1.1" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out;
lazy_static = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".lazy_static."1.4.0" { inherit profileName; }).out;
@ -1827,18 +1824,18 @@ in
};
});
"unknown".garage_web."0.8.0" = overridableMkRustCrate (profileName: rec {
"unknown".garage_web."0.8.1" = overridableMkRustCrate (profileName: rec {
name = "garage_web";
version = "0.8.0";
version = "0.8.1";
registry = "unknown";
src = fetchCrateLocal (workspaceSrc + "/src/web");
dependencies = {
err_derive = (buildRustPackages."registry+https://github.com/rust-lang/crates.io-index".err-derive."0.3.1" { profileName = "__noProfile"; }).out;
futures = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".futures."0.3.21" { inherit profileName; }).out;
garage_api = (rustPackages."unknown".garage_api."0.8.0" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.8.0" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.0" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
garage_api = (rustPackages."unknown".garage_api."0.8.1" { inherit profileName; }).out;
garage_model = (rustPackages."unknown".garage_model."0.8.1" { inherit profileName; }).out;
garage_table = (rustPackages."unknown".garage_table."0.8.1" { inherit profileName; }).out;
garage_util = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
hyper = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".hyper."0.14.18" { inherit profileName; }).out;
opentelemetry = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".opentelemetry."0.17.0" { inherit profileName; }).out;
@ -2452,7 +2449,7 @@ in
dependencies = {
base64 = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".base64."0.13.0" { inherit profileName; }).out;
${ if rootFeatures' ? "k2v-client/clap" || rootFeatures' ? "k2v-client/cli" then "clap" else null } = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".clap."3.1.18" { inherit profileName; }).out;
${ if rootFeatures' ? "k2v-client/cli" || rootFeatures' ? "k2v-client/garage_util" then "garage_util" else null } = (rustPackages."unknown".garage_util."0.8.0" { inherit profileName; }).out;
${ if rootFeatures' ? "k2v-client/cli" || rootFeatures' ? "k2v-client/garage_util" then "garage_util" else null } = (rustPackages."unknown".garage_util."0.8.1" { inherit profileName; }).out;
http = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".http."0.2.8" { inherit profileName; }).out;
log = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".log."0.4.16" { inherit profileName; }).out;
rusoto_core = (rustPackages."registry+https://github.com/rust-lang/crates.io-index".rusoto_core."0.48.0" { inherit profileName; }).out;


@ -5,13 +5,56 @@ weight = 20
## S3
### Using Minio SDK
First install the SDK:
```bash
pip3 install minio
```
Then instantiate a client object using the Garage root domain, API key and secret:
```python
import minio
client = minio.Minio(
"your.domain.tld",
"GKyourapikey",
"abcd[...]1234",
# Force the region; this is specific to Garage
region="region",
)
```
Then use all the standard S3 endpoints as implemented by the Minio SDK:
```python
import io

# List buckets
print(client.list_buckets())
# Put an object containing 'content' to /path in bucket named 'bucket':
content = b"content"
client.put_object(
"bucket",
"path",
io.BytesIO(content),
len(content),
)
# Read the object back and check contents
data = client.get_object("bucket", "path").read()
assert data == content
```
For further documentation, see the Minio SDK
[Reference](https://docs.min.io/docs/python-client-api-reference.html)
### Using Amazon boto3
*Coming soon*
See the official documentation:
- [Installation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html)
- [Reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html)
- [Example](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html)
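In the meantime, here is a minimal sketch of how boto3 can be pointed at a Garage cluster. The endpoint, region and credentials below are the same placeholder values used in the Minio example above; substitute your own.
```python
import boto3

# Placeholder values: reuse your Garage endpoint, the region declared
# in your Garage configuration, and an API key pair.
s3 = boto3.client(
    "s3",
    endpoint_url="https://your.domain.tld",
    region_name="region",
    aws_access_key_id="GKyourapikey",
    aws_secret_access_key="abcd[...]1234",
)

# List buckets
print(s3.list_buckets())

# Put an object and read it back
s3.put_object(Bucket="bucket", Key="path", Body=b"content")
data = s3.get_object(Bucket="bucket", Key="path")["Body"].read()
assert data == b"content"
```
As with the Minio SDK, forcing the region to the one declared in your Garage configuration avoids signature mismatches.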


@ -8,7 +8,7 @@ In this section, we cover the following web applications:
| Name | Status | Note |
|------|--------|------|
| [Nextcloud](#nextcloud) | ✅ | Both Primary Storage and External Storage are supported |
| [Peertube](#peertube) | ✅ | Must be configured with the website endpoint |
| [Peertube](#peertube) | ✅ | Supported with the website endpoint; proxying private videos is unsupported |
| [Mastodon](#mastodon) | ✅ | Natively supported |
| [Matrix](#matrix) | ✅ | Tested with `synapse-s3-storage-provider` |
| [Pixelfed](#pixelfed) | ❓ | Not yet tested |
@ -128,6 +128,10 @@ In other words, Peertube is only responsible of the "control plane" and offload
In return, this system is a bit harder to configure.
We show how it is still possible to configure Garage with Peertube, allowing you to spread the load and the bandwidth usage on the Garage cluster.
Starting with version 5.0, Peertube can also improve the security of private videos by not exposing them directly
but by serving them through a single control point in the Peertube instance. This relies on S3 per-object and prefix ACLs,
which Garage does not currently support, so this feature cannot be used with Garage. While this does weaken the security
of private videos, it is not a blocking issue and can be a reasonable trade-off for some instances.
### Create resources in Garage
@ -195,6 +199,11 @@ object_storage:
max_upload_part: 2GB
proxy:
# You may enable this feature, but it will not provide any security benefit, so
# you should rather use the Garage public endpoint for all videos
proxify_private_files: false
streaming_playlists:
bucket_name: 'peertube-playlist'


@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
version: 0.2.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to


@ -18,6 +18,9 @@ metadata:
name: {{ $fullName }}-s3-api
labels:
{{- include "garage.labels" . | nindent 4 }}
{{- with .Values.ingress.s3.api.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.ingress.s3.api.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
@ -80,6 +83,9 @@ metadata:
name: {{ $fullName }}-s3-web
labels:
{{- include "garage.labels" . | nindent 4 }}
{{- with .Values.ingress.s3.web.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.ingress.s3.web.annotations }}
annotations:
{{- toYaml . | nindent 4 }}


@ -85,14 +85,15 @@ service:
ingress:
s3:
api:
enabled: true
enabled: false
# Rely on either the className or the annotation below, but not both
# replace "nginx" with your Ingress controller
# you can find examples here: https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
className: "nginx"
annotations:
# className: "nginx"
annotations: {}
# kubernetes.io/ingress.class: "nginx"
# kubernetes.io/tls-acme: "true"
labels: {}
hosts:
- host: "s3.garage.tld" # garage S3 API endpoint
paths:
@ -107,11 +108,15 @@ ingress:
# hosts:
# - kubernetes.docker.internal
web:
enabled: true
className: "nginx"
enabled: false
# Rely on either the className or the annotation below, but not both
# replace "nginx" with your Ingress controller
# you can find examples here: https://kubernetes.io/docs/concepts/services-networking/ingress-controllers
# className: "nginx"
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
hosts:
- host: "*.web.garage.tld" # wildcard website access with bucket name prefix
paths:


@ -1,6 +1,6 @@
[package]
name = "garage_api"
version = "0.8.0"
version = "0.8.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -14,11 +14,11 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_model = { version = "0.8.0", path = "../model" }
garage_table = { version = "0.8.0", path = "../table" }
garage_block = { version = "0.8.0", path = "../block" }
garage_util = { version = "0.8.0", path = "../util" }
garage_rpc = { version = "0.8.0", path = "../rpc" }
garage_model = { version = "0.8.1", path = "../model" }
garage_table = { version = "0.8.1", path = "../table" }
garage_block = { version = "0.8.1", path = "../block" }
garage_util = { version = "0.8.1", path = "../util" }
garage_rpc = { version = "0.8.1", path = "../rpc" }
async-trait = "0.1.7"
base64 = "0.13"


@ -143,10 +143,13 @@ impl Endpoint {
}
generateQueryParameters! {
keywords: [],
fields: [
"format" => format,
"id" => id,
"search" => search,
"globalAlias" => global_alias,
"alias" => alias,
"accessKeyId" => access_key_id
]
}


@ -96,7 +96,7 @@ impl Endpoint {
fn from_get(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
key: [
EMPTY if causality_token => PollItem (query::sort_key, query::causality_token, opt_parse::timeout),
EMPTY => ReadItem (query::sort_key),
@ -111,7 +111,7 @@ impl Endpoint {
fn from_search(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
key: [
],
no_key: [
@ -125,7 +125,7 @@ impl Endpoint {
fn from_head(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
key: [
EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
],
@ -140,7 +140,7 @@ impl Endpoint {
fn from_post(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
key: [
],
no_key: [
@ -155,7 +155,7 @@ impl Endpoint {
fn from_put(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
key: [
EMPTY => InsertItem (query::sort_key),
@ -169,7 +169,7 @@ impl Endpoint {
fn from_delete(partition_key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), partition_key, query, None),
(query.keyword.take().unwrap_or_default(), partition_key, query, None),
key: [
EMPTY => DeleteItem (query::sort_key),
],
@ -232,6 +232,11 @@ impl Endpoint {
// parameter name => struct field
generateQueryParameters! {
keywords: [
"delete" => DELETE,
"search" => SEARCH
],
fields: [
"prefix" => prefix,
"start" => start,
"causality_token" => causality_token,
@ -240,13 +245,5 @@ generateQueryParameters! {
"reverse" => reverse,
"sort_key" => sort_key,
"timeout" => timeout
}
mod keywords {
//! This module contain all query parameters with no associated value
//! used to differentiate endpoints.
pub const EMPTY: &str = "";
pub const DELETE: &str = "delete";
pub const SEARCH: &str = "search";
]
}


@ -4,10 +4,9 @@ macro_rules! router_match {
(@match $enum:expr , [ $($endpoint:ident,)* ]) => {{
// usage: router_match {@match my_enum, [ VariantWithField1, VariantWithField2 ..] }
// returns true if the variant was one of the listed variants, false otherwise.
use Endpoint::*;
match $enum {
$(
$endpoint { .. } => true,
Endpoint::$endpoint { .. } => true,
)*
_ => false
}
@ -15,10 +14,9 @@ macro_rules! router_match {
(@extract $enum:expr , $param:ident, [ $($endpoint:ident,)* ]) => {{
// usage: router_match {@extract my_enum, field_name, [ VariantWithField1, VariantWithField2 ..] }
// returns Some(field_value), or None if the variant was not one of the listed variants.
use Endpoint::*;
match $enum {
$(
$endpoint {$param, ..} => Some($param),
Endpoint::$endpoint {$param, ..} => Some($param),
)*
_ => None
}
@ -28,10 +26,9 @@ macro_rules! router_match {
$($meth:ident $path:pat $(if $required:ident)? => $api:ident $(($($conv:ident :: $param:ident),*))?,)*
]) => {{
{
use Endpoint::*;
match ($method, $reqpath) {
$(
(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => $api {
(&Method::$meth, $path) if true $(&& $query.$required.is_some())? => Endpoint::$api {
$($(
$param: router_match!(@@parse_param $query, $conv, $param),
)*)?
@ -60,11 +57,9 @@ macro_rules! router_match {
// ]
// }
// See in from_{method} for more detailed usage.
use Endpoint::*;
use keywords::*;
match ($keyword, !$key.is_empty()){
$(
($kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok($api_k {
(Keyword::$kw_k, true) if true $(&& $query.$required_k.is_some())? $(&& $header.contains_key($header_k))? => Ok(Endpoint::$api_k {
$key,
$($(
$param_k: router_match!(@@parse_param $query, $conv_k, $param_k),
@ -72,7 +67,7 @@ macro_rules! router_match {
}),
)*
$(
($kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok($api_nk {
(Keyword::$kw_nk, false) $(if $query.$required_nk.is_some())? $(if $header.contains($header_nk))? => Ok(Endpoint::$api_nk {
$($(
$param_nk: router_match!(@@parse_param $query, $conv_nk, $param_nk),
)*)?
@ -144,14 +139,40 @@ macro_rules! router_match {
/// This macro is used to generate part of the code in this module. It must be called only once, and
/// is useless outside of this module.
macro_rules! generateQueryParameters {
( $($rest:expr => $name:ident),* ) => {
(
keywords: [ $($kw_param:expr => $kw_name: ident),* ],
fields: [ $($f_param:expr => $f_name:ident),* ]
) => {
#[derive(Debug)]
#[allow(non_camel_case_types)]
#[allow(clippy::upper_case_acronyms)]
enum Keyword {
EMPTY,
$( $kw_name, )*
}
impl std::fmt::Display for Keyword {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Keyword::EMPTY => write!(f, "``"),
$( Keyword::$kw_name => write!(f, "`{}`", $kw_param), )*
}
}
}
impl Default for Keyword {
fn default() -> Self {
Keyword::EMPTY
}
}
/// Struct containing all query parameters used in endpoints. Think of it as a HashMap,
/// but with keys statically known.
#[derive(Debug, Default)]
struct QueryParameters<'a> {
keyword: Option<Cow<'a, str>>,
keyword: Option<Keyword>,
$(
$name: Option<Cow<'a, str>>,
$f_name: Option<Cow<'a, str>>,
)*
}
@ -160,34 +181,29 @@ macro_rules! generateQueryParameters {
fn from_query(query: &'a str) -> Result<Self, Error> {
let mut res: Self = Default::default();
for (k, v) in url::form_urlencoded::parse(query.as_bytes()) {
let repeated = match k.as_ref() {
match k.as_ref() {
$(
$rest => if !v.is_empty() {
res.$name.replace(v).is_some()
} else {
false
$kw_param => if let Some(prev_kw) = res.keyword.replace(Keyword::$kw_name) {
return Err(Error::bad_request(format!(
"Multiple keywords: '{}' and '{}'", prev_kw, $kw_param
)));
},
)*
$(
$f_param => if !v.is_empty() {
if res.$f_name.replace(v).is_some() {
return Err(Error::bad_request(format!(
"Query parameter repeated: '{}'", k
)));
}
},
)*
_ => {
if k.starts_with("response-") || k.starts_with("X-Amz-") {
false
} else if v.as_ref().is_empty() {
if res.keyword.replace(k).is_some() {
return Err(Error::bad_request("Multiple keywords"));
}
continue;
} else {
if !(k.starts_with("response-") || k.starts_with("X-Amz-")) {
debug!("Received an unknown query parameter: '{}'", k);
false
}
}
};
if repeated {
return Err(Error::bad_request(format!(
"Query parameter repeated: '{}'",
k
)));
}
}
Ok(res)
}
@ -198,8 +214,8 @@ macro_rules! generateQueryParameters {
if self.keyword.is_some() {
Some("Keyword not used")
} $(
else if self.$name.is_some() {
Some(concat!("'", $rest, "'"))
else if self.$f_name.is_some() {
Some(concat!("'", $f_param, "'"))
}
)* else {
None


@ -161,6 +161,15 @@ pub async fn handle_create_bucket(
return Err(CommonError::BucketAlreadyExists.into());
}
} else {
// Check user is allowed to create bucket
if !key_params.allow_create_bucket.get() {
return Err(CommonError::Forbidden(format!(
"Access key {} is not allowed to create buckets",
api_key.key_id
))
.into());
}
// Create the bucket!
if !is_valid_bucket_name(&bucket_name) {
return Err(Error::bad_request(format!(


@ -355,7 +355,7 @@ impl Endpoint {
fn from_get(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
(query.keyword.take().unwrap_or_default(), key, query, None),
key: [
EMPTY if upload_id => ListParts (query::upload_id, opt_parse::max_parts, opt_parse::part_number_marker),
EMPTY => GetObject (query_opt::version_id, opt_parse::part_number),
@ -412,7 +412,7 @@ impl Endpoint {
fn from_head(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
(query.keyword.take().unwrap_or_default(), key, query, None),
key: [
EMPTY => HeadObject(opt_parse::part_number, query_opt::version_id),
],
@ -426,7 +426,7 @@ impl Endpoint {
fn from_post(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
(query.keyword.take().unwrap_or_default(), key, query, None),
key: [
EMPTY if upload_id => CompleteMultipartUpload (query::upload_id),
RESTORE => RestoreObject (query_opt::version_id),
@ -448,7 +448,7 @@ impl Endpoint {
) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), key, query, headers),
(query.keyword.take().unwrap_or_default(), key, query, headers),
key: [
EMPTY if part_number header "x-amz-copy-source" => UploadPartCopy (parse::part_number, query::upload_id),
EMPTY header "x-amz-copy-source" => CopyObject,
@ -490,7 +490,7 @@ impl Endpoint {
fn from_delete(key: String, query: &mut QueryParameters<'_>) -> Result<Self, Error> {
router_match! {
@gen_parser
(query.keyword.take().unwrap_or_default().as_ref(), key, query, None),
(query.keyword.take().unwrap_or_default(), key, query, None),
key: [
EMPTY if upload_id => AbortMultipartUpload (query::upload_id),
EMPTY => DeleteObject (query_opt::version_id),
@ -624,6 +624,39 @@ impl Endpoint {
// parameter name => struct field
generateQueryParameters! {
keywords: [
"accelerate" => ACCELERATE,
"acl" => ACL,
"analytics" => ANALYTICS,
"cors" => CORS,
"delete" => DELETE,
"encryption" => ENCRYPTION,
"intelligent-tiering" => INTELLIGENT_TIERING,
"inventory" => INVENTORY,
"legal-hold" => LEGAL_HOLD,
"lifecycle" => LIFECYCLE,
"location" => LOCATION,
"logging" => LOGGING,
"metrics" => METRICS,
"notification" => NOTIFICATION,
"object-lock" => OBJECT_LOCK,
"ownershipControls" => OWNERSHIP_CONTROLS,
"policy" => POLICY,
"policyStatus" => POLICY_STATUS,
"publicAccessBlock" => PUBLIC_ACCESS_BLOCK,
"replication" => REPLICATION,
"requestPayment" => REQUEST_PAYMENT,
"restore" => RESTORE,
"retention" => RETENTION,
"select" => SELECT,
"tagging" => TAGGING,
"torrent" => TORRENT,
"uploads" => UPLOADS,
"versioning" => VERSIONING,
"versions" => VERSIONS,
"website" => WEBSITE
],
fields: [
"continuation-token" => continuation_token,
"delimiter" => delimiter,
"encoding-type" => encoding_type,
@ -644,43 +677,7 @@ generateQueryParameters! {
"upload-id-marker" => upload_id_marker,
"versionId" => version_id,
"version-id-marker" => version_id_marker
}
mod keywords {
//! This module contain all query parameters with no associated value S3 uses to differentiate
//! endpoints.
pub const EMPTY: &str = "";
pub const ACCELERATE: &str = "accelerate";
pub const ACL: &str = "acl";
pub const ANALYTICS: &str = "analytics";
pub const CORS: &str = "cors";
pub const DELETE: &str = "delete";
pub const ENCRYPTION: &str = "encryption";
pub const INTELLIGENT_TIERING: &str = "intelligent-tiering";
pub const INVENTORY: &str = "inventory";
pub const LEGAL_HOLD: &str = "legal-hold";
pub const LIFECYCLE: &str = "lifecycle";
pub const LOCATION: &str = "location";
pub const LOGGING: &str = "logging";
pub const METRICS: &str = "metrics";
pub const NOTIFICATION: &str = "notification";
pub const OBJECT_LOCK: &str = "object-lock";
pub const OWNERSHIP_CONTROLS: &str = "ownershipControls";
pub const POLICY: &str = "policy";
pub const POLICY_STATUS: &str = "policyStatus";
pub const PUBLIC_ACCESS_BLOCK: &str = "publicAccessBlock";
pub const REPLICATION: &str = "replication";
pub const REQUEST_PAYMENT: &str = "requestPayment";
pub const RESTORE: &str = "restore";
pub const RETENTION: &str = "retention";
pub const SELECT: &str = "select";
pub const TAGGING: &str = "tagging";
pub const TORRENT: &str = "torrent";
pub const UPLOADS: &str = "uploads";
pub const VERSIONING: &str = "versioning";
pub const VERSIONS: &str = "versions";
pub const WEBSITE: &str = "website";
]
}
#[cfg(test)]


@ -1,6 +1,6 @@
[package]
name = "garage_block"
version = "0.8.0"
version = "0.8.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -14,10 +14,10 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_db = { version = "0.8.0", path = "../db" }
garage_rpc = { version = "0.8.0", path = "../rpc" }
garage_util = { version = "0.8.0", path = "../util" }
garage_table = { version = "0.8.0", path = "../table" }
garage_db = { version = "0.8.1", path = "../db" }
garage_rpc = { version = "0.8.1", path = "../rpc" }
garage_util = { version = "0.8.1", path = "../util" }
garage_table = { version = "0.8.1", path = "../table" }
opentelemetry = "0.17"
@ -31,7 +31,6 @@ rand = "0.8"
async-compression = { version = "0.3", features = ["tokio", "zstd"] }
zstd = { version = "0.9", default-features = false }
rmp-serde = "0.15"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_bytes = "0.11"


@ -3,6 +3,7 @@ use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use bytes::Bytes;
use serde::{Deserialize, Serialize};
@ -22,6 +23,7 @@ use garage_rpc::rpc_helper::netapp::stream::{stream_asyncread, ByteStream};
use garage_db as db;
use garage_util::background::BackgroundRunner;
use garage_util::data::*;
use garage_util::error::*;
use garage_util::metrics::RecordDuration;
@ -87,7 +89,16 @@ pub struct BlockManager {
pub(crate) metrics: BlockManagerMetrics,
tx_scrub_command: mpsc::Sender<ScrubWorkerCommand>,
tx_scrub_command: ArcSwapOption<mpsc::Sender<ScrubWorkerCommand>>,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct BlockResyncErrorInfo {
pub hash: Hash,
pub refcount: u64,
pub error_count: u64,
pub last_try: u64,
pub next_try: u64,
}
// This custom struct contains functions that must only be run
@ -114,9 +125,8 @@ impl BlockManager {
.netapp
.endpoint("garage_block/manager.rs/Rpc".to_string());
let metrics = BlockManagerMetrics::new(resync.queue.clone(), resync.errors.clone());
let (scrub_tx, scrub_rx) = mpsc::channel(1);
let metrics =
BlockManagerMetrics::new(rc.rc.clone(), resync.queue.clone(), resync.errors.clone());
let block_manager = Arc::new(Self {
replication,
@ -128,21 +138,24 @@ impl BlockManager {
system,
endpoint,
metrics,
tx_scrub_command: scrub_tx,
tx_scrub_command: ArcSwapOption::new(None),
});
block_manager.endpoint.set_handler(block_manager.clone());
block_manager
}
pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
// Spawn a bunch of resync workers
for index in 0..MAX_RESYNC_WORKERS {
let worker = ResyncWorker::new(index, block_manager.clone());
block_manager.system.background.spawn_worker(worker);
let worker = ResyncWorker::new(index, self.clone());
bg.spawn_worker(worker);
}
// Spawn scrub worker
let scrub_worker = ScrubWorker::new(block_manager.clone(), scrub_rx);
block_manager.system.background.spawn_worker(scrub_worker);
block_manager
let (scrub_tx, scrub_rx) = mpsc::channel(1);
self.tx_scrub_command.store(Some(Arc::new(scrub_tx)));
bg.spawn_worker(ScrubWorker::new(self.clone(), scrub_rx));
}
/// Ask nodes that might have a (possibly compressed) block for it
@ -309,9 +322,42 @@ impl BlockManager {
Ok(self.rc.rc.len()?)
}
/// Get number of items in the refcount table
pub fn rc_fast_len(&self) -> Result<Option<usize>, Error> {
Ok(self.rc.rc.fast_len()?)
}
/// Send command to start/stop/manager scrub worker
pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) {
let _ = self.tx_scrub_command.send(cmd).await;
pub async fn send_scrub_command(&self, cmd: ScrubWorkerCommand) -> Result<(), Error> {
let tx = self.tx_scrub_command.load();
let tx = tx.as_ref().ok_or_message("scrub worker is not running")?;
tx.send(cmd).await.ok_or_message("send error")?;
Ok(())
}
/// Get the reference count of a block
pub fn get_block_rc(&self, hash: &Hash) -> Result<u64, Error> {
Ok(self.rc.get_block_rc(hash)?.as_u64())
}
/// List all resync errors
pub fn list_resync_errors(&self) -> Result<Vec<BlockResyncErrorInfo>, Error> {
let mut blocks = Vec::with_capacity(self.resync.errors.len());
for ent in self.resync.errors.iter()? {
let (hash, cnt) = ent?;
let cnt = ErrorCounter::decode(&cnt);
blocks.push(BlockResyncErrorInfo {
hash: Hash::try_from(&hash).unwrap(),
refcount: 0,
error_count: cnt.errors,
last_try: cnt.last_try,
next_try: cnt.next_try(),
});
}
for block in blocks.iter_mut() {
block.refcount = self.get_block_rc(&block.hash)?;
}
Ok(blocks)
}
//// ----- Managing the reference counter ----


@ -1,9 +1,11 @@
use opentelemetry::{global, metrics::*};
use garage_db as db;
use garage_db::counted_tree_hack::CountedTree;
/// BlockManagerMetrics references all the counters used for metrics
pub struct BlockManagerMetrics {
pub(crate) _rc_size: ValueObserver<u64>,
pub(crate) _resync_queue_len: ValueObserver<u64>,
pub(crate) _resync_errored_blocks: ValueObserver<u64>,
@ -23,9 +25,17 @@ pub struct BlockManagerMetrics {
}
impl BlockManagerMetrics {
pub fn new(resync_queue: CountedTree, resync_errors: CountedTree) -> Self {
pub fn new(rc_tree: db::Tree, resync_queue: CountedTree, resync_errors: CountedTree) -> Self {
let meter = global::meter("garage_model/block");
Self {
_rc_size: meter
.u64_value_observer("block.rc_size", move |observer| {
if let Ok(Some(v)) = rc_tree.fast_len() {
observer.observe(v as u64, &[])
}
})
.with_description("Number of blocks known to the reference counter")
.init(),
_resync_queue_len: meter
.u64_value_observer("block.resync_queue_length", move |observer| {
observer.observe(resync_queue.len() as u64, &[])


@ -169,4 +169,11 @@ impl RcEntry {
pub(crate) fn is_needed(&self) -> bool {
!self.is_deletable()
}
pub(crate) fn as_u64(&self) -> u64 {
match self {
RcEntry::Present { count } => *count,
_ => 0,
}
}
}


@ -53,7 +53,7 @@ impl Worker for RepairWorker {
"Block repair worker".into()
}
fn info(&self) -> Option<String> {
fn status(&self) -> WorkerStatus {
match self.block_iter.as_ref() {
None => {
let idx_bytes = self
@ -66,9 +66,20 @@ impl Worker for RepairWorker {
} else {
idx_bytes
};
Some(format!("Phase 1: {}", hex::encode(idx_bytes)))
WorkerStatus {
progress: Some("0.00%".into()),
freeform: vec![format!(
"Currently in phase 1, iterator position: {}",
hex::encode(idx_bytes)
)],
..Default::default()
}
Some(bi) => Some(format!("Phase 2: {:.2}% done", bi.progress() * 100.)),
}
Some(bi) => WorkerStatus {
progress: Some(format!("{:.2}%", bi.progress() * 100.)),
freeform: vec!["Currently in phase 2".into()],
..Default::default()
},
}
}
@ -137,7 +148,7 @@ impl Worker for RepairWorker {
}
}
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
async fn wait_for_work(&mut self) -> WorkerState {
unreachable!()
}
}
@ -167,6 +178,7 @@ struct ScrubWorkerPersisted {
time_last_complete_scrub: u64,
corruptions_detected: u64,
}
impl garage_util::migrate::InitialFormat for ScrubWorkerPersisted {}
enum ScrubWorkerState {
Running(BlockStoreIterator),
@ -271,29 +283,28 @@ impl Worker for ScrubWorker {
"Block scrub worker".into()
}
fn info(&self) -> Option<String> {
let s = match &self.work {
ScrubWorkerState::Running(bsi) => format!(
"{:.2}% done (tranquility = {})",
bsi.progress() * 100.,
self.persisted.tranquility
),
ScrubWorkerState::Paused(bsi, rt) => {
format!(
"Paused, {:.2}% done, resumes at {}",
bsi.progress() * 100.,
msec_to_rfc3339(*rt)
)
}
ScrubWorkerState::Finished => format!(
"Last completed scrub: {}",
msec_to_rfc3339(self.persisted.time_last_complete_scrub)
),
fn status(&self) -> WorkerStatus {
let mut s = WorkerStatus {
persistent_errors: Some(self.persisted.corruptions_detected),
tranquility: Some(self.persisted.tranquility),
..Default::default()
};
Some(format!(
"{} ; corruptions detected: {}",
s, self.persisted.corruptions_detected
))
match &self.work {
ScrubWorkerState::Running(bsi) => {
s.progress = Some(format!("{:.2}%", bsi.progress() * 100.));
}
ScrubWorkerState::Paused(bsi, rt) => {
s.progress = Some(format!("{:.2}%", bsi.progress() * 100.));
s.freeform = vec![format!("Scrub paused, resumes at {}", msec_to_rfc3339(*rt))];
}
ScrubWorkerState::Finished => {
s.freeform = vec![format!(
"Last scrub completed at {}",
msec_to_rfc3339(self.persisted.time_last_complete_scrub)
)];
}
}
s
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
@ -331,7 +342,7 @@ impl Worker for ScrubWorker {
}
}
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
async fn wait_for_work(&mut self) -> WorkerState {
let (wait_until, command) = match &self.work {
ScrubWorkerState::Running(_) => return WorkerState::Busy,
ScrubWorkerState::Paused(_, resume_time) => (*resume_time, ScrubWorkerCommand::Resume),


@ -63,6 +63,7 @@ struct ResyncPersistedConfig {
n_workers: usize,
tranquility: u32,
}
impl garage_util::migrate::InitialFormat for ResyncPersistedConfig {}
enum ResyncIterResult {
BusyDidSomething,
@ -123,6 +124,24 @@ impl BlockResyncManager {
Ok(self.errors.len())
}
/// Clear the error counter for a block and put it in the queue immediately
pub fn clear_backoff(&self, hash: &Hash) -> Result<(), Error> {
let now = now_msec();
if let Some(ec) = self.errors.get(hash)? {
let mut ec = ErrorCounter::decode(&ec);
if ec.errors > 0 {
ec.last_try = now - ec.delay_msec();
self.errors.insert(hash, ec.encode())?;
self.put_to_resync_at(hash, now)?;
return Ok(());
}
}
Err(Error::Message(format!(
"Block {:?} was not in an errored state",
hash
)))
}
// ---- Resync loop ----
// This part manages a queue of blocks that need to be
@ -257,7 +276,7 @@ impl BlockResyncManager {
if let Err(e) = &res {
manager.metrics.resync_error_counter.add(1);
warn!("Error when resyncing {:?}: {}", hash, e);
error!("Error when resyncing {:?}: {}", hash, e);
let err_counter = match self.errors.get(hash.as_slice())? {
Some(ec) => ErrorCounter::decode(&ec).add1(now + 1),
@ -477,27 +496,22 @@ impl Worker for ResyncWorker {
format!("Block resync worker #{}", self.index + 1)
}
fn info(&self) -> Option<String> {
fn status(&self) -> WorkerStatus {
let persisted = self.manager.resync.persisted.load();
if self.index >= persisted.n_workers {
return Some("(unused)".into());
return WorkerStatus {
freeform: vec!["This worker is currently disabled".into()],
..Default::default()
};
}
let mut ret = vec![];
ret.push(format!("tranquility = {}", persisted.tranquility));
let qlen = self.manager.resync.queue_len().unwrap_or(0);
if qlen > 0 {
ret.push(format!("{} blocks in queue", qlen));
WorkerStatus {
queue_length: Some(self.manager.resync.queue_len().unwrap_or(0) as u64),
tranquility: Some(persisted.tranquility),
persistent_errors: Some(self.manager.resync.errors_len().unwrap_or(0) as u64),
..Default::default()
}
let elen = self.manager.resync.errors_len().unwrap_or(0);
if elen > 0 {
ret.push(format!("{} blocks in error state", elen));
}
Some(ret.join(", "))
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
@ -527,7 +541,7 @@ impl Worker for ResyncWorker {
}
}
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
async fn wait_for_work(&mut self) -> WorkerState {
while self.index >= self.manager.resync.persisted.load().n_workers {
self.manager.resync.notify.notified().await
}
@ -545,9 +559,9 @@ impl Worker for ResyncWorker {
/// and the time of the last try.
/// Used to implement exponential backoff.
#[derive(Clone, Copy, Debug)]
struct ErrorCounter {
errors: u64,
last_try: u64,
pub(crate) struct ErrorCounter {
pub(crate) errors: u64,
pub(crate) last_try: u64,
}
impl ErrorCounter {
@ -558,12 +572,13 @@ impl ErrorCounter {
}
}
fn decode(data: &[u8]) -> Self {
pub(crate) fn decode(data: &[u8]) -> Self {
Self {
errors: u64::from_be_bytes(data[0..8].try_into().unwrap()),
last_try: u64::from_be_bytes(data[8..16].try_into().unwrap()),
}
}
fn encode(&self) -> Vec<u8> {
[
u64::to_be_bytes(self.errors),
@ -583,7 +598,8 @@ impl ErrorCounter {
(RESYNC_RETRY_DELAY.as_millis() as u64)
<< std::cmp::min(self.errors - 1, RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER)
}
fn next_try(&self) -> u64 {
pub(crate) fn next_try(&self) -> u64 {
self.last_try + self.delay_msec()
}
}
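The resync backoff delay doubles with each consecutive error up to a fixed cap. A worked example with illustrative constants (the real values come from RESYNC_RETRY_DELAY and RESYNC_RETRY_DELAY_MAX_BACKOFF_POWER):

fn delay_msec(errors: u64, base_msec: u64, max_power: u64) -> u64 {
    base_msec << std::cmp::min(errors - 1, max_power)
}

fn main() {
    let base = 60_000; // assume a 1-minute base delay and a cap of 6 doublings
    for errors in 1..=8 {
        println!("{} error(s) -> wait {} min", errors, delay_msec(errors, base, 6) / 60_000);
    }
    // prints 1, 2, 4, 8, 16, 32, 64, 64 minutes: growth stops at the cap
}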

View File

@ -1,6 +1,6 @@
[package]
name = "garage_db"
version = "0.8.0"
version = "0.8.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"

View File

@ -178,6 +178,10 @@ impl Tree {
pub fn len(&self) -> Result<usize> {
self.0.len(self.1)
}
#[inline]
pub fn fast_len(&self) -> Result<Option<usize>> {
self.0.fast_len(self.1)
}
#[inline]
pub fn first(&self) -> Result<Option<(Value, Value)>> {
@ -320,6 +324,9 @@ pub(crate) trait IDb: Send + Sync {
fn get(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
fn len(&self, tree: usize) -> Result<usize>;
fn fast_len(&self, _tree: usize) -> Result<Option<usize>> {
Ok(None)
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>>;
fn remove(&self, tree: usize, key: &[u8]) -> Result<Option<Value>>;
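fast_len() is a cheap, backend-dependent count: the trait default returns Ok(None), and the LMDB and sqlite backends below override it. A sketch of the caller-side fallback, mirroring the "NC" (not counted) convention used by the admin stats code later in this diff (Tree and Result are the garage_db types from this file):

fn tree_len_for_display(tree: &Tree, detailed: bool) -> Result<String> {
    Ok(if detailed {
        tree.len()?.to_string() // exact count, may require a full scan
    } else {
        tree.fast_len()?
            .map(|x| x.to_string())
            .unwrap_or_else(|| "NC".into()) // backend could not answer cheaply
    })
}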

View File

@ -121,6 +121,10 @@ impl IDb for LmdbDb {
Ok(tree.len(&tx)?.try_into().unwrap())
}
fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
Ok(Some(self.len(tree)?))
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
let tree = self.get_tree(tree)?;
let mut tx = self.db.write_txn()?;

View File

@ -144,6 +144,10 @@ impl IDb for SqliteDb {
}
}
fn fast_len(&self, tree: usize) -> Result<Option<usize>> {
Ok(Some(self.len(tree)?))
}
fn insert(&self, tree: usize, key: &[u8], value: &[u8]) -> Result<Option<Value>> {
trace!("insert {}: lock db", tree);
let this = self.0.lock().unwrap();

View File

@ -1,6 +1,6 @@
[package]
name = "garage"
version = "0.8.0"
version = "0.8.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -21,14 +21,14 @@ path = "tests/lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_db = { version = "0.8.0", path = "../db" }
garage_api = { version = "0.8.0", path = "../api" }
garage_block = { version = "0.8.0", path = "../block" }
garage_model = { version = "0.8.0", path = "../model" }
garage_rpc = { version = "0.8.0", path = "../rpc" }
garage_table = { version = "0.8.0", path = "../table" }
garage_util = { version = "0.8.0", path = "../util" }
garage_web = { version = "0.8.0", path = "../web" }
garage_db = { version = "0.8.1", path = "../db" }
garage_api = { version = "0.8.1", path = "../api" }
garage_block = { version = "0.8.1", path = "../block" }
garage_model = { version = "0.8.1", path = "../model" }
garage_rpc = { version = "0.8.1", path = "../rpc" }
garage_table = { version = "0.8.1", path = "../table" }
garage_util = { version = "0.8.1", path = "../util" }
garage_web = { version = "0.8.1", path = "../web" }
backtrace = "0.3"
bytes = "1.0"
@ -42,7 +42,6 @@ rand = "0.8"
async-trait = "0.1.7"
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
rmp-serde = "0.15"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_bytes = "0.11"
structopt = { version = "0.3", default-features = false }
@ -74,7 +73,7 @@ base64 = "0.13"
[features]
default = [ "bundled-libs", "metrics", "sled" ]
default = [ "bundled-libs", "metrics", "sled", "k2v" ]
k2v = [ "garage_util/k2v", "garage_api/k2v" ]

View File

@ -5,9 +5,11 @@ use std::sync::Arc;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use garage_util::background::BackgroundRunner;
use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::error::Error as GarageError;
use garage_util::formater::format_table_to_string;
use garage_util::time::*;
use garage_table::replication::*;
@ -15,6 +17,7 @@ use garage_table::*;
use garage_rpc::*;
use garage_block::manager::BlockResyncErrorInfo;
use garage_block::repair::ScrubWorkerCommand;
use garage_model::bucket_alias_table::*;
@ -24,6 +27,8 @@ use garage_model::helper::error::{Error, OkOrBadRequest};
use garage_model::key_table::*;
use garage_model::migrate::Migrate;
use garage_model::permission::*;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::Version;
use crate::cli::*;
use crate::repair::online::launch_online_repair;
@ -38,7 +43,8 @@ pub enum AdminRpc {
LaunchRepair(RepairOpt),
Migrate(MigrateOpt),
Stats(StatsOpt),
Worker(WorkerOpt),
Worker(WorkerOperation),
BlockOperation(BlockOperation),
// Replies
Ok(String),
@ -54,6 +60,13 @@ pub enum AdminRpc {
HashMap<usize, garage_util::background::WorkerInfo>,
WorkerListOpt,
),
WorkerInfo(usize, garage_util::background::WorkerInfo),
BlockErrorList(Vec<BlockResyncErrorInfo>),
BlockInfo {
hash: Hash,
refcount: u64,
versions: Vec<Result<Version, Uuid>>,
},
}
impl Rpc for AdminRpc {
@ -62,17 +75,24 @@ impl Rpc for AdminRpc {
pub struct AdminRpcHandler {
garage: Arc<Garage>,
background: Arc<BackgroundRunner>,
endpoint: Arc<Endpoint<AdminRpc, Self>>,
}
impl AdminRpcHandler {
pub fn new(garage: Arc<Garage>) -> Arc<Self> {
pub fn new(garage: Arc<Garage>, background: Arc<BackgroundRunner>) -> Arc<Self> {
let endpoint = garage.system.netapp.endpoint(ADMIN_RPC_PATH.into());
let admin = Arc::new(Self { garage, endpoint });
let admin = Arc::new(Self {
garage,
background,
endpoint,
});
admin.endpoint.set_handler(admin.clone());
admin
}
// ================ BUCKET COMMANDS ====================
async fn handle_bucket_cmd(&self, cmd: &BucketOperation) -> Result<AdminRpc, Error> {
match cmd {
BucketOperation::List => self.handle_list_buckets().await,
@ -551,6 +571,8 @@ impl AdminRpcHandler {
Ok(AdminRpc::Ok(ret))
}
// ================ KEY COMMANDS ====================
async fn handle_key_cmd(&self, cmd: &KeyOperation) -> Result<AdminRpc, Error> {
match cmd {
KeyOperation::List => self.handle_list_keys().await,
@ -688,6 +710,8 @@ impl AdminRpcHandler {
Ok(AdminRpc::KeyInfo(key, relevant_buckets))
}
// ================ MIGRATION COMMANDS ====================
async fn handle_migrate(self: &Arc<Self>, opt: MigrateOpt) -> Result<AdminRpc, Error> {
if !opt.yes {
return Err(Error::BadRequest(
@ -704,6 +728,8 @@ impl AdminRpcHandler {
Ok(AdminRpc::Ok("Migration successfull.".into()))
}
// ================ REPAIR COMMANDS ====================
async fn handle_launch_repair(self: &Arc<Self>, opt: RepairOpt) -> Result<AdminRpc, Error> {
if !opt.yes {
return Err(Error::BadRequest(
@ -739,7 +765,7 @@ impl AdminRpcHandler {
)))
}
} else {
launch_online_repair(self.garage.clone(), opt).await;
launch_online_repair(&self.garage, &self.background, opt).await?;
Ok(AdminRpc::Ok(format!(
"Repair launched on {:?}",
self.garage.system.id
@ -747,6 +773,8 @@ impl AdminRpcHandler {
}
}
// ================ STATS COMMANDS ====================
async fn handle_stats(&self, opt: StatsOpt) -> Result<AdminRpc, Error> {
if opt.all_nodes {
let mut ret = String::new();
@ -763,11 +791,12 @@ impl AdminRpcHandler {
match self
.endpoint
.call(&node_id, AdminRpc::Stats(opt), PRIO_NORMAL)
.await?
.await
{
Ok(AdminRpc::Ok(s)) => writeln!(&mut ret, "{}", s).unwrap(),
Ok(x) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(),
Err(e) => writeln!(&mut ret, "Error: {}", e).unwrap(),
Ok(Ok(AdminRpc::Ok(s))) => writeln!(&mut ret, "{}", s).unwrap(),
Ok(Ok(x)) => writeln!(&mut ret, "Bad answer: {:?}", x).unwrap(),
Ok(Err(e)) => writeln!(&mut ret, "Remote error: {}", e).unwrap(),
Err(e) => writeln!(&mut ret, "Network error: {}", e).unwrap(),
}
}
Ok(AdminRpc::Ok(ret))
@ -787,6 +816,7 @@ impl AdminRpcHandler {
.unwrap_or_else(|| "(unknown)".into()),
)
.unwrap();
writeln!(&mut ret, "\nDatabase engine: {}", self.garage.db.engine()).unwrap();
// Gather ring statistics
@ -805,21 +835,38 @@ impl AdminRpcHandler {
writeln!(&mut ret, " {:?} {}", n, c).unwrap();
}
self.gather_table_stats(&mut ret, &self.garage.bucket_table, &opt)?;
self.gather_table_stats(&mut ret, &self.garage.key_table, &opt)?;
self.gather_table_stats(&mut ret, &self.garage.object_table, &opt)?;
self.gather_table_stats(&mut ret, &self.garage.version_table, &opt)?;
self.gather_table_stats(&mut ret, &self.garage.block_ref_table, &opt)?;
// Gather table statistics
let mut table = vec![" Table\tItems\tMklItems\tMklTodo\tGcTodo".into()];
table.push(self.gather_table_stats(&self.garage.bucket_table, opt.detailed)?);
table.push(self.gather_table_stats(&self.garage.key_table, opt.detailed)?);
table.push(self.gather_table_stats(&self.garage.object_table, opt.detailed)?);
table.push(self.gather_table_stats(&self.garage.version_table, opt.detailed)?);
table.push(self.gather_table_stats(&self.garage.block_ref_table, opt.detailed)?);
write!(
&mut ret,
"\nTable stats:\n{}",
format_table_to_string(table)
)
.unwrap();
// Gather block manager statistics
writeln!(&mut ret, "\nBlock manager stats:").unwrap();
if opt.detailed {
let rc_len = if opt.detailed {
self.garage.block_manager.rc_len()?.to_string()
} else {
self.garage
.block_manager
.rc_fast_len()?
.map(|x| x.to_string())
.unwrap_or_else(|| "NC".into())
};
writeln!(
&mut ret,
" number of RC entries (~= number of blocks): {}",
self.garage.block_manager.rc_len()?
rc_len
)
.unwrap();
}
writeln!(
&mut ret,
" resync queue length: {}",
@ -833,67 +880,83 @@ impl AdminRpcHandler {
)
.unwrap();
if !opt.detailed {
writeln!(&mut ret, "\nIf values are missing (marked as NC), consider adding the --detailed flag - this will be slow.").unwrap();
}
Ok(ret)
}
fn gather_table_stats<F, R>(
&self,
to: &mut String,
t: &Arc<Table<F, R>>,
opt: &StatsOpt,
) -> Result<(), Error>
detailed: bool,
) -> Result<String, Error>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
writeln!(to, "\nTable stats for {}", F::TABLE_NAME).unwrap();
if opt.detailed {
writeln!(
to,
" number of items: {}",
t.data.store.len().map_err(GarageError::from)?
let (data_len, mkl_len) = if detailed {
(
t.data.store.len().map_err(GarageError::from)?.to_string(),
t.merkle_updater.merkle_tree_len()?.to_string(),
)
.unwrap();
writeln!(
to,
" Merkle tree size: {}",
t.merkle_updater.merkle_tree_len()?
} else {
(
t.data
.store
.fast_len()
.map_err(GarageError::from)?
.map(|x| x.to_string())
.unwrap_or_else(|| "NC".into()),
t.merkle_updater
.merkle_tree_fast_len()?
.map(|x| x.to_string())
.unwrap_or_else(|| "NC".into()),
)
.unwrap();
}
writeln!(
to,
" Merkle updater todo queue length: {}",
t.merkle_updater.todo_len()?
)
.unwrap();
writeln!(to, " GC todo queue length: {}", t.data.gc_todo_len()?).unwrap();
};
Ok(())
Ok(format!(
" {}\t{}\t{}\t{}\t{}",
F::TABLE_NAME,
data_len,
mkl_len,
t.merkle_updater.todo_len()?,
t.data.gc_todo_len()?
))
}
// ----
// ================ WORKER COMMANDS ====================
async fn handle_worker_cmd(&self, opt: WorkerOpt) -> Result<AdminRpc, Error> {
match opt.cmd {
WorkerCmd::List { opt } => {
let workers = self.garage.background.get_worker_info();
Ok(AdminRpc::WorkerList(workers, opt))
async fn handle_worker_cmd(&self, cmd: &WorkerOperation) -> Result<AdminRpc, Error> {
match cmd {
WorkerOperation::List { opt } => {
let workers = self.background.get_worker_info();
Ok(AdminRpc::WorkerList(workers, *opt))
}
WorkerCmd::Set { opt } => match opt {
WorkerOperation::Info { tid } => {
let info = self
.background
.get_worker_info()
.get(tid)
.ok_or_bad_request(format!("No worker with TID {}", tid))?
.clone();
Ok(AdminRpc::WorkerInfo(*tid, info))
}
WorkerOperation::Set { opt } => match opt {
WorkerSetCmd::ScrubTranquility { tranquility } => {
let scrub_command = ScrubWorkerCommand::SetTranquility(tranquility);
let scrub_command = ScrubWorkerCommand::SetTranquility(*tranquility);
self.garage
.block_manager
.send_scrub_command(scrub_command)
.await;
.await?;
Ok(AdminRpc::Ok("Scrub tranquility updated".into()))
}
WorkerSetCmd::ResyncNWorkers { n_workers } => {
WorkerSetCmd::ResyncWorkerCount { worker_count } => {
self.garage
.block_manager
.resync
.set_n_workers(n_workers)
.set_n_workers(*worker_count)
.await?;
Ok(AdminRpc::Ok("Number of resync workers updated".into()))
}
@ -901,13 +964,154 @@ impl AdminRpcHandler {
self.garage
.block_manager
.resync
.set_tranquility(tranquility)
.set_tranquility(*tranquility)
.await?;
Ok(AdminRpc::Ok("Resync tranquility updated".into()))
}
},
}
}
// ================ BLOCK COMMANDS ====================
async fn handle_block_cmd(&self, cmd: &BlockOperation) -> Result<AdminRpc, Error> {
match cmd {
BlockOperation::ListErrors => Ok(AdminRpc::BlockErrorList(
self.garage.block_manager.list_resync_errors()?,
)),
BlockOperation::Info { hash } => {
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
let refcount = self.garage.block_manager.get_block_rc(&hash)?;
let block_refs = self
.garage
.block_ref_table
.get_range(&hash, None, None, 10000, Default::default())
.await?;
let mut versions = vec![];
for br in block_refs {
if let Some(v) = self
.garage
.version_table
.get(&br.version, &EmptyKey)
.await?
{
versions.push(Ok(v));
} else {
versions.push(Err(br.version));
}
}
Ok(AdminRpc::BlockInfo {
hash,
refcount,
versions,
})
}
BlockOperation::RetryNow { all, blocks } => {
if *all {
if !blocks.is_empty() {
return Err(Error::BadRequest(
"--all was specified, cannot also specify blocks".into(),
));
}
let blocks = self.garage.block_manager.list_resync_errors()?;
for b in blocks.iter() {
self.garage.block_manager.resync.clear_backoff(&b.hash)?;
}
Ok(AdminRpc::Ok(format!(
"{} blocks returned in queue for a retry now (check logs to see results)",
blocks.len()
)))
} else {
for hash in blocks {
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
self.garage.block_manager.resync.clear_backoff(&hash)?;
}
Ok(AdminRpc::Ok(format!(
"{} blocks returned in queue for a retry now (check logs to see results)",
blocks.len()
)))
}
}
BlockOperation::Purge { yes, blocks } => {
if !yes {
return Err(Error::BadRequest(
"Pass the --yes flag to confirm block purge operation.".into(),
));
}
let mut obj_dels = 0;
let mut ver_dels = 0;
for hash in blocks {
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
let hash = Hash::try_from(&hash).ok_or_bad_request("invalid hash")?;
let block_refs = self
.garage
.block_ref_table
.get_range(&hash, None, None, 10000, Default::default())
.await?;
for br in block_refs {
let version = match self
.garage
.version_table
.get(&br.version, &EmptyKey)
.await?
{
Some(v) => v,
None => continue,
};
if let Some(object) = self
.garage
.object_table
.get(&version.bucket_id, &version.key)
.await?
{
let ov = object.versions().iter().rev().find(|v| v.is_complete());
if let Some(ov) = ov {
if ov.uuid == br.version {
let del_uuid = gen_uuid();
let deleted_object = Object::new(
version.bucket_id,
version.key.clone(),
vec![ObjectVersion {
uuid: del_uuid,
timestamp: ov.timestamp + 1,
state: ObjectVersionState::Complete(
ObjectVersionData::DeleteMarker,
),
}],
);
self.garage.object_table.insert(&deleted_object).await?;
obj_dels += 1;
}
}
}
if !version.deleted.get() {
let deleted_version = Version::new(
version.uuid,
version.bucket_id,
version.key.clone(),
true,
);
self.garage.version_table.insert(&deleted_version).await?;
ver_dels += 1;
}
}
}
Ok(AdminRpc::Ok(format!(
"{} blocks were purged: {} object deletion markers added, {} versions marked deleted",
blocks.len(),
obj_dels,
ver_dels
)))
}
}
}
}
#[async_trait]
@ -923,7 +1127,8 @@ impl EndpointHandler<AdminRpc> for AdminRpcHandler {
AdminRpc::Migrate(opt) => self.handle_migrate(opt.clone()).await,
AdminRpc::LaunchRepair(opt) => self.handle_launch_repair(opt.clone()).await,
AdminRpc::Stats(opt) => self.handle_stats(opt.clone()).await,
AdminRpc::Worker(opt) => self.handle_worker_cmd(opt.clone()).await,
AdminRpc::Worker(wo) => self.handle_worker_cmd(wo).await,
AdminRpc::BlockOperation(bo) => self.handle_block_cmd(bo).await,
m => Err(GarageError::unexpected_rpc_message(m).into()),
}
}

View File

@ -41,6 +41,9 @@ pub async fn cli_command_dispatch(
}
Command::Stats(so) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Stats(so)).await,
Command::Worker(wo) => cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::Worker(wo)).await,
Command::Block(bo) => {
cmd_admin(admin_rpc_endpoint, rpc_host, AdminRpc::BlockOperation(bo)).await
}
_ => unreachable!(),
}
}
@ -186,7 +189,20 @@ pub async fn cmd_admin(
print_key_info(&key, &rb);
}
AdminRpc::WorkerList(wi, wlo) => {
print_worker_info(wi, wlo);
print_worker_list(wi, wlo);
}
AdminRpc::WorkerInfo(tid, wi) => {
print_worker_info(tid, wi);
}
AdminRpc::BlockErrorList(el) => {
print_block_error_list(el);
}
AdminRpc::BlockInfo {
hash,
refcount,
versions,
} => {
print_block_info(hash, refcount, versions);
}
r => {
error!("Unexpected response: {:?}", r);

View File

@ -49,7 +49,11 @@ pub enum Command {
/// Manage background workers
#[structopt(name = "worker", version = garage_version())]
Worker(WorkerOpt),
Worker(WorkerOperation),
/// Low-level debug operations on data blocks
#[structopt(name = "block", version = garage_version())]
Block(BlockOperation),
}
#[derive(StructOpt, Debug)]
@ -513,20 +517,17 @@ pub struct StatsOpt {
pub detailed: bool,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Clone)]
pub struct WorkerOpt {
#[structopt(subcommand)]
pub cmd: WorkerCmd,
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum WorkerCmd {
pub enum WorkerOperation {
/// List all workers on Garage node
#[structopt(name = "list", version = garage_version())]
List {
#[structopt(flatten)]
opt: WorkerListOpt,
},
/// Get detailed information about a worker
#[structopt(name = "info", version = garage_version())]
Info { tid: usize },
/// Set worker parameter
#[structopt(name = "set", version = garage_version())]
Set {
@ -551,9 +552,41 @@ pub enum WorkerSetCmd {
#[structopt(name = "scrub-tranquility", version = garage_version())]
ScrubTranquility { tranquility: u32 },
/// Set number of concurrent block resync workers
#[structopt(name = "resync-n-workers", version = garage_version())]
ResyncNWorkers { n_workers: usize },
#[structopt(name = "resync-worker-count", version = garage_version())]
ResyncWorkerCount { worker_count: usize },
/// Set tranquility of block resync operations
#[structopt(name = "resync-tranquility", version = garage_version())]
ResyncTranquility { tranquility: u32 },
}
#[derive(Serialize, Deserialize, StructOpt, Debug, Eq, PartialEq, Clone)]
pub enum BlockOperation {
/// List all blocks that currently have a resync error
#[structopt(name = "list-errors", version = garage_version())]
ListErrors,
/// Get detailed information about a single block
#[structopt(name = "info", version = garage_version())]
Info {
/// Hash of the block for which to retrieve information
hash: String,
},
/// Retry now the resync of one or many blocks
#[structopt(name = "retry-now", version = garage_version())]
RetryNow {
/// Retry all blocks that have a resync error
#[structopt(long = "all")]
all: bool,
/// Hashes of the blocks to retry resyncing now
blocks: Vec<String>,
},
/// Delete all objects referencing a missing block
#[structopt(name = "purge", version = garage_version())]
Purge {
/// Mandatory to confirm this operation
#[structopt(long = "yes")]
yes: bool,
/// Hashes of the blocks to purge
#[structopt(required = true)]
blocks: Vec<String>,
},
}
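A minimal sketch of consuming the new BlockOperation variants (the describe helper is hypothetical, not part of this change):

fn describe(op: &BlockOperation) -> String {
    match op {
        BlockOperation::ListErrors => "list blocks with resync errors".into(),
        BlockOperation::Info { hash } => format!("show info for block {}", hash),
        BlockOperation::RetryNow { all: true, .. } => "retry all errored blocks now".into(),
        BlockOperation::RetryNow { blocks, .. } => format!("retry {} block(s) now", blocks.len()),
        BlockOperation::Purge { blocks, .. } => format!("purge {} block(s)", blocks.len()),
    }
}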

View File

@ -3,14 +3,17 @@ use std::time::Duration;
use garage_util::background::*;
use garage_util::crdt::*;
use garage_util::data::Uuid;
use garage_util::data::*;
use garage_util::error::*;
use garage_util::formater::format_table;
use garage_util::time::*;
use garage_block::manager::BlockResyncErrorInfo;
use garage_model::bucket_table::*;
use garage_model::key_table::*;
use garage_model::s3::object_table::{BYTES, OBJECTS, UNFINISHED_UPLOADS};
use garage_model::s3::version_table::Version;
use crate::cli::structs::WorkerListOpt;
@ -241,7 +244,7 @@ pub fn find_matching_node(
}
}
pub fn print_worker_info(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
pub fn print_worker_list(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
let mut wi = wi.into_iter().collect::<Vec<_>>();
wi.sort_by_key(|(tid, info)| {
(
@ -254,7 +257,7 @@ pub fn print_worker_info(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
)
});
let mut table = vec![];
let mut table = vec!["TID\tState\tName\tTranq\tDone\tQueue\tErrors\tConsec\tLast".to_string()];
for (tid, info) in wi.iter() {
if wlo.busy && !matches!(info.state, WorkerState::Busy | WorkerState::Throttled(_)) {
continue;
@ -263,33 +266,147 @@ pub fn print_worker_info(wi: HashMap<usize, WorkerInfo>, wlo: WorkerListOpt) {
continue;
}
table.push(format!("{}\t{}\t{}", tid, info.state, info.name));
if let Some(i) = &info.info {
table.push(format!("\t\t {}", i));
}
let tf = timeago::Formatter::new();
let (err_ago, err_msg) = info
let err_ago = info
.last_error
.as_ref()
.map(|(m, t)| {
(
tf.convert(Duration::from_millis(now_msec() - t)),
m.as_str(),
)
})
.unwrap_or(("(?) ago".into(), "(?)"));
if info.consecutive_errors > 0 {
.map(|(_, t)| tf.convert(Duration::from_millis(now_msec() - t)))
.unwrap_or_default();
let (total_err, consec_err) = if info.errors > 0 {
(info.errors.to_string(), info.consecutive_errors.to_string())
} else {
("-".into(), "-".into())
};
table.push(format!(
"\t\t {} consecutive errors ({} total), last {}",
info.consecutive_errors, info.errors, err_ago,
"{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}",
tid,
info.state,
info.name,
info.status
.tranquility
.as_ref()
.map(ToString::to_string)
.unwrap_or_else(|| "-".into()),
info.status.progress.as_deref().unwrap_or("-"),
info.status
.queue_length
.as_ref()
.map(ToString::to_string)
.unwrap_or_else(|| "-".into()),
total_err,
consec_err,
err_ago,
));
table.push(format!("\t\t {}", err_msg));
} else if info.errors > 0 {
table.push(format!("\t\t ({} errors, last {})", info.errors, err_ago,));
if wlo.errors {
table.push(format!("\t\t {}", err_msg));
}
format_table(table);
}
pub fn print_worker_info(tid: usize, info: WorkerInfo) {
let mut table = vec![];
table.push(format!("Task id:\t{}", tid));
table.push(format!("Worker name:\t{}", info.name));
match info.state {
WorkerState::Throttled(t) => {
table.push(format!(
"Worker state:\tBusy (throttled, paused for {:.3}s)",
t
));
}
s => {
table.push(format!("Worker state:\t{}", s));
}
};
if let Some(tql) = info.status.tranquility {
table.push(format!("Tranquility:\t{}", tql));
}
table.push("".into());
table.push(format!("Total errors:\t{}", info.errors));
table.push(format!("Consecutive errs:\t{}", info.consecutive_errors));
if let Some((s, t)) = info.last_error {
table.push(format!("Last error:\t{}", s));
let tf = timeago::Formatter::new();
table.push(format!(
"Last error time:\t{}",
tf.convert(Duration::from_millis(now_msec() - t))
));
}
table.push("".into());
if let Some(p) = info.status.progress {
table.push(format!("Progress:\t{}", p));
}
if let Some(ql) = info.status.queue_length {
table.push(format!("Queue length:\t{}", ql));
}
if let Some(pe) = info.status.persistent_errors {
table.push(format!("Persistent errors:\t{}", pe));
}
for (i, s) in info.status.freeform.iter().enumerate() {
if i == 0 {
if table.last() != Some(&"".into()) {
table.push("".into());
}
table.push(format!("Message:\t{}", s));
} else {
table.push(format!("\t{}", s));
}
}
format_table(table);
}
pub fn print_block_error_list(el: Vec<BlockResyncErrorInfo>) {
let now = now_msec();
let tf = timeago::Formatter::new();
let mut tf2 = timeago::Formatter::new();
tf2.ago("");
let mut table = vec!["Hash\tRC\tErrors\tLast error\tNext try".into()];
for e in el {
table.push(format!(
"{}\t{}\t{}\t{}\tin {}",
hex::encode(e.hash.as_slice()),
e.refcount,
e.error_count,
tf.convert(Duration::from_millis(now - e.last_try)),
tf2.convert(Duration::from_millis(e.next_try - now))
));
}
format_table(table);
}
pub fn print_block_info(hash: Hash, refcount: u64, versions: Vec<Result<Version, Uuid>>) {
println!("Block hash: {}", hex::encode(hash.as_slice()));
println!("Refcount: {}", refcount);
println!();
let mut table = vec!["Version\tBucket\tKey\tDeleted".into()];
let mut nondeleted_count = 0;
for v in versions.iter() {
match v {
Ok(ver) => {
table.push(format!(
"{:?}\t{:?}\t{}\t{:?}",
ver.uuid,
ver.bucket_id,
ver.key,
ver.deleted.get()
));
if !ver.deleted.get() {
nondeleted_count += 1;
}
}
Err(vh) => {
table.push(format!("{:?}\t\t\tyes", vh));
}
}
}
format_table(table);
if refcount != nondeleted_count {
println!();
println!("Warning: refcount does not match number of non-deleted versions");
}
}

View File

@ -130,9 +130,16 @@ async fn main() {
std::process::abort();
}));
// Parse arguments and dispatch command line
let opt = Opt::from_clap(&Opt::clap().version(version.as_str()).get_matches());
// Initialize logging as well as other libraries used in Garage
if std::env::var("RUST_LOG").is_err() {
std::env::set_var("RUST_LOG", "netapp=info,garage=info")
let default_log = match &opt.cmd {
Command::Server => "netapp=info,garage=info",
_ => "netapp=warn,garage=warn",
};
std::env::set_var("RUST_LOG", default_log)
}
tracing_subscriber::fmt()
.with_writer(std::io::stderr)
@ -140,9 +147,6 @@ async fn main() {
.init();
sodiumoxide::init().expect("Unable to init sodiumoxide");
// Parse arguments and dispatch command line
let opt = Opt::from_clap(&Opt::clap().version(version.as_str()).get_matches());
let res = match opt.cmd {
Command::Server => server::run_server(opt.config_file).await,
Command::OfflineRepair(repair_opt) => {
@ -185,9 +189,9 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
let netapp = NetApp::new(GARAGE_VERSION_TAG, network_key, sk);
// Find and parse the address of the target host
let (id, addr) = if let Some(h) = opt.rpc_host {
let (id, addr, is_default_addr) = if let Some(h) = opt.rpc_host {
let (id, addrs) = parse_and_resolve_peer_addr(&h).ok_or_else(|| format!("Invalid RPC remote node identifier: {}. Expected format is <pubkey>@<IP or hostname>:<port>.", h))?;
(id, addrs[0])
(id, addrs[0], false)
} else {
let node_id = garage_rpc::system::read_node_id(&config.as_ref().unwrap().metadata_dir)
.err_context(READ_KEY_ERROR)?;
@ -198,24 +202,26 @@ async fn cli_command(opt: Opt) -> Result<(), Error> {
.ok_or_message("unable to resolve rpc_public_addr specified in config file")?
.next()
.ok_or_message("unable to resolve rpc_public_addr specified in config file")?;
(node_id, a)
(node_id, a, false)
} else {
let default_addr = SocketAddr::new(
"127.0.0.1".parse().unwrap(),
config.as_ref().unwrap().rpc_bind_addr.port(),
);
warn!(
"Trying to contact Garage node at default address {}",
default_addr
);
warn!("If this doesn't work, consider adding rpc_public_addr in your config file or specifying the -h command line parameter.");
(node_id, default_addr)
(node_id, default_addr, true)
}
};
// Connect to target host
netapp.clone().try_connect(addr, id).await
.err_context("Unable to connect to destination RPC host. Check that you are using the same value of rpc_secret as them, and that you have their correct public key.")?;
if let Err(e) = netapp.clone().try_connect(addr, id).await {
if is_default_addr {
warn!(
"Tried to contact Garage node at default address {}, which didn't work. If that address is wrong, consider setting rpc_public_addr in your config file.",
addr
);
}
Err(e).err_context("Unable to connect to destination RPC host. Check that you are using the same value of rpc_secret as them, and that you have their correct public key.")?;
}
let system_rpc_endpoint = netapp.endpoint::<SystemRpc, ()>(SYSTEM_RPC_PATH.into());
let admin_rpc_endpoint = netapp.endpoint::<AdminRpc, ()>(ADMIN_RPC_PATH.into());

View File

@ -1,8 +1,5 @@
use std::path::PathBuf;
use tokio::sync::watch;
use garage_util::background::*;
use garage_util::config::*;
use garage_util::error::*;
@ -20,12 +17,8 @@ pub async fn offline_repair(config_file: PathBuf, opt: OfflineRepairOpt) -> Resu
info!("Loading configuration...");
let config = read_config(config_file)?;
info!("Initializing background runner...");
let (done_tx, done_rx) = watch::channel(false);
let (background, await_background_done) = BackgroundRunner::new(16, done_rx);
info!("Initializing Garage main data store...");
let garage = Garage::new(config.clone(), background)?;
let garage = Garage::new(config)?;
info!("Launching repair operation...");
match opt.what {
@ -43,13 +36,7 @@ pub async fn offline_repair(config_file: PathBuf, opt: OfflineRepairOpt) -> Resu
}
}
info!("Repair operation finished, shutting down Garage internals...");
done_tx.send(true).unwrap();
drop(garage);
await_background_done.await?;
info!("Cleaning up...");
info!("Repair operation finished, shutting down...");
Ok(())
}

View File

@ -12,36 +12,35 @@ use garage_model::s3::version_table::*;
use garage_table::*;
use garage_util::background::*;
use garage_util::error::Error;
use garage_util::migrate::Migrate;
use crate::*;
pub async fn launch_online_repair(garage: Arc<Garage>, opt: RepairOpt) {
pub async fn launch_online_repair(
garage: &Arc<Garage>,
bg: &BackgroundRunner,
opt: RepairOpt,
) -> Result<(), Error> {
match opt.what {
RepairWhat::Tables => {
info!("Launching a full sync of tables");
garage.bucket_table.syncer.add_full_sync();
garage.object_table.syncer.add_full_sync();
garage.version_table.syncer.add_full_sync();
garage.block_ref_table.syncer.add_full_sync();
garage.key_table.syncer.add_full_sync();
garage.bucket_table.syncer.add_full_sync()?;
garage.object_table.syncer.add_full_sync()?;
garage.version_table.syncer.add_full_sync()?;
garage.block_ref_table.syncer.add_full_sync()?;
garage.key_table.syncer.add_full_sync()?;
}
RepairWhat::Versions => {
info!("Repairing the versions table");
garage
.background
.spawn_worker(RepairVersionsWorker::new(garage.clone()));
bg.spawn_worker(RepairVersionsWorker::new(garage.clone()));
}
RepairWhat::BlockRefs => {
info!("Repairing the block refs table");
garage
.background
.spawn_worker(RepairBlockrefsWorker::new(garage.clone()));
bg.spawn_worker(RepairBlockrefsWorker::new(garage.clone()));
}
RepairWhat::Blocks => {
info!("Repairing the stored blocks");
garage
.background
.spawn_worker(garage_block::repair::RepairWorker::new(
bg.spawn_worker(garage_block::repair::RepairWorker::new(
garage.block_manager.clone(),
));
}
@ -56,9 +55,10 @@ pub async fn launch_online_repair(garage: Arc<Garage>, opt: RepairOpt) {
}
};
info!("Sending command to scrub worker: {:?}", cmd);
garage.block_manager.send_scrub_command(cmd).await;
garage.block_manager.send_scrub_command(cmd).await?;
}
}
Ok(())
}
// ----
@ -85,25 +85,23 @@ impl Worker for RepairVersionsWorker {
"Version repair worker".into()
}
fn info(&self) -> Option<String> {
Some(format!("{} items done", self.counter))
fn status(&self) -> WorkerStatus {
WorkerStatus {
progress: Some(self.counter.to_string()),
..Default::default()
}
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
let item_bytes = match self.garage.version_table.data.store.get_gt(&self.pos)? {
Some((k, v)) => {
self.pos = k;
v
}
let (item_bytes, next_pos) = match self.garage.version_table.data.store.get_gt(&self.pos)? {
Some((k, v)) => (v, k),
None => {
info!("repair_versions: finished, done {}", self.counter);
return Ok(WorkerState::Done);
}
};
self.counter += 1;
let version = rmp_serde::decode::from_read_ref::<_, Version>(&item_bytes)?;
let version = Version::decode(&item_bytes).ok_or_message("Cannot decode Version")?;
if !version.deleted.get() {
let object = self
.garage
@ -131,10 +129,13 @@ impl Worker for RepairVersionsWorker {
}
}
self.counter += 1;
self.pos = next_pos;
Ok(WorkerState::Busy)
}
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
async fn wait_for_work(&mut self) -> WorkerState {
unreachable!()
}
}
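The repair workers now read the next entry with get_gt(&self.pos) and only advance the cursor after the item has been handled, so an error retries the same entry instead of silently skipping it. A self-contained analogue of that cursor pattern, using a std BTreeMap in place of the real store:

use std::collections::BTreeMap;
use std::ops::Bound;

fn next_after(store: &BTreeMap<Vec<u8>, Vec<u8>>, pos: &[u8]) -> Option<(Vec<u8>, Vec<u8>)> {
    store
        .range::<[u8], _>((Bound::Excluded(pos), Bound::Unbounded))
        .next()
        .map(|(k, v)| (k.clone(), v.clone()))
}

fn main() {
    let mut store = BTreeMap::new();
    store.insert(b"a".to_vec(), b"1".to_vec());
    store.insert(b"b".to_vec(), b"2".to_vec());
    let mut pos: Vec<u8> = vec![];
    while let Some((key, _value)) = next_after(&store, &pos) {
        // ... process the entry here; only advance the cursor once it succeeded
        pos = key;
    }
}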
@ -163,25 +164,24 @@ impl Worker for RepairBlockrefsWorker {
"Block refs repair worker".into()
}
fn info(&self) -> Option<String> {
Some(format!("{} items done", self.counter))
fn status(&self) -> WorkerStatus {
WorkerStatus {
progress: Some(self.counter.to_string()),
..Default::default()
}
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
let item_bytes = match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
Some((k, v)) => {
self.pos = k;
v
}
let (item_bytes, next_pos) =
match self.garage.block_ref_table.data.store.get_gt(&self.pos)? {
Some((k, v)) => (v, k),
None => {
info!("repair_block_ref: finished, done {}", self.counter);
return Ok(WorkerState::Done);
}
};
self.counter += 1;
let block_ref = rmp_serde::decode::from_read_ref::<_, BlockRef>(&item_bytes)?;
let block_ref = BlockRef::decode(&item_bytes).ok_or_message("Cannot decode BlockRef")?;
if !block_ref.deleted.get() {
let version = self
.garage
@ -206,10 +206,13 @@ impl Worker for RepairBlockrefsWorker {
}
}
self.counter += 1;
self.pos = next_pos;
Ok(WorkerState::Busy)
}
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
async fn wait_for_work(&mut self) -> WorkerState {
unreachable!()
}
}

View File

@ -35,12 +35,15 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
#[cfg(feature = "metrics")]
let metrics_exporter = opentelemetry_prometheus::exporter().init();
info!("Initializing Garage main data store...");
let garage = Garage::new(config.clone())?;
info!("Initializing background runner...");
let watch_cancel = watch_shutdown_signal();
let (background, await_background_done) = BackgroundRunner::new(16, watch_cancel.clone());
let (background, await_background_done) = BackgroundRunner::new(watch_cancel.clone());
info!("Initializing Garage main data store...");
let garage = Garage::new(config.clone(), background)?;
info!("Spawning Garage workers...");
garage.spawn_workers(&background);
if config.admin.trace_sink.is_some() {
info!("Initialize tracing...");
@ -63,7 +66,7 @@ pub async fn run_server(config_file: PathBuf) -> Result<(), Error> {
let run_system = tokio::spawn(garage.system.clone().run(watch_cancel.clone()));
info!("Create admin RPC handler...");
AdminRpcHandler::new(garage.clone());
AdminRpcHandler::new(garage.clone(), background.clone());
// ---- Launch public-facing API servers ----

View File

@ -1,4 +1,5 @@
use crate::common;
use crate::common::ext::CommandExt;
use aws_sdk_s3::model::BucketLocationConstraint;
use aws_sdk_s3::output::DeleteBucketOutput;
@ -8,6 +9,27 @@ async fn test_bucket_all() {
let bucket_name = "hello";
{
// Check bucket cannot be created if not authorized
ctx.garage
.command()
.args(["key", "deny"])
.args(["--create-bucket", &ctx.garage.key.id])
.quiet()
.expect_success_output("Could not deny key to create buckets");
// Try create bucket, should fail
let r = ctx.client.create_bucket().bucket(bucket_name).send().await;
assert!(r.is_err());
}
{
// Now allow key to create bucket
ctx.garage
.command()
.args(["key", "allow"])
.args(["--create-bucket", &ctx.garage.key.id])
.quiet()
.expect_success_output("Could not deny key to create buckets");
// Create bucket
//@TODO check with an invalid bucket name + with an already existing bucket
let r = ctx

View File

@ -1,6 +1,7 @@
use std::collections::HashMap;
use crate::common;
use crate::common::ext::CommandExt;
use common::custom_requester::BodySignature;
use hyper::Method;
@ -105,6 +106,13 @@ async fn test_create_bucket_streaming() {
let ctx = common::context();
let bucket = "createbucket-streaming";
ctx.garage
.command()
.args(["key", "allow"])
.args(["--create-bucket", &ctx.garage.key.id])
.quiet()
.expect_success_output("Could not allow key to create buckets");
{
// create bucket
let _ = ctx

View File

@ -22,7 +22,7 @@ tokio = "1.17.0"
# cli deps
clap = { version = "3.1.18", optional = true, features = ["derive", "env"] }
garage_util = { version = "0.8.0", path = "../util", optional = true }
garage_util = { version = "0.8.1", path = "../util", optional = true }
[features]

View File

@ -1,6 +1,6 @@
[package]
name = "garage_model"
version = "0.8.0"
version = "0.8.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -14,11 +14,11 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_db = { version = "0.8.0", default-features = false, path = "../db" }
garage_rpc = { version = "0.8.0", path = "../rpc" }
garage_table = { version = "0.8.0", path = "../table" }
garage_block = { version = "0.8.0", path = "../block" }
garage_util = { version = "0.8.0", path = "../util" }
garage_db = { version = "0.8.1", default-features = false, path = "../db" }
garage_rpc = { version = "0.8.1", path = "../rpc" }
garage_table = { version = "0.8.1", path = "../table" }
garage_block = { version = "0.8.1", path = "../block" }
garage_util = { version = "0.8.1", path = "../util" }
async-trait = "0.1.7"
arc-swap = "1.0"
@ -30,7 +30,6 @@ tracing = "0.1.30"
rand = "0.8"
zstd = { version = "0.9", default-features = false }
rmp-serde = "0.15"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_bytes = "0.11"

View File

@ -1,18 +1,26 @@
use serde::{Deserialize, Serialize};
use garage_util::data::*;
use garage_table::crdt::*;
use garage_table::*;
/// The bucket alias table holds the names given to buckets
/// in the global namespace.
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketAlias {
name: String,
mod v08 {
use garage_util::crdt;
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};
/// The bucket alias table holds the names given to buckets
/// in the global namespace.
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketAlias {
pub(super) name: String,
pub state: crdt::Lww<Option<Uuid>>,
}
impl garage_util::migrate::InitialFormat for BucketAlias {}
}
pub use v08::*;
impl BucketAlias {
pub fn new(name: String, ts: u64, bucket_id: Option<Uuid>) -> Option<Self> {
if !is_valid_bucket_name(&name) {

View File

@ -1,5 +1,3 @@
use serde::{Deserialize, Serialize};
use garage_table::crdt::*;
use garage_table::*;
use garage_util::data::*;
@ -7,22 +5,28 @@ use garage_util::time::*;
use crate::permission::BucketKeyPerm;
/// A bucket is a collection of objects
///
/// Its parameters are not directly accessible as:
/// - It must be possible to merge parameters, hence the use of a LWW CRDT.
/// - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Bucket {
mod v08 {
use crate::permission::BucketKeyPerm;
use garage_util::crdt;
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};
/// A bucket is a collection of objects
///
/// Its parameters are not directly accessible as:
/// - It must be possible to merge parameters, hence the use of a LWW CRDT.
/// - A bucket has 2 states, Present or Deleted and parameters make sense only if present.
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Bucket {
/// ID of the bucket
pub id: Uuid,
/// State, and configuration if not deleted, of the bucket
pub state: crdt::Deletable<BucketParams>,
}
}
/// Configuration for a bucket
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketParams {
/// Configuration for a bucket
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct BucketParams {
/// Bucket's creation date
pub creation_date: u64,
/// Map of key with access to the bucket, and what kind of access they give
@ -47,32 +51,37 @@ pub struct BucketParams {
/// Bucket quotas
#[serde(default)]
pub quotas: crdt::Lww<BucketQuotas>,
}
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct WebsiteConfig {
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct WebsiteConfig {
pub index_document: String,
pub error_document: Option<String>,
}
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct CorsRule {
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct CorsRule {
pub id: Option<String>,
pub max_age_seconds: Option<u64>,
pub allow_origins: Vec<String>,
pub allow_methods: Vec<String>,
pub allow_headers: Vec<String>,
pub expose_headers: Vec<String>,
}
}
#[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct BucketQuotas {
#[derive(Default, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct BucketQuotas {
/// Maximum size in bytes (bucket size = sum of sizes of objects in the bucket)
pub max_size: Option<u64>,
/// Maximum number of non-deleted objects in the bucket
pub max_objects: Option<u64>,
}
impl garage_util::migrate::InitialFormat for Bucket {}
}
pub use v08::*;
impl AutoCrdt for BucketQuotas {
const WARN_IF_DIFFERENT: bool = true;
}

View File

@ -39,8 +39,6 @@ pub struct Garage {
/// The local database
pub db: db::Db,
/// A background job runner
pub background: Arc<BackgroundRunner>,
/// The membership manager
pub system: Arc<System>,
/// The block manager
@ -78,7 +76,7 @@ pub struct GarageK2V {
impl Garage {
/// Create and run garage
pub fn new(config: Config, background: Arc<BackgroundRunner>) -> Result<Arc<Self>, Error> {
pub fn new(config: Config) -> Result<Arc<Self>, Error> {
// Create meta dir and data dir if they don't exist already
std::fs::create_dir_all(&config.metadata_dir)
.ok_or_message("Unable to create Garage metadata directory")?;
@ -167,7 +165,7 @@ impl Garage {
.expect("Invalid replication_mode in config file.");
info!("Initialize membership management system...");
let system = System::new(network_key, background.clone(), replication_mode, &config)?;
let system = System::new(network_key, replication_mode, &config)?;
let data_rep_param = TableShardedReplication {
system: system.clone(),
@ -225,7 +223,6 @@ impl Garage {
info!("Initialize version_table...");
let version_table = Table::new(
VersionTable {
background: background.clone(),
block_ref_table: block_ref_table.clone(),
},
meta_rep_param.clone(),
@ -240,7 +237,6 @@ impl Garage {
#[allow(clippy::redundant_clone)]
let object_table = Table::new(
ObjectTable {
background: background.clone(),
version_table: version_table.clone(),
object_counter_table: object_counter_table.clone(),
},
@ -258,7 +254,6 @@ impl Garage {
config,
replication_mode,
db,
background,
system,
block_manager,
bucket_table,
@ -273,6 +268,22 @@ impl Garage {
}))
}
pub fn spawn_workers(&self, bg: &BackgroundRunner) {
self.block_manager.spawn_workers(bg);
self.bucket_table.spawn_workers(bg);
self.bucket_alias_table.spawn_workers(bg);
self.key_table.spawn_workers(bg);
self.object_table.spawn_workers(bg);
self.object_counter_table.spawn_workers(bg);
self.version_table.spawn_workers(bg);
self.block_ref_table.spawn_workers(bg);
#[cfg(feature = "k2v")]
self.k2v.spawn_workers(bg);
}
pub fn bucket_helper(&self) -> helper::bucket::BucketHelper {
helper::bucket::BucketHelper(self)
}
@ -307,4 +318,9 @@ impl GarageK2V {
rpc,
}
}
pub fn spawn_workers(&self, bg: &BackgroundRunner) {
self.item_table.spawn_workers(bg);
self.counter_table.spawn_workers(bg);
}
}
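Garage no longer owns the background runner: the caller creates it, builds Garage without it, and then spawns the workers explicitly. Sketch of the resulting startup order (it mirrors the server.rs hunk later in this diff; watch_cancel and config come from the surrounding server code):

let (background, await_background_done) = BackgroundRunner::new(watch_cancel.clone());
let garage = Garage::new(config.clone())?;
garage.spawn_workers(&background);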

View File

@ -1,19 +1,18 @@
use core::ops::Bound;
use std::collections::{hash_map, BTreeMap, HashMap};
use std::collections::{BTreeMap, HashMap};
use std::marker::PhantomData;
use std::sync::Arc;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use tokio::sync::{mpsc, watch};
use garage_db as db;
use garage_rpc::ring::Ring;
use garage_rpc::system::System;
use garage_util::background::*;
use garage_util::background::BackgroundRunner;
use garage_util::data::*;
use garage_util::error::*;
use garage_util::migrate::Migrate;
use garage_util::time::*;
use garage_table::crdt::*;
@ -31,14 +30,44 @@ pub trait CountedItem: Clone + PartialEq + Send + Sync + 'static {
fn counts(&self) -> Vec<(&'static str, i64)>;
}
/// A counter entry in the global table
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct CounterEntry<T: CountedItem> {
mod v08 {
use super::CountedItem;
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
// ---- Global part (the table everyone queries) ----
/// A counter entry in the global table
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct CounterEntry<T: CountedItem> {
pub pk: T::CP,
pub sk: T::CS,
pub values: BTreeMap<String, CounterValue>,
}
/// A counter entry in the global table
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct CounterValue {
pub node_values: BTreeMap<Uuid, (u64, i64)>,
}
impl<T: CountedItem> garage_util::migrate::InitialFormat for CounterEntry<T> {}
// ---- Local part (the counter we maintain transactionally on each node) ----
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
pub(super) struct LocalCounterEntry<T: CountedItem> {
pub(super) pk: T::CP,
pub(super) sk: T::CS,
pub(super) values: BTreeMap<String, (u64, i64)>,
}
impl<T: CountedItem> garage_util::migrate::InitialFormat for LocalCounterEntry<T> {}
}
pub use v08::*;
impl<T: CountedItem> Entry<T::CP, T::CS> for CounterEntry<T> {
fn partition_key(&self) -> &T::CP {
&self.pk
@ -80,12 +109,6 @@ impl<T: CountedItem> CounterEntry<T> {
}
}
/// A counter entry in the global table
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct CounterValue {
pub node_values: BTreeMap<Uuid, (u64, i64)>,
}
impl<T: CountedItem> Crdt for CounterEntry<T> {
fn merge(&mut self, other: &Self) {
for (name, e2) in other.values.iter() {
@ -142,7 +165,6 @@ impl<T: CountedItem> TableSchema for CounterTable<T> {
pub struct IndexCounter<T: CountedItem> {
this_node: Uuid,
local_counter: db::Tree,
propagate_tx: mpsc::UnboundedSender<(T::CP, T::CS, LocalCounterEntry<T>)>,
pub table: Arc<Table<CounterTable<T>, TableShardedReplication>>,
}
@ -152,16 +174,11 @@ impl<T: CountedItem> IndexCounter<T> {
replication: TableShardedReplication,
db: &db::Db,
) -> Arc<Self> {
let background = system.background.clone();
let (propagate_tx, propagate_rx) = mpsc::unbounded_channel();
let this = Arc::new(Self {
Arc::new(Self {
this_node: system.id,
local_counter: db
.open_tree(format!("local_counter_v2:{}", T::COUNTER_TABLE_NAME))
.expect("Unable to open local counter tree"),
propagate_tx,
table: Table::new(
CounterTable {
_phantom_t: Default::default(),
@ -170,16 +187,11 @@ impl<T: CountedItem> IndexCounter<T> {
system,
db,
),
});
})
}
background.spawn_worker(IndexPropagatorWorker {
index_counter: this.clone(),
propagate_rx,
buf: HashMap::new(),
errors: 0,
});
this
pub fn spawn_workers(&self, bg: &BackgroundRunner) {
self.table.spawn_workers(bg);
}
pub fn count(
@ -208,11 +220,9 @@ impl<T: CountedItem> IndexCounter<T> {
let tree_key = self.table.data.tree_key(pk, sk);
let mut entry = match tx.get(&self.local_counter, &tree_key[..])? {
Some(old_bytes) => {
rmp_serde::decode::from_read_ref::<_, LocalCounterEntry<T>>(&old_bytes)
.map_err(Error::RmpDecode)
.map_err(db::TxError::Abort)?
}
Some(old_bytes) => LocalCounterEntry::<T>::decode(&old_bytes)
.ok_or_message("Cannot decode local counter entry")
.map_err(db::TxError::Abort)?,
None => LocalCounterEntry {
pk: pk.clone(),
sk: sk.clone(),
@ -227,17 +237,14 @@ impl<T: CountedItem> IndexCounter<T> {
ent.1 += *inc;
}
let new_entry_bytes = rmp_to_vec_all_named(&entry)
let new_entry_bytes = entry
.encode()
.map_err(Error::RmpEncode)
.map_err(db::TxError::Abort)?;
tx.insert(&self.local_counter, &tree_key[..], new_entry_bytes)?;
if let Err(e) = self.propagate_tx.send((pk.clone(), sk.clone(), entry)) {
error!(
"Could not propagate updated counter values, failed to send to channel: {}",
e
);
}
let dist_entry = entry.into_counter_entry(self.this_node);
self.table.queue_insert(tx, &dist_entry)?;
Ok(())
}
@ -250,23 +257,6 @@ impl<T: CountedItem> IndexCounter<T> {
TS: TableSchema<E = T>,
TR: TableReplication,
{
let save_counter_entry = |entry: CounterEntry<T>| -> Result<(), Error> {
let entry_k = self
.table
.data
.tree_key(entry.partition_key(), entry.sort_key());
self.table
.data
.update_entry_with(&entry_k, |ent| match ent {
Some(mut ent) => {
ent.merge(&entry);
ent
}
None => entry.clone(),
})?;
Ok(())
};
// 1. Set all old local counters to zero
let now = now_msec();
let mut next_start: Option<Vec<u8>> = None;
@ -289,20 +279,22 @@ impl<T: CountedItem> IndexCounter<T> {
info!("zeroing old counters... ({})", hex::encode(&batch[0].0));
for (local_counter_k, local_counter) in batch {
let mut local_counter =
rmp_serde::decode::from_read_ref::<_, LocalCounterEntry<T>>(&local_counter)?;
let mut local_counter = LocalCounterEntry::<T>::decode(&local_counter)
.ok_or_message("Cannot decode local counter entry")?;
for (_, tv) in local_counter.values.iter_mut() {
tv.0 = std::cmp::max(tv.0 + 1, now);
tv.1 = 0;
}
let local_counter_bytes = rmp_to_vec_all_named(&local_counter)?;
let local_counter_bytes = local_counter.encode()?;
self.local_counter
.insert(&local_counter_k, &local_counter_bytes)?;
let counter_entry = local_counter.into_counter_entry(self.this_node);
save_counter_entry(counter_entry)?;
self.local_counter
.db()
.transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;
next_start = Some(local_counter_k);
}
@ -343,9 +335,8 @@ impl<T: CountedItem> IndexCounter<T> {
let local_counter_key = self.table.data.tree_key(pk, sk);
let mut local_counter = match self.local_counter.get(&local_counter_key)? {
Some(old_bytes) => {
let ent = rmp_serde::decode::from_read_ref::<_, LocalCounterEntry<T>>(
&old_bytes,
)?;
let ent = LocalCounterEntry::<T>::decode(&old_bytes)
.ok_or_message("Cannot decode local counter entry")?;
assert!(ent.pk == *pk);
assert!(ent.sk == *sk);
ent
@ -362,12 +353,14 @@ impl<T: CountedItem> IndexCounter<T> {
tv.1 += v;
}
let local_counter_bytes = rmp_to_vec_all_named(&local_counter)?;
let local_counter_bytes = local_counter.encode()?;
self.local_counter
.insert(&local_counter_key, local_counter_bytes)?;
let counter_entry = local_counter.into_counter_entry(self.this_node);
save_counter_entry(counter_entry)?;
self.local_counter
.db()
.transaction(|mut tx| self.table.queue_insert(&mut tx, &counter_entry))?;
next_start = Some(counted_entry_k);
}
@ -378,104 +371,7 @@ impl<T: CountedItem> IndexCounter<T> {
}
}
struct IndexPropagatorWorker<T: CountedItem> {
index_counter: Arc<IndexCounter<T>>,
propagate_rx: mpsc::UnboundedReceiver<(T::CP, T::CS, LocalCounterEntry<T>)>,
buf: HashMap<Vec<u8>, CounterEntry<T>>,
errors: usize,
}
impl<T: CountedItem> IndexPropagatorWorker<T> {
fn add_ent(&mut self, pk: T::CP, sk: T::CS, counters: LocalCounterEntry<T>) {
let tree_key = self.index_counter.table.data.tree_key(&pk, &sk);
let dist_entry = counters.into_counter_entry(self.index_counter.this_node);
match self.buf.entry(tree_key) {
hash_map::Entry::Vacant(e) => {
e.insert(dist_entry);
}
hash_map::Entry::Occupied(mut e) => {
e.get_mut().merge(&dist_entry);
}
}
}
}
#[async_trait]
impl<T: CountedItem> Worker for IndexPropagatorWorker<T> {
fn name(&self) -> String {
format!("{} index counter propagator", T::COUNTER_TABLE_NAME)
}
fn info(&self) -> Option<String> {
if !self.buf.is_empty() {
Some(format!("{} items in queue", self.buf.len()))
} else {
None
}
}
async fn work(&mut self, must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
// This loop batches updates to counters to be sent all at once.
// They are sent once the propagate_rx channel has been emptied (or is closed).
let closed = loop {
match self.propagate_rx.try_recv() {
Ok((pk, sk, counters)) => {
self.add_ent(pk, sk, counters);
}
Err(mpsc::error::TryRecvError::Empty) => break false,
Err(mpsc::error::TryRecvError::Disconnected) => break true,
}
};
if !self.buf.is_empty() {
let entries_k = self.buf.keys().take(100).cloned().collect::<Vec<_>>();
let entries = entries_k.iter().map(|k| self.buf.get(k).unwrap());
if let Err(e) = self.index_counter.table.insert_many(entries).await {
self.errors += 1;
if self.errors >= 2 && *must_exit.borrow() {
error!("({}) Could not propagate {} counter values: {}, these counters will not be updated correctly.", T::COUNTER_TABLE_NAME, self.buf.len(), e);
return Ok(WorkerState::Done);
}
// Propagate error up to worker manager, it will log it, increment a counter,
// and sleep for a certain delay (with exponential backoff), waiting for
// things to go back to normal
return Err(e);
} else {
for k in entries_k {
self.buf.remove(&k);
}
self.errors = 0;
}
return Ok(WorkerState::Busy);
} else if closed {
return Ok(WorkerState::Done);
} else {
return Ok(WorkerState::Idle);
}
}
async fn wait_for_work(&mut self, _must_exit: &watch::Receiver<bool>) -> WorkerState {
match self.propagate_rx.recv().await {
Some((pk, sk, counters)) => {
self.add_ent(pk, sk, counters);
WorkerState::Busy
}
None => match self.buf.is_empty() {
false => WorkerState::Busy,
true => WorkerState::Done,
},
}
}
}
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
struct LocalCounterEntry<T: CountedItem> {
pk: T::CP,
sk: T::CS,
values: BTreeMap<String, (u64, i64)>,
}
// ----
impl<T: CountedItem> LocalCounterEntry<T> {
fn into_counter_entry(self, this_node: Uuid) -> CounterEntry<T> {

View File

@ -1,7 +1,8 @@
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use garage_db as db;
use garage_util::data::*;
@ -17,32 +18,43 @@ pub const CONFLICTS: &str = "conflicts";
pub const VALUES: &str = "values";
pub const BYTES: &str = "bytes";
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct K2VItem {
mod v08 {
use crate::k2v::causality::K2VNodeId;
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct K2VItem {
pub partition: K2VItemPartition,
pub sort_key: String,
items: BTreeMap<K2VNodeId, DvvsEntry>,
}
pub(super) items: BTreeMap<K2VNodeId, DvvsEntry>,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Hash)]
pub struct K2VItemPartition {
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, Hash)]
pub struct K2VItemPartition {
pub bucket_id: Uuid,
pub partition_key: String,
}
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
struct DvvsEntry {
t_discard: u64,
values: Vec<(u64, DvvsValue)>,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct DvvsEntry {
pub(super) t_discard: u64,
pub(super) values: Vec<(u64, DvvsValue)>,
}
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub enum DvvsValue {
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub enum DvvsValue {
Value(#[serde(with = "serde_bytes")] Vec<u8>),
Deleted,
}
impl garage_util::migrate::InitialFormat for K2VItem {}
}
pub use v08::*;
impl K2VItem {
/// Creates a new K2VItem when no previous entry existed in the db
pub fn new(bucket_id: Uuid, partition_key: String, sort_key: String) -> Self {

View File

@ -273,14 +273,9 @@ impl K2VRpcHandler {
}
fn local_insert(&self, item: &InsertedItem) -> Result<Option<K2VItem>, Error> {
let tree_key = self
.item_table
.data
.tree_key(&item.partition, &item.sort_key);
self.item_table
.data
.update_entry_with(&tree_key[..], |ent| {
.update_entry_with(&item.partition, &item.sort_key, |ent| {
let mut ent = ent.unwrap_or_else(|| {
K2VItem::new(
item.partition.bucket_id,

View File

@ -1,26 +1,72 @@
use serde::{Deserialize, Serialize};
use garage_table::crdt::*;
use garage_table::*;
use garage_util::crdt::{self, Crdt};
use garage_util::data::*;
use garage_table::{DeletedFilter, EmptyKey, Entry, TableSchema};
use crate::permission::BucketKeyPerm;
use crate::prev::v051::key_table as old;
pub(crate) mod v05 {
use garage_util::crdt;
use serde::{Deserialize, Serialize};
/// An api key
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Key {
/// An api key
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Key {
/// The id of the key (immutable), used as partition key
pub key_id: String,
/// The secret_key associated
pub secret_key: String,
/// Name for the key
pub name: crdt::Lww<String>,
/// Is the key deleted
pub deleted: crdt::Bool,
/// Buckets in which the key is authorized. Empty if `Key` is deleted
// CRDT interaction: deleted implies authorized_buckets is empty
pub authorized_buckets: crdt::LwwMap<String, PermissionSet>,
}
/// Permission given to a key in a bucket
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct PermissionSet {
/// The key can be used to read the bucket
pub allow_read: bool,
/// The key can be used to write in the bucket
pub allow_write: bool,
}
impl crdt::AutoCrdt for PermissionSet {
const WARN_IF_DIFFERENT: bool = true;
}
impl garage_util::migrate::InitialFormat for Key {}
}
mod v08 {
use super::v05;
use crate::permission::BucketKeyPerm;
use garage_util::crdt;
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};
/// An api key
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Key {
/// The id of the key (immutable), used as partition key
pub key_id: String,
/// Internal state of the key
pub state: crdt::Deletable<KeyParams>,
}
}
/// Configuration for a key
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct KeyParams {
/// Configuration for a key
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct KeyParams {
/// The secret_key associated (immutable)
pub secret_key: String,
@ -38,8 +84,38 @@ pub struct KeyParams {
/// A key can have a local view of buckets names it is
/// the only one to see, this is the namespace for these aliases
pub local_aliases: crdt::LwwMap<String, Option<Uuid>>,
}
impl garage_util::migrate::Migrate for Key {
type Previous = v05::Key;
fn migrate(old_k: v05::Key) -> Key {
let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone());
let state = if old_k.deleted.get() {
crdt::Deletable::Deleted
} else {
// Authorized buckets is ignored here,
// migration is performed in specific migration code in
// garage/migrate.rs
crdt::Deletable::Present(KeyParams {
secret_key: old_k.secret_key,
name,
allow_create_bucket: crdt::Lww::new(false),
authorized_buckets: crdt::Map::new(),
local_aliases: crdt::LwwMap::new(),
})
};
Key {
key_id: old_k.key_id,
state,
}
}
}
}
pub use v08::*;
impl KeyParams {
fn new(secret_key: &str, name: &str) -> Self {
KeyParams {
@ -173,28 +249,4 @@ impl TableSchema for KeyTable {
}
}
}
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
let old_k = rmp_serde::decode::from_read_ref::<_, old::Key>(bytes).ok()?;
let name = crdt::Lww::raw(old_k.name.timestamp(), old_k.name.get().clone());
let state = if old_k.deleted.get() {
crdt::Deletable::Deleted
} else {
// Authorized buckets is ignored here,
// migration is performed in specific migration code in
// garage/migrate.rs
crdt::Deletable::Present(KeyParams {
secret_key: old_k.secret_key,
name,
allow_create_bucket: crdt::Lww::new(false),
authorized_buckets: crdt::Map::new(),
local_aliases: crdt::LwwMap::new(),
})
};
Some(Key {
key_id: old_k.key_id,
state,
})
}
}
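This file is the clearest example of the new schema-versioning pattern: each on-disk format lives in its own module (v05, v08), the newest module is re-exported, and a Migrate impl upgrades the previous format on read, replacing the old per-table try_migrate hook removed just above. A standalone sketch of the pattern follows; the trait is a simplified stand-in for garage_util::migrate::Migrate (the real one decodes and encodes msgpack bytes and marks the oldest format with a separate InitialFormat trait):

// Illustrative stand-in for garage_util::migrate::Migrate (simplified).
trait Migrate {
    type Previous;
    fn migrate(prev: Self::Previous) -> Self;
}

mod v05 {
    pub struct Key {
        pub key_id: String,
        pub secret_key: String,
        pub deleted: bool,
    }
}

mod v08 {
    pub enum KeyState {
        Deleted,
        Present { secret_key: String },
    }

    pub struct Key {
        pub key_id: String,
        pub state: KeyState,
    }

    impl super::Migrate for Key {
        type Previous = super::v05::Key;

        fn migrate(old: super::v05::Key) -> Key {
            // Same shape as the real migration: the old deleted flag becomes a
            // Deletable-style enum, the other fields are carried over.
            let state = if old.deleted {
                KeyState::Deleted
            } else {
                KeyState::Present {
                    secret_key: old.secret_key,
                }
            };
            Key {
                key_id: old.key_id,
                state,
            }
        }
    }
}

// Callers only ever see the newest format.
pub use v08::*;

A generic decoder can then try the current format and, on failure, decode Previous and call migrate(); that fallback is what the new F::E::decode() call in the table data layer performs, which is why the per-schema try_migrate functions can be dropped throughout this diff.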

View File

@ -2,6 +2,7 @@ use std::sync::Arc;
use garage_util::crdt::*;
use garage_util::data::*;
use garage_util::encode::nonversioned_decode;
use garage_util::error::Error as GarageError;
use garage_util::time::*;
@ -28,8 +29,8 @@ impl Migrate {
let mut old_buckets = vec![];
for res in tree.iter().map_err(GarageError::from)? {
let (_k, v) = res.map_err(GarageError::from)?;
let bucket = rmp_serde::decode::from_read_ref::<_, old_bucket::Bucket>(&v[..])
.map_err(GarageError::from)?;
let bucket =
nonversioned_decode::<old_bucket::Bucket>(&v[..]).map_err(GarageError::from)?;
old_buckets.push(bucket);
}

View File

@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
use garage_table::crdt::Crdt;
use garage_table::*;
use super::key_table::PermissionSet;
use crate::key_table::v05::PermissionSet;
/// A bucket is a collection of objects
///

View File

@ -1,50 +0,0 @@
use serde::{Deserialize, Serialize};
use garage_table::crdt::*;
use garage_table::*;
/// An api key
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Key {
/// The id of the key (immutable), used as partition key
pub key_id: String,
/// The secret_key associated
pub secret_key: String,
/// Name for the key
pub name: crdt::Lww<String>,
/// Is the key deleted
pub deleted: crdt::Bool,
/// Buckets in which the key is authorized. Empty if `Key` is deleted
// CRDT interaction: deleted implies authorized_buckets is empty
pub authorized_buckets: crdt::LwwMap<String, PermissionSet>,
}
/// Permission given to a key in a bucket
#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct PermissionSet {
/// The key can be used to read the bucket
pub allow_read: bool,
/// The key can be used to write in the bucket
pub allow_write: bool,
}
impl AutoCrdt for PermissionSet {
const WARN_IF_DIFFERENT: bool = true;
}
impl Crdt for Key {
fn merge(&mut self, other: &Self) {
self.name.merge(&other.name);
self.deleted.merge(&other.deleted);
if self.deleted.get() {
self.authorized_buckets.clear();
} else {
self.authorized_buckets.merge(&other.authorized_buckets);
}
}
}

View File

@ -1,4 +1 @@
pub(crate) mod bucket_table;
pub(crate) mod key_table;
pub(crate) mod object_table;
pub(crate) mod version_table;

View File

@ -1,149 +0,0 @@
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use garage_util::data::*;
use garage_table::crdt::*;
/// An object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Object {
/// The bucket in which the object is stored, used as partition key
pub bucket: String,
/// The key at which the object is stored in its bucket, used as sorting key
pub key: String,
/// The list of currently stored versions of the object
versions: Vec<ObjectVersion>,
}
impl Object {
/// Get a list of currently stored versions of `Object`
pub fn versions(&self) -> &[ObjectVersion] {
&self.versions[..]
}
}
/// Information about a version of an object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersion {
/// Id of the version
pub uuid: Uuid,
/// Timestamp of when the object was created
pub timestamp: u64,
/// State of the version
pub state: ObjectVersionState,
}
/// State of an object version
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub enum ObjectVersionState {
/// The version is being received
Uploading(ObjectVersionHeaders),
/// The version is fully received
Complete(ObjectVersionData),
/// The version uploaded contained errors or the upload was explicitly aborted
Aborted,
}
impl Crdt for ObjectVersionState {
fn merge(&mut self, other: &Self) {
use ObjectVersionState::*;
match other {
Aborted => {
*self = Aborted;
}
Complete(b) => match self {
Aborted => {}
Complete(a) => {
a.merge(b);
}
Uploading(_) => {
*self = Complete(b.clone());
}
},
Uploading(_) => {}
}
}
}
/// Data stored in object version
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub enum ObjectVersionData {
/// The object was deleted, this Version is a tombstone to mark it as such
DeleteMarker,
/// The object is short, it's stored inlined
Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
/// The object is not short, the hash of its first block is stored here, and the
/// hashes of the next segments are stored in the version table
FirstBlock(ObjectVersionMeta, Hash),
}
impl AutoCrdt for ObjectVersionData {
const WARN_IF_DIFFERENT: bool = true;
}
/// Metadata about the object version
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersionMeta {
/// Headers to send to the client
pub headers: ObjectVersionHeaders,
/// Size of the object
pub size: u64,
/// etag of the object
pub etag: String,
}
/// Additional headers for an object
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersionHeaders {
/// Content type of the object
pub content_type: String,
/// Any other http headers to send
pub other: BTreeMap<String, String>,
}
impl ObjectVersion {
fn cmp_key(&self) -> (u64, Uuid) {
(self.timestamp, self.uuid)
}
/// Is the object version completely received
pub fn is_complete(&self) -> bool {
matches!(self.state, ObjectVersionState::Complete(_))
}
}
impl Crdt for Object {
fn merge(&mut self, other: &Self) {
// Merge versions from other into here
for other_v in other.versions.iter() {
match self
.versions
.binary_search_by(|v| v.cmp_key().cmp(&other_v.cmp_key()))
{
Ok(i) => {
self.versions[i].state.merge(&other_v.state);
}
Err(i) => {
self.versions.insert(i, other_v.clone());
}
}
}
// Remove versions which are obsolete, i.e. those that come
// before the last version which .is_complete().
let last_complete = self
.versions
.iter()
.enumerate()
.rev()
.find(|(_, v)| v.is_complete())
.map(|(vi, _)| vi);
if let Some(last_vi) = last_complete {
self.versions = self.versions.drain(last_vi..).collect::<Vec<_>>();
}
}
}

View File

@ -1,79 +0,0 @@
use serde::{Deserialize, Serialize};
use garage_util::data::*;
use garage_table::crdt::*;
use garage_table::*;
/// A version of an object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Version {
/// UUID of the version, used as partition key
pub uuid: Uuid,
// Actual data: the blocks for this version
// In the case of a multipart upload, also store the etags
// of individual parts and check them when doing CompleteMultipartUpload
/// Is this version deleted
pub deleted: crdt::Bool,
/// list of blocks of data composing the version
pub blocks: crdt::Map<VersionBlockKey, VersionBlock>,
/// Etag of each part in case of a multipart upload, empty otherwise
pub parts_etags: crdt::Map<u64, String>,
// Back link to bucket+key so that we can figure out if
// this was deleted later on
/// Bucket in which the related object is stored
pub bucket: String,
/// Key in which the related object is stored
pub key: String,
}
#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
pub struct VersionBlockKey {
/// Number of the part
pub part_number: u64,
/// Offset of this sub-segment in its part
pub offset: u64,
}
impl Ord for VersionBlockKey {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.part_number
.cmp(&other.part_number)
.then(self.offset.cmp(&other.offset))
}
}
impl PartialOrd for VersionBlockKey {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
/// Information about a single block
#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
pub struct VersionBlock {
/// Blake2 sum of the block
pub hash: Hash,
/// Size of the block
pub size: u64,
}
impl AutoCrdt for VersionBlock {
const WARN_IF_DIFFERENT: bool = true;
}
impl Crdt for Version {
fn merge(&mut self, other: &Self) {
self.deleted.merge(&other.deleted);
if self.deleted.get() {
self.blocks.clear();
self.parts_etags.clear();
} else {
self.blocks.merge(&other.blocks);
self.parts_etags.merge(&other.parts_etags);
}
}
}

View File

@ -1,4 +1,3 @@
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use garage_db as db;
@ -10,8 +9,13 @@ use garage_table::*;
use garage_block::manager::*;
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct BlockRef {
mod v08 {
use garage_util::crdt;
use garage_util::data::{Hash, Uuid};
use serde::{Deserialize, Serialize};
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct BlockRef {
/// Hash (blake2 sum) of the block, used as partition key
pub block: Hash,
@ -21,8 +25,13 @@ pub struct BlockRef {
// Keep track of deleted status
/// Is the Version that contains this block deleted
pub deleted: crdt::Bool,
}
impl garage_util::migrate::InitialFormat for BlockRef {}
}
pub use v08::*;
impl Entry<Hash, Uuid> for BlockRef {
fn partition_key(&self) -> &Hash {
&self.block

View File

@ -1,10 +1,8 @@
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::sync::Arc;
use garage_db as db;
use garage_util::background::BackgroundRunner;
use garage_util::data::*;
use garage_table::crdt::*;
@ -14,15 +12,99 @@ use garage_table::*;
use crate::index_counter::*;
use crate::s3::version_table::*;
use crate::prev::v051::object_table as old;
pub const OBJECTS: &str = "objects";
pub const UNFINISHED_UPLOADS: &str = "unfinished_uploads";
pub const BYTES: &str = "bytes";
/// An object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Object {
mod v05 {
use garage_util::data::{Hash, Uuid};
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
/// An object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Object {
/// The bucket in which the object is stored, used as partition key
pub bucket: String,
/// The key at which the object is stored in its bucket, used as sorting key
pub key: String,
/// The list of currently stored versions of the object
pub(super) versions: Vec<ObjectVersion>,
}
/// Information about a version of an object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersion {
/// Id of the version
pub uuid: Uuid,
/// Timestamp of when the object was created
pub timestamp: u64,
/// State of the version
pub state: ObjectVersionState,
}
/// State of an object version
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub enum ObjectVersionState {
/// The version is being received
Uploading(ObjectVersionHeaders),
/// The version is fully received
Complete(ObjectVersionData),
/// The version uploaded contained errors or the upload was explicitly aborted
Aborted,
}
/// Data stored in object version
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub enum ObjectVersionData {
/// The object was deleted, this Version is a tombstone to mark it as such
DeleteMarker,
/// The object is short, it's stored inlined
Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
/// The object is not short, the hash of its first block is stored here, and the
/// hashes of the next segments are stored in the version table
FirstBlock(ObjectVersionMeta, Hash),
}
/// Metadata about the object version
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersionMeta {
/// Headers to send to the client
pub headers: ObjectVersionHeaders,
/// Size of the object
pub size: u64,
/// etag of the object
pub etag: String,
}
/// Additional headers for an object
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersionHeaders {
/// Content type of the object
pub content_type: String,
/// Any other http headers to send
pub other: BTreeMap<String, String>,
}
impl garage_util::migrate::InitialFormat for Object {}
}
mod v08 {
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};
use super::v05;
pub use v05::{
ObjectVersion, ObjectVersionData, ObjectVersionHeaders, ObjectVersionMeta,
ObjectVersionState,
};
/// An object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Object {
/// The bucket in which the object is stored, used as partition key
pub bucket_id: Uuid,
@ -30,9 +112,26 @@ pub struct Object {
pub key: String,
/// The list of currently stored versions of the object
versions: Vec<ObjectVersion>,
pub(super) versions: Vec<ObjectVersion>,
}
impl garage_util::migrate::Migrate for Object {
type Previous = v05::Object;
fn migrate(old: v05::Object) -> Object {
use garage_util::data::blake2sum;
Object {
bucket_id: blake2sum(old.bucket.as_bytes()),
key: old.key,
versions: old.versions,
}
}
}
}
pub use v08::*;
impl Object {
/// Initialize an Object struct from parts
pub fn new(bucket_id: Uuid, key: String, versions: Vec<ObjectVersion>) -> Self {
@ -69,28 +168,6 @@ impl Object {
}
}
/// Information about a version of an object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersion {
/// Id of the version
pub uuid: Uuid,
/// Timestamp of when the object was created
pub timestamp: u64,
/// State of the version
pub state: ObjectVersionState,
}
/// State of an object version
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub enum ObjectVersionState {
/// The version is being received
Uploading(ObjectVersionHeaders),
/// The version is fully received
Complete(ObjectVersionData),
/// The version uploaded contained errors or the upload was explicitly aborted
Aborted,
}
impl Crdt for ObjectVersionState {
fn merge(&mut self, other: &Self) {
use ObjectVersionState::*;
@ -112,42 +189,10 @@ impl Crdt for ObjectVersionState {
}
}
/// Data stored in object version
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub enum ObjectVersionData {
/// The object was deleted, this Version is a tombstone to mark it as such
DeleteMarker,
/// The object is short, it's stored inlined
Inline(ObjectVersionMeta, #[serde(with = "serde_bytes")] Vec<u8>),
/// The object is not short, the hash of its first block is stored here, and the
/// hashes of the next segments are stored in the version table
FirstBlock(ObjectVersionMeta, Hash),
}
impl AutoCrdt for ObjectVersionData {
const WARN_IF_DIFFERENT: bool = true;
}
/// Metadata about the object version
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersionMeta {
/// Headers to send to the client
pub headers: ObjectVersionHeaders,
/// Size of the object
pub size: u64,
/// etag of the object
pub etag: String,
}
/// Additional headers for an object
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct ObjectVersionHeaders {
/// Content type of the object
pub content_type: String,
/// Any other http headers to send
pub other: BTreeMap<String, String>,
}
impl ObjectVersion {
fn cmp_key(&self) -> (u64, Uuid) {
(self.timestamp, self.uuid)
@ -221,7 +266,6 @@ impl Crdt for Object {
}
pub struct ObjectTable {
pub background: Arc<BackgroundRunner>,
pub version_table: Arc<Table<VersionTable, TableShardedReplication>>,
pub object_counter_table: Arc<IndexCounter<Object>>,
}
@ -255,12 +299,7 @@ impl TableSchema for ObjectTable {
);
}
// 2. Spawn threads that propagates deletions to version table
let version_table = self.version_table.clone();
let old = old.cloned();
let new = new.cloned();
self.background.spawn(async move {
// 2. Enqueue propagation of deletions to the version table
if let (Some(old_v), Some(new_v)) = (old, new) {
// Propagate deletion of old versions
for v in old_v.versions.iter() {
@ -277,12 +316,17 @@ impl TableSchema for ObjectTable {
if newly_deleted {
let deleted_version =
Version::new(v.uuid, old_v.bucket_id, old_v.key.clone(), true);
version_table.insert(&deleted_version).await?;
let res = self.version_table.queue_insert(tx, &deleted_version);
if let Err(e) = db::unabort(res)? {
error!(
"Unable to enqueue version deletion propagation: {}. A repair will be needed.",
e
);
}
}
}
Ok(())
});
}
Ok(())
}
@ -292,11 +336,6 @@ impl TableSchema for ObjectTable {
ObjectFilter::IsUploading => entry.versions.iter().any(|v| v.is_uploading()),
}
}
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
let old_obj = rmp_serde::decode::from_read_ref::<_, old::Object>(bytes).ok()?;
Some(migrate_object(old_obj))
}
}
impl CountedItem for Object {
@ -341,64 +380,3 @@ impl CountedItem for Object {
]
}
}
// vvvvvvvv migration code, stupid stuff vvvvvvvvvvvv
// (we just want to change bucket into bucket_id by hashing it)
fn migrate_object(o: old::Object) -> Object {
let versions = o
.versions()
.iter()
.cloned()
.map(migrate_object_version)
.collect();
Object {
bucket_id: blake2sum(o.bucket.as_bytes()),
key: o.key,
versions,
}
}
fn migrate_object_version(v: old::ObjectVersion) -> ObjectVersion {
ObjectVersion {
uuid: Uuid::try_from(v.uuid.as_slice()).unwrap(),
timestamp: v.timestamp,
state: match v.state {
old::ObjectVersionState::Uploading(h) => {
ObjectVersionState::Uploading(migrate_object_version_headers(h))
}
old::ObjectVersionState::Complete(d) => {
ObjectVersionState::Complete(migrate_object_version_data(d))
}
old::ObjectVersionState::Aborted => ObjectVersionState::Aborted,
},
}
}
fn migrate_object_version_headers(h: old::ObjectVersionHeaders) -> ObjectVersionHeaders {
ObjectVersionHeaders {
content_type: h.content_type,
other: h.other,
}
}
fn migrate_object_version_data(d: old::ObjectVersionData) -> ObjectVersionData {
match d {
old::ObjectVersionData::DeleteMarker => ObjectVersionData::DeleteMarker,
old::ObjectVersionData::Inline(m, b) => {
ObjectVersionData::Inline(migrate_object_version_meta(m), b)
}
old::ObjectVersionData::FirstBlock(m, h) => ObjectVersionData::FirstBlock(
migrate_object_version_meta(m),
Hash::try_from(h.as_slice()).unwrap(),
),
}
}
fn migrate_object_version_meta(m: old::ObjectVersionMeta) -> ObjectVersionMeta {
ObjectVersionMeta {
headers: migrate_object_version_headers(m.headers),
size: m.size,
etag: m.etag,
}
}

View File

@ -1,9 +1,7 @@
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use garage_db as db;
use garage_util::background::BackgroundRunner;
use garage_util::data::*;
use garage_table::crdt::*;
@ -12,11 +10,65 @@ use garage_table::*;
use crate::s3::block_ref_table::*;
use crate::prev::v051::version_table as old;
mod v05 {
use garage_util::crdt;
use garage_util::data::{Hash, Uuid};
use serde::{Deserialize, Serialize};
/// A version of an object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Version {
/// A version of an object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Version {
/// UUID of the version, used as partition key
pub uuid: Uuid,
// Actual data: the blocks for this version
// In the case of a multipart upload, also store the etags
// of individual parts and check them when doing CompleteMultipartUpload
/// Is this version deleted
pub deleted: crdt::Bool,
/// list of blocks of data composing the version
pub blocks: crdt::Map<VersionBlockKey, VersionBlock>,
/// Etag of each part in case of a multipart upload, empty otherwise
pub parts_etags: crdt::Map<u64, String>,
// Back link to bucket+key so that we can figure out if
// this was deleted later on
/// Bucket in which the related object is stored
pub bucket: String,
/// Key in which the related object is stored
pub key: String,
}
#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
pub struct VersionBlockKey {
/// Number of the part
pub part_number: u64,
/// Offset of this sub-segment in its part
pub offset: u64,
}
/// Information about a single block
#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
pub struct VersionBlock {
/// Blake2 sum of the block
pub hash: Hash,
/// Size of the block
pub size: u64,
}
impl garage_util::migrate::InitialFormat for Version {}
}
mod v08 {
use garage_util::crdt;
use garage_util::data::Uuid;
use serde::{Deserialize, Serialize};
use super::v05;
/// A version of an object
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub struct Version {
/// UUID of the version, used as partition key
pub uuid: Uuid,
@ -36,8 +88,30 @@ pub struct Version {
pub bucket_id: Uuid,
/// Key in which the related object is stored
pub key: String,
}
pub use v05::{VersionBlock, VersionBlockKey};
impl garage_util::migrate::Migrate for Version {
type Previous = v05::Version;
fn migrate(old: v05::Version) -> Version {
use garage_util::data::blake2sum;
Version {
uuid: old.uuid,
deleted: old.deleted,
blocks: old.blocks,
parts_etags: old.parts_etags,
bucket_id: blake2sum(old.bucket.as_bytes()),
key: old.key,
}
}
}
}
pub use v08::*;
impl Version {
pub fn new(uuid: Uuid, bucket_id: Uuid, key: String, deleted: bool) -> Self {
Self {
@ -65,14 +139,6 @@ impl Version {
}
}
#[derive(PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)]
pub struct VersionBlockKey {
/// Number of the part
pub part_number: u64,
/// Offset of this sub-segment in its part
pub offset: u64,
}
impl Ord for VersionBlockKey {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.part_number
@ -87,15 +153,6 @@ impl PartialOrd for VersionBlockKey {
}
}
/// Information about a single block
#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)]
pub struct VersionBlock {
/// Blake2 sum of the block
pub hash: Hash,
/// Size of the block
pub size: u64,
}
impl AutoCrdt for VersionBlock {
const WARN_IF_DIFFERENT: bool = true;
}
@ -127,7 +184,6 @@ impl Crdt for Version {
}
pub struct VersionTable {
pub background: Arc<BackgroundRunner>,
pub block_ref_table: Arc<Table<BlockRefTable, TableShardedReplication>>,
}
@ -141,33 +197,26 @@ impl TableSchema for VersionTable {
fn updated(
&self,
_tx: &mut db::Transaction,
tx: &mut db::Transaction,
old: Option<&Self::E>,
new: Option<&Self::E>,
) -> db::TxOpResult<()> {
let block_ref_table = self.block_ref_table.clone();
let old = old.cloned();
let new = new.cloned();
self.background.spawn(async move {
if let (Some(old_v), Some(new_v)) = (old, new) {
// Propagate deletion of version blocks
if new_v.deleted.get() && !old_v.deleted.get() {
let deleted_block_refs = old_v
.blocks
.items()
.iter()
.map(|(_k, vb)| BlockRef {
let deleted_block_refs = old_v.blocks.items().iter().map(|(_k, vb)| BlockRef {
block: vb.hash,
version: old_v.uuid,
deleted: true.into(),
})
.collect::<Vec<_>>();
block_ref_table.insert_many(&deleted_block_refs[..]).await?;
}
}
Ok(())
});
for block_ref in deleted_block_refs {
let res = self.block_ref_table.queue_insert(tx, &block_ref);
if let Err(e) = db::unabort(res)? {
error!("Unable to enqueue block ref deletion propagation: {}. A repair will be needed.", e);
}
}
}
}
Ok(())
}
@ -175,42 +224,4 @@ impl TableSchema for VersionTable {
fn matches_filter(entry: &Self::E, filter: &Self::Filter) -> bool {
filter.apply(entry.deleted.get())
}
fn try_migrate(bytes: &[u8]) -> Option<Self::E> {
let old = rmp_serde::decode::from_read_ref::<_, old::Version>(bytes).ok()?;
let blocks = old
.blocks
.items()
.iter()
.map(|(k, v)| {
(
VersionBlockKey {
part_number: k.part_number,
offset: k.offset,
},
VersionBlock {
hash: Hash::try_from(v.hash.as_slice()).unwrap(),
size: v.size,
},
)
})
.collect::<crdt::Map<_, _>>();
let parts_etags = old
.parts_etags
.items()
.iter()
.map(|(k, v)| (*k, v.clone()))
.collect::<crdt::Map<_, _>>();
Some(Version {
uuid: Hash::try_from(old.uuid.as_slice()).unwrap(),
deleted: crdt::Bool::new(old.deleted.get()),
blocks,
parts_etags,
bucket_id: blake2sum(old.bucket.as_bytes()),
key: old.key,
})
}
}

View File

@ -1,6 +1,6 @@
[package]
name = "garage_rpc"
version = "0.8.0"
version = "0.8.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -14,7 +14,7 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_util = { version = "0.8.0", path = "../util" }
garage_util = { version = "0.8.1", path = "../util" }
arc-swap = "1.0"
bytes = "1.0"
@ -27,7 +27,6 @@ itertools="0.10"
sodiumoxide = { version = "0.2.5-0", package = "kuska-sodiumoxide" }
async-trait = "0.1.7"
rmp-serde = "0.15"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_bytes = "0.11"
serde_json = "1.0"

View File

@ -9,6 +9,7 @@ use serde::{Deserialize, Serialize};
use garage_util::crdt::{AutoCrdt, Crdt, Lww, LwwMap};
use garage_util::data::*;
use garage_util::encode::nonversioned_encode;
use garage_util::error::*;
use crate::graph_algo::*;
@ -58,6 +59,7 @@ pub struct ClusterLayout {
pub staging_roles: LwwMap<Uuid, NodeRoleV>,
pub staging_hash: Hash,
}
impl garage_util::migrate::InitialFormat for ClusterLayout {}
/// This struct is used to set the parameters to be used in the assignation computation
/// algorithm. It is stored as a Crdt.
@ -134,7 +136,7 @@ impl ClusterLayout {
fn calculate_staging_hash(&self) -> Hash {
let hashed_tuple = (&self.staging_roles, &self.staging_parameters);
blake2sum(&rmp_to_vec_all_named(&hashed_tuple).unwrap()[..])
blake2sum(&nonversioned_encode(&hashed_tuple).unwrap()[..])
}
pub fn merge(&mut self, other: &ClusterLayout) -> bool {

View File

@ -5,7 +5,6 @@ use std::time::Duration;
use futures::future::join_all;
use futures::stream::futures_unordered::FuturesUnordered;
use futures::stream::StreamExt;
use futures_util::future::FutureExt;
use tokio::select;
use tokio::sync::watch;
@ -24,7 +23,6 @@ pub use netapp::message::{
use netapp::peering::fullmesh::FullMeshPeeringStrategy;
pub use netapp::{self, NetApp, NodeID};
use garage_util::background::BackgroundRunner;
use garage_util::data::*;
use garage_util::error::Error;
use garage_util::metrics::RecordDuration;
@ -94,7 +92,6 @@ pub struct RpcHelper(Arc<RpcHelperInner>);
struct RpcHelperInner {
our_node_id: Uuid,
fullmesh: Arc<FullMeshPeeringStrategy>,
background: Arc<BackgroundRunner>,
ring: watch::Receiver<Arc<Ring>>,
metrics: RpcMetrics,
rpc_timeout: Duration,
@ -104,7 +101,6 @@ impl RpcHelper {
pub(crate) fn new(
our_node_id: Uuid,
fullmesh: Arc<FullMeshPeeringStrategy>,
background: Arc<BackgroundRunner>,
ring: watch::Receiver<Arc<Ring>>,
rpc_timeout: Option<Duration>,
) -> Self {
@ -113,7 +109,6 @@ impl RpcHelper {
Self(Arc::new(RpcHelperInner {
our_node_id,
fullmesh,
background,
ring,
metrics,
rpc_timeout: rpc_timeout.unwrap_or(DEFAULT_TIMEOUT),
@ -377,16 +372,13 @@ impl RpcHelper {
if !resp_stream.is_empty() {
// Continue remaining requests in background.
// Continue the remaining requests immediately using tokio::spawn
// but enqueue a task in the background runner
// to ensure that the process won't exit until the requests are done
// (if we had just enqueued the resp_stream.collect directly in the background runner,
// the requests might have been put on hold in the background runner's queue,
// in which case they might timeout or otherwise fail)
let wait_finished_fut = tokio::spawn(async move {
// Note: these requests can get interrupted on process shutdown,
// we must not count on them being executed for certain.
// For all background things that have to happen with certainty,
// they have to be put in a proper queue that is persisted to disk.
tokio::spawn(async move {
resp_stream.collect::<Vec<Result<_, _>>>().await;
});
self.0.background.spawn(wait_finished_fut.map(|_| Ok(())));
}
}
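The surrounding code returns as soon as a quorum of responses has arrived and now detaches the remaining requests with a plain tokio::spawn; as the new comment notes, anything that must happen with certainty has to go through a persisted queue instead. A self-contained sketch of that shape, assuming the tokio and futures crates and using a dummy async task in place of a real RPC call:

use futures::stream::{FuturesUnordered, StreamExt};

// Fire n requests, return once `quorum` have succeeded, and let the rest
// finish on their own. The detached requests may be cut short on shutdown,
// so nothing critical may rely on them completing.
async fn quorum_then_detach(quorum: usize, n: usize) -> Vec<u64> {
    let mut resp_stream: FuturesUnordered<_> = (0..n as u64)
        .map(|i| async move {
            // stand-in for an actual RPC call
            tokio::time::sleep(std::time::Duration::from_millis(10 * i)).await;
            i
        })
        .collect();

    let mut successes = Vec::new();
    while let Some(resp) = resp_stream.next().await {
        successes.push(resp);
        if successes.len() >= quorum {
            break;
        }
    }

    if !resp_stream.is_empty() {
        // Continue the remaining requests without blocking the caller.
        tokio::spawn(async move {
            resp_stream.collect::<Vec<_>>().await;
        });
    }
    successes
}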

View File

@ -21,7 +21,6 @@ use netapp::peering::fullmesh::FullMeshPeeringStrategy;
use netapp::util::parse_and_resolve_peer_addr_async;
use netapp::{NetApp, NetworkKey, NodeID, NodeKey};
use garage_util::background::BackgroundRunner;
use garage_util::config::Config;
#[cfg(feature = "kubernetes-discovery")]
use garage_util::config::KubernetesDiscoveryConfig;
@ -50,8 +49,6 @@ pub const GARAGE_VERSION_TAG: u64 = 0x6761726167650008; // garage 0x0008
/// RPC endpoint used for calls related to membership
pub const SYSTEM_RPC_PATH: &str = "garage_rpc/membership.rs/SystemRpc";
pub const CONNECT_ERROR_MESSAGE: &str = "Error establishing RPC connection to remote node. This can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret";
/// RPC messages related to membership
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum SystemRpc {
@ -76,13 +73,17 @@ impl Rpc for SystemRpc {
type Response = Result<SystemRpc, Error>;
}
#[derive(Serialize, Deserialize)]
pub struct PeerList(Vec<(Uuid, SocketAddr)>);
impl garage_util::migrate::InitialFormat for PeerList {}
/// This node's membership manager
pub struct System {
/// The id of this node
pub id: Uuid,
persist_cluster_layout: Persister<ClusterLayout>,
persist_peer_list: Persister<Vec<(Uuid, SocketAddr)>>,
persist_peer_list: Persister<PeerList>,
local_status: ArcSwap<NodeStatus>,
node_status: RwLock<HashMap<Uuid, (u64, NodeStatus)>>,
@ -110,9 +111,6 @@ pub struct System {
pub ring: watch::Receiver<Arc<Ring>>,
update_ring: Mutex<watch::Sender<Arc<Ring>>>,
/// The job runner of this node
pub background: Arc<BackgroundRunner>,
/// Path to metadata directory
pub metadata_dir: PathBuf,
}
@ -232,7 +230,6 @@ impl System {
/// Create this node's membership manager
pub fn new(
network_key: NetworkKey,
background: Arc<BackgroundRunner>,
replication_mode: ReplicationMode,
config: &Config,
) -> Result<Arc<Self>, Error> {
@ -354,7 +351,6 @@ impl System {
rpc: RpcHelper::new(
netapp.id.into(),
fullmesh,
background.clone(),
ring.clone(),
config.rpc_timeout_msec.map(Duration::from_millis),
),
@ -372,7 +368,6 @@ impl System {
ring,
update_ring: Mutex::new(update_ring),
background,
metadata_dir: config.metadata_dir.clone(),
});
sys.system_endpoint.set_handler(sys.clone());
@ -444,17 +439,14 @@ impl System {
))
})?;
let mut errors = vec![];
for ip in addrs.iter() {
match self
.netapp
.clone()
.try_connect(*ip, pubkey)
.await
.err_context(CONNECT_ERROR_MESSAGE)
{
for addr in addrs.iter() {
match self.netapp.clone().try_connect(*addr, pubkey).await {
Ok(()) => return Ok(()),
Err(e) => {
errors.push((*ip, e));
errors.push((
*addr,
Error::Message(connect_error_message(*addr, pubkey, e)),
));
}
}
}
@ -529,56 +521,61 @@ impl System {
// ---- INTERNALS ----
#[cfg(feature = "consul-discovery")]
async fn advertise_to_consul(self: Arc<Self>) -> Result<(), Error> {
async fn advertise_to_consul(self: Arc<Self>) {
let c = match &self.consul_discovery {
Some(c) => c,
_ => return Ok(()),
_ => return,
};
let rpc_public_addr = match self.rpc_public_addr {
Some(addr) => addr,
None => {
warn!("Not advertising to Consul because rpc_public_addr is not defined in config file and could not be autodetected.");
return Ok(());
return;
}
};
c.publish_consul_service(
if let Err(e) = c
.publish_consul_service(
self.netapp.id,
&self.local_status.load_full().hostname,
rpc_public_addr,
)
.await
.err_context("Error while publishing Consul service")
{
error!("Error while publishing Consul service: {}", e);
}
}
#[cfg(feature = "kubernetes-discovery")]
async fn advertise_to_kubernetes(self: Arc<Self>) -> Result<(), Error> {
async fn advertise_to_kubernetes(self: Arc<Self>) {
let k = match &self.kubernetes_discovery {
Some(k) => k,
_ => return Ok(()),
_ => return,
};
let rpc_public_addr = match self.rpc_public_addr {
Some(addr) => addr,
None => {
warn!("Not advertising to Kubernetes because rpc_public_addr is not defined in config file and could not be autodetected.");
return Ok(());
return;
}
};
publish_kubernetes_node(
if let Err(e) = publish_kubernetes_node(
k,
self.netapp.id,
&self.local_status.load_full().hostname,
rpc_public_addr,
)
.await
.err_context("Error while publishing node to kubernetes")
{
error!("Error while publishing node to Kubernetes: {}", e);
}
}
/// Save network configuration to disk
async fn save_cluster_layout(self: Arc<Self>) -> Result<(), Error> {
async fn save_cluster_layout(&self) -> Result<(), Error> {
let ring: Arc<Ring> = self.ring.borrow().clone();
self.persist_cluster_layout
.save_async(&ring.layout)
@ -630,11 +627,7 @@ impl System {
if info.cluster_layout_version > local_info.cluster_layout_version
|| info.cluster_layout_staging_hash != local_info.cluster_layout_staging_hash
{
let self2 = self.clone();
self.background.spawn_cancellable(async move {
self2.pull_cluster_layout(from).await;
Ok(())
});
tokio::spawn(self.clone().pull_cluster_layout(from));
}
self.node_status
@ -676,18 +669,21 @@ impl System {
drop(update_ring);
let self2 = self.clone();
self.background.spawn_cancellable(async move {
self2
tokio::spawn(async move {
if let Err(e) = self2
.rpc
.broadcast(
&self2.system_endpoint,
SystemRpc::AdvertiseClusterLayout(layout),
RequestStrategy::with_priority(PRIO_HIGH),
)
.await?;
Ok(())
.await
{
warn!("Error while broadcasting new cluster layout: {}", e);
}
});
self.background.spawn(self.clone().save_cluster_layout());
self.save_cluster_layout().await?;
}
Ok(SystemRpc::Ok)
@ -734,7 +730,7 @@ impl System {
// Add peer list from list stored on disk
if let Ok(peers) = self.persist_peer_list.load_async().await {
ping_list.extend(peers.iter().map(|(id, addr)| ((*id).into(), *addr)))
ping_list.extend(peers.0.iter().map(|(id, addr)| ((*id).into(), *addr)))
}
// Fetch peer list from Consul
@ -773,12 +769,12 @@ impl System {
}
for (node_id, node_addr) in ping_list {
tokio::spawn(
self.netapp
.clone()
.try_connect(node_addr, node_id)
.map(|r| r.err_context(CONNECT_ERROR_MESSAGE)),
);
let self2 = self.clone();
tokio::spawn(async move {
if let Err(e) = self2.netapp.clone().try_connect(node_addr, node_id).await {
error!("{}", connect_error_message(node_addr, node_id, e));
}
});
}
}
@ -787,11 +783,10 @@ impl System {
}
#[cfg(feature = "consul-discovery")]
self.background.spawn(self.clone().advertise_to_consul());
tokio::spawn(self.clone().advertise_to_consul());
#[cfg(feature = "kubernetes-discovery")]
self.background
.spawn(self.clone().advertise_to_kubernetes());
tokio::spawn(self.clone().advertise_to_kubernetes());
let restart_at = tokio::time::sleep(DISCOVERY_INTERVAL);
select! {
@ -815,12 +810,16 @@ impl System {
// and append it to the list we are about to save,
// so that no peer ID gets lost in the process.
if let Ok(mut prev_peer_list) = self.persist_peer_list.load_async().await {
prev_peer_list.retain(|(id, _ip)| peer_list.iter().all(|(id2, _ip2)| id2 != id));
peer_list.extend(prev_peer_list);
prev_peer_list
.0
.retain(|(id, _ip)| peer_list.iter().all(|(id2, _ip2)| id2 != id));
peer_list.extend(prev_peer_list.0);
}
// Save new peer list to file
self.persist_peer_list.save_async(&peer_list).await
self.persist_peer_list
.save_async(&PeerList(peer_list))
.await
}
async fn pull_cluster_layout(self: Arc<Self>, peer: Uuid) {
@ -881,3 +880,11 @@ async fn resolve_peers(peers: &[String]) -> Vec<(NodeID, SocketAddr)> {
ret
}
fn connect_error_message(
addr: SocketAddr,
pubkey: ed25519::PublicKey,
e: netapp::error::Error,
) -> String {
format!("Error establishing RPC connection to remote node: {}@{}.\nThis can happen if the remote node is not reachable on the network, but also if the two nodes are not configured with the same rpc_secret.\n{}", hex::encode(pubkey), addr, e)
}
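When the peer list is persisted, the previously saved list is merged back in so that a peer that is currently unreachable keeps its (id, address) entry across restarts. A std-only sketch of that merge (NodeId is a stand-in for the real key type):

use std::net::SocketAddr;

type NodeId = [u8; 32];

// Keep every previously known (id, addr) pair whose id is absent from the new
// list, so a temporarily unreachable node is still retried after a restart.
fn merge_peer_lists(
    mut new_list: Vec<(NodeId, SocketAddr)>,
    prev_list: Vec<(NodeId, SocketAddr)>,
) -> Vec<(NodeId, SocketAddr)> {
    let mut kept: Vec<_> = prev_list
        .into_iter()
        .filter(|(id, _)| new_list.iter().all(|(id2, _)| id2 != id))
        .collect();
    new_list.append(&mut kept);
    new_list
}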

View File

@ -1,6 +1,6 @@
[package]
name = "garage_table"
version = "0.8.0"
version = "0.8.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -14,20 +14,20 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_db = { version = "0.8.0", path = "../db" }
garage_rpc = { version = "0.8.0", path = "../rpc" }
garage_util = { version = "0.8.0", path = "../util" }
garage_db = { version = "0.8.1", path = "../db" }
garage_rpc = { version = "0.8.1", path = "../rpc" }
garage_util = { version = "0.8.1", path = "../util" }
opentelemetry = "0.17"
async-trait = "0.1.7"
arc-swap = "1.0"
bytes = "1.0"
hex = "0.4"
hexdump = "0.1"
tracing = "0.1.30"
rand = "0.8"
rmp-serde = "0.15"
serde = { version = "1.0", default-features = false, features = ["derive", "rc"] }
serde_bytes = "0.11"

View File

@ -10,6 +10,7 @@ use garage_db::counted_tree_hack::CountedTree;
use garage_util::data::*;
use garage_util::error::*;
use garage_util::migrate::Migrate;
use garage_rpc::system::System;
@ -31,16 +32,16 @@ pub struct TableData<F: TableSchema, R: TableReplication> {
pub(crate) merkle_tree: db::Tree,
pub(crate) merkle_todo: db::Tree,
pub(crate) merkle_todo_notify: Notify,
pub(crate) insert_queue: db::Tree,
pub(crate) insert_queue_notify: Notify,
pub(crate) gc_todo: CountedTree,
pub(crate) metrics: TableMetrics,
}
impl<F, R> TableData<F, R>
where
F: TableSchema,
R: TableReplication,
{
impl<F: TableSchema, R: TableReplication> TableData<F, R> {
pub fn new(system: Arc<System>, instance: F, replication: R, db: &db::Db) -> Arc<Self> {
let store = db
.open_tree(&format!("{}:table", F::TABLE_NAME))
@ -53,12 +54,22 @@ where
.open_tree(&format!("{}:merkle_todo", F::TABLE_NAME))
.expect("Unable to open DB Merkle TODO tree");
let insert_queue = db
.open_tree(&format!("{}:insert_queue", F::TABLE_NAME))
.expect("Unable to open insert queue DB tree");
let gc_todo = db
.open_tree(&format!("{}:gc_todo_v2", F::TABLE_NAME))
.expect("Unable to open DB tree");
.expect("Unable to open GC DB tree");
let gc_todo = CountedTree::new(gc_todo).expect("Cannot count gc_todo_v2");
let metrics = TableMetrics::new(F::TABLE_NAME, merkle_todo.clone(), gc_todo.clone());
let metrics = TableMetrics::new(
F::TABLE_NAME,
store.clone(),
merkle_tree.clone(),
merkle_todo.clone(),
gc_todo.clone(),
);
Arc::new(Self {
system,
@ -68,6 +79,8 @@ where
merkle_tree,
merkle_todo,
merkle_todo_notify: Notify::new(),
insert_queue,
insert_queue_notify: Notify::new(),
gc_todo,
metrics,
})
@ -167,9 +180,8 @@ where
pub(crate) fn update_entry(&self, update_bytes: &[u8]) -> Result<(), Error> {
let update = self.decode_entry(update_bytes)?;
let tree_key = self.tree_key(update.partition_key(), update.sort_key());
self.update_entry_with(&tree_key[..], |ent| match ent {
self.update_entry_with(update.partition_key(), update.sort_key(), |ent| match ent {
Some(mut ent) => {
ent.merge(&update);
ent
@ -181,11 +193,14 @@ where
pub fn update_entry_with(
&self,
tree_key: &[u8],
partition_key: &F::P,
sort_key: &F::S,
f: impl Fn(Option<F::E>) -> F::E,
) -> Result<Option<F::E>, Error> {
let tree_key = self.tree_key(partition_key, sort_key);
let changed = self.store.db().transaction(|mut tx| {
let (old_entry, old_bytes, new_entry) = match tx.get(&self.store, tree_key)? {
let (old_entry, old_bytes, new_entry) = match tx.get(&self.store, &tree_key)? {
Some(old_bytes) => {
let old_entry = self.decode_entry(&old_bytes).map_err(db::TxError::Abort)?;
let new_entry = f(Some(old_entry.clone()));
@ -194,23 +209,24 @@ where
None => (None, None, f(None)),
};
// Scenario 1: the value changed, so of course there is a change
let value_changed = Some(&new_entry) != old_entry.as_ref();
// Changed can be true in two scenarios
// Scenario 1: the actual represented value changed,
// so of course the messagepack encoding changed as well
// Scenario 2: the value didn't change but due to a migration in the
// data format, the messagepack encoding changed. In this case
// we have to write the migrated value in the table and update
// data format, the messagepack encoding changed. In this case,
// we also have to write the migrated value in the table and update
// the associated Merkle tree entry.
let new_bytes = rmp_to_vec_all_named(&new_entry)
let new_bytes = new_entry
.encode()
.map_err(Error::RmpEncode)
.map_err(db::TxError::Abort)?;
let encoding_changed = Some(&new_bytes[..]) != old_bytes.as_ref().map(|x| &x[..]);
let changed = Some(&new_bytes[..]) != old_bytes.as_deref();
drop(old_bytes);
if value_changed || encoding_changed {
let new_bytes_hash = blake2sum(&new_bytes[..]);
tx.insert(&self.merkle_todo, tree_key, new_bytes_hash.as_slice())?;
tx.insert(&self.store, tree_key, new_bytes)?;
if changed {
let new_bytes_hash = blake2sum(&new_bytes);
tx.insert(&self.merkle_todo, &tree_key, new_bytes_hash.as_slice())?;
tx.insert(&self.store, &tree_key, new_bytes)?;
self.instance
.updated(&mut tx, old_entry.as_ref(), Some(&new_entry))?;
@ -236,7 +252,7 @@ where
let pk_hash = Hash::try_from(&tree_key[..32]).unwrap();
let nodes = self.replication.write_nodes(&pk_hash);
if nodes.first() == Some(&self.system.id) {
GcTodoEntry::new(tree_key.to_vec(), new_bytes_hash).save(&self.gc_todo)?;
GcTodoEntry::new(tree_key, new_bytes_hash).save(&self.gc_todo)?;
}
}
@ -252,10 +268,11 @@ where
.db()
.transaction(|mut tx| match tx.get(&self.store, k)? {
Some(cur_v) if cur_v == v => {
let old_entry = self.decode_entry(v).map_err(db::TxError::Abort)?;
tx.remove(&self.store, k)?;
tx.insert(&self.merkle_todo, k, vec![])?;
let old_entry = self.decode_entry(v).map_err(db::TxError::Abort)?;
self.instance.updated(&mut tx, Some(&old_entry), None)?;
Ok(true)
}
@ -279,10 +296,11 @@ where
.db()
.transaction(|mut tx| match tx.get(&self.store, k)? {
Some(cur_v) if blake2sum(&cur_v[..]) == vhash => {
let old_entry = self.decode_entry(&cur_v[..]).map_err(db::TxError::Abort)?;
tx.remove(&self.store, k)?;
tx.insert(&self.merkle_todo, k, vec![])?;
let old_entry = self.decode_entry(&cur_v[..]).map_err(db::TxError::Abort)?;
self.instance.updated(&mut tx, Some(&old_entry), None)?;
Ok(true)
}
@ -296,6 +314,32 @@ where
Ok(removed)
}
// ---- Insert queue functions ----
pub(crate) fn queue_insert(
&self,
tx: &mut db::Transaction,
ins: &F::E,
) -> db::TxResult<(), Error> {
let tree_key = self.tree_key(ins.partition_key(), ins.sort_key());
let new_entry = match tx.get(&self.insert_queue, &tree_key)? {
Some(old_v) => {
let mut entry = self.decode_entry(&old_v).map_err(db::TxError::Abort)?;
entry.merge(ins);
entry.encode()
}
None => ins.encode(),
};
let new_entry = new_entry
.map_err(Error::RmpEncode)
.map_err(db::TxError::Abort)?;
tx.insert(&self.insert_queue, &tree_key, new_entry)?;
self.insert_queue_notify.notify_one();
Ok(())
}
// ---- Utility functions ----
pub fn tree_key(&self, p: &F::P, s: &F::S) -> Vec<u8> {
@ -305,18 +349,18 @@ where
}
pub fn decode_entry(&self, bytes: &[u8]) -> Result<F::E, Error> {
match rmp_serde::decode::from_read_ref::<_, F::E>(bytes) {
Ok(x) => Ok(x),
Err(e) => match F::try_migrate(bytes) {
match F::E::decode(bytes) {
Some(x) => Ok(x),
None => {
warn!("Unable to decode entry of {}: {}", F::TABLE_NAME, e);
error!("Unable to decode entry of {}", F::TABLE_NAME);
for line in hexdump::hexdump_iter(bytes) {
debug!("{}", line);
}
Err(e.into())
Err(Error::Message(format!(
"Unable to decode entry of {}",
F::TABLE_NAME
)))
}
},
}
}
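The rewritten update path decides whether to write a row by re-encoding the merged entry and comparing it byte-for-byte with what is stored: this covers both genuine value changes and the case where a format migration changed the serialized form of an identical value. A toy sketch of that decision, with a trivial encoder standing in for the entry's msgpack encoding:

// Toy encoder: in Garage this is the entry's msgpack encoding (E::encode()).
fn encode(entry: &(String, u64)) -> Vec<u8> {
    format!("{}:{}", entry.0, entry.1).into_bytes()
}

// Returns Some(new_bytes) if the row must be rewritten (and its Merkle todo
// entry refreshed), None if the stored bytes already match.
fn needs_write(old_bytes: Option<&[u8]>, new_entry: &(String, u64)) -> Option<Vec<u8>> {
    let new_bytes = encode(new_entry);
    // A mismatch happens either because the value changed, or because the
    // value is identical but its serialized form changed after a migration.
    if old_bytes != Some(&new_bytes[..]) {
        Some(new_bytes)
    } else {
        None
    }
}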

View File

@ -31,7 +31,7 @@ const TABLE_GC_BATCH_SIZE: usize = 1024;
// and the moment the garbage collection actually happens)
const TABLE_GC_DELAY: Duration = Duration::from_secs(24 * 3600);
pub(crate) struct TableGc<F: TableSchema + 'static, R: TableReplication + 'static> {
pub(crate) struct TableGc<F: TableSchema, R: TableReplication> {
system: Arc<System>,
data: Arc<TableData<F, R>>,
@ -49,29 +49,26 @@ impl Rpc for GcRpc {
type Response = Result<GcRpc, Error>;
}
impl<F, R> TableGc<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
pub(crate) fn launch(system: Arc<System>, data: Arc<TableData<F, R>>) -> Arc<Self> {
impl<F: TableSchema, R: TableReplication> TableGc<F, R> {
pub(crate) fn new(system: Arc<System>, data: Arc<TableData<F, R>>) -> Arc<Self> {
let endpoint = system
.netapp
.endpoint(format!("garage_table/gc.rs/Rpc:{}", F::TABLE_NAME));
let gc = Arc::new(Self {
system: system.clone(),
system,
data,
endpoint,
});
gc.endpoint.set_handler(gc.clone());
system.background.spawn_worker(GcWorker::new(gc.clone()));
gc
}
pub(crate) fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
bg.spawn_worker(GcWorker::new(self.clone()));
}
async fn gc_loop_iter(&self) -> Result<Option<Duration>, Error> {
let now = now_msec();
@ -276,11 +273,7 @@ where
}
#[async_trait]
impl<F, R> EndpointHandler<GcRpc> for TableGc<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
impl<F: TableSchema, R: TableReplication> EndpointHandler<GcRpc> for TableGc<F, R> {
async fn handle(self: &Arc<Self>, message: &GcRpc, _from: NodeID) -> Result<GcRpc, Error> {
match message {
GcRpc::Update(items) => {
@ -298,20 +291,12 @@ where
}
}
struct GcWorker<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
struct GcWorker<F: TableSchema, R: TableReplication> {
gc: Arc<TableGc<F, R>>,
wait_delay: Duration,
}
impl<F, R> GcWorker<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
impl<F: TableSchema, R: TableReplication> GcWorker<F, R> {
fn new(gc: Arc<TableGc<F, R>>) -> Self {
Self {
gc,
@ -321,21 +306,15 @@ where
}
#[async_trait]
impl<F, R> Worker for GcWorker<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
impl<F: TableSchema, R: TableReplication> Worker for GcWorker<F, R> {
fn name(&self) -> String {
format!("{} GC", F::TABLE_NAME)
}
fn info(&self) -> Option<String> {
let l = self.gc.data.gc_todo_len().unwrap_or(0);
if l > 0 {
Some(format!("{} items in queue", l))
} else {
None
fn status(&self) -> WorkerStatus {
WorkerStatus {
queue_length: Some(self.gc.data.gc_todo_len().unwrap_or(0) as u64),
..Default::default()
}
}
@ -349,10 +328,7 @@ where
}
}
async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
if *must_exit.borrow() {
return WorkerState::Done;
}
async fn wait_for_work(&mut self) -> WorkerState {
tokio::time::sleep(self.wait_delay).await;
WorkerState::Busy
}

View File

@ -4,16 +4,18 @@
#[macro_use]
extern crate tracing;
mod metrics;
pub mod schema;
pub mod util;
pub mod data;
pub mod replication;
pub mod table;
mod gc;
mod merkle;
pub mod replication;
mod metrics;
mod queue;
mod sync;
pub mod table;
pub use schema::*;
pub use table::*;

View File

@ -3,12 +3,14 @@ use std::time::Duration;
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use tokio::select;
use tokio::sync::watch;
use garage_db as db;
use garage_util::background::*;
use garage_util::data::*;
use garage_util::encode::{nonversioned_decode, nonversioned_encode};
use garage_util::error::Error;
use garage_rpc::ring::*;
@ -64,22 +66,18 @@ pub enum MerkleNode {
Leaf(Vec<u8>, Hash),
}
impl<F, R> MerkleUpdater<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
pub(crate) fn launch(background: &BackgroundRunner, data: Arc<TableData<F, R>>) -> Arc<Self> {
let empty_node_hash = blake2sum(&rmp_to_vec_all_named(&MerkleNode::Empty).unwrap()[..]);
impl<F: TableSchema, R: TableReplication> MerkleUpdater<F, R> {
pub(crate) fn new(data: Arc<TableData<F, R>>) -> Arc<Self> {
let empty_node_hash = blake2sum(&nonversioned_encode(&MerkleNode::Empty).unwrap()[..]);
let ret = Arc::new(Self {
Arc::new(Self {
data,
empty_node_hash,
});
})
}
background.spawn_worker(MerkleWorker(ret.clone()));
ret
pub(crate) fn spawn_workers(self: &Arc<Self>, background: &BackgroundRunner) {
background.spawn_worker(MerkleWorker(self.clone()));
}
fn updater_loop_iter(&self) -> Result<WorkerState, Error> {
@ -276,7 +274,7 @@ where
tx.remove(&self.data.merkle_tree, k.encode())?;
Ok(self.empty_node_hash)
} else {
let vby = rmp_to_vec_all_named(v).map_err(|e| db::TxError::Abort(e.into()))?;
let vby = nonversioned_encode(v).map_err(|e| db::TxError::Abort(e.into()))?;
let rethash = blake2sum(&vby[..]);
tx.insert(&self.data.merkle_tree, k.encode(), vby)?;
Ok(rethash)
@ -293,32 +291,27 @@ where
Ok(self.data.merkle_tree.len()?)
}
pub fn merkle_tree_fast_len(&self) -> Result<Option<usize>, Error> {
Ok(self.data.merkle_tree.fast_len()?)
}
pub fn todo_len(&self) -> Result<usize, Error> {
Ok(self.data.merkle_todo.len()?)
}
}
struct MerkleWorker<F, R>(Arc<MerkleUpdater<F, R>>)
where
F: TableSchema + 'static,
R: TableReplication + 'static;
struct MerkleWorker<F: TableSchema, R: TableReplication>(Arc<MerkleUpdater<F, R>>);
#[async_trait]
impl<F, R> Worker for MerkleWorker<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
impl<F: TableSchema, R: TableReplication> Worker for MerkleWorker<F, R> {
fn name(&self) -> String {
format!("{} Merkle tree updater", F::TABLE_NAME)
format!("{} Merkle", F::TABLE_NAME)
}
fn info(&self) -> Option<String> {
let l = self.0.todo_len().unwrap_or(0);
if l > 0 {
Some(format!("{} items in queue", l))
} else {
None
fn status(&self) -> WorkerStatus {
WorkerStatus {
queue_length: Some(self.0.todo_len().unwrap_or(0) as u64),
..Default::default()
}
}
@ -337,11 +330,11 @@ where
.unwrap()
}
async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
if *must_exit.borrow() {
return WorkerState::Done;
async fn wait_for_work(&mut self) -> WorkerState {
select! {
_ = tokio::time::sleep(Duration::from_secs(60)) => (),
_ = self.0.data.merkle_todo_notify.notified() => (),
}
tokio::time::sleep(Duration::from_secs(10)).await;
WorkerState::Busy
}
}
@ -372,7 +365,7 @@ impl MerkleNode {
fn decode_opt(ent: &Option<db::Value>) -> Result<Self, Error> {
match ent {
None => Ok(MerkleNode::Empty),
Some(v) => Ok(rmp_serde::decode::from_read_ref::<_, MerkleNode>(&v[..])?),
Some(v) => Ok(nonversioned_decode::<MerkleNode>(&v[..])?),
}
}
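Both here and in the layout staging hash earlier in this diff, rmp_to_vec_all_named is replaced by nonversioned_encode for data whose hash must stay stable and which never goes through migration. A hedged sketch of what such a helper amounts to, assuming the rmp-serde and blake2 crates (Garage's blake2sum uses a 32-byte Blake2b digest; 64 bytes are used below for brevity):

use blake2::{Blake2b512, Digest};
use serde::Serialize;

// Named-field msgpack encoding followed by a Blake2b digest over the bytes.
fn hash_of_encoded<T: Serialize>(value: &T) -> Result<Vec<u8>, rmp_serde::encode::Error> {
    let bytes = rmp_serde::to_vec_named(value)?;
    let mut hasher = Blake2b512::new();
    hasher.update(&bytes);
    Ok(hasher.finalize().to_vec())
}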

View File

@ -5,6 +5,8 @@ use garage_db::counted_tree_hack::CountedTree;
/// TableMetrics reference all counter used for metrics
pub struct TableMetrics {
pub(crate) _table_size: ValueObserver<u64>,
pub(crate) _merkle_tree_size: ValueObserver<u64>,
pub(crate) _merkle_todo_len: ValueObserver<u64>,
pub(crate) _gc_todo_len: ValueObserver<u64>,
@ -20,9 +22,43 @@ pub struct TableMetrics {
pub(crate) sync_items_received: Counter<u64>,
}
impl TableMetrics {
pub fn new(table_name: &'static str, merkle_todo: db::Tree, gc_todo: CountedTree) -> Self {
pub fn new(
table_name: &'static str,
store: db::Tree,
merkle_tree: db::Tree,
merkle_todo: db::Tree,
gc_todo: CountedTree,
) -> Self {
let meter = global::meter(table_name);
TableMetrics {
_table_size: meter
.u64_value_observer(
"table.size",
move |observer| {
if let Ok(Some(v)) = store.fast_len() {
observer.observe(
v as u64,
&[KeyValue::new("table_name", table_name)],
);
}
},
)
.with_description("Number of items in table")
.init(),
_merkle_tree_size: meter
.u64_value_observer(
"table.merkle_tree_size",
move |observer| {
if let Ok(Some(v)) = merkle_tree.fast_len() {
observer.observe(
v as u64,
&[KeyValue::new("table_name", table_name)],
);
}
},
)
.with_description("Number of nodes in table's Merkle tree")
.init(),
_merkle_todo_len: meter
.u64_value_observer(
"table.merkle_updater_todo_queue_length",

77
src/table/queue.rs Normal file
View File

@ -0,0 +1,77 @@
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use tokio::select;
use tokio::sync::watch;
use garage_util::background::*;
use garage_util::error::Error;
use crate::replication::*;
use crate::schema::*;
use crate::table::*;
const BATCH_SIZE: usize = 100;
pub(crate) struct InsertQueueWorker<F, R>(pub(crate) Arc<Table<F, R>>)
where
F: TableSchema,
R: TableReplication;
#[async_trait]
impl<F: TableSchema, R: TableReplication> Worker for InsertQueueWorker<F, R> {
fn name(&self) -> String {
format!("{} queue", F::TABLE_NAME)
}
fn status(&self) -> WorkerStatus {
WorkerStatus {
queue_length: Some(self.0.data.insert_queue.len().unwrap_or(0) as u64),
..Default::default()
}
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
let mut kv_pairs = vec![];
let mut values = vec![];
for entry_kv in self.0.data.insert_queue.iter()? {
let (k, v) = entry_kv?;
values.push(self.0.data.decode_entry(&v)?);
kv_pairs.push((k, v));
if kv_pairs.len() > BATCH_SIZE {
break;
}
}
if kv_pairs.is_empty() {
return Ok(WorkerState::Idle);
}
self.0.insert_many(values).await?;
self.0.data.insert_queue.db().transaction(|mut tx| {
for (k, v) in kv_pairs.iter() {
if let Some(v2) = tx.get(&self.0.data.insert_queue, k)? {
if &v2 == v {
tx.remove(&self.0.data.insert_queue, k)?;
}
}
}
Ok(())
})?;
Ok(WorkerState::Busy)
}
async fn wait_for_work(&mut self) -> WorkerState {
select! {
_ = tokio::time::sleep(Duration::from_secs(600)) => (),
_ = self.0.data.insert_queue_notify.notified() => (),
}
WorkerState::Busy
}
}
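The worker batches queued entries, pushes them through the normal replicated insert path, then removes a queue entry only if its stored value is still exactly the one that was flushed, so an entry merged with a newer update while the batch was in flight stays queued. A std-only sketch of one flush iteration (do_insert stands in for Table::insert_many):

use std::collections::BTreeMap;

const BATCH_SIZE: usize = 100;

// One flush iteration over a toy insert queue.
fn flush_queue(
    queue: &mut BTreeMap<Vec<u8>, Vec<u8>>,
    do_insert: impl Fn(&[Vec<u8>]),
) -> bool {
    let batch: Vec<(Vec<u8>, Vec<u8>)> = queue
        .iter()
        .take(BATCH_SIZE)
        .map(|(k, v)| (k.clone(), v.clone()))
        .collect();
    if batch.is_empty() {
        return false; // idle
    }

    let values: Vec<Vec<u8>> = batch.iter().map(|(_, v)| v.clone()).collect();
    do_insert(&values);

    // Remove a queue entry only if its value is still the one we just flushed;
    // if it was merged with a newer update in the meantime, keep it for the
    // next iteration.
    for (k, v) in batch {
        if queue.get(&k) == Some(&v) {
            queue.remove(&k);
        }
    }
    true // busy: there may be more work
}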

View File

@ -2,7 +2,7 @@ use garage_rpc::ring::*;
use garage_util::data::*;
/// Trait to describe how a table shall be replicated
pub trait TableReplication: Send + Sync {
pub trait TableReplication: Send + Sync + 'static {
// See examples in table_sharded.rs and table_fullcopy.rs
// To understand various replication methods

View File

@ -2,11 +2,14 @@ use serde::{Deserialize, Serialize};
use garage_db as db;
use garage_util::data::*;
use garage_util::migrate::Migrate;
use crate::crdt::Crdt;
/// Trait for field used to partition data
pub trait PartitionKey {
pub trait PartitionKey:
Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static
{
/// Get the key used to partition
fn hash(&self) -> Hash;
}
@ -27,7 +30,7 @@ impl PartitionKey for FixedBytes32 {
}
/// Trait for field used to sort data
pub trait SortKey {
pub trait SortKey: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static {
/// Get the key used to sort
fn sort_key(&self) -> &[u8];
}
@ -46,7 +49,7 @@ impl SortKey for FixedBytes32 {
/// Trait for an entry in a table. It must be sortable and partitionable.
pub trait Entry<P: PartitionKey, S: SortKey>:
Crdt + PartialEq + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync
Crdt + PartialEq + Clone + Migrate + Send + Sync + 'static
{
/// Get the key used to partition
fn partition_key(&self) -> &P;
@ -65,23 +68,16 @@ pub trait TableSchema: Send + Sync + 'static {
const TABLE_NAME: &'static str;
/// The partition key used in that table
type P: PartitionKey + Clone + PartialEq + Serialize + for<'de> Deserialize<'de> + Send + Sync;
type P: PartitionKey;
/// The sort key used in that table
type S: SortKey + Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync;
type S: SortKey;
/// The type for an entry in that table
type E: Entry<Self::P, Self::S>;
/// The type for a filter that can be applied to select entries
/// (e.g. filter out deleted entries)
type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync;
// Action to take if not able to decode current version:
// try loading from an older version
/// Try migrating an entry from an older version
fn try_migrate(_bytes: &[u8]) -> Option<Self::E> {
None
}
type Filter: Clone + Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static;
/// Actions triggered by data changing in a table. If such actions
/// include updates to the local database that should be applied

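With these stricter bounds, entry types no longer implement try_migrate; they satisfy the new Migrate requirement instead (usually via InitialFormat when there is no older on-disk format). A hedged sketch with a hypothetical entry type:
use serde::{Deserialize, Serialize};
use garage_util::migrate::InitialFormat;
// Hypothetical entry type; a real one also implements Crdt and Entry<P, S>.
#[derive(Clone, PartialEq, Serialize, Deserialize)]
struct MyEntry {
    partition: String,
    sort: String,
    value: u64,
}
// With no previous on-disk version, InitialFormat is enough: the blanket
// impl<T: InitialFormat> Migrate for T (see src/util/migrate.rs below)
// makes MyEntry satisfy the new `Entry: ... + Migrate` bound.
impl InitialFormat for MyEntry {}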
View File

@ -2,6 +2,7 @@ use std::collections::VecDeque;
use std::sync::Arc;
use std::time::{Duration, Instant};
use arc_swap::ArcSwapOption;
use async_trait::async_trait;
use futures_util::stream::*;
use opentelemetry::KeyValue;
@ -13,7 +14,8 @@ use tokio::sync::{mpsc, watch};
use garage_util::background::*;
use garage_util::data::*;
use garage_util::error::Error;
use garage_util::encode::{debug_serialize, nonversioned_encode};
use garage_util::error::{Error, OkOrMessage};
use garage_rpc::ring::*;
use garage_rpc::system::System;
@ -27,12 +29,12 @@ use crate::*;
// Do anti-entropy every 10 minutes
const ANTI_ENTROPY_INTERVAL: Duration = Duration::from_secs(10 * 60);
pub struct TableSyncer<F: TableSchema + 'static, R: TableReplication + 'static> {
pub struct TableSyncer<F: TableSchema, R: TableReplication> {
system: Arc<System>,
data: Arc<TableData<F, R>>,
merkle: Arc<MerkleUpdater<F, R>>,
add_full_sync_tx: mpsc::UnboundedSender<()>,
add_full_sync_tx: ArcSwapOption<mpsc::UnboundedSender<()>>,
endpoint: Arc<Endpoint<SyncRpc, Self>>,
}
@ -60,12 +62,8 @@ struct TodoPartition {
retain: bool,
}
impl<F, R> TableSyncer<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
pub(crate) fn launch(
impl<F: TableSchema, R: TableReplication> TableSyncer<F, R> {
pub(crate) fn new(
system: Arc<System>,
data: Arc<TableData<F, R>>,
merkle: Arc<MerkleUpdater<F, R>>,
@ -74,34 +72,40 @@ where
.netapp
.endpoint(format!("garage_table/sync.rs/Rpc:{}", F::TABLE_NAME));
let (add_full_sync_tx, add_full_sync_rx) = mpsc::unbounded_channel();
let syncer = Arc::new(Self {
system: system.clone(),
system,
data,
merkle,
add_full_sync_tx,
add_full_sync_tx: ArcSwapOption::new(None),
endpoint,
});
syncer.endpoint.set_handler(syncer.clone());
system.background.spawn_worker(SyncWorker {
syncer: syncer.clone(),
ring_recv: system.ring.clone(),
ring: system.ring.borrow().clone(),
add_full_sync_rx,
todo: vec![],
next_full_sync: Instant::now() + Duration::from_secs(20),
});
syncer
}
pub fn add_full_sync(&self) {
if self.add_full_sync_tx.send(()).is_err() {
error!("({}) Could not add full sync", F::TABLE_NAME);
pub(crate) fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
let (add_full_sync_tx, add_full_sync_rx) = mpsc::unbounded_channel();
self.add_full_sync_tx
.store(Some(Arc::new(add_full_sync_tx)));
bg.spawn_worker(SyncWorker {
syncer: self.clone(),
ring_recv: self.system.ring.clone(),
ring: self.system.ring.borrow().clone(),
add_full_sync_rx,
todo: vec![],
next_full_sync: Instant::now() + Duration::from_secs(20),
});
}
pub fn add_full_sync(&self) -> Result<(), Error> {
let tx = self.add_full_sync_tx.load();
let tx = tx
.as_ref()
.ok_or_message("table sync worker is not running")?;
tx.send(()).ok_or_message("send error")?;
Ok(())
}
// ----
@ -295,7 +299,7 @@ where
);
return Ok(());
}
let root_ck_hash = hash_of::<MerkleNode>(&root_ck)?;
let root_ck_hash = hash_of_merkle_node(&root_ck)?;
// Check if they have the same root checksum
// If so, do nothing.
@ -452,16 +456,12 @@ where
// ======= SYNCHRONIZATION PROCEDURE -- RECEIVER SIDE ======
#[async_trait]
impl<F, R> EndpointHandler<SyncRpc> for TableSyncer<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
impl<F: TableSchema, R: TableReplication> EndpointHandler<SyncRpc> for TableSyncer<F, R> {
async fn handle(self: &Arc<Self>, message: &SyncRpc, from: NodeID) -> Result<SyncRpc, Error> {
match message {
SyncRpc::RootCkHash(range, h) => {
let (_root_ck_key, root_ck) = self.get_root_ck(*range)?;
let hash = hash_of::<MerkleNode>(&root_ck)?;
let hash = hash_of_merkle_node(&root_ck)?;
Ok(SyncRpc::RootCkDifferent(hash != *h))
}
SyncRpc::GetNode(k) => {
@ -490,7 +490,7 @@ where
// -------- Sync Worker ---------
struct SyncWorker<F: TableSchema + 'static, R: TableReplication + 'static> {
struct SyncWorker<F: TableSchema, R: TableReplication> {
syncer: Arc<TableSyncer<F, R>>,
ring_recv: watch::Receiver<Arc<Ring>>,
ring: Arc<Ring>,
@ -499,7 +499,7 @@ struct SyncWorker<F: TableSchema + 'static, R: TableReplication + 'static> {
next_full_sync: Instant,
}
impl<F: TableSchema + 'static, R: TableReplication + 'static> SyncWorker<F, R> {
impl<F: TableSchema, R: TableReplication> SyncWorker<F, R> {
fn add_full_sync(&mut self) {
let system = &self.syncer.system;
let data = &self.syncer.data;
@ -565,17 +565,15 @@ impl<F: TableSchema + 'static, R: TableReplication + 'static> SyncWorker<F, R> {
}
#[async_trait]
impl<F: TableSchema + 'static, R: TableReplication + 'static> Worker for SyncWorker<F, R> {
impl<F: TableSchema, R: TableReplication> Worker for SyncWorker<F, R> {
fn name(&self) -> String {
format!("{} sync", F::TABLE_NAME)
}
fn info(&self) -> Option<String> {
let l = self.todo.len();
if l > 0 {
Some(format!("{} partitions remaining", l))
} else {
None
fn status(&self) -> WorkerStatus {
WorkerStatus {
queue_length: Some(self.todo.len() as u64),
..Default::default()
}
}
@ -588,10 +586,7 @@ impl<F: TableSchema + 'static, R: TableReplication + 'static> Worker for SyncWor
}
}
async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
if *must_exit.borrow() {
return WorkerState::Done;
}
async fn wait_for_work(&mut self) -> WorkerState {
select! {
s = self.add_full_sync_rx.recv() => {
if let Some(()) = s {
@ -620,8 +615,8 @@ impl<F: TableSchema + 'static, R: TableReplication + 'static> Worker for SyncWor
// ---- UTIL ----
fn hash_of<T: Serialize>(x: &T) -> Result<Hash, Error> {
Ok(blake2sum(&rmp_to_vec_all_named(x)?[..]))
fn hash_of_merkle_node(x: &MerkleNode) -> Result<Hash, Error> {
Ok(blake2sum(&nonversioned_encode(x)?[..]))
}
fn join_ordered<'a, K: Ord + Eq, V1, V2>(

View File

@ -14,9 +14,11 @@ use opentelemetry::{
use garage_db as db;
use garage_util::background::BackgroundRunner;
use garage_util::data::*;
use garage_util::error::Error;
use garage_util::metrics::RecordDuration;
use garage_util::migrate::Migrate;
use garage_rpc::system::System;
use garage_rpc::*;
@ -25,16 +27,18 @@ use crate::crdt::Crdt;
use crate::data::*;
use crate::gc::*;
use crate::merkle::*;
use crate::queue::InsertQueueWorker;
use crate::replication::*;
use crate::schema::*;
use crate::sync::*;
use crate::util::*;
pub struct Table<F: TableSchema + 'static, R: TableReplication + 'static> {
pub struct Table<F: TableSchema, R: TableReplication> {
pub system: Arc<System>,
pub data: Arc<TableData<F, R>>,
pub merkle_updater: Arc<MerkleUpdater<F, R>>,
pub syncer: Arc<TableSyncer<F, R>>,
gc: Arc<TableGc<F, R>>,
endpoint: Arc<Endpoint<TableRpc<F>, Self>>,
}
@ -61,11 +65,7 @@ impl<F: TableSchema> Rpc for TableRpc<F> {
type Response = Result<TableRpc<F>, Error>;
}
impl<F, R> Table<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
impl<F: TableSchema, R: TableReplication> Table<F, R> {
// =============== PUBLIC INTERFACE FUNCTIONS (new, insert, get, etc) ===============
pub fn new(instance: F, replication: R, system: Arc<System>, db: &db::Db) -> Arc<Self> {
@ -75,15 +75,16 @@ where
let data = TableData::new(system.clone(), instance, replication, db);
let merkle_updater = MerkleUpdater::launch(&system.background, data.clone());
let merkle_updater = MerkleUpdater::new(data.clone());
let syncer = TableSyncer::launch(system.clone(), data.clone(), merkle_updater.clone());
TableGc::launch(system.clone(), data.clone());
let syncer = TableSyncer::new(system.clone(), data.clone(), merkle_updater.clone());
let gc = TableGc::new(system.clone(), data.clone());
let table = Arc::new(Self {
system,
data,
merkle_updater,
gc,
syncer,
endpoint,
});
@ -93,6 +94,13 @@ where
table
}
pub fn spawn_workers(self: &Arc<Self>, bg: &BackgroundRunner) {
self.merkle_updater.spawn_workers(bg);
self.syncer.spawn_workers(bg);
self.gc.spawn_workers(bg);
bg.spawn_worker(InsertQueueWorker(self.clone()));
}
pub async fn insert(&self, e: &F::E) -> Result<(), Error> {
let tracer = opentelemetry::global::tracer("garage_table");
let span = tracer.start(format!("{} insert", F::TABLE_NAME));
@ -111,7 +119,7 @@ where
let hash = e.partition_key().hash();
let who = self.data.replication.write_nodes(&hash);
let e_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(e)?));
let e_enc = Arc::new(ByteBuf::from(e.encode()?));
let rpc = TableRpc::<F>::Update(vec![e_enc]);
self.system
@ -128,6 +136,11 @@ where
Ok(())
}
/// Insert item locally
pub fn queue_insert(&self, tx: &mut db::Transaction, e: &F::E) -> db::TxResult<(), Error> {
self.data.queue_insert(tx, e)
}
pub async fn insert_many<I, IE>(&self, entries: I) -> Result<(), Error>
where
I: IntoIterator<Item = IE> + Send + Sync,
@ -157,7 +170,7 @@ where
let entry = entry.borrow();
let hash = entry.partition_key().hash();
let who = self.data.replication.write_nodes(&hash);
let e_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(entry)?));
let e_enc = Arc::new(ByteBuf::from(entry.encode()?));
for node in who {
call_list.entry(node).or_default().push(e_enc.clone());
}
@ -259,9 +272,11 @@ where
if not_all_same {
let self2 = self.clone();
let ent2 = ret_entry.clone();
self.system
.background
.spawn_cancellable(async move { self2.repair_on_read(&who[..], ent2).await });
tokio::spawn(async move {
if let Err(e) = self2.repair_on_read(&who[..], ent2).await {
warn!("Error doing repair on read: {}", e);
}
});
}
}
@ -358,11 +373,12 @@ where
.into_iter()
.map(|k| ret.get(&k).unwrap().clone())
.collect::<Vec<_>>();
self.system.background.spawn_cancellable(async move {
tokio::spawn(async move {
for v in to_repair {
self2.repair_on_read(&who[..], v).await?;
if let Err(e) = self2.repair_on_read(&who[..], v).await {
warn!("Error doing repair on read: {}", e);
}
}
Ok(())
});
}
@ -393,7 +409,7 @@ where
// =============== UTILITY FUNCTION FOR CLIENT OPERATIONS ===============
async fn repair_on_read(&self, who: &[Uuid], what: F::E) -> Result<(), Error> {
let what_enc = Arc::new(ByteBuf::from(rmp_to_vec_all_named(&what)?));
let what_enc = Arc::new(ByteBuf::from(what.encode()?));
self.system
.rpc
.try_call_many(
@ -408,11 +424,7 @@ where
}
#[async_trait]
impl<F, R> EndpointHandler<TableRpc<F>> for Table<F, R>
where
F: TableSchema + 'static,
R: TableReplication + 'static,
{
impl<F: TableSchema, R: TableReplication> EndpointHandler<TableRpc<F>> for Table<F, R> {
async fn handle(
self: &Arc<Self>,
msg: &TableRpc<F>,

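Since workers are no longer spawned from Table::new, callers now construct tables first and start their background tasks separately once a BackgroundRunner exists. A minimal sketch; start_table is a hypothetical helper mirroring the signatures shown above:
use std::sync::Arc;
use garage_db as db;
use garage_rpc::system::System;
use garage_util::background::BackgroundRunner;
use crate::replication::TableReplication;
use crate::schema::TableSchema;
use crate::table::Table;
fn start_table<F: TableSchema, R: TableReplication>(
    schema: F,
    replication: R,
    system: Arc<System>,
    db: &db::Db,
    bg: &BackgroundRunner,
) -> Arc<Table<F, R>> {
    // Create the table (no background task is started here anymore)...
    let table = Table::new(schema, replication, system, db);
    // ...then start the GC, Merkle updater, sync and insert-queue workers.
    table.spawn_workers(bg);
    table
}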
View File

@ -49,3 +49,9 @@ impl EnumerationOrder {
}
}
}
impl Default for EnumerationOrder {
fn default() -> Self {
EnumerationOrder::Forward
}
}

View File

@ -1,6 +1,6 @@
[package]
name = "garage_util"
version = "0.8.0"
version = "0.8.1"
authors = ["Alex Auvolat <alex@adnab.me>"]
edition = "2018"
license = "AGPL-3.0"
@ -14,7 +14,7 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_db = { version = "0.8.0", path = "../db" }
garage_db = { version = "0.8.1", path = "../db" }
arc-swap = "1.0"
async-trait = "0.1"
@ -23,6 +23,7 @@ bytes = "1.0"
digest = "0.10"
err-derive = "0.3"
git-version = "0.3.4"
hexdump = "0.1"
xxhash-rust = { version = "0.8", default-features = false, features = ["xxh3"] }
hex = "0.4"
lazy_static = "1.4"

View File

@ -1,48 +0,0 @@
//! Job worker: a generic worker that just processes incoming
//! jobs one by one
use std::sync::Arc;
use async_trait::async_trait;
use tokio::sync::{mpsc, Mutex};
use crate::background::worker::*;
use crate::background::*;
pub(crate) struct JobWorker {
pub(crate) index: usize,
pub(crate) job_chan: Arc<Mutex<mpsc::UnboundedReceiver<(Job, bool)>>>,
pub(crate) next_job: Option<Job>,
}
#[async_trait]
impl Worker for JobWorker {
fn name(&self) -> String {
format!("Job worker #{}", self.index)
}
async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
match self.next_job.take() {
None => return Ok(WorkerState::Idle),
Some(job) => {
job.await?;
Ok(WorkerState::Busy)
}
}
}
async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState {
loop {
match self.job_chan.lock().await.recv().await {
Some((job, cancellable)) => {
if cancellable && *must_exit.borrow() {
continue;
}
self.next_job = Some(job);
return WorkerState::Busy;
}
None => return WorkerState::Done,
}
}
}
}

View File

@ -1,27 +1,18 @@
//! Job runner for futures and async functions
pub mod job_worker;
pub mod worker;
use core::future::Future;
use std::collections::HashMap;
use std::pin::Pin;
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use tokio::sync::{mpsc, watch, Mutex};
use tokio::sync::{mpsc, watch};
use crate::error::Error;
use worker::WorkerProcessor;
pub use worker::{Worker, WorkerState};
pub(crate) type JobOutput = Result<(), Error>;
pub(crate) type Job = Pin<Box<dyn Future<Output = JobOutput> + Send>>;
/// Job runner for futures and async functions
pub struct BackgroundRunner {
send_job: mpsc::UnboundedSender<(Job, bool)>,
send_worker: mpsc::UnboundedSender<Box<dyn Worker>>,
worker_info: Arc<std::sync::Mutex<HashMap<usize, WorkerInfo>>>,
}
@ -29,19 +20,27 @@ pub struct BackgroundRunner {
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct WorkerInfo {
pub name: String,
pub info: Option<String>,
pub status: WorkerStatus,
pub state: WorkerState,
pub errors: usize,
pub consecutive_errors: usize,
pub last_error: Option<(String, u64)>,
}
/// WorkerStatus is a struct returned by the worker with a set of canonical
/// fields to indicate its status to CLI users. All fields are optional.
#[derive(Clone, Serialize, Deserialize, Debug, Default)]
pub struct WorkerStatus {
pub tranquility: Option<u32>,
pub progress: Option<String>,
pub queue_length: Option<u64>,
pub persistent_errors: Option<u64>,
pub freeform: Vec<String>,
}
impl BackgroundRunner {
/// Create a new BackgroundRunner
pub fn new(
n_runners: usize,
stop_signal: watch::Receiver<bool>,
) -> (Arc<Self>, tokio::task::JoinHandle<()>) {
pub fn new(stop_signal: watch::Receiver<bool>) -> (Arc<Self>, tokio::task::JoinHandle<()>) {
let (send_worker, worker_out) = mpsc::unbounded_channel::<Box<dyn Worker>>();
let worker_info = Arc::new(std::sync::Mutex::new(HashMap::new()));
@ -52,24 +51,7 @@ impl BackgroundRunner {
worker_processor.run().await;
});
let (send_job, queue_out) = mpsc::unbounded_channel();
let queue_out = Arc::new(Mutex::new(queue_out));
for i in 0..n_runners {
let queue_out = queue_out.clone();
send_worker
.send(Box::new(job_worker::JobWorker {
index: i,
job_chan: queue_out.clone(),
next_job: None,
}))
.ok()
.unwrap();
}
let bgrunner = Arc::new(Self {
send_job,
send_worker,
worker_info,
});
@ -80,31 +62,6 @@ impl BackgroundRunner {
self.worker_info.lock().unwrap().clone()
}
/// Spawn a task to be run in background
pub fn spawn<T>(&self, job: T)
where
T: Future<Output = JobOutput> + Send + 'static,
{
let boxed: Job = Box::pin(job);
self.send_job
.send((boxed, false))
.ok()
.expect("Could not put job in queue");
}
/// Spawn a task to be run in background. It may get discarded before running if spawned while
/// the runner is stopping
pub fn spawn_cancellable<T>(&self, job: T)
where
T: Future<Output = JobOutput> + Send + 'static,
{
let boxed: Job = Box::pin(job);
self.send_job
.send((boxed, true))
.ok()
.expect("Could not put job in queue");
}
pub fn spawn_worker<W>(&self, worker: W)
where
W: Worker + 'static,

View File

@ -1,6 +1,6 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::time::Duration;
use async_trait::async_trait;
use futures::future::*;
@ -10,10 +10,14 @@ use serde::{Deserialize, Serialize};
use tokio::select;
use tokio::sync::{mpsc, watch};
use crate::background::WorkerInfo;
use crate::background::{WorkerInfo, WorkerStatus};
use crate::error::Error;
use crate::time::now_msec;
// All workers that haven't exited within this time after an exit signal was received
// will be interrupted in the middle of whatever they are doing.
const EXIT_DEADLINE: Duration = Duration::from_secs(8);
#[derive(PartialEq, Copy, Clone, Serialize, Deserialize, Debug)]
pub enum WorkerState {
Busy,
@ -26,7 +30,7 @@ impl std::fmt::Display for WorkerState {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
WorkerState::Busy => write!(f, "Busy"),
WorkerState::Throttled(t) => write!(f, "Thr:{:.3}", t),
WorkerState::Throttled(_) => write!(f, "Busy*"),
WorkerState::Idle => write!(f, "Idle"),
WorkerState::Done => write!(f, "Done"),
}
@ -37,8 +41,8 @@ impl std::fmt::Display for WorkerState {
pub trait Worker: Send {
fn name(&self) -> String;
fn info(&self) -> Option<String> {
None
fn status(&self) -> WorkerStatus {
Default::default()
}
/// Work: do a basic unit of work, if one is available (otherwise, should return
@ -50,10 +54,8 @@ pub trait Worker: Send {
async fn work(&mut self, must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error>;
/// Wait for work: await for some task to become available. This future can be interrupted in
/// the middle for any reason. This future doesn't have to await on must_exit.changed(), we
/// are doing it for you. Therefore it only receives a read reference to must_exit which allows
/// it to check if we are exiting.
async fn wait_for_work(&mut self, must_exit: &watch::Receiver<bool>) -> WorkerState;
/// the middle for any reason, for example if an interrupt signal was received.
async fn wait_for_work(&mut self) -> WorkerState;
}
pub(crate) struct WorkerProcessor {
@ -93,11 +95,9 @@ impl WorkerProcessor {
let task_id = next_task_id;
next_task_id += 1;
let stop_signal = self.stop_signal.clone();
let stop_signal_worker = self.stop_signal.clone();
let mut worker = WorkerHandler {
task_id,
stop_signal,
stop_signal_worker,
worker: new_worker,
state: WorkerState::Busy,
errors: 0,
@ -119,7 +119,7 @@ impl WorkerProcessor {
match wi.get_mut(&worker.task_id) {
Some(i) => {
i.state = worker.state;
i.info = worker.worker.info();
i.status = worker.worker.status();
i.errors = worker.errors;
i.consecutive_errors = worker.consecutive_errors;
if worker.last_error.is_some() {
@ -130,7 +130,7 @@ impl WorkerProcessor {
wi.insert(worker.task_id, WorkerInfo {
name: worker.worker.name(),
state: worker.state,
info: worker.worker.info(),
status: worker.worker.status(),
errors: worker.errors,
consecutive_errors: worker.consecutive_errors,
last_error: worker.last_error.take(),
@ -153,26 +153,14 @@ impl WorkerProcessor {
}
// We are exiting, drain everything
let drain_half_time = Instant::now() + Duration::from_secs(5);
let drain_everything = async move {
while let Some(mut worker) = workers.next().await {
if worker.state == WorkerState::Done {
while let Some(worker) = workers.next().await {
info!(
"Worker {} (TID {}) exited",
"Worker {} (TID {}) exited (last state: {:?})",
worker.worker.name(),
worker.task_id
worker.task_id,
worker.state
);
} else if Instant::now() > drain_half_time {
warn!("Worker {} (TID {}) interrupted between two iterations in state {:?} (this should be fine)", worker.worker.name(), worker.task_id, worker.state);
} else {
workers.push(
async move {
worker.step().await;
worker
}
.boxed(),
);
}
}
};
@ -180,7 +168,7 @@ impl WorkerProcessor {
_ = drain_everything => {
info!("All workers exited peacefully \\o/");
}
_ = tokio::time::sleep(Duration::from_secs(9)) => {
_ = tokio::time::sleep(EXIT_DEADLINE) => {
error!("Some workers could not exit in time, we are cancelling some things in the middle");
}
}
@ -190,7 +178,6 @@ impl WorkerProcessor {
struct WorkerHandler {
task_id: usize,
stop_signal: watch::Receiver<bool>,
stop_signal_worker: watch::Receiver<bool>,
worker: Box<dyn Worker>,
state: WorkerState,
errors: usize,
@ -225,33 +212,19 @@ impl WorkerHandler {
},
WorkerState::Throttled(delay) => {
// Sleep for given delay and go back to busy state
if !*self.stop_signal.borrow() {
select! {
_ = tokio::time::sleep(Duration::from_secs_f32(delay)) => (),
_ = tokio::time::sleep(Duration::from_secs_f32(delay)) => {
self.state = WorkerState::Busy;
}
_ = self.stop_signal.changed() => (),
}
}
self.state = WorkerState::Busy;
}
WorkerState::Idle => {
if *self.stop_signal.borrow() {
select! {
new_st = self.worker.wait_for_work(&self.stop_signal_worker) => {
new_st = self.worker.wait_for_work() => {
self.state = new_st;
}
_ = tokio::time::sleep(Duration::from_secs(1)) => {
// stay in Idle state
}
}
} else {
select! {
new_st = self.worker.wait_for_work(&self.stop_signal_worker) => {
self.state = new_st;
}
_ = self.stop_signal.changed() => {
// stay in Idle state
}
}
_ = self.stop_signal.changed() => (),
}
}
WorkerState::Done => unreachable!(),

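Under the reworked trait, a worker reports a structured WorkerStatus instead of a free-form info string, and wait_for_work no longer receives must_exit. A minimal hypothetical worker, for illustration only (TickWorker is not part of this commit):
use std::time::Duration;
use async_trait::async_trait;
use tokio::sync::watch;
use garage_util::background::{Worker, WorkerState, WorkerStatus};
use garage_util::error::Error;
struct TickWorker {
    ticks: u64,
}
#[async_trait]
impl Worker for TickWorker {
    fn name(&self) -> String {
        "tick worker".into()
    }
    fn status(&self) -> WorkerStatus {
        WorkerStatus {
            freeform: vec![format!("{} ticks done", self.ticks)],
            ..Default::default()
        }
    }
    async fn work(&mut self, _must_exit: &mut watch::Receiver<bool>) -> Result<WorkerState, Error> {
        // Do one unit of work, then report that nothing is pending.
        self.ticks += 1;
        Ok(WorkerState::Idle)
    }
    async fn wait_for_work(&mut self) -> WorkerState {
        // The worker processor races this future against the stop signal for us.
        tokio::time::sleep(Duration::from_secs(10)).await;
        WorkerState::Busy
    }
}
// Registered like the built-in workers: bg.spawn_worker(TickWorker { ticks: 0 });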
View File

@ -140,34 +140,3 @@ pub fn fasthash(data: &[u8]) -> FastHash {
pub fn gen_uuid() -> Uuid {
rand::thread_rng().gen::<[u8; 32]>().into()
}
// RMP serialization with names of fields and variants
/// Serialize to MessagePack
pub fn rmp_to_vec_all_named<T>(val: &T) -> Result<Vec<u8>, rmp_serde::encode::Error>
where
T: Serialize + ?Sized,
{
let mut wr = Vec::with_capacity(128);
let mut se = rmp_serde::Serializer::new(&mut wr)
.with_struct_map()
.with_string_variants();
val.serialize(&mut se)?;
Ok(wr)
}
/// Serialize to JSON, truncating long result
pub fn debug_serialize<T: Serialize>(x: T) -> String {
match serde_json::to_string(&x) {
Ok(ss) => {
if ss.len() > 100 {
// TODO this can panic if 100 is not a codepoint boundary, but inside a 2 Bytes
// (or more) codepoint
ss[..100].to_string()
} else {
ss
}
}
Err(e) => format!("<JSON serialization error: {}>", e),
}
}

42
src/util/encode.rs Normal file
View File

@ -0,0 +1,42 @@
use serde::{Deserialize, Serialize};
/// Serialize to MessagePack, without versioning
/// (see garage_util::migrate for functions that manage versioned
/// data formats)
pub fn nonversioned_encode<T>(val: &T) -> Result<Vec<u8>, rmp_serde::encode::Error>
where
T: Serialize + ?Sized,
{
let mut wr = Vec::with_capacity(128);
let mut se = rmp_serde::Serializer::new(&mut wr)
.with_struct_map()
.with_string_variants();
val.serialize(&mut se)?;
Ok(wr)
}
/// Deserialize from MessagePack, without versioning
/// (see garage_util::migrate for functions that manage versioned
/// data formats)
pub fn nonversioned_decode<T>(bytes: &[u8]) -> Result<T, rmp_serde::decode::Error>
where
T: for<'de> Deserialize<'de> + ?Sized,
{
rmp_serde::decode::from_read_ref::<_, T>(bytes)
}
/// Serialize to JSON, truncating long result
pub fn debug_serialize<T: Serialize>(x: T) -> String {
match serde_json::to_string(&x) {
Ok(ss) => {
if ss.len() > 100 {
// TODO: this can panic if 100 is not a codepoint boundary but falls inside a
// multi-byte codepoint
ss[..100].to_string()
} else {
ss
}
}
Err(e) => format!("<JSON serialization error: {}>", e),
}
}
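These helpers only wrap rmp-serde with struct-map encoding and no version marker; a small round-trip sketch (the Point type is hypothetical):
use serde::{Deserialize, Serialize};
use garage_util::encode::{nonversioned_decode, nonversioned_encode};
#[test]
fn nonversioned_roundtrip() {
    #[derive(Serialize, Deserialize, PartialEq, Debug)]
    struct Point {
        x: u32,
        y: u32,
    }
    let p = Point { x: 1, y: 2 };
    // Field names are kept in the MessagePack output (struct-map encoding).
    let bytes = nonversioned_encode(&p).unwrap();
    let q: Point = nonversioned_decode(&bytes).unwrap();
    assert_eq!(p, q);
}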

View File

@ -7,6 +7,7 @@ use err_derive::Error;
use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
use crate::data::*;
use crate::encode::debug_serialize;
/// Regroup all Garage errors
#[derive(Debug, Error)]

View File

@ -1,4 +1,4 @@
pub fn format_table(data: Vec<String>) {
pub fn format_table_to_string(data: Vec<String>) -> String {
let data = data
.iter()
.map(|s| s.split('\t').collect::<Vec<_>>())
@ -24,5 +24,9 @@ pub fn format_table(data: Vec<String>) {
out.push('\n');
}
print!("{}", out);
out
}
pub fn format_table(data: Vec<String>) {
print!("{}", format_table_to_string(data));
}
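The table formatter keeps its existing convention: each String is one row and columns are separated by '\t', then padded so they line up. A small hedged usage sketch of the new string-returning variant (example rows are hypothetical):
use garage_util::formater::format_table_to_string;
// The rendered string pads "NAME" and "worker-1" to the same column width.
fn render_example() -> String {
    format_table_to_string(vec![
        "NAME\tSTATE".to_string(),
        "worker-1\tBusy".to_string(),
    ])
}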

View File

@ -8,9 +8,11 @@ pub mod background;
pub mod config;
pub mod crdt;
pub mod data;
pub mod encode;
pub mod error;
pub mod formater;
pub mod metrics;
pub mod migrate;
pub mod persister;
pub mod time;
pub mod token_bucket;

159
src/util/migrate.rs Normal file
View File

@ -0,0 +1,159 @@
use serde::{Deserialize, Serialize};
/// Indicates that this type has an encoding that can be migrated from
/// a previous version upon upgrades of Garage.
pub trait Migrate: Serialize + for<'de> Deserialize<'de> + 'static {
/// A sequence of bytes to add at the beginning of the serialized
/// string, to identify that the data is of this version.
const VERSION_MARKER: &'static [u8] = b"";
/// The previous version of this data type, from which items of this version
/// can be migrated.
type Previous: Migrate;
/// The migration function that transforms a value decoded in the old format
/// to an up-to-date value.
fn migrate(previous: Self::Previous) -> Self;
/// Decode an encoded version of this type, going through a migration if necessary.
fn decode(bytes: &[u8]) -> Option<Self> {
let marker_len = Self::VERSION_MARKER.len();
if bytes.get(..marker_len) == Some(Self::VERSION_MARKER) {
if let Ok(value) = rmp_serde::decode::from_read_ref::<_, Self>(&bytes[marker_len..]) {
return Some(value);
}
}
Self::Previous::decode(bytes).map(Self::migrate)
}
/// Encode this type with an optional version marker
fn encode(&self) -> Result<Vec<u8>, rmp_serde::encode::Error> {
let mut wr = Vec::with_capacity(128);
wr.extend_from_slice(Self::VERSION_MARKER);
let mut se = rmp_serde::Serializer::new(&mut wr)
.with_struct_map()
.with_string_variants();
self.serialize(&mut se)?;
Ok(wr)
}
}
/// Indicates that this type has no previous encoding version to be migrated from.
pub trait InitialFormat: Serialize + for<'de> Deserialize<'de> + 'static {
/// A sequence of bytes to add at the beginning of the serialized
/// string, to identify that the data is of this version.
const VERSION_MARKER: &'static [u8] = b"";
}
impl<T: InitialFormat> Migrate for T {
const VERSION_MARKER: &'static [u8] = <T as InitialFormat>::VERSION_MARKER;
type Previous = NoPrevious;
fn migrate(_previous: Self::Previous) -> Self {
unreachable!();
}
}
/// Internal type used by InitialFormat, not meant for general use.
#[derive(Serialize, Deserialize)]
pub enum NoPrevious {}
impl Migrate for NoPrevious {
type Previous = NoPrevious;
fn migrate(_previous: Self::Previous) -> Self {
unreachable!();
}
fn decode(_bytes: &[u8]) -> Option<Self> {
None
}
fn encode(&self) -> Result<Vec<u8>, rmp_serde::encode::Error> {
unreachable!()
}
}
#[cfg(test)]
mod test {
use super::*;
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
struct V1 {
a: usize,
b: String,
}
impl InitialFormat for V1 {}
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
struct V2 {
a: usize,
b: Vec<String>,
c: String,
}
impl Migrate for V2 {
const VERSION_MARKER: &'static [u8] = b"GtestV2";
type Previous = V1;
fn migrate(prev: V1) -> V2 {
V2 {
a: prev.a,
b: vec![prev.b],
c: String::new(),
}
}
}
#[test]
fn test_v1() {
let x = V1 {
a: 12,
b: "hello".into(),
};
let x_enc = x.encode().unwrap();
let y = V1::decode(&x_enc).unwrap();
assert_eq!(x, y);
}
#[test]
fn test_v2() {
let x = V2 {
a: 12,
b: vec!["hello".into(), "world".into()],
c: "plop".into(),
};
let x_enc = x.encode().unwrap();
assert_eq!(&x_enc[..V2::VERSION_MARKER.len()], V2::VERSION_MARKER);
let y = V2::decode(&x_enc).unwrap();
assert_eq!(x, y);
}
#[test]
fn test_migrate() {
let x = V1 {
a: 12,
b: "hello".into(),
};
let x_enc = x.encode().unwrap();
let xx = V1::decode(&x_enc).unwrap();
assert_eq!(x, xx);
let y = V2::decode(&x_enc).unwrap();
assert_eq!(
y,
V2 {
a: 12,
b: vec!["hello".into()],
c: "".into(),
}
);
let y_enc = y.encode().unwrap();
assert_eq!(&y_enc[..V2::VERSION_MARKER.len()], V2::VERSION_MARKER);
let z = V2::decode(&y_enc).unwrap();
assert_eq!(y, z);
}
}

View File

@ -3,21 +3,16 @@ use std::path::{Path, PathBuf};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use serde::{Deserialize, Serialize};
use crate::data::*;
use crate::error::Error;
use crate::migrate::Migrate;
pub struct Persister<T: Serialize + for<'de> Deserialize<'de>> {
pub struct Persister<T: Migrate> {
path: PathBuf,
_marker: std::marker::PhantomData<T>,
}
impl<T> Persister<T>
where
T: Serialize + for<'de> Deserialize<'de>,
{
impl<T: Migrate> Persister<T> {
pub fn new(base_dir: &Path, file_name: &str) -> Self {
let mut path = base_dir.to_path_buf();
path.push(file_name);
@ -27,18 +22,37 @@ where
}
}
fn decode(&self, bytes: &[u8]) -> Result<T, Error> {
match T::decode(bytes) {
Some(v) => Ok(v),
None => {
error!(
"Unable to decode persisted data file {}",
self.path.display()
);
for line in hexdump::hexdump_iter(bytes) {
debug!("{}", line);
}
Err(Error::Message(format!(
"Unable to decode persisted data file {}",
self.path.display()
)))
}
}
}
pub fn load(&self) -> Result<T, Error> {
let mut file = std::fs::OpenOptions::new().read(true).open(&self.path)?;
let mut bytes = vec![];
file.read_to_end(&mut bytes)?;
let value = rmp_serde::decode::from_read_ref(&bytes[..])?;
let value = self.decode(&bytes[..])?;
Ok(value)
}
pub fn save(&self, t: &T) -> Result<(), Error> {
let bytes = rmp_to_vec_all_named(t)?;
let bytes = t.encode()?;
let mut file = std::fs::OpenOptions::new()
.write(true)
@ -57,12 +71,12 @@ where
let mut bytes = vec![];
file.read_to_end(&mut bytes).await?;
let value = rmp_serde::decode::from_read_ref(&bytes[..])?;
let value = self.decode(&bytes[..])?;
Ok(value)
}
pub async fn save_async(&self, t: &T) -> Result<(), Error> {
let bytes = rmp_to_vec_all_named(t)?;
let bytes = t.encode()?;
let mut file = tokio::fs::File::create(&self.path).await?;
file.write_all(&bytes[..]).await?;

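Persisted state types now go through Migrate rather than raw rmp-serde, so on-disk files written by an older Garage can still be decoded after an upgrade. A hedged sketch; NodeState and the file name are hypothetical:
use serde::{Deserialize, Serialize};
use garage_util::error::Error;
use garage_util::migrate::InitialFormat;
use garage_util::persister::Persister;
#[derive(Serialize, Deserialize)]
struct NodeState {
    counter: u64,
}
impl InitialFormat for NodeState {}
fn persist_example(meta_dir: &std::path::Path) -> Result<(), Error> {
    let persister: Persister<NodeState> = Persister::new(meta_dir, "node_state");
    persister.save(&NodeState { counter: 1 })?;
    let _loaded: NodeState = persister.load()?;
    Ok(())
}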
View File

@ -1,6 +1,6 @@
[package]
name = "garage_web"
version = "0.8.0"
version = "0.8.1"
authors = ["Alex Auvolat <alex@adnab.me>", "Quentin Dufour <quentin@dufour.io>"]
edition = "2018"
license = "AGPL-3.0"
@ -14,10 +14,10 @@ path = "lib.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
garage_api = { version = "0.8.0", path = "../api" }
garage_model = { version = "0.8.0", path = "../model" }
garage_util = { version = "0.8.0", path = "../util" }
garage_table = { version = "0.8.0", path = "../table" }
garage_api = { version = "0.8.1", path = "../api" }
garage_model = { version = "0.8.1", path = "../model" }
garage_util = { version = "0.8.1", path = "../util" }
garage_table = { version = "0.8.1", path = "../table" }
err-derive = "0.3"
tracing = "0.1.30"