Compare commits
3 commits: main...bug/check_

Author | SHA1 | Date
---|---|---
Quentin Dufour | fe053957e5 | 2 years ago
Quentin Dufour | e680efb361 | 2 years ago
Quentin Dufour | 4d269787b2 | 2 years ago
@@ -0,0 +1 @@
*.pdf filter=lfs diff=lfs merge=lfs -text

File diff suppressed because it is too large
@@ -1,166 +1,149 @@
{
  buildSystem ? builtins.currentSystem,
  targetSystem ? buildSystem,
  gitVersion ? null,
  system ? builtins.currentSystem,
  release ? false,
  features ? null,
  target ? "x86_64-unknown-linux-musl",
  compileMode ? null,
  git_version ? null,
}:

let
  pkgsSrc = import ./nix/pkgs.nix;
  newBuildTarget = {
    nixPkgsSystem,
    rustTarget ? nixPkgsSystem,
    nativeBuildInputs ? pkgsCross: [],
    rustFlags ? pkgsCross: [],
  }: {
    inherit nixPkgsSystem rustTarget nativeBuildInputs rustFlags;
  };

  # Centralize per-target configuration in a single place.
  buildTargets = {
    "x86_64-linux" = newBuildTarget {
      nixPkgsSystem = "x86_64-unknown-linux-musl";
    };
with import ./nix/common.nix;

    "i686-linux" = newBuildTarget {
      nixPkgsSystem = "i686-unknown-linux-musl";
    };

    "aarch64-linux" = newBuildTarget {
      nixPkgsSystem = "aarch64-unknown-linux-musl";
    };
let
  crossSystem = { config = target; };
in let
  log = v: builtins.trace v v;

    # Old Raspberry Pis (not currently supported due to linking errors with
    # libsqlite3 and libsodium)
    #"armv6l-linux" = newBuildTarget {
    #  nixPkgsSystem = "armv6l-unknown-linux-musleabihf";
    #  rustTarget = "arm-unknown-linux-musleabihf";
    #};

    "x86_64-windows" = newBuildTarget {
      nixPkgsSystem = "x86_64-w64-mingw32";
      rustTarget = "x86_64-pc-windows-gnu";
      nativeBuildInputs = pkgsCross: [ pkgsCross.windows.pthreads ];
      rustFlags = pkgsCross: [
        "-C" "link-arg=-L${pkgsCross.windows.pthreads}/lib"
      ];
    };
  pkgs = import pkgsSrc {
    inherit system crossSystem;
    overlays = [ cargo2nixOverlay ];
  };

  buildTarget = buildTargets.${targetSystem};

  pkgs = import pkgsSrc { system = buildSystem; };
  pkgsCross = import pkgsSrc {
    system = buildSystem;
    crossSystem.config = buildTarget.nixPkgsSystem;
  /*
    Rust and Nix triples are not the same. Cargo2nix has a dedicated library
    to convert Nix triples to Rust ones. We need this conversion as we want to
    set later options linked to our (Rust) target in a generic way. Not only
    is the triple terminology different, but the "roles" are also named differently:
    Nix uses a build/host/target terminology where Nix's "host" maps to Cargo's "target".
  */
  rustTarget = log (pkgs.rustBuilder.rustLib.rustTriple pkgs.stdenv.hostPlatform);

  /*
    Cargo2nix is built for rustOverlay, which installs Rust from Mozilla releases.
    We want our own Rust to avoid incompatibilities, like we had with musl 1.2.0:
    rustc was built with musl < 1.2.0 while nix shipped musl >= 1.2.0, which led to compilation breakage.
    So we want a Rust release that is bound to our Nix repository to avoid these problems.
    See here for more info: https://musl.libc.org/time64.html
    Because Cargo2nix does not support the Rust environment shipped by NixOS,
    we emulate the structure of the Rust object created by rustOverlay.
    In practice, rustOverlay ships rustc+cargo in a single derivation while
    NixOS ships them in separate ones. We reunite them with symlinkJoin.
  */
  rustChannel = pkgs.symlinkJoin {
    name = "rust-channel";
    paths = [
      pkgs.rustPlatform.rust.rustc
      pkgs.rustPlatform.rust.cargo
    ];
  };

  rustTarget = buildTarget.rustTarget;

  toolchain = let
    fenix = import (pkgs.fetchFromGitHub {
      owner = "nix-community";
      repo = "fenix";
      rev = "81ab0b4f7ae9ebb57daa0edf119c4891806e4d3a";
      hash = "sha256-bZmI7ytPAYLpyFNgj5xirDkKuAniOkj1xHdv5aIJ5GM=";
    }) {
      system = buildSystem;
    };

    mkToolchain = fenixTarget: fenixTarget.toolchainOf {
      channel = "1.68.2";
      sha256 = "sha256-4vetmUhTUsew5FODnjlnQYInzyLNyDwocGa4IvMk3DM=";
    };
  in
    fenix.combine [
      (mkToolchain fenix).rustc
      (mkToolchain fenix).rustfmt
      (mkToolchain fenix).cargo
      (mkToolchain fenix).clippy
      (mkToolchain fenix.targets.${rustTarget}).rust-std
    ];
  /*
    Cargo2nix provides many overrides by default; you can take inspiration from them:
    https://github.com/cargo2nix/cargo2nix/blob/master/overlay/overrides.nix

    You can get a complete list of the available options by looking at the overridden object, mkcrate:
    https://github.com/cargo2nix/cargo2nix/blob/master/overlay/mkcrate.nix
  */
  overrides = pkgs.rustBuilder.overrides.all ++ [
    /*
      [1] We need to alter Nix hardening to make static binaries: PIE
      (Position Independent Executables) seems to be supported only on amd64. Having
      this flag set either 1. makes our executables crash or 2. compiles them as dynamic on some platforms.
      Here, we deactivate it. Later (see `codegenOpts`), we reactivate it for supported targets
      (only amd64 currently) through the `-static-pie` flag.
      PIE is a feature used by ASLR, which helps mitigate security issues.
      Learn more about Nix hardening at: https://github.com/NixOS/nixpkgs/blob/master/pkgs/build-support/cc-wrapper/add-hardening.sh
    */
    (pkgs.rustBuilder.rustLib.makeOverride {
      name = "garage";
      overrideAttrs = drv: { hardeningDisable = [ "pie" ]; };
    })

    (pkgs.rustBuilder.rustLib.makeOverride {
      name = "garage_rpc";

      /*
        [2] We want to inject the git version while keeping the build deterministic.
        As we do not want to consider the .git folder as part of the input source,
        we ask the user (often the CI) to pass the value to Nix.
      */
      overrideAttrs = drv:
        (if git_version != null then {
          preConfigure = ''
            ${drv.preConfigure or ""}
            export GIT_VERSION="${git_version}"
          '';
        } else {});

      /*
        [3] We ship some parts of the code disabled by default by putting them behind a flag.
        It speeds up compilation (when the feature is not required) and released crates have
        fewer dependencies by default (smaller attack surface, less disk space, etc.).
        But we want to ship these additional features when we release Garage.
        In the end, we chose to exclude all features from debug builds while putting (all of) them in the release builds.
        Currently, the only feature of Garage is kubernetes-discovery from the garage_rpc crate.
      */
      overrideArgs = old: {
        features = if release then [ "kubernetes-discovery" ] else [];
      };
    })

  ];

  packageFun = import ./Cargo.nix;

  /*
    We compile fully static binaries with musl to simplify deployment on most systems.
    When possible, we reactivate PIE hardening (see above).

    Also, if you set the RUSTFLAGS environment variable, the following parameters will
    be ignored.

    For more information on static builds, please refer to Rust's RFC 1721:
    https://rust-lang.github.io/rfcs/1721-crt-static.html#specifying-dynamicstatic-c-runtime-linkage
  */

  codegenOpts = {
    "armv6l-unknown-linux-musleabihf" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* compile as dynamic with static-pie */
    "aarch64-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* segfault with static-pie */
    "i686-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static" ]; /* segfault with static-pie */
    "x86_64-unknown-linux-musl" = [ "target-feature=+crt-static" "link-arg=-static-pie" ];
  };

  naersk = let
    naerskSrc = pkgs.fetchFromGitHub {
      owner = "nix-community";
      repo = "naersk";
      rev = "d9a33d69a9c421d64c8d925428864e93be895dcc";
      hash = "sha256-e136hTT7LqQ2QjOTZQMW+jnsevWwBpMj78u6FRUsH9I=";
  /*
    The following definition is not elegant, as we use a low-level function of Cargo2nix
    that enables us to pass our custom rustChannel object. We need this low-level definition
    to pass Nix's Rust toolchain instead of Mozilla's.

    target is mandatory but must be kept null to allow cargo2nix to set it to the appropriate value
    for each crate.
  */
  rustPkgs = pkgs.rustBuilder.makePackageSet {
    inherit packageFun rustChannel release codegenOpts;
    packageOverrides = overrides;
    target = null;

    buildRustPackages = pkgs.buildPackages.rustBuilder.makePackageSet {
      inherit rustChannel packageFun codegenOpts;
      packageOverrides = overrides;
      target = null;
    };
  in
    pkgs.callPackages naerskSrc {
      cargo = toolchain;
      rustc = toolchain;
    };

  builtFeatures = if features != null then
    features
  else (
    [ "garage/bundled-libs" "garage/sled" "garage/lmdb" "garage/k2v" ] ++ (
      if release then [
        "garage/consul-discovery"
        "garage/kubernetes-discovery"
        "garage/metrics"
        "garage/telemetry-otlp"
        "garage/sqlite"
      ] else [ ]
    )
  );

  # For some reason the pkgsCross.pkgsStatic build of libsodium doesn't contain
  # a `.a` file when compiled to a Windows target, but rather contains
  # a `.dll.a` file, which libsodium-sys doesn't pick up on. Copying the one to
  # be the other seems to work.
  libsodium = pkgs.runCommand "libsodium-wrapped" {
    libsodium = pkgsCross.pkgsStatic.libsodium;
  } ''
    cp -rL "$libsodium" "$out"
    chmod -R +w "$out"
    if [ ! -e "$out"/lib/libsodium.a ] && [ -f "$out"/lib/libsodium.dll.a ]; then
      cp "$out"/lib/libsodium.dll.a "$out"/lib/libsodium.a
    fi
  '';

in rec {
  inherit pkgs pkgsCross;

  # Exported separately so it can be used from shell.nix
  buildEnv = rec {
    nativeBuildInputs = (buildTarget.nativeBuildInputs pkgsCross) ++ [
      toolchain
      pkgs.protobuf

      # Required for shell because of Rust dependency build scripts which must
      # run on the build system.
      pkgs.stdenv.cc
    ];

    SODIUM_LIB_DIR = "${libsodium}/lib";

    # Required because the ring crate is special. This also seems to have
    # fixed some issues with the x86_64-windows cross-compile :shrug:
    TARGET_CC = "${pkgsCross.stdenv.cc}/bin/${pkgsCross.stdenv.cc.targetPrefix}cc";

    CARGO_BUILD_TARGET = rustTarget;
    CARGO_BUILD_RUSTFLAGS = [
      "-C" "target-feature=+crt-static"
      "-C" "link-arg=-static"

      # https://github.com/rust-lang/cargo/issues/4133
      "-C" "linker=${TARGET_CC}"
    ] ++ (buildTarget.rustFlags pkgsCross);
  };

  build = naersk.buildPackage (rec {
    inherit release;

    src = ./.;
    strictDeps = true;
    doCheck = false;

    cargoBuildOptions = prev: prev ++ [
      "--features=${builtins.concatStringsSep "," builtFeatures}"
    ];
  } // buildEnv);
}
in
  if compileMode == "test"
  then pkgs.symlinkJoin {
    name = "garage-tests";
    paths = builtins.map (key: rustPkgs.workspace.${key} { inherit compileMode; }) (builtins.attrNames rustPkgs.workspace);
  }
  else rustPkgs.workspace.garage { inherit compileMode; }
@ -1,17 +0,0 @@ |
||||
# Browse doc |
||||
|
||||
Run in this directory: |
||||
|
||||
``` |
||||
python3 -m http.server |
||||
``` |
||||
|
||||
And open in your browser: |
||||
- http://localhost:8000/garage-admin-v0.html |
||||
|
||||
# Validate doc |
||||
|
||||
``` |
||||
wget https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/6.1.0/openapi-generator-cli-6.1.0.jar -O openapi-generator-cli.jar |
||||
java -jar openapi-generator-cli.jar validate -i garage-admin-v0.yml |
||||
``` |
@ -1,59 +0,0 @@ |
||||
/* montserrat-300 - latin */ |
||||
@font-face { |
||||
font-family: 'Montserrat'; |
||||
font-style: normal; |
||||
font-weight: 300; |
||||
src: local(''), |
||||
url('../fonts/montserrat-v25-latin-300.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ |
||||
url('../fonts/montserrat-v25-latin-300.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ |
||||
} |
||||
|
||||
/* montserrat-regular - latin */ |
||||
@font-face { |
||||
font-family: 'Montserrat'; |
||||
font-style: normal; |
||||
font-weight: 400; |
||||
src: local(''), |
||||
url('../fonts/montserrat-v25-latin-regular.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ |
||||
url('../fonts/montserrat-v25-latin-regular.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ |
||||
} |
||||
|
||||
/* montserrat-700 - latin */ |
||||
@font-face { |
||||
font-family: 'Montserrat'; |
||||
font-style: normal; |
||||
font-weight: 700; |
||||
src: local(''), |
||||
url('../fonts/montserrat-v25-latin-700.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ |
||||
url('../fonts/montserrat-v25-latin-700.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ |
||||
} |
||||
/* roboto-300 - latin */ |
||||
@font-face { |
||||
font-family: 'Roboto'; |
||||
font-style: normal; |
||||
font-weight: 300; |
||||
src: local(''), |
||||
url('../fonts/roboto-v30-latin-300.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ |
||||
url('../fonts/roboto-v30-latin-300.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ |
||||
} |
||||
|
||||
/* roboto-regular - latin */ |
||||
@font-face { |
||||
font-family: 'Roboto'; |
||||
font-style: normal; |
||||
font-weight: 400; |
||||
src: local(''), |
||||
url('../fonts/roboto-v30-latin-regular.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ |
||||
url('../fonts/roboto-v30-latin-regular.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ |
||||
} |
||||
|
||||
/* roboto-700 - latin */ |
||||
@font-face { |
||||
font-family: 'Roboto'; |
||||
font-style: normal; |
||||
font-weight: 700; |
||||
src: local(''), |
||||
url('../fonts/roboto-v30-latin-700.woff2') format('woff2'), /* Chrome 26+, Opera 23+, Firefox 39+ */ |
||||
url('../fonts/roboto-v30-latin-700.woff') format('woff'); /* Chrome 6+, Firefox 3.6+, IE 9+, Safari 5.1+ */ |
||||
} |
||||
|
Binary files not shown (12 files).
@ -1,24 +0,0 @@ |
||||
<!DOCTYPE html> |
||||
<html> |
||||
<head> |
||||
<title>Garage Adminstration API v0</title> |
||||
<!-- needed for adaptive design --> |
||||
<meta charset="utf-8"/> |
||||
<meta name="viewport" content="width=device-width, initial-scale=1"> |
||||
<link href="./css/redoc.css" rel="stylesheet"> |
||||
|
||||
<!-- |
||||
Redoc doesn't change outer page styles |
||||
--> |
||||
<style> |
||||
body { |
||||
margin: 0; |
||||
padding: 0; |
||||
} |
||||
</style> |
||||
</head> |
||||
<body> |
||||
<redoc spec-url='./garage-admin-v0.yml'></redoc> |
||||
<script src="./redoc.standalone.js"> </script> |
||||
</body> |
||||
</html> |
File diff suppressed because it is too large
@ -1,24 +0,0 @@ |
||||
<!DOCTYPE html> |
||||
<html> |
||||
<head> |
||||
<title>Garage Adminstration API v0</title> |
||||
<!-- needed for adaptive design --> |
||||
<meta charset="utf-8"/> |
||||
<meta name="viewport" content="width=device-width, initial-scale=1"> |
||||
<link href="./css/redoc.css" rel="stylesheet"> |
||||
|
||||
<!-- |
||||
Redoc doesn't change outer page styles |
||||
--> |
||||
<style> |
||||
body { |
||||
margin: 0; |
||||
padding: 0; |
||||
} |
||||
</style> |
||||
</head> |
||||
<body> |
||||
<redoc spec-url='./garage-admin-v1.yml'></redoc> |
||||
<script src="./redoc.standalone.js"> </script> |
||||
</body> |
||||
</html> |
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@ -1,123 +0,0 @@ |
||||
+++ |
||||
title = "Golang" |
||||
weight = 30 |
||||
+++ |
||||
|
||||
## S3 |
||||
|
||||
*Coming soon* |
||||
|
||||
Some refs: |
||||
- Minio minio-go-sdk |
||||
- [Reference](https://docs.min.io/docs/golang-client-api-reference.html) |
||||
|
||||
- Amazon aws-sdk-go-v2 |
||||
- [Installation](https://aws.github.io/aws-sdk-go-v2/docs/getting-started/) |
||||
- [Reference](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3) |
||||
- [Example](https://aws.github.io/aws-sdk-go-v2/docs/code-examples/s3/putobject/) |
||||
|
||||
## K2V |
||||
|
||||
*Coming soon* |
||||
|
||||
## Administration |
||||
|
||||
Install the SDK with: |
||||
|
||||
```bash |
||||
go get git.deuxfleurs.fr/garage-sdk/garage-admin-sdk-golang |
||||
``` |
||||
|
||||
A short example: |
||||
|
||||
```go |
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"os" |
||||
"strings" |
||||
garage "git.deuxfleurs.fr/garage-sdk/garage-admin-sdk-golang" |
||||
) |
||||
|
||||
func main() { |
||||
// Initialization |
||||
configuration := garage.NewConfiguration() |
||||
configuration.Host = "127.0.0.1:3903" |
||||
client := garage.NewAPIClient(configuration) |
||||
ctx := context.WithValue(context.Background(), garage.ContextAccessToken, "s3cr3t") |
||||
|
||||
// Nodes |
||||
fmt.Println("--- nodes ---") |
||||
nodes, _, _ := client.NodesApi.GetNodes(ctx).Execute() |
||||
fmt.Fprintf(os.Stdout, "First hostname: %v\n", nodes.KnownNodes[0].Hostname) |
||||
capa := int64(1000000000) |
||||
change := []garage.NodeRoleChange{ |
||||
garage.NodeRoleChange{NodeRoleUpdate: &garage.NodeRoleUpdate { |
||||
Id: *nodes.KnownNodes[0].Id, |
||||
Zone: "dc1", |
||||
Capacity: *garage.NewNullableInt64(&capa), |
||||
Tags: []string{ "fast", "amd64" }, |
||||
}}, |
||||
} |
||||
staged, _, _ := client.LayoutApi.AddLayout(ctx).NodeRoleChange(change).Execute() |
||||
msg, _, _ := client.LayoutApi.ApplyLayout(ctx).LayoutVersion(*garage.NewLayoutVersion(staged.Version + 1)).Execute() |
||||
fmt.Printf(strings.Join(msg.Message, "\n")) // Layout configured |
||||
|
||||
health, _, _ := client.NodesApi.GetHealth(ctx).Execute() |
||||
fmt.Printf("Status: %s, nodes: %v/%v, storage: %v/%v, partitions: %v/%v\n", health.Status, health.ConnectedNodes, health.KnownNodes, health.StorageNodesOk, health.StorageNodes, health.PartitionsAllOk, health.Partitions) |
||||
|
||||
// Key |
||||
fmt.Println("\n--- key ---") |
||||
key := "openapi-key" |
||||
keyInfo, _, _ := client.KeyApi.AddKey(ctx).AddKeyRequest(garage.AddKeyRequest{Name: *garage.NewNullableString(&key) }).Execute() |
||||
defer client.KeyApi.DeleteKey(ctx).Id(*keyInfo.AccessKeyId).Execute() |
||||
fmt.Printf("AWS_ACCESS_KEY_ID=%s\nAWS_SECRET_ACCESS_KEY=%s\n", *keyInfo.AccessKeyId, *keyInfo.SecretAccessKey.Get()) |
||||
|
||||
id := *keyInfo.AccessKeyId |
||||
canCreateBucket := true |
||||
updateKeyRequest := *garage.NewUpdateKeyRequest() |
||||
updateKeyRequest.SetName("openapi-key-updated") |
||||
updateKeyRequest.SetAllow(garage.UpdateKeyRequestAllow { CreateBucket: &canCreateBucket }) |
||||
update, _, _ := client.KeyApi.UpdateKey(ctx).Id(id).UpdateKeyRequest(updateKeyRequest).Execute() |
||||
fmt.Printf("Updated %v with key name %v\n", *update.AccessKeyId, *update.Name) |
||||
|
||||
keyList, _, _ := client.KeyApi.ListKeys(ctx).Execute() |
||||
fmt.Printf("Keys count: %v\n", len(keyList)) |
||||
|
||||
// Bucket |
||||
fmt.Println("\n--- bucket ---") |
||||
global_name := "global-ns-openapi-bucket" |
||||
local_name := "local-ns-openapi-bucket" |
||||
bucketInfo, _, _ := client.BucketApi.CreateBucket(ctx).CreateBucketRequest(garage.CreateBucketRequest{ |
||||
GlobalAlias: &global_name, |
||||
LocalAlias: &garage.CreateBucketRequestLocalAlias { |
||||
AccessKeyId: keyInfo.AccessKeyId, |
||||
Alias: &local_name, |
||||
}, |
||||
}).Execute() |
||||
defer client.BucketApi.DeleteBucket(ctx).Id(*bucketInfo.Id).Execute() |
||||
fmt.Printf("Bucket id: %s\n", *bucketInfo.Id) |
||||
|
||||
updateBucketRequest := *garage.NewUpdateBucketRequest() |
||||
website := garage.NewUpdateBucketRequestWebsiteAccess() |
||||
website.SetEnabled(true) |
||||
website.SetIndexDocument("index.html") |
||||
website.SetErrorDocument("errors/4xx.html") |
||||
updateBucketRequest.SetWebsiteAccess(*website) |
||||
quotas := garage.NewUpdateBucketRequestQuotas() |
||||
quotas.SetMaxSize(1000000000) |
||||
quotas.SetMaxObjects(999999999) |
||||
updateBucketRequest.SetQuotas(*quotas) |
||||
updatedBucket, _, _ := client.BucketApi.UpdateBucket(ctx).Id(*bucketInfo.Id).UpdateBucketRequest(updateBucketRequest).Execute() |
||||
fmt.Printf("Bucket %v website activation: %v\n", *updatedBucket.Id, *updatedBucket.WebsiteAccess) |
||||
|
||||
bucketList, _, _ := client.BucketApi.ListBuckets(ctx).Execute() |
||||
fmt.Printf("Bucket count: %v\n", len(bucketList)) |
||||
} |
||||
``` |
||||
|
||||
See also: |
||||
- [generated doc](https://git.deuxfleurs.fr/garage-sdk/garage-admin-sdk-golang) |
||||
- [examples](https://git.deuxfleurs.fr/garage-sdk/garage-admin-sdk-generator/src/branch/main/example/golang) |
@ -1,55 +0,0 @@ |
||||
+++ |
||||
title = "Javascript" |
||||
weight = 10 |
||||
+++ |
||||
|
||||
## S3 |
||||
|
||||
*Coming soon*. |
||||
|
||||
Some refs: |
||||
- Minio SDK |
||||
- [Reference](https://docs.min.io/docs/javascript-client-api-reference.html) |
||||
|
||||
- Amazon aws-sdk-js |
||||
- [Installation](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/getting-started.html) |
||||
- [Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html) |
||||
- [Example](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/s3-example-creating-buckets.html) |
||||
|
||||
## K2V |
||||
|
||||
*Coming soon* |
||||
|
||||
## Administration |
||||
|
||||
Install the SDK with: |
||||
|
||||
```bash |
||||
npm install --save git+https://git.deuxfleurs.fr/garage-sdk/garage-admin-sdk-js.git |
||||
``` |
||||
|
||||
A short example: |
||||
|
||||
```javascript |
||||
const garage = require('garage_administration_api_v1garage_v0_9_0'); |
||||
|
||||
const api = new garage.ApiClient("http://127.0.0.1:3903/v1"); |
||||
api.authentications['bearerAuth'].accessToken = "s3cr3t"; |
||||
|
||||
const [node, layout, key, bucket] = [ |
||||
new garage.NodesApi(api), |
||||
new garage.LayoutApi(api), |
||||
new garage.KeyApi(api), |
||||
new garage.BucketApi(api), |
||||
]; |
||||
|
||||
node.getNodes().then((data) => { |
||||
console.log(`nodes: ${Object.values(data.knownNodes).map(n => n.hostname)}`) |
||||
}, (error) => { |
||||
console.error(error); |
||||
}); |
||||
``` |
||||
|
||||
See also: |
||||
- [sdk repository](https://git.deuxfleurs.fr/garage-sdk/garage-admin-sdk-js) |
||||
- [examples](https://git.deuxfleurs.fr/garage-sdk/garage-admin-sdk-generator/src/branch/main/example/javascript) |
@ -1,139 +0,0 @@ |
||||
+++ |
||||
title = "Python" |
||||
weight = 20 |
||||
+++ |
||||
|
||||
## S3 |
||||
|
||||
### Using Minio SDK |
||||
|
||||
First install the SDK: |
||||
|
||||
```bash |
||||
pip3 install minio |
||||
``` |
||||
|
||||
Then instantiate a client object using garage root domain, api key and secret: |
||||
|
||||
```python |
||||
import minio |
||||
|
||||
client = minio.Minio( |
||||
"your.domain.tld", |
||||
"GKyourapikey", |
||||
"abcd[...]1234", |
||||
# Force the region, this is specific to garage |
||||
region="region", |
||||
) |
||||
``` |
||||
|
||||
Then use all the standard S3 endpoints as implemented by the Minio SDK: |
||||
|
||||
``` |
||||
# List buckets |
||||
print(client.list_buckets()) |
||||
|
||||
# Put an object containing 'content' to /path in bucket named 'bucket': |
||||
content = b"content" |
||||
client.put_object( |
||||
"bucket", |
||||
"path", |
||||
io.BytesIO(content), |
||||
len(content), |
||||
) |
||||
|
||||
# Read the object back and check contents |
||||
data = client.get_object("bucket", "path").read() |
||||
assert data == content |
||||
``` |
||||
|
||||
For further documentation, see the Minio SDK |
||||
[Reference](https://docs.min.io/docs/python-client-api-reference.html) |
||||
|
||||
### Using Amazon boto3 |
||||
|
||||
*Coming soon* |
||||
|
||||
See the official documentation: |
||||
- [Installation](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html) |
||||
- [Reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html) |
||||
- [Example](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html) |
||||
|
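In the meantime, here is a minimal sketch of pointing boto3 at an S3-compatible endpoint; the domain and credentials are the same placeholders as in the Minio example above, and the exact values are assumptions to adapt to your deployment:

```python
import boto3

# Sketch only: boto3 can talk to any S3-compatible endpoint via endpoint_url.
# All values below are placeholders, mirroring the Minio example above.
client = boto3.client(
    "s3",
    endpoint_url="https://your.domain.tld",
    region_name="region",  # must match the region configured in Garage
    aws_access_key_id="GKyourapikey",
    aws_secret_access_key="abcd[...]1234",
)

# List buckets
print(client.list_buckets())

# Put an object and read it back, mirroring the Minio example
client.put_object(Bucket="bucket", Key="path", Body=b"content")
data = client.get_object(Bucket="bucket", Key="path")["Body"].read()
assert data == b"content"
```
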
## K2V

*Coming soon*

## Admin API

You need at least Python 3.6, pip, and setuptools.
Because the Python package is in a subfolder, the command is a bit more complicated than usual:

```bash
pip3 install --user 'git+https://git.deuxfleurs.fr/garage-sdk/garage-admin-sdk-python'
```

Now, let's imagine you have a fresh Garage instance running on localhost, with the admin API configured on port 3903 with the bearer token `s3cr3t`:

```python
import garage_admin_sdk
from garage_admin_sdk.apis import *
from garage_admin_sdk.models import *

configuration = garage_admin_sdk.Configuration(
  host = "http://localhost:3903/v1",
  access_token = "s3cr3t"
)

# Init APIs
api = garage_admin_sdk.ApiClient(configuration)
nodes, layout, keys, buckets = NodesApi(api), LayoutApi(api), KeyApi(api), BucketApi(api)

# Display some info on the node
status = nodes.get_nodes()
print(f"running garage {status.garage_version}, node_id {status.node}")

# Change layout of this node
current = layout.get_layout()
layout.add_layout([
  NodeRoleChange(
    id = status.node,
    zone = "dc1",
    capacity = 1000000000,
    tags = [ "dev" ],
  )
])
layout.apply_layout(LayoutVersion(
  version = current.version + 1
))

# Create key, allow it to create buckets
kinfo = keys.add_key(AddKeyRequest(name="openapi"))

allow_create = UpdateKeyRequestAllow(create_bucket=True)
keys.update_key(kinfo.access_key_id, UpdateKeyRequest(allow=allow_create))

# Create a bucket, allow key, set quotas
binfo = buckets.create_bucket(CreateBucketRequest(global_alias="documentation"))
binfo = buckets.allow_bucket_key(AllowBucketKeyRequest(
  bucket_id=binfo.id,
  access_key_id=kinfo.access_key_id,
  permissions=AllowBucketKeyRequestPermissions(read=True, write=True, owner=True),
))
binfo = buckets.update_bucket(binfo.id, UpdateBucketRequest(
  quotas=UpdateBucketRequestQuotas(max_size=19029801, max_objects=1500)))

# Display key
print(f"""
cluster ready
key id is {kinfo.access_key_id}
secret key is {kinfo.secret_access_key}
bucket {binfo.global_aliases[0]} contains {binfo.objects}/{binfo.quotas.max_objects} objects
""")
```

*This example is named `short.py` in the example folder. Other Python examples are also available.*

See also:
- [sdk repo](https://git.deuxfleurs.fr/garage-sdk/garage-admin-sdk-python)
- [examples](https://git.deuxfleurs.fr/garage-sdk/garage-admin-sdk-generator/src/branch/main/example/python)

@ -1,47 +0,0 @@ |
||||
+++ |
||||
title = "Rust" |
||||
weight = 40 |
||||
+++ |
||||
|
||||
## S3 |
||||
|
||||
*Coming soon* |
||||
|
||||
Some refs: |
||||
- Amazon aws-rust-sdk |
||||
- [Github](https://github.com/awslabs/aws-sdk-rust) |
||||
|
||||
## K2V |
||||
|
||||
*Coming soon* |
||||
|
||||
Some refs: https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/main/src/k2v-client |
||||
|
||||
```bash |
||||
# all these values can be provided on the cli instead |
||||
export AWS_ACCESS_KEY_ID=GK123456 |
||||
export AWS_SECRET_ACCESS_KEY=0123..789 |
||||
export AWS_REGION=garage |
||||
export K2V_ENDPOINT=http://172.30.2.1:3903 |
||||
export K2V_BUCKET=my-bucket |
||||
|
||||
cargo run --features=cli -- read-range my-partition-key --all |
||||
|
||||
cargo run --features=cli -- insert my-partition-key my-sort-key --text "my string1" |
||||
cargo run --features=cli -- insert my-partition-key my-sort-key --text "my string2" |
||||
cargo run --features=cli -- insert my-partition-key my-sort-key2 --text "my string" |
||||
|
||||
cargo run --features=cli -- read-range my-partition-key --all |
||||
|
||||
causality=$(cargo run --features=cli -- read my-partition-key my-sort-key2 -b | head -n1) |
||||
cargo run --features=cli -- delete my-partition-key my-sort-key2 -c $causality |
||||
|
||||
causality=$(cargo run --features=cli -- read my-partition-key my-sort-key -b | head -n1) |
||||
cargo run --features=cli -- insert my-partition-key my-sort-key --text "my string3" -c $causality |
||||
|
||||
cargo run --features=cli -- read-range my-partition-key --all |
||||
``` |
||||
|
||||
## Admin API |
||||
|
||||
*Coming soon* |
@ -1,57 +0,0 @@ |
||||
+++ |
||||
title = "Observability" |
||||
weight = 25 |
||||
+++ |
||||
|
||||
An object store can be used as data storage location for metrics, and logs which |
||||
can then be leveraged for systems observability. |
||||
|
||||
## Metrics |
||||
|
||||
### Prometheus |
||||
|
||||
Prometheus itself has no object store capabilities, however two projects exist |
||||
which support storing metrics in an object store: |
||||
|
||||
- [Cortex](https://cortexmetrics.io/) |
||||
- [Thanos](https://thanos.io/) |
||||
|
||||
## System logs |
||||
|
||||
### Vector |
||||
|
||||
[Vector](https://vector.dev/) natively supports S3 as a |
||||
[data sink](https://vector.dev/docs/reference/configuration/sinks/aws_s3/) |
||||
(and [source](https://vector.dev/docs/reference/configuration/sources/aws_s3/)). |
||||
|
||||
This can be configured with Garage with the following: |
||||
|
||||
```bash |
||||
garage key new --name vector-system-logs |
||||
garage bucket create system-logs |
||||
garage bucket allow system-logs --read --write --key vector-system-logs |
||||
``` |
||||
|
||||
The `vector.toml` can then be configured as follows: |
||||
|
||||
```toml |
||||
[sources.journald] |
||||
type = "journald" |
||||
current_boot_only = true |
||||
|
||||
[sinks.out] |
||||
encoding.codec = "json" |
||||
type = "aws_s3" |
||||
inputs = [ "journald" ] |
||||
bucket = "system-logs" |
||||
key_prefix = "%F/" |
||||
compression = "none" |
||||
region = "garage" |
||||
endpoint = "https://my-garage-instance.mydomain.tld" |
||||
auth.access_key_id = "" |
||||
auth.secret_access_key = "" |
||||
``` |
||||
|
||||
This is an example configuration - please refer to the Vector documentation for |
||||
all configuration and transformation possibilities. Also note that Garage |
||||
performs its own compression, so this should be disabled in Vector. |
@ -1,51 +0,0 @@ |
||||
+++ |
||||
title = "Deploying with Ansible" |
||||
weight = 35 |
||||
+++ |
||||
|
||||
While Ansible is not officially supported to deploy Garage, several community members |
||||
have published Ansible roles. We list them and compare them below. |
||||
|
||||
## Comparison of Ansible roles |
||||
|
||||
| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | |
||||
|------------------------------------|---------------------------------------------|---------------------------------------------------------------| |
||||
| **Runtime** | Systemd | Docker | |
||||
| **Target OS** | Any Linux | Any Linux | |
||||
| **Architecture** | amd64, arm64, i686 | amd64, arm64 | |
||||
| **Additional software** | None | Traefik | |
||||
| **Automatic node connection** | ❌ | ✅ | |
||||
| **Layout management** | ❌ | ✅ | |
||||
| **Manage buckets & keys** | ❌ | ✅ (basic) | |
||||
| **Allow custom Garage config** | ✅ | ❌ | |
||||
| **Facilitate Garage upgrades** | ✅ | ❌ | |
||||
| **Multiple instances on one host** | ✅ | ✅ | |
||||
|
||||
|
||||
## zorun/ansible-role-garage |
||||
|
||||
[Source code](https://github.com/zorun/ansible-role-garage), [Ansible galaxy](https://galaxy.ansible.com/zorun/garage) |
||||
|
||||
This role is voluntarily simple: it relies on the official Garage static |
||||
binaries and only requires Systemd. As such, it should work on any |
||||
Linux-based OS. |
||||
|
||||
To make things more flexible, the user has to provide a Garage |
||||
configuration template. This allows to customize Garage configuration in |
||||
any way. |
||||
|
||||
Some more features might be added, such as a way to automatically connect |
||||
nodes to each other or to define a layout. |
||||
|
||||
## moan0s/garage-docker-ansible-deploy |
||||
|
||||
[Source code](https://github.com/moan0s/garage-docker-ansible-deploy), [Blog post](https://hyteck.de/post/garage/) |
||||
|
||||
This role is based on the Docker image for Garage, and comes with |
||||
"batteries included": it will additionally install Docker and Traefik. In |
||||
addition, it is "opinionated" in the sense that it expects a particular |
||||
deployment structure (one instance per disk, one gateway per host, |
||||
structured DNS names, etc). |
||||
|
||||
As a result, this role makes it easier to start with Garage on Ansible, |
||||
but is less flexible. |
@ -1,41 +0,0 @@ |
||||
+++ |
||||
title = "Binary packages" |
||||
weight = 11 |
||||
+++ |
||||
|
||||
Garage is also available in binary packages on: |
||||
|
||||
## Alpine Linux |
||||
|
||||
If you use Alpine Linux, you can simply install the |
||||
[garage](https://pkgs.alpinelinux.org/packages?name=garage) package from the |
||||
Alpine Linux repositories (available since v3.17): |
||||
|
||||
```bash |
||||
apk add garage |
||||
``` |
||||
|
||||
The default configuration file is installed to `/etc/garage.toml`. You can run |
||||
Garage using: `rc-service garage start`. If you don't specify `rpc_secret`, it |
||||
will be automatically replaced with a random string on the first start. |
||||
|
||||
Please note that this package is built without Consul discovery, Kubernetes |
||||
discovery, OpenTelemetry exporter, and K2V features (K2V will be enabled once |
||||
it's stable). |
||||
|
||||
|
||||
## Arch Linux |
||||
|
||||
Garage is available in the [AUR](https://aur.archlinux.org/packages/garage). |
||||
|
||||
## FreeBSD |
||||
|
||||
```bash |
||||
pkg install garage |
||||
``` |
||||
|
||||
## NixOS |
||||
|
||||
```bash |
||||
nix-shell -p garage |
||||
``` |
@ -1,116 +0,0 @@ |
||||
+++ |
||||
title = "Encryption" |
||||
weight = 50 |
||||
+++ |
||||
|
||||
Encryption is a recurring subject when discussing Garage. |
||||
Garage does not handle data encryption by itself, but many things can |
||||
already be done with Garage's current feature set and the existing ecosystem. |
||||
|
||||
This page takes a high level approach to security in general and data encryption |
||||
in particular. |
||||
|
||||
|
||||
# Examining your need for encryption |
||||
|
||||
- Why do you want encryption in Garage? |
||||
|
||||
- What is your threat model? What are you fearing? |
||||
- A stolen HDD? |
||||
- A curious administrator? |
||||
- A malicious administrator? |
||||
- A remote attacker? |
||||
- etc. |
||||
|
||||
- What services do you want to protect with encryption? |
||||
- An existing application? Which one? (eg. Nextcloud) |
||||
- An application that you are writing |
||||
|
||||
- Any expertise you may have on the subject |
||||
|
||||
This page explains what Garage provides, and how you can improve the situation by yourself |
||||
by adding encryption at different levels. |
||||
|
||||
We would be very curious to know your needs and thougs about ideas such as |
||||
encryption practices and things like key management, as we want Garage to be a |
||||
serious base platform for the developpment of secure, encrypted applications. |
||||
Do not hesitate to come talk to us if you have any thoughts or questions on the |
||||
subject. |
||||
|
||||
|
||||
# Capabilities provided by Garage |
||||
|
||||
## Traffic is encrypted between Garage nodes |
||||
|
||||
RPCs between Garage nodes are encrypted. More specifically, contrary to many |
||||
distributed software, it is impossible in Garage to have clear-text RPC. We |
||||
use the [kuska handshake](https://github.com/Kuska-ssb/handshake) library which |
||||
implements a protocol that has been clearly reviewed, Secure ScuttleButt's |
||||
Secret Handshake protocol. This is why setting a `rpc_secret` is mandatory, |
||||
and that's also why your nodes have super long identifiers. |
||||
|
||||
## HTTP API endpoints provided by Garage are in clear text |
||||
|
||||
Adding TLS support built into Garage is not currently planned. |
||||
|
||||
## Garage stores data in plain text on the filesystem |
||||
|
||||
Garage does not handle data encryption at rest by itself, and instead delegates |
||||
to the user to add encryption, either at the storage layer (LUKS, etc) or on |
||||
the client side (or both). There are no current plans to add data encryption |
||||
directly in Garage. |
||||
|
||||
Implementing data encryption directly in Garage might make things simpler for |
||||
end users, but also raises many more questions, especially around key |
||||
management: for encryption of data, where could Garage get the encryption keys |
||||
from ? If we encrypt data but keep the keys in a plaintext file next to them, |
||||
it's useless. We probably don't want to have to manage secrets in garage as it |
||||
would be very hard to do in a secure way. Maybe integrate with an external |
||||
system such as Hashicorp Vault? |
||||
|
||||
|
||||
# Adding data encryption using external tools |
||||
|
||||
## Encrypting traffic between a Garage node and your client |
||||
|
||||
You have multiple options to have encryption between your client and a node: |
||||
|
||||
- Setup a reverse proxy with TLS / ACME / Let's encrypt |
||||
- Setup a Garage gateway locally, and only contact the garage daemon on `localhost` |
||||
- Only contact your Garage daemon over a secure, encrypted overlay network such as Wireguard |
||||
|
||||
## Encrypting data at rest |
||||
|
||||
Protects against the following threats: |
||||
|
||||
- Stolen HDD |
||||
|
||||
Crucially, does not protect againt malicious sysadmins or remote attackers that |
||||
might gain access to your servers. |
||||
|
||||
Methods include full-disk encryption with tools such as LUKS. |
||||
|
||||
## Encrypting data on the client side |
||||
|
||||
Protects againt the following threats: |
||||
|
||||
- A honest-but-curious administrator |
||||
- A malicious administrator that tries to corrupt your data |
||||
- A remote attacker that can read your server's data |
||||
|
||||
Implementations are very specific to the various applications. Examples: |
||||
|
||||
- Matrix: uses the OLM protocol for E2EE of user messages. Media files stored |
||||
in Matrix are probably encrypted using symmetric encryption, with a key that is |
||||
distributed in the end-to-end encrypted message that contains the link to the object. |
||||
|
||||
- XMPP: clients normally support either OMEMO / OpenPGP for the E2EE of user |
||||
messages. Media files are encrypted per |
||||
[XEP-0454](https://xmpp.org/extensions/xep-0454.html). |
||||
|
||||
- Aerogramme: use the user's password as a key to decrypt data in the user's bucket |
||||
|
||||
- Cyberduck: comes with support for |
||||
[Cryptomator](https://docs.cyberduck.io/cryptomator/) which allows users to |
||||
create client-side vaults to encrypt files in before they are uploaded to a |
||||
cloud storage endpoint. |
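
To make the client-side approach concrete, here is a minimal Python sketch, assuming the Minio SDK shown elsewhere in this documentation and the `cryptography` package; the key handling is deliberately naive and every value in it is illustrative, not a vetted design:

```python
import io
import minio
from cryptography.fernet import Fernet  # symmetric, authenticated encryption

# Illustrative only: in a real design, key generation, storage and rotation
# are the hard part; this sketch just keeps the key in a local variable.
key = Fernet.generate_key()
fernet = Fernet(key)

client = minio.Minio("your.domain.tld", "GKyourapikey", "abcd[...]1234",
                     region="region")

# Encrypt client-side, then upload the ciphertext: the storage provider
# (and its administrators) only ever see encrypted bytes.
plaintext = b"confidential content"
ciphertext = fernet.encrypt(plaintext)
client.put_object("bucket", "path", io.BytesIO(ciphertext), len(ciphertext))

# Download and decrypt: decryption fails loudly if the ciphertext
# was corrupted or tampered with.
data = client.get_object("bucket", "path").read()
assert fernet.decrypt(data) == plaintext
```

Because Fernet is authenticated encryption, corruption by a malicious administrator is detected at decryption time; a real application still needs a proper answer to key storage and rotation.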
@ -1,88 +0,0 @@ |
||||
+++ |
||||
title = "Deploying on Kubernetes" |
||||
weight = 32 |
||||
+++ |
||||
|
||||
Garage can also be deployed on a kubernetes cluster via helm chart. |
||||
|
||||
## Deploying |
||||
|
||||
Firstly clone the repository: |
||||
|
||||
```bash |
||||
git clone https://git.deuxfleurs.fr/Deuxfleurs/garage |
||||
cd garage/scripts/helm |
||||
``` |
||||
|
||||
Deploy with default options: |
||||
|
||||
```bash |
||||
helm install --create-namespace --namespace garage garage ./garage |
||||
``` |
||||
|
||||
Or deploy with custom values: |
||||
|
||||
```bash |
||||
helm install --create-namespace --namespace garage garage ./garage -f values.override.yaml |
||||
``` |
||||
|
||||
After deploying, cluster layout must be configured manually as described in [Creating a cluster layout](@/documentation/quick-start/_index.md#creating-a-cluster-layout). Use the following command to access garage CLI: |
||||
|
||||
```bash |
||||
kubectl exec --stdin --tty -n garage garage-0 -- ./garage status |
||||
``` |
||||
|
||||
## Overriding default values |
||||
|
||||
All possible configuration values can be found with: |
||||
|
||||
```bash |
||||
helm show values ./garage |
||||
``` |
||||
|
||||
This is an example `values.overrride.yaml` for deploying in a microk8s cluster with a https s3 api ingress route: |
||||
|
||||
```yaml |
||||
garage: |
||||
# Use only 2 replicas per object |
||||
replicationMode: "2" |
||||
|
||||
# Start 4 instances (StatefulSets) of garage |
||||
deployment: |
||||
replicaCount: 4 |
||||
|
||||
# Override default storage class and size |
||||
persistence: |
||||
meta: |
||||
storageClass: "openebs-hostpath" |
||||
size: 100Mi |
||||
data: |
||||
storageClass: "openebs-hostpath" |
||||
size: 1Gi |
||||
|
||||
ingress: |
||||
s3: |
||||
api: |
||||
enabled: true |
||||
className: "public" |
||||
annotations: |
||||
cert-manager.io/cluster-issuer: "letsencrypt-prod" |
||||
nginx.ingress.kubernetes.io/proxy-body-size: 500m |
||||
hosts: |
||||
- host: s3-api.my-domain.com |
||||
paths: |
||||
- path: / |
||||
pathType: Prefix |
||||
tls: |
||||
- secretName: garage-ingress-cert |
||||
hosts: |
||||
- s3-api.my-domain.com |
||||
``` |
||||
|
||||
## Removing |
||||
|
||||
```bash |
||||
helm delete --namespace garage garage |
||||
``` |
||||
|
||||
Note that this will leave behind custom CRD `garagenodes.deuxfleurs.fr`, which must be removed manually if desired. |
@ -1,53 +0,0 @@ |
||||
+++ |
||||
title = "Monitoring Garage" |
||||
weight = 40 |
||||
+++ |
||||
|
||||
Garage exposes some internal metrics in the Prometheus data format. |
||||
This page explains how to exploit these metrics. |
||||
|
||||
## Setting up monitoring |
||||
|
||||
### Enabling the Admin API endpoint |
||||
|
||||
If you have not already enabled the [administration API endpoint](@/documentation/reference-manual/admin-api.md), do so by adding the following lines to your configuration file: |
||||
|
||||
```toml |
||||
[admin] |
||||
api_bind_addr = "0.0.0.0:3903" |
||||
``` |
||||
|
||||
This will allow anyone to scrape Prometheus metrics by fetching |
||||
`http://localhost:3093/metrics`. If you want to restrict access |
||||
to the exported metrics, set the `metrics_token` configuration value |
||||
to a bearer token to be used when fetching the metrics endpoint. |
||||
|
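As a quick sanity check that the endpoint and token work, here is a small sketch using Python's `requests`; the node address is a placeholder, and the `Authorization` header is only needed if you set `metrics_token`:

```python
import requests

# Placeholder node address; use any node with the admin API enabled.
resp = requests.get(
    "http://localhost:3903/metrics",
    # Only needed if metrics_token is set; use your configured token.
    headers={"Authorization": "Bearer s3cr3t"},
    timeout=5,
)
resp.raise_for_status()

# Prometheus metrics are plain text, one sample per line.
print("\n".join(resp.text.splitlines()[:10]))
```
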
### Setting up Prometheus and Grafana

Add a scrape config to your Prometheus daemon to scrape metrics from
all of your nodes:

```yaml
scrape_configs:
  - job_name: 'garage'
    static_configs:
      - targets:
        - 'node1.mycluster:3903'
        - 'node2.mycluster:3903'
        - 'node3.mycluster:3903'
```

If you have set a metrics token in your Garage configuration file,
add the following lines in your Prometheus scrape config:

```yaml
authorization:
  type: Bearer
  credentials: 'your metrics token'
```

To visualize the scraped data in Grafana,
you can either import our [Grafana dashboard for Garage](https://git.deuxfleurs.fr/Deuxfleurs/garage/raw/branch/main/script/telemetry/grafana-garage-dashboard-prometheus.json)
or make your own.

The list of exported metrics is available on our [dedicated page](@/documentation/reference-manual/monitoring.md) in the Reference manual section.
@@ -1,6 +1,6 @@
+++
title = "Recovering from failures"
weight = 40
weight = 35
+++

Garage is meant to work on old, second-hand hardware.
@ -1,23 +0,0 @@ |
||||
+++ |
||||
title = "Operations & Maintenance" |
||||
weight = 50 |
||||
sort_by = "weight" |
||||
template = "documentation.html" |
||||
+++ |
||||
|
||||
This section contains a number of important information on how to best operate a Garage cluster, |
||||
to ensure integrity and availability of your data: |
||||
|
||||
- **[Upgrading Garage](@/documentation/operations/upgrading.md):** General instructions on how to |
||||
upgrade your cluster from one version to the next. Instructions specific for each version upgrade |
||||
can bef ound in the [working documents](@/documentation/working-documents/_index.md) section. |
||||
|
||||
- **[Layout management](@/documentation/operations/layout.md):** Best practices for using the `garage layout` |
||||
commands when adding or removing nodes from your cluster. |
||||
|
||||
- **[Durability and repairs](@/documentation/operations/durability-repairs.md):** How to check for small things |
||||
that might be going wrong, and how to recover from such failures. |
||||
|
||||
- **[Recovering from failures](@/documentation/operations/recovering.md):** Garage's first selling point is resilience |
||||
to hardware failures. This section explains how to recover from such a failure in the |
||||
best possible way. |
@ -1,126 +0,0 @@ |
||||
+++ |
||||
title = "Durability & Repairs" |
||||
weight = 30 |
||||
+++ |
||||
|
||||
To ensure the best durability of your data and to fix any inconsistencies that may |
||||
pop up in a distributed system, Garage provides a series of repair operations. |
||||
This guide will explain the meaning of each of them and when they should be applied. |
||||
|
||||
|
||||
# General syntax of repair operations |
||||
|
||||
Repair operations described below are of the form `garage repair <repair_name>`. |
||||
These repairs will not launch without the `--yes` flag, which should |
||||
be added as follows: `garage repair --yes <repair_name>`. |
||||
By default these repair procedures will only run on the Garage node your CLI is |
||||
connecting to. To run on all nodes, add the `-a` flag as follows: |
||||
`garage repair -a --yes <repair_name>`. |
||||
|
||||
# Data block operations |
||||
|
||||
## Data store scrub |
||||
|
||||
Scrubbing the data store means examining each individual data block to check that |
||||
their content is correct, by verifying their hash. Any block found to be corrupted |
||||
(e.g. by bitrot or by an accidental manipulation of the datastore) will be |
||||
restored from another node that holds a valid copy. |
||||
|
||||
Scrubs are automatically scheduled by Garage to run every 25-35 days (the |
||||
actual time is randomized to spread load across nodes). The next scheduled run |
||||
can be viewed with `garage worker get`. |
||||
|
||||
A scrub can also be launched manually using `garage repair scrub start`. |
||||
|
||||
To view the status of an ongoing scrub, first find the task ID of the scrub worker |
||||
using `garage worker list`. Then, run `garage worker info <scrub_task_id>` to |
||||
view detailed runtime statistics of the scrub. To gather cluster-wide information, |
||||
this command has to be run on each individual node. |
||||
|
||||
A scrub is a very disk-intensive operation that might slow down your cluster. |
||||
You may pause an ongoing scrub using `garage repair scrub pause`, but note that |
||||
the scrub will resume automatically 24 hours later as Garage will not let your |
||||
cluster run without a regular scrub. If the scrub procedure is too intensive |
||||
for your servers and is slowing down your workload, the recommended solution |
||||
is to increase the "scrub tranquility" using `garage repair scrub set-tranquility`. |
||||
A higher tranquility value will make Garage take longer pauses between two block |
||||
verifications. Of course, scrubbing the entire data store will also take longer. |
||||
|
||||
## Block check and resync |
||||
|
||||
In some cases, nodes hold a reference to a block but do not actually have the block |
||||
stored on disk. Conversely, they may also have on disk blocks that are not referenced |
||||
any more. To fix both cases, a block repair may be run with `garage repair blocks`. |
||||
This will scan the entire block reference counter table to check that the blocks |
||||
exist on disk, and will scan the entire disk store to check that stored blocks |
||||
are referenced. |
||||
|
||||
It is recommended to run this procedure when changing your cluster layout, |
||||
after the metadata tables have finished synchronizing between nodes |
||||
(usually a few hours after `garage layout apply`). |
||||
|
||||
## Inspecting lost blocks |
||||
|
||||
In extremely rare situations, data blocks may be unavailable from the entire cluster. |
||||
This means that even using `garage repair blocks`, some nodes may be unable |
||||
to fetch data blocks for which they hold a reference. |
||||
|
||||
These errors are stored on each node in a list of "block resync errors", i.e. |
||||
blocks for which the last resync operation failed. |
||||
This list can be inspected using `garage block list-errors`. |
||||
These errors usually fall into one of the following categories: |
||||
|
||||
1. a block is still referenced but the object was deleted, this is a case |
||||
of metadata reference inconsistency (see below for the fix) |
||||
2. a block is referenced by a non-deleted object, but could not be fetched due |
||||
to a transient error such as a network failure |
||||
3. a block is referenced by a non-deleted object, but could not be fetched due |
||||
to a permanent error such as there not being any valid copy of the block on the |
||||
entire cluster |
||||
|
||||
To help make the difference between cases 1 and cases 2 and 3, you may use the |
||||
`garage block info` command to see which objects hold a reference to each block. |
||||
|
||||
In the second case (transient errors), Garage will try to fetch the block again |
||||
after a certain time, so the error should disappear naturally. You can also |
||||
request Garage to try to fetch the block immediately using `garage block retry-now` |
||||
if you have fixed the transient issue. |
||||
|
||||
If you are confident that you are in the third scenario and that your data block |
||||
is definitely lost, then there is no other choice than to declare your S3 objects |
||||
as unrecoverable, and to delete them properly from the data store. This can be done |
||||
using the `garage block purge` command. |
||||
|
||||
## Rebalancing data directories |
||||
|
||||
In [multi-HDD setups](@/documentation/operations/multi-hdd.md), to ensure that |
||||
data blocks are well balanced between storage locations, you may run a |
||||
rebalance operation using `garage repair rebalance`. This is usefull when |
||||
adding storage locations or when capacities of the storage locations have been |
||||
changed. Once this is finished, Garage will know for each block of a single |
||||
possible location where it can be, which can increase access speed. This |
||||
operation will also move out all data from locations marked as read-only. |
||||
|
||||
|
||||
# Metadata operations |
||||
|
||||
## Metadata table resync |
||||
|
||||
Garage automatically resyncs all entries stored in the metadata tables every hour, |
||||
to ensure that all nodes have the most up-to-date version of all the information |
||||
they should be holding. |
||||
The resync procedure is based on a Merkle tree that allows to efficiently find |
||||
differences between nodes. |
||||
|
||||
In some special cases, e.g. before an upgrade, you might want to run a table |
||||
resync manually. This can be done using `garage repair tables`. |
||||
|
||||
## Metadata table reference fixes |
||||
|
||||
In some very rare cases where nodes are unavailable, some references between objects |
||||
are broken. For instance, if an object is deleted, the underlying versions or data |
||||
blocks may still be held by Garage. If you suspect that such corruption has occurred |
||||
in your cluster, you can run one of the following repair procedures: |
||||
|
||||
- `garage repair versions`: checks that all versions belong to a non-deleted object, and purges any orphan version |
||||
- `garage repair block_refs`: checks that all block references belong to a non-deleted object version, and purges any orphan block reference (this will then allow the blocks to be garbage-collected) |
@ -1,101 +0,0 @@ |
||||
+++ |
||||
title = "Multi-HDD support" |
||||
weight = 15 |
||||
+++ |
||||
|
||||
|
||||
Since v0.9, Garage natively supports nodes that have several storage drives |
||||
for storing data blocks (not for metadata storage). |
||||
|
||||
## Initial setup |
||||
|
||||
To set up a new Garage storage node with multiple HDDs, |
||||
format and mount all your drives in different directories, |
||||
and use a Garage configuration as follows: |
||||
|
||||
```toml |
||||
data_dir = [ |
||||
{ path = "/path/to/hdd1", capacity = "2T" }, |
||||
{ path = "/path/to/hdd2", capacity = "4T" }, |
||||
] |
||||
``` |
||||
|
||||
Garage will automatically balance all blocks stored by the node |
||||
among the different specified directories, proportionnally to the |
||||
specified capacities. |
||||
|
||||
## Updating the list of storage locations |
||||
|
||||
If you add new storage locations to your `data_dir`, |
||||
Garage will not rebalance existing data between storage locations. |
||||
Newly written blocks will be balanced proportionnally to the specified capacities, |
||||
and existing data may be moved between drives to improve balancing, |
||||
but only opportunistically when a data block is re-written (e.g. an object |
||||
is re-uploaded, or an object with a duplicate block is uploaded). |
||||
|
||||
To understand precisely what is happening, we need to dive in to how Garage |
||||
splits data among the different storage locations. |
||||
|
||||
First of all, Garage divides the set of all possible block hashes |
||||
in a fixed number of slices (currently 1024), and assigns |
||||
to each slice a primary storage location among the specified data directories. |
||||
The number of slices having their primary location in each data directory |
||||
is proportionnal to the capacity specified in the config file. |
||||
|
||||
When Garage receives a block to write, it will always write it in the primary |
||||
directory of the slice that contains its hash. |
||||
|
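As a toy model of that description (our reading of it, not Garage's actual code), the following Python sketch assigns slices from a block's hash and picks primary directories proportionally to capacity:

```python
import hashlib

N_SLICES = 1024  # fixed number of hash slices, as described above

# Illustrative capacities mirroring the example config: a 2T and a 4T drive.
data_dirs = [("/path/to/hdd1", 2), ("/path/to/hdd2", 4)]

# Give each slice a primary directory, proportionally to capacity.
# (Toy model: the real assignment must also stay stable when the
# directory list changes, which this naive version does not guarantee.)
total = sum(cap for _, cap in data_dirs)
primary = []
for path, cap in data_dirs:
    primary += [path] * round(N_SLICES * cap / total)
while len(primary) < N_SLICES:  # fix rounding shortfalls
    primary.append(data_dirs[-1][0])
primary = primary[:N_SLICES]

def slice_of(block: bytes) -> int:
    # Derive the slice from the block's hash (hash function is illustrative).
    digest = hashlib.blake2b(block).digest()
    return int.from_bytes(digest[:2], "big") % N_SLICES

def primary_dir(block: bytes) -> str:
    return primary[slice_of(block)]

print(primary_dir(b"some data block"))  # e.g. /path/to/hdd2
```
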
||||
Now, to be able to not lose existing data blocks when storage locations |
||||
are added, Garage also keeps a list of secondary data directories |
||||
for all of the hash slices. Secondary data directories for a slice indicates |
||||
storage locations that once were primary directories for that slice, i.e. where |
||||
Garage knows that data blocks of that slice might be stored. |
||||
When Garage is requested to read a certain data block, |
||||
it will first look in the primary storage directory of its slice, |
||||
and if it doesn't find it there it goes through all of the secondary storage |
||||
locations until it finds it. This allows Garage to continue operating |
||||
normally when storage locations are added, without having to shuffle |
||||
files between drives to place them in the correct location. |
||||
|
||||
This relatively simple strategy works well but does not ensure that data |
||||
is correctly balanced among drives according to their capacity. |
||||
To rebalance data, two strategies can be used: |
||||
|
||||
- Lazy rebalancing: when a block is re-written (e.g. the object is re-uploaded), |
||||
Garage checks whether the existing copy is in the primary directory of the slice |
||||
or in a secondary directory. If the current copy is in a secondary directory, |
||||
Garage re-writes a copy in the primary directory and deletes the one from the |
||||
secondary directory. This might never end up rebalancing everything if there |
||||
are data blocks that are only read and never written. |
||||
|
||||
- Active rebalancing: an operator of a Garage node can explicitly launch a repair |
||||
procedure that rebalances the data directories, moving all blocks to their |
||||
primary location. Once done, all secondary locations for all hash slices are |
||||
removed so that they won't be checked anymore when looking for a data block. |
||||
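Active rebalancing is launched through the repair mechanism. A hedged example,
reusing the `--all-nodes --yes` flags shown elsewhere in this documentation
(check `garage repair --help` on your version; the procedure name `rebalance`
is an assumption here):

```bash
# Move all blocks to the primary location of their slice, on all nodes.
garage repair --all-nodes --yes rebalance
```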
|
||||
## Read-only storage locations |
||||
|
||||
If you would like to move all data blocks from an existing data directory to one |
||||
or several new data directories, mark the old directory as read-only: |
||||
|
||||
```toml |
||||
data_dir = [ |
||||
{ path = "/path/to/old_data", read_only = true }, |
||||
{ path = "/path/to/new_hdd1", capacity = "2T" }, |
||||
{ path = "/path/to/new_hdd2", capacity = "4T" }, |
||||
] |
||||
``` |
||||
|
||||
Garage will be able to read requested blocks from the read-only directory. |
||||
Garage will also move data out of the read-only directory either progressively |
||||
(lazy rebalancing) or if requested explicitly (active rebalancing). |
||||
|
||||
Once an active rebalancing has finished, your read-only directory should be empty: |
||||
it might still contain subdirectories, but no data files. You can check that |
||||
it contains no files using: |
||||
|
||||
```bash |
||||
find /path/to/old_data -type f # should not print anything
||||
``` |
||||
|
||||
at which point it can be removed from the `data_dir` list in your config file. |
@ -1,125 +0,0 @@ |
||||
+++ |
||||
title = "List of Garage features" |
||||
weight = 10 |
||||
+++ |
||||
|
||||
|
||||
### S3 API |
||||
|
||||
The main goal of Garage is to provide an object storage service that is compatible with the |
||||
[S3 API](https://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html) from Amazon Web Services. |
||||
We try to adhere as strictly as possible to the semantics of the API as implemented by Amazon |
||||
and other vendors such as Minio or Ceph.
||||
|
||||
Of course Garage does not implement the full span of API endpoints that AWS S3 does; |
||||
the exact list of S3 features implemented by Garage can be found [on our S3 compatibility page](@/documentation/reference-manual/s3-compatibility.md). |
||||
|
||||
### Geo-distribution |
||||
|
||||
Garage allows you to store copies of your data in multiple geographical locations in order to maximize resilience |
||||
to adverse events, such as network/power outages or hardware failures. |
||||
This allows Garage to run very well even at home, using consumer-grade Internet connectivity |
||||
(such as FTTH) and power, as long as cluster nodes can be spawned at several physical locations. |
||||
Garage exploits knowledge of the capacity and physical location of each storage node to design |
||||
a storage plan that makes the best use of the available storage capacity while satisfying the geo-distributed replication constraint.
||||
|
||||
To learn more about geo-distributed Garage clusters, |
||||
read our documentation on [setting up a real-world deployment](@/documentation/cookbook/real-world.md). |
||||
|
||||
### Standalone/self-contained |
||||
|
||||
Garage is extremely simple to deploy, and does not depend on any external service to run. |
||||
This makes setting up and administering storage clusters, we hope, as easy as it could be. |
||||
|
||||
### Flexible topology |
||||
|
||||
A Garage cluster can very easily evolve over time, as storage nodes are added or removed. |
||||
Garage will automatically rebalance data between nodes as needed to ensure the desired number of copies. |
||||
Read about cluster layout management [here](@/documentation/operations/layout.md). |
||||
|
||||
### No RAFT slowing you down |
||||
|
||||
It might seem strange to tout the absence of something as a desirable feature, |
||||
but this is in fact a very important point! Garage does not use RAFT or another |
||||
consensus algorithm internally to order incoming requests: this means that all requests |
||||
directed to a Garage cluster can be handled independently of one another instead |
||||
of going through a central bottleneck (the leader node). |
||||
As a consequence, requests can be handled much faster, even in cases where latency |
||||
between cluster nodes is high (see our [benchmarks](@/documentation/design/benchmarks/index.md) for data on this).
||||
This is particularly useful when nodes are far from one another and talk to one another over standard Internet connections.
||||
|
||||
### Several replication modes |
||||
|
||||
Garage supports a variety of replication modes, with 1 copy, 2 copies or 3 copies of your data, |
||||
and with various levels of consistency, in order to adapt to a variety of usage scenarios. |
||||
Read our reference page on [supported replication modes](@/documentation/reference-manual/configuration.md#replication_mode) |
||||
to select the replication mode best suited to your use case (hint: in most cases, `replication_mode = "3"` is what you want). |
||||
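For reference, this is a single line in the configuration file (the key and value
are those mentioned in the hint above):

```toml
# Keep 3 copies of each object (recommended in most cases).
replication_mode = "3"
```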
|
||||
### Web server for static websites |
||||
|
||||
A storage bucket can easily be configured to be served directly by Garage as a static web site. |
||||
Domain names for multiple websites directly map to bucket names, making it easy to build |
||||
a platform for your users to autonomously build and host their websites over Garage. |
||||
Surprisingly, none of the other alternative S3 implementations we surveyed (such as Minio |
||||
or Ceph) support publishing static websites from S3 buckets, a feature that is however
||||
directly inherited from S3 on AWS. |
||||
Read more on our [dedicated documentation page](@/documentation/cookbook/exposing-websites.md). |
||||
|
||||
### Bucket names as aliases |
||||
|
||||
In Garage, a bucket may have several names, known as aliases. |
||||
Aliases can easily be added and removed on demand: |
||||
this makes it easy to rename buckets if needed
||||
without having to copy all of their content, something that cannot be done on AWS. |
||||
For buckets served as static websites, having multiple aliases for a bucket can allow |
||||
exposing the same content under different domain names. |
||||
|
||||
Garage also supports bucket aliases which are local to a single user: |
||||
this allows different users to have different buckets with the same name, thus avoiding naming collisions. |
||||
This can be helpful, for instance, if you want to write an application that creates per-user buckets that always have the same name.
||||
|
||||
This feature is totally invisible to S3 clients and does not break compatibility with AWS. |
||||
|
||||
### Cluster administration API |
||||
|
||||
Garage provides a fully-fledged REST API to administer your cluster programmatically.
||||
Functionality included in the admin API includes: setting up and monitoring
||||
cluster nodes, managing access credentials, and managing storage buckets and bucket aliases. |
||||
A full reference of the administration API is available [here](@/documentation/reference-manual/admin-api.md). |
||||
|
||||
### Metrics and traces |
||||
|
||||
Garage makes some internal metrics available in the Prometheus data format, |
||||
which allows you to build interactive dashboards to visualize the load and internal state of your storage cluster. |
||||
|
||||
For developers and performance-savvy administrators,
||||
Garage also supports exporting traces of what it does internally in OpenTelemetry format. |
||||
This makes it possible to monitor the time spent at the various steps of request processing,
||||
in order to detect potential performance bottlenecks. |
||||
|
||||
### Kubernetes and Nomad integrations |
||||
|
||||
Garage can automatically discover other nodes in the cluster thanks to integration |
||||
with orchestrators such as Kubernetes and Nomad (when used with Consul). |
||||
This eases the configuration of your cluster as it removes one step where nodes need |
||||
to be manually connected to one another. |
||||
|
||||
### Support for changing IP addresses |
||||
|
||||
As long as all of your nodes don't change their IP address at the same time, |
||||
Garage should be able to tolerate nodes with changing/dynamic IP addresses, |
||||
as nodes will regularly exchange the IP addresses of their peers and try to |
||||
reconnect using newer addresses when existing connections are broken. |
||||
|
||||
### K2V API (experimental) |
||||
|
||||
As part of an ongoing research project, Garage can expose an experimental key/value storage API called K2V. |
||||
K2V is made for the storage and retrieval of many small key/value pairs that need to be processed in bulk. |
||||
This completes the S3 API with an alternative that can be used to easily store and access metadata |
||||
related to objects stored in an S3 bucket. |
||||
|
||||
In the context of our research project, [Aérogramme](https://aerogramme.deuxfleurs.fr), |
||||
K2V is used to provide metadata and log storage for operations on encrypted e-mail storage. |
||||
|
||||
Learn more about the K2V specification [here](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/k2v/doc/drafts/k2v-spec.md)
||||
and on how to enable it in Garage [here](@/documentation/reference-manual/k2v.md). |
@ -0,0 +1,77 @@ |
||||
+++ |
||||
title = "Cluster layout management" |
||||
weight = 10 |
||||
+++ |
||||
|
||||
The cluster layout in Garage is a table that assigns to each node a role in |
||||
the cluster. The role of a node in Garage can either be a storage node with |
||||
a certain capacity, or a gateway node that does not store data and is only |
||||
used as an API entry point for faster cluster access. |
||||
An introduction to building cluster layouts can be found in the [production deployment](@/documentation/cookbook/real-world.md) page. |
||||
|
||||
## How cluster layouts work in Garage |
||||
|
||||
In Garage, a cluster layout is composed of the following components: |
||||
|
||||
- a table of roles assigned to nodes |
||||
- a version number |
||||
|
||||
Garage nodes will always use the cluster layout with the highest version number. |
||||
|
||||
Garage nodes also maintain and synchronize among themselves a set of proposed role
||||
changes that haven't yet been applied. These changes will be applied (or |
||||
canceled) in the next version of the layout.
||||
|
||||
The following commands insert modifications to the set of proposed role changes |
||||
for the next layout version (but they do not create the new layout immediately): |
||||
|
||||
```bash |
||||
garage layout assign [...] |
||||
garage layout remove [...] |
||||
``` |
||||
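For instance, the following sketch stages a role change (the node ID prefix, zone,
capacity and tag are placeholder values; see `garage layout assign --help` for the
exact flags on your version):

```bash
# Stage: make node 563e a 1 TB storage node in zone dc1, tagged "mynode".
garage layout assign 563e -z dc1 -c 1T -t mynode
```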
|
||||
The following command can be used to inspect the layout that is currently set in the cluster |
||||
and the changes proposed for the next layout version, if any: |
||||
|
||||
```bash |
||||
garage layout show |
||||
``` |
||||
|
||||
The following commands create a new layout with the specified version number, |
||||
that either takes into account the proposed changes or cancels them: |
||||
|
||||
```bash |
||||
garage layout apply --version <new_version_number> |
||||
garage layout revert --version <new_version_number> |
||||
``` |
||||
|
||||
The version number of the new layout to create must be 1 + the version number |
||||
of the previous layout that existed in the cluster. The `apply` and `revert` |
||||
commands will fail otherwise. |
||||
|
||||
## Warnings about Garage cluster layout management |
||||
|
||||
**Warning: never make several calls to `garage layout apply` or `garage layout |
||||
revert` with the same value of the `--version` flag. Doing so can lead to the |
||||
creation of several different layouts with the same version number, in which |
||||
case your Garage cluster will become inconsistent until fixed.** If a call to |
||||
`garage layout apply` or `garage layout revert` has failed and `garage layout |
||||
show` indicates that a new layout with the given version number has not been |
||||
set in the cluster, then it is fine to call the command again with the same |
||||
version number. |
||||
|
||||
If you are using the `garage` CLI by typing individual commands in your |
||||
shell, you shouldn't have many issues as long as you run commands one after
||||
the other and take care of checking the output of `garage layout show` |
||||
before applying any changes. |
||||
|
||||
If you are using the `garage` CLI to script layout changes, follow these recommendations (a sketch of such a script is given after the list):
||||
|
||||
- Make all of your `garage` CLI calls to the same RPC host. Do not use the |
||||
`garage` CLI to connect to individual nodes to send them each a piece of the |
||||
layout changes you are making, as the changes propagate asynchronously |
||||
between nodes and might not all be taken into account at the time when the |
||||
new layout is applied. |
||||
|
||||
- **Only call `garage layout apply` once**, and call it **strictly after** all |
||||
of the `layout assign` and `layout remove` commands have returned. |
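Here is a minimal sketch of such a script under these recommendations (assumptions:
the CLI is pointed at a single RPC host, here through the `GARAGE_RPC_HOST`
environment variable; node ID prefixes and the version number are placeholders):

```bash
#!/usr/bin/env bash
set -e

# All calls go through the same RPC host (assumed mechanism; adapt to your setup).
export GARAGE_RPC_HOST=10.0.0.11:3901

# Stage all role changes first; none of these creates a new layout yet.
garage layout assign 563e -z dc1 -c 1T
garage layout assign 86f0 -z dc2 -c 1T

# Inspect the staged changes, then apply exactly once, strictly last,
# with version = 1 + the current layout version shown by `layout show`.
garage layout show
garage layout apply --version 13   # placeholder version number
```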
@ -1,285 +0,0 @@ |
||||
|
||||
+++ |
||||
title = "Monitoring" |
||||
weight = 60 |
||||
+++ |
||||
|
||||
|
||||
For information on setting up monitoring, see our [dedicated page](@/documentation/cookbook/monitoring.md) in the Cookbook section. |
||||
|
||||
## List of exported metrics |
||||
|
||||
### Garage system metrics |
||||
|
||||
#### `garage_build_info` (counter) |
||||
|
||||
Exposes the Garage version number running on a node. |
||||
|
||||
``` |
||||
garage_build_info{version="1.0"} 1 |
||||
``` |
||||
|
||||
#### `garage_replication_factor` (counter) |
||||
|
||||
Exposes the Garage replication factor configured on the node |
||||
|
||||
``` |
||||
garage_replication_factor 3 |
||||
``` |
||||
|
||||
### Metrics of the API endpoints |
||||
|
||||
#### `api_admin_request_counter` (counter) |
||||
|
||||
Counts the number of requests to a given endpoint of the administration API. Example: |
||||
|
||||
``` |
||||
api_admin_request_counter{api_endpoint="Metrics"} 127041 |
||||
``` |
||||
|
||||
#### `api_admin_request_duration` (histogram) |
||||
|
||||
Evaluates the duration of API calls to the various administration API endpoints. Example:
||||
|
||||
``` |
||||
api_admin_request_duration_bucket{api_endpoint="Metrics",le="0.5"} 127041 |
||||
api_admin_request_duration_sum{api_endpoint="Metrics"} 605.250344830999 |
||||
api_admin_request_duration_count{api_endpoint="Metrics"} 127041 |
||||
``` |
||||
|
||||
#### `api_s3_request_counter` (counter) |
||||
|
||||
Counts the number of requests to a given endpoint of the S3 API. Example: |
||||
|
||||
``` |
||||
api_s3_request_counter{api_endpoint="CreateMultipartUpload"} 1 |
||||
``` |
||||
|
||||
#### `api_s3_error_counter` (counter) |
||||
|
||||
Counts the number of requests to a given endpoint of the S3 API that returned an error. Example: |
||||
|
||||
``` |
||||
api_s3_error_counter{api_endpoint="GetObject",status_code="404"} 39 |
||||
``` |
||||
|
||||
#### `api_s3_request_duration` (histogram) |
||||
|
||||
Evaluates the duration of API calls to the various S3 API endpoints. Example: |
||||
|
||||
``` |
||||
api_s3_request_duration_bucket{api_endpoint="CreateMultipartUpload",le="0.5"} 1 |
||||
api_s3_request_duration_sum{api_endpoint="CreateMultipartUpload"} 0.046340762 |
||||
api_s3_request_duration_count{api_endpoint="CreateMultipartUpload"} 1 |
||||
``` |
||||
|
||||
#### `api_k2v_request_counter` (counter), `api_k2v_error_counter` (counter), `api_k2v_error_duration` (histogram) |
||||
|
||||
Same as for S3, for the K2V API. |
||||
|
||||
|
||||
### Metrics of the Web endpoint |
||||
|
||||
|
||||
#### `web_request_counter` (counter) |
||||
|
||||
Number of requests to the web endpoint |
||||
|
||||
``` |
||||
web_request_counter{method="GET"} 80 |
||||
``` |
||||
|
||||
#### `web_request_duration` (histogram) |
||||
|
||||
Duration of requests to the web endpoint |
||||
|
||||
``` |
||||
web_request_duration_bucket{method="GET",le="0.5"} 80 |
||||
web_request_duration_sum{method="GET"} 1.0528433229999998 |
||||
web_request_duration_count{method="GET"} 80 |
||||
``` |
||||
|
||||
#### `web_error_counter` (counter) |
||||
|
||||
Number of requests to the web endpoint resulting in errors |
||||
|
||||
``` |
||||
web_error_counter{method="GET",status_code="404 Not Found"} 64 |
||||
``` |
||||
|
||||
|
||||
### Metrics of the data block manager |
||||
|
||||
#### `block_bytes_read`, `block_bytes_written` (counter) |
||||
|
||||
Number of bytes read/written to/from disk in the data storage directory. |
||||
|
||||
``` |
||||
block_bytes_read 120586322022 |
||||
block_bytes_written 3386618077 |
||||
``` |
||||
|
||||
#### `block_compression_level` (counter) |
||||
|
||||
Exposes the block compression level configured for the Garage node. |
||||
|
||||
``` |
||||
block_compression_level 3 |
||||
``` |
||||
|
||||
#### `block_read_duration`, `block_write_duration` (histograms) |
||||
|
||||
Evaluates the duration of the reading/writing of individual data blocks in the data storage directory. |
||||
|
||||
``` |
||||
block_read_duration_bucket{le="0.5"} 169229 |
||||
block_read_duration_sum 2761.6902550310056 |
||||
block_read_duration_count 169240 |
||||
block_write_duration_bucket{le="0.5"} 3559 |
||||
block_write_duration_sum 195.59170078500006 |
||||
block_write_duration_count 3571 |
||||
``` |
||||
|
||||
#### `block_delete_counter` (counter) |
||||
|
||||
Counts the number of data blocks that have been deleted from storage. |
||||
|
||||
``` |
||||
block_delete_counter 122 |
||||
``` |
||||
|
||||
#### `block_resync_counter` (counter), `block_resync_duration` (histogram) |
||||
|
||||
Counts the number of resync operations the node has executed, and evaluates their duration. |
||||
|
||||
``` |
||||
block_resync_counter 308897 |
||||
block_resync_duration_bucket{le="0.5"} 308892 |
||||
block_resync_duration_sum 139.64204196100016 |
||||
block_resync_duration_count 308897 |
||||
``` |
||||
|
||||
#### `block_resync_queue_length` (gauge) |
||||
|
||||
The number of block hashes currently queued for a resync. |
||||
It is normal for this value to be nonzero for long periods of time.
||||
|
||||
``` |
||||
block_resync_queue_length 0 |
||||
``` |
||||
|
||||
#### `block_resync_errored_blocks` (gauge) |
||||
|
||||
The number of block hashes that we were unable to resync last time we tried. |
||||
**THIS SHOULD BE ZERO, OR FALL BACK TO ZERO RAPIDLY, IN A HEALTHY CLUSTER.** |
||||
Persistent nonzero values indicate that some data is likely to be lost. |
||||
|
||||
``` |
||||
block_resync_errored_blocks 0 |
||||
``` |
||||
|
||||
|
||||
### Metrics related to RPCs (remote procedure calls) between nodes |
||||
|
||||
#### `rpc_netapp_request_counter` (counter) |
||||
|
||||
Number of RPC requests emitted |
||||
|
||||
``` |
||||
rpc_request_counter{from="<this node>",rpc_endpoint="garage_block/manager.rs/Rpc",to="<remote node>"} 176 |
||||
``` |
||||
|
||||
#### `rpc_netapp_error_counter` (counter) |
||||
|
||||
Number of communication errors (errors in the Netapp library, generally due to disconnected nodes) |
||||
|
||||
``` |
||||
rpc_netapp_error_counter{from="<this node>",rpc_endpoint="garage_block/manager.rs/Rpc",to="<remote node>"} 354 |
||||
``` |
||||
|
||||
#### `rpc_timeout_counter` (counter) |
||||
|
||||
Number of RPC timeouts, should be close to zero in a healthy cluster. |
||||
|
||||
``` |
||||
rpc_timeout_counter{from="<this node>",rpc_endpoint="garage_rpc/membership.rs/SystemRpc",to="<remote node>"} 1 |
||||
``` |
||||
|
||||
#### `rpc_duration` (histogram) |
||||
|
||||
The duration of internal RPC calls between Garage nodes. |
||||
|
||||
``` |
||||
rpc_duration_bucket{from="<this node>",rpc_endpoint="garage_block/manager.rs/Rpc",to="<remote node>",le="0.5"} 166 |
||||
rpc_duration_sum{from="<this node>",rpc_endpoint="garage_block/manager.rs/Rpc",to="<remote node>"} 35.172253716 |
||||
rpc_duration_count{from="<this node>",rpc_endpoint="garage_block/manager.rs/Rpc",to="<remote node>"} 174 |
||||
``` |
||||
|
||||
|
||||
### Metrics of the metadata table manager |
||||
|
||||
#### `table_gc_todo_queue_length` (gauge) |
||||
|
||||
Table garbage collector TODO queue length |
||||
|
||||
``` |
||||
table_gc_todo_queue_length{table_name="block_ref"} 0 |
||||
``` |
||||
|
||||
#### `table_get_request_counter` (counter), `table_get_request_duration` (histogram) |
||||
|
||||
Number of get/get_range requests internally made on each table, and their duration. |
||||
|
||||
``` |
||||
table_get_request_counter{table_name="bucket_alias"} 315 |
||||
table_get_request_duration_bucket{table_name="bucket_alias",le="0.5"} 315 |
||||
table_get_request_duration_sum{table_name="bucket_alias"} 0.048509778000000024 |
||||
table_get_request_duration_count{table_name="bucket_alias"} 315 |
||||
``` |
||||
|
||||
|
||||
#### `table_put_request_counter` (counter), `table_put_request_duration` (histogram) |
||||
|
||||
Number of insert/insert_many requests internally made on this table, and their duration |
||||
|
||||
``` |
||||
table_put_request_counter{table_name="block_ref"} 677 |
||||
table_put_request_duration_bucket{table_name="block_ref",le="0.5"} 677 |
||||
table_put_request_duration_sum{table_name="block_ref"} 61.617528636 |
||||
table_put_request_duration_count{table_name="block_ref"} 677 |
||||
``` |
||||
|
||||
#### `table_internal_delete_counter` (counter) |
||||
|
||||
Number of value deletions in the tree (due to GC or repartitioning) |
||||
|
||||
``` |
||||
table_internal_delete_counter{table_name="block_ref"} 2296 |
||||
``` |
||||
|
||||
#### `table_internal_update_counter` (counter) |
||||
|
||||
Number of value updates where the value actually changes (includes creation of new key and update of existing key) |
||||
|
||||
``` |
||||
table_internal_update_counter{table_name="block_ref"} 5996 |
||||
``` |
||||
|
||||
#### `table_merkle_updater_todo_queue_length` (gauge) |
||||
|
||||
Merkle tree updater TODO queue length (should fall to zero rapidly) |
||||
|
||||
``` |
||||
table_merkle_updater_todo_queue_length{table_name="block_ref"} 0 |
||||
``` |
||||
|
||||
#### `table_sync_items_received`, `table_sync_items_sent` (counters) |
||||
|
||||
Number of data items sent to/received from other nodes during resync procedures
||||
|
||||
``` |
||||
table_sync_items_received{from="<remote node>",table_name="bucket_v2"} 3 |
||||
table_sync_items_sent{table_name="block_ref",to="<remote node>"} 2 |
||||
``` |
||||
|
||||
|
@ -0,0 +1,45 @@ |
||||
+++ |
||||
title = "Request routing logic" |
||||
weight = 10 |
||||
+++ |
||||
|
||||
Data retrieval requests to Garage endpoints (S3 API and websites) are resolved |
||||
to an individual object in a bucket. Since objects are replicated to multiple nodes,
||||
Garage must ensure consistency before answering the request. |
||||
|
||||
## Using quorum to ensure consistency |
||||
|
||||
Garage ensures consistency by attempting to establish a quorum with the |
||||
data nodes responsible for the object. When a majority of the data nodes |
||||
have provided metadata on an object, Garage can then answer the request.
||||
|
||||
When a request arrives, Garage will, assuming the recommended 3 replicas, perform the following actions:
||||
|
||||
- Make a request to the two preferred nodes for object metadata |
||||
- Try the third node if one of the two initial requests fails
||||
- Check that the metadata from at least 2 nodes match |
||||
- Check that the object hasn't been marked deleted |
||||
- Answer the request with inline data from the metadata if the object is small enough
||||
- Or get data blocks from the preferred nodes and answer using the assembled object |
||||
|
||||
Garage dynamically determines which nodes to query based on health, preference, and |
||||
which nodes actually hold the requested data. Garage has no concept of a "primary" node, so any
||||
healthy node with the data can be used as long as a quorum is reached for the metadata. |
||||
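The quorum check on metadata can be sketched as follows (a simplified illustration,
not Garage's actual implementation; the `Meta` type is invented for the example):

```rust
/// Simplified metadata record returned by a replica.
#[derive(PartialEq)]
struct Meta {
    version: u64,
    deleted: bool,
}

/// Return the agreed-upon metadata if at least 2 of the queried
/// replicas gave matching answers, as described above.
fn metadata_quorum(answers: &[Meta]) -> Option<&Meta> {
    for a in answers {
        let agreeing = answers.iter().filter(|b| *b == a).count();
        if agreeing >= 2 {
            // A deletion marker also reaches quorum, but yields no object.
            return if a.deleted { None } else { Some(a) };
        }
    }
    None // no quorum yet: query the remaining node or fail the request
}
```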
|
||||
## Node health |
||||
|
||||
Garage keeps a TCP session open to each node in the cluster and periodically pings them. If a connection |
||||
cannot be established, or a node fails to answer a number of pings, the target node is marked as failed. |
||||
Failed nodes are not used for quorum or other internal requests. |
||||
|
||||
## Node preference |
||||
|
||||
Garage prioritizes which nodes to query according to a few criteria: |
||||
|
||||
- A node always prefers itself if it can answer the request |
||||
- Then the node prioritizes nodes in the same zone |
||||
- Finally the nodes with the lowest latency are prioritized |
||||
|
||||
|
||||
For further reading on the cluster structure, look at the [gateway](@/documentation/cookbook/gateways.md)
||||
and [cluster layout management](@/documentation/reference-manual/layout.md) pages. |
@ -1,57 +0,0 @@ |
||||
+++ |
||||
title = "Migrating from 0.7 to 0.8" |
||||
weight = 13 |
||||
+++ |
||||
|
||||
**This guide explains how to migrate to 0.8 if you have an existing 0.7 cluster. |
||||
We don't recommend trying to migrate to 0.8 directly from 0.6 or older.** |
||||
|
||||
**We make no guarantee that this migration will work perfectly: |
||||
back up all your data before attempting it!** |
||||
|
||||
Garage v0.8 introduces new data tables that allow the counting of objects in buckets in order to implement bucket quotas. |
||||
A manual migration step is required to first count objects in Garage buckets and populate these tables with accurate data. |
||||
|
||||
## Simple migration procedure (takes cluster offline for a while) |
||||
|
||||
The migration steps are as follows: |
||||
|
||||
1. Disable API and web access. Garage v0.7 does not support disabling |
||||
these endpoints, but you can for instance change the port number or stop your reverse proxy.
||||
2. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`, |
||||
check the logs and check that all data seems to be synced correctly between |
||||
nodes. If you have time, do additional checks (`versions`, `block_refs`, etc.) |
||||
3. Check that queues are empty: run `garage stats` to query them or inspect metrics in the Grafana dashboard. |
||||
4. Turn off Garage v0.7 |
||||
5. **Backup the metadata folder of all your nodes!** For instance, use the following command |
||||
if your metadata directory is `/var/lib/garage/meta`: `cd /var/lib/garage ; tar -acf meta-v0.7.tar.zst meta/` |
||||
6. Install Garage v0.8 |
||||
7. **Before starting Garage v0.8**, run the offline migration step: `garage offline-repair --yes object_counters`. |
||||
This can take a while to run, depending on the number of objects stored in your cluster. |
||||
8. Turn on Garage v0.8 |
||||
9. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`. |
||||
Wait for a full table sync to run. |
||||
10. Your upgraded cluster should be in a working state. Re-enable API and Web |
||||
access and check that everything went well. |
||||
11. Monitor your cluster in the next hours to see if it works well under your production load, report any issue. |
||||
|
||||
## Minimal downtime migration procedure |
||||
|
||||
The migration to Garage v0.8 can be done with almost no downtime, |
||||
by restarting all nodes at once in the new version. The only limitation with this |
||||
method is that bucket sizes and item counts will not be estimated correctly |
||||
until all nodes have had a chance to run their offline migration procedure. |
||||
|
||||
The migration steps are as follows: |
||||
|
||||
1. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`, |
||||
check the logs and check that all data seems to be synced correctly between |
||||
nodes. If you have time, do additional checks (`versions`, `block_refs`, etc.) |
||||
|
||||
2. Turn off each node individually; back up its metadata folder (see above); turn it back on again. This will allow you to take a backup of all nodes without impacting global cluster availability. You can do all nodes of a single zone at once as this does not impact the availability of Garage. |
||||
|
||||
3. Prepare your binaries and configuration files for Garage v0.8 |
||||
|
||||
4. Shut down all v0.7 nodes simultaneously, and restart them all simultaneously in v0.8. Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to achieve this as fast as possible. |
||||
|
||||
5. At this point, Garage will indicate invalid values for the size and number of objects in each bucket (most likely, it will indicate zero). To fix this, take each node offline individually to do the offline migration step: `garage offline-repair --yes object_counters`. Again you can do all nodes of a single zone at once. |
@ -1,72 +0,0 @@ |
||||
+++ |
||||
title = "Migrating from 0.8 to 0.9" |
||||
weight = 12 |
||||
+++ |
||||
|
||||
**This guide explains how to migrate to 0.9 if you have an existing 0.8 cluster. |
||||
We don't recommend trying to migrate to 0.9 directly from 0.7 or older.** |
||||
|
||||
This migration procedure has been tested on several clusters without issues. |
||||
However, it is still a *critical procedure* that might cause issues. |
||||
**Make sure to back up all your data before attempting it!** |
||||
|
||||
You might also want to read our [general documentation on upgrading Garage](@/documentation/operations/upgrading.md). |
||||
|
||||
The following are **breaking changes** in Garage v0.9 that require your attention when migrating: |
||||
|
||||
- LMDB is now the default metadata db engine and Sled is deprecated. If you were using Sled, make sure to specify `db_engine = "sled"` in your configuration file, or take the time to [convert your database](https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#db-engine-since-v0-8-0). |
||||
|
||||
- Capacity values are now in actual byte units. The translation from the old layout will assign 1 capacity = 1Gb by default, which might be wrong for your cluster. This does not cause any data to be moved around, but you might want to re-assign correct capacity values post-migration. |
||||
|
||||
- Multipart uploads that were started in Garage v0.8 will not be visible in Garage v0.9 and will have to be restarted from scratch. |
||||
|
||||
- Changes to the admin API: some `v0/` endpoints have been replaced by `v1/` counterparts with updated/uniformized syntax. All other endpoints have also moved to `v1/` by default, without syntax changes, but are still available under `v0/` for compatibility. |
||||
|
||||
|
||||
## Simple migration procedure (takes cluster offline for a while) |
||||
|
||||
The migration steps are as follows: |
||||
|
||||
1. Disable API and web access. You may do this by stopping your reverse proxy or by commenting out |
||||
the `api_bind_addr` values in your `config.toml` file and restarting Garage. |
||||
2. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`, |
||||
check the logs and check that all data seems to be synced correctly between |
||||
nodes. If you have time, do additional checks (`versions`, `block_refs`, etc.) |
||||
3. Check that the block resync queue and Merkle queue are empty: |
||||
run `garage stats -a` to query them or inspect metrics in the Grafana dashboard. |
||||
4. Turn off Garage v0.8 |
||||
5. **Backup the metadata folder of all your nodes!** For instance, use the following command |
||||
if your metadata directory is `/var/lib/garage/meta`: `cd /var/lib/garage ; tar -acf meta-v0.8.tar.zst meta/` |
||||
6. Install Garage v0.9 |
||||
7. Update your configuration file if necessary. |
||||
8. Turn on Garage v0.9 |
||||
9. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`. |
||||
Wait for a full table sync to run. |
||||
10. Your upgraded cluster should be in a working state. Re-enable API and Web |
||||
access and check that everything went well. |
||||
11. Monitor your cluster in the next hours to see if it works well under your production load, report any issue. |
||||
12. You might want to assign correct capacity values to all your nodes. Doing so might cause data to be moved |
||||
in your cluster, which should also be monitored carefully. |
||||
|
||||
## Minimal downtime migration procedure |
||||
|
||||
The migration to Garage v0.9 can be done with almost no downtime, |
||||
by restarting all nodes at once in the new version. |
||||
|
||||
The migration steps are as follows: |
||||
|
||||
1. Do `garage repair --all-nodes --yes tables` and `garage repair --all-nodes --yes blocks`, |
||||
check the logs and check that all data seems to be synced correctly between |
||||
nodes. If you have time, do additional checks (`versions`, `block_refs`, etc.) |
||||
|
||||
2. Turn off each node individually; back up its metadata folder (see above); turn it back on again. |
||||
This will allow you to take a backup of all nodes without impacting global cluster availability. |
||||
You can do all nodes of a single zone at once as this does not impact the availability of Garage. |
||||
|
||||
3. Prepare your binaries and configuration files for Garage v0.9 |
||||
|
||||
4. Shut down all v0.8 nodes simultaneously, and restart them all simultaneously in v0.9. |
||||
Use your favorite deployment tool (Ansible, Kubernetes, Nomad) to achieve this as fast as possible. |
||||
Garage v0.9 should be in a working state as soon as it starts. |
||||
|
||||
5. Proceed with repair and monitoring as described in steps 9-12 above. |
@ -1,75 +0,0 @@ |
||||
+++ |
||||
title = "Testing strategy" |
||||
weight = 30 |
||||
+++ |
||||
|
||||
|
||||
## Testing Garage |
||||
|
||||
Currently, we have the following tests: |
||||
|
||||
- some unit tests spread around the codebase |
||||
- integration tests written in Rust (`src/garage/test`) to check that Garage operations perform correctly |
||||
- integration test for compatibility with external tools (`script/test-smoke.sh`) |
||||
|
||||
We have also tried `minio/mint` but it fails a lot and for now we haven't gotten a lot from it. |
||||
|
||||
In the future: |
||||
|
||||
1. We'd like to have a systematic way of testing with `minio/mint`, |
||||
it would add value to Garage by providing a compatibility score and reference that can be trusted. |
||||
2. We'd also like to do testing with Jepsen in some way. |
||||
|
||||
## How to instrument Garage
||||
|
||||
We should try to test in the least invasive ways, i.e. minimize the impact of the testing framework on Garage's source code. This means, for example:
||||
|
||||
- Not abstracting IO/nondeterminism in the source code |
||||
- Not making `garage` a shared library (launch using `execve`, it's perfectly fine) |
||||
|
||||
Instead, we should focus on building a clean outer interface for the `garage` binary, |
||||
for example loading configuration from environment variables instead of the configuration file if that's helpful for writing the tests.
||||
|
||||
There are two reasons for this: |
||||
|
||||
- Keep the source code clean and focused
||||
- Test something that is as close as possible to the true Garage that will actually be running
||||
|
||||
Reminder: rules of simplicity, concerning changes to Garage's source code. |
||||
Always question what we are doing. |
||||
Never do anything just because it looks nice or because we "think" it might be useful at some later point but without knowing precisely why/when.
||||
Only do things that make perfect sense in the context of what we currently know. |
||||
|
||||
## References |
||||
|
||||
Testing is a research field on its own. |
||||
About testing distributed systems: |
||||
|
||||
- [Jepsen](https://jepsen.io/) is a testing framework designed to test distributed systems. It can mock some part of the system like the time and the network. |
||||
- [FoundationDB Testing Approach](https://www.micahlerner.com/2021/06/12/foundationdb-a-distributed-unbundled-transactional-key-value-store.html#what-is-unique-about-foundationdbs-testing-framework). They chose to abstract "all sources of nondeterminism and communication are abstracted, including network, disk, time, and pseudo random number generator" to be able to run tests by simulating faults. |
||||
- [Testing Distributed Systems](https://asatarin.github.io/testing-distributed-systems/) - Curated list of resources on testing distributed systems |
||||
|
||||
About S3 compatibility: |
||||
- [ceph/s3-tests](https://github.com/ceph/s3-tests) |
||||
- (deprecated) [minio/s3verify](https://blog.min.io/s3verify-a-simple-tool-to-verify-aws-s3-api-compatibility/) |
||||
- [minio/mint](https://github.com/minio/mint) |
||||
|
||||
About benchmarking S3 (I think it is not necessarily very relevant for this iteration): |
||||
- [minio/warp](https://github.com/minio/warp) |
||||
- [wasabi-tech/s3-benchmark](https://github.com/wasabi-tech/s3-benchmark) |
||||
- [dvassallo/s3-benchmark](https://github.com/dvassallo/s3-benchmark) |
||||
- [intel-cloud/cosbench](https://github.com/intel-cloud/cosbench) - used by Ceph |
||||
|
||||
Engineering blog posts: |
||||
- [Quincy @ Scale: A Tale of Three Large-Scale Clusters](https://ceph.io/en/news/blog/2022/three-large-scale-clusters/) |
||||
|
||||
Interesting blog posts on the blog of the Sled database: |
||||
|
||||
- <https://sled.rs/simulation.html> |
||||
- <https://sled.rs/perf.html> |
||||
|
||||
Misc: |
||||
- [mutagen](https://github.com/llogiq/mutagen) - mutation testing is a way to assert our test quality by mutating the code and see if the mutation makes the tests fail |
||||
- [fuzzing](https://rust-fuzz.github.io/book/) - cargo supports fuzzing, it could be a way to test our software reliability in presence of garbage data. |
||||
|
||||
|
@ -1,752 +0,0 @@ |
||||
+++ |
||||
title = "Administration API" |
||||
weight = 60 |
||||
+++ |
||||
|
||||
The Garage administration API is accessible through a dedicated server whose |
||||
listen address is specified in the `[admin]` section of the configuration |
||||
file (see [configuration file |
||||
reference](@/documentation/reference-manual/configuration.md)) |
||||
|
||||
**WARNING.** At this point, there is no commitment to the stability of the APIs described in this document.
||||
We will bump the version numbers prefixed to each API endpoint each time the syntax
||||
or semantics change, meaning that code that relies on these endpoints will break
||||
when changes are introduced. |
||||
|
||||
The Garage administration API was introduced in version 0.7.2; this document
||||
does not apply to older versions of Garage. |
||||
|
||||
|
||||
## Access control |
||||
|
||||
The admin API uses two different tokens for access control, which are specified in the config file's `[admin]` section:
||||
|
||||
- `metrics_token`: the token for accessing the Metrics endpoint (if this token |
||||
is not set in the config file, the Metrics endpoint can be accessed without |
||||
access control); |
||||
|
||||
- `admin_token`: the token for accessing all of the other administration |
||||
endpoints (if this token is not set in the config file, access to these |
||||
endpoints is disabled entirely). |
||||
|
||||
These tokens are used as simple HTTP bearer tokens. In other words, to |
||||
authenticate access to an admin API endpoint, add the following HTTP header |
||||
to your request: |
||||
|
||||
``` |
||||
Authorization: Bearer <token> |
||||
``` |
||||
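For example, querying the cluster status with `curl` (a sketch: the listen address
below is an assumption, use the `api_bind_addr` configured in your `[admin]` section):

```bash
curl -H "Authorization: Bearer $ADMIN_TOKEN" \
  http://localhost:3903/v1/status
```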
|
||||
## Administration API endpoints |
||||
|
||||
### Metrics-related endpoints |
||||
|
||||
#### Metrics `GET /metrics` |
||||
|
||||
Returns internal Garage metrics in Prometheus format. |
||||
|
||||
#### Health `GET /health` |
||||
|
||||
Used for simple health checks in a cluster setting with an orchestrator. |
||||
Returns an HTTP status 200 if the node is ready to answer user's requests, |
||||
and an HTTP status 503 (Service Unavailable) if there are some partitions |
||||
for which a quorum of nodes is not available. |
||||
A simple textual message is also returned in a body with content-type `text/plain`. |
||||
See `/v1/health` for an API that also returns JSON output. |
||||
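A liveness probe can therefore be as simple as the following sketch (address assumed
as above; `curl -f` exits nonzero on the 503 response):

```bash
curl -fsS http://localhost:3903/health
```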
|
||||
### Cluster operations |
||||
|
||||
#### GetClusterStatus `GET /v1/status` |
||||
|
||||
Returns the cluster's current status in JSON, including: |
||||
|
||||
- ID of the node being queried and its version of the Garage daemon |
||||
- Live nodes |
||||
- Currently configured cluster layout |
||||
- Staged changes to the cluster layout |
||||
|
||||
Example response body: |
||||
|
||||
```json |
||||
{ |
||||
"node": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f", |
||||
"garageVersion": "git:v0.9.0-dev", |
||||
"garageFeatures": [ |
||||
"k2v", |
||||
"sled", |
||||
"lmdb", |
||||
"sqlite", |
||||
"metrics", |
||||
"bundled-libs" |
||||
], |
||||
"rustVersion": "1.68.0", |
||||
"dbEngine": "LMDB (using Heed crate)", |
||||
"knownNodes": [ |
||||
{ |
||||
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f", |
||||
"addr": "10.0.0.11:3901", |
||||
"isUp": true, |
||||
"lastSeenSecsAgo": 9, |
||||
"hostname": "node1" |
||||
}, |
||||
{ |
||||
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff", |
||||
"addr": "10.0.0.12:3901", |
||||
"isUp": true, |
||||
"lastSeenSecsAgo": 1, |
||||
"hostname": "node2" |
||||
}, |
||||
{ |
||||
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27", |
||||
"addr": "10.0.0.21:3901", |
||||
"isUp": true, |
||||
"lastSeenSecsAgo": 7, |
||||
"hostname": "node3" |
||||
}, |
||||
{ |
||||
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b", |
||||
"addr": "10.0.0.22:3901", |
||||
"isUp": true, |
||||
"lastSeenSecsAgo": 1, |
||||
"hostname": "node4" |
||||
} |
||||
], |
||||
"layout": { |
||||
"version": 12, |
||||
"roles": [ |
||||
{ |
||||
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f", |
||||
"zone": "dc1", |
||||
"capacity": 10737418240, |
||||
"tags": [ |
||||
"node1" |
||||
] |
||||
}, |
||||
{ |
||||
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff", |
||||
"zone": "dc1", |
||||
"capacity": 10737418240, |
||||
"tags": [ |
||||
"node2" |
||||
] |
||||
}, |
||||
{ |
||||
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27", |
||||
"zone": "dc2", |
||||
"capacity": 10737418240, |
||||
"tags": [ |
||||
"node3" |
||||
] |
||||
} |
||||
], |
||||
"stagedRoleChanges": [ |
||||
{ |
||||
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b", |
||||
"remove": false, |
||||
"zone": "dc2", |
||||
"capacity": 10737418240, |
||||
"tags": [ |
||||
"node4" |
||||
] |
||||
},
||||
{ |
||||
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27", |
||||
"remove": true, |
||||
"zone": null, |
||||
"capacity": null, |
||||
"tags": null, |
||||
} |
||||
] |
||||
} |
||||
} |
||||
``` |
||||
|
||||
#### GetClusterHealth `GET /v1/health` |
||||
|
||||
Returns the cluster's current health in JSON format, with the following variables: |
||||
|
||||
- `status`: one of `healthy`, `degraded` or `unavailable`: |
||||
- healthy: Garage node is connected to all storage nodes |
||||
- degraded: Garage node is not connected to all storage nodes, but a quorum of write nodes is available for all partitions |
||||
- unavailable: a quorum of write nodes is not available for some partitions |
||||
- `knownNodes`: the number of nodes this Garage node has had a TCP connection to since the daemon started |
||||
- `connectedNodes`: the number of nodes this Garage node currently has an open connection to
||||
- `storageNodes`: the number of storage nodes currently registered in the cluster layout |
||||
- `storageNodesOk`: the number of storage nodes to which a connection is currently open |
||||
- `partitions`: the total number of partitions of the data (currently always 256) |
||||
- `partitionsQuorum`: the number of partitions for which a quorum of write nodes is available |
||||
- `partitionsAllOk`: the number of partitions for which we are connected to all storage nodes responsible for storing it
||||
|
||||
Contrary to `GET /health`, this endpoint always returns a 200 OK HTTP response code.
||||
|
||||
Example response body: |
||||
|
||||
```json |
||||
{ |
||||
"status": "degraded", |
||||
"knownNodes": 3, |
||||
"connectedNodes": 3, |
||||
"storageNodes": 4, |
||||
"storageNodesOk": 3, |
||||
"partitions": 256, |
||||
"partitionsQuorum": 256, |
||||
"partitionsAllOk": 64 |
||||
} |
||||
``` |
||||
|
||||
#### ConnectClusterNodes `POST /v1/connect` |
||||
|
||||
Instructs this Garage node to connect to other Garage nodes at specified addresses. |
||||
|
||||
Example request body: |
||||
|
||||
```json |
||||
[ |
||||
"ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f@10.0.0.11:3901", |
||||
"4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff@10.0.0.12:3901" |
||||
] |
||||
``` |
||||
|
||||
The format of the string for a node to connect to is: `<node ID>@<ip address>:<port>`, same as in the `garage node connect` CLI call. |
||||
|
||||
Example response: |
||||
|
||||
```json |
||||
[ |
||||
{ |
||||
"success": true, |
||||
"error": null |
||||
}, |
||||
{ |
||||
"success": false, |
||||
"error": "Handshake error" |
||||
} |
||||
] |
||||
``` |
||||
|
||||
#### GetClusterLayout `GET /v1/layout` |
||||
|
||||
Returns the cluster's current layout in JSON, including: |
||||
|
||||
- Currently configured cluster layout |
||||
- Staged changes to the cluster layout |
||||
|
||||
(the info returned by this endpoint is a subset of the info returned by GetClusterStatus) |
||||
|
||||
Example response body: |
||||
|
||||
```json |
||||
{ |
||||
"version": 12, |
||||
"roles": [ |
||||
{ |
||||
"id": "ec79480e0ce52ae26fd00c9da684e4fa56658d9c64cdcecb094e936de0bfe71f", |
||||
"zone": "dc1", |
||||
"capacity": 10737418240, |
||||
"tags": [ |
||||
"node1" |
||||
] |
||||
}, |
||||
{ |
||||
"id": "4a6ae5a1d0d33bf895f5bb4f0a418b7dc94c47c0dd2eb108d1158f3c8f60b0ff", |
||||
"zone": "dc1", |
||||
"capacity": 10737418240, |
||||
"tags": [ |
||||
"node2" |
||||
] |
||||
}, |
||||
{ |
||||
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27", |
||||
"zone": "dc2", |
||||
"capacity": 10737418240, |
||||
"tags": [ |
||||
"node3" |
||||
] |
||||
} |
||||
], |
||||
"stagedRoleChanges": [ |
||||
{ |
||||
"id": "e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b", |
||||
"remove": false, |
||||
"zone": "dc2", |
||||
"capacity": 10737418240, |
||||
"tags": [ |
||||
"node4" |
||||
] |
||||
},
||||
{ |
||||
"id": "23ffd0cdd375ebff573b20cc5cef38996b51c1a7d6dbcf2c6e619876e507cf27", |
||||
"remove": true, |
||||
"zone": null, |
||||
"capacity": null, |
||||
"tags": null, |
||||
} |
||||
] |
||||
} |
||||
``` |
||||
|
||||
#### UpdateClusterLayout `POST /v1/layout` |
||||
|
||||
Send modifications to the cluster layout. These modifications will |
||||
be included in the staged role changes, visible in subsequent calls |
||||
of `GetClusterLayout`. Once the set of staged changes is satisfactory, |
||||
the user may call `ApplyClusterLayout` to apply the staged changes,
||||
or `RevertClusterLayout` to clear all of the staged changes in
||||
the layout. |
||||
|
||||
Request body format: |
||||
|
||||
```json |
||||
[ |
||||
{ |
||||
"id": <node_id>, |
||||
"capacity": <new_capacity>, |
||||
"zone": <new_zone>, |
||||
"tags": [ |
||||
<new_tag>, |
||||
... |
||||
] |
||||
}, |
||||
{ |
||||
"id": <node_id_to_remove>, |
||||
"remove": true |
||||
} |
||||
] |
||||
``` |
||||
|
||||
Contrary to the CLI, which may update only a subset of the fields
||||
`capacity`, `zone` and `tags`, when calling this API all of these |
||||
values must be specified. |
||||
|
||||
This returns the new cluster layout with the proposed staged changes, |
||||
as returned by GetClusterLayout. |
||||
|
||||
|
||||
#### ApplyClusterLayout `POST /v1/layout/apply` |
||||
|
||||
Applies to the cluster the layout changes currently registered as |
||||
staged layout changes. |
||||
|
||||
Request body format: |
||||
|
||||
```json |
||||
{ |
||||
"version": 13 |
||||
} |
||||
``` |
||||
|
||||
Similarly to the CLI, the body must include the version of the new layout |
||||
that will be created, which MUST be 1 + the value of the currently |
||||
existing layout in the cluster. |
||||
|
||||
This returns the message describing all the calculations done to compute the new |
||||
layout, as well as the description of the layout as returned by GetClusterLayout. |
||||
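A hedged end-to-end example with `curl`, using the node ID from the staged change
shown earlier (listen address assumed as in the examples above):

```bash
# Stage a role change (UpdateClusterLayout)
curl -X POST -H "Authorization: Bearer $ADMIN_TOKEN" \
  -d '[{"id":"e2ee7984ee65b260682086ec70026165903c86e601a4a5a501c1900afe28d84b",
        "zone":"dc2","capacity":10737418240,"tags":["node4"]}]' \
  http://localhost:3903/v1/layout

# Apply it as layout version 13 (ApplyClusterLayout)
curl -X POST -H "Authorization: Bearer $ADMIN_TOKEN" \
  -d '{"version":13}' \
  http://localhost:3903/v1/layout/apply
```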
|
||||
#### RevertClusterLayout `POST /v1/layout/revert` |
||||
|
||||
Clears all of the staged layout changes. |
||||
|
||||
Request body format: |
||||
|
||||
```json |
||||
{ |
||||
"version": 13 |
||||
} |
||||
``` |
||||
|
||||
Reverting the staged changes is done by incrementing the version number |
||||
and clearing the contents of the staged change list. |
||||
Similarly to the CLI, the body must include the incremented |
||||
version number, which MUST be 1 + the value of the currently |
||||
existing layout in the cluster. |
||||
|
||||
This returns the new cluster layout with all changes reverted, |
||||
as returned by GetClusterLayout. |
||||
|
||||
|
||||
### Access key operations |
||||
|
||||
#### ListKeys `GET /v1/key` |
||||
|
||||
Returns all API access keys in the cluster. |
||||
|
||||
Example response: |
||||
|
||||
```json |
||||
[ |
||||
{ |
||||
"id": "GK31c2f218a2e44f485b94239e", |
||||
"name": "test" |
||||
}, |
||||
{ |
||||
"id": "GKe10061ac9c2921f09e4c5540", |
||||
"name": "test2" |
||||
} |
||||
] |
||||
``` |
||||
|
||||
#### GetKeyInfo `GET /v1/key?id=<access key id>`
||||
#### GetKeyInfo `GET /v1/key?search=<pattern>` |
||||
|
||||
Returns information about the requested API access key. |
||||
|
||||
If `id` is set, the key is looked up using its exact identifier (faster). |
||||
If `search` is set, the key is looked up using its name or prefix |
||||
of identifier (slower, all keys are enumerated to do this). |
||||
|
||||
Optionally, the query parameter `showSecretKey=true` can be set to reveal the
||||
associated secret access key. |
||||
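For instance, to look up this key by ID and reveal its secret (a sketch; listen
address assumed as in the examples above):

```bash
curl -H "Authorization: Bearer $ADMIN_TOKEN" \
  "http://localhost:3903/v1/key?id=GK31c2f218a2e44f485b94239e&showSecretKey=true"
```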
|
||||
Example response: |
||||
|
||||
```json |
||||
{ |
||||
"name": "test", |
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e", |
||||
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835", |
||||
"permissions": { |
||||
"createBucket": false |
||||
}, |
||||
"buckets": [ |
||||
{ |
||||
"id": "70dc3bed7fe83a75e46b66e7ddef7d56e65f3c02f9f80b6749fb97eccb5e1033", |
||||
"globalAliases": [ |
||||
"test2" |
||||
], |
||||
"localAliases": [], |
||||
"permissions": { |
||||
"read": true, |
||||
"write": true, |
||||
"owner": false |
||||
} |
||||
}, |
||||
{ |
||||
"id": "d7452a935e663fc1914f3a5515163a6d3724010ce8dfd9e4743ca8be5974f995", |
||||
"globalAliases": [ |
||||
"test3" |
||||
], |
||||
"localAliases": [], |
||||
"permissions": { |
||||
"read": true, |
||||
"write": true, |
||||
"owner": false |
||||
} |
||||
}, |
||||
{ |
||||
"id": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b", |
||||
"globalAliases": [], |
||||
"localAliases": [ |
||||
"test" |
||||
], |
||||
"permissions": { |
||||
"read": true, |
||||
"write": true, |
||||
"owner": true |
||||
} |
||||
}, |
||||
{ |
||||
"id": "96470e0df00ec28807138daf01915cfda2bee8eccc91dea9558c0b4855b5bf95", |
||||
"globalAliases": [ |
||||
"alex" |
||||
], |
||||
"localAliases": [], |
||||
"permissions": { |
||||
"read": true, |
||||
"write": true, |
||||
"owner": true |
||||
} |
||||
} |
||||
] |
||||
} |
||||
``` |
||||
|
||||
#### CreateKey `POST /v1/key` |
||||
|
||||
Creates a new API access key. |
||||
|
||||
Request body format: |
||||
|
||||
```json |
||||
{ |
||||
"name": "NameOfMyKey" |
||||
} |
||||
``` |
||||
|
||||
This returns the key info, including the created secret key, |
||||
in the same format as the result of GetKeyInfo. |
||||
|
||||
#### ImportKey `POST /v1/key/import` |
||||
|
||||
Imports an existing API key. |
||||
This will check that the imported key is in a valid format, i.e.
||||
is a key that could have been generated by Garage. |
||||
|
||||
Request body format: |
||||
|
||||
```json |
||||
{ |
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e", |
||||
"secretAccessKey": "b892c0665f0ada8a4755dae98baa3b133590e11dae3bcc1f9d769d67f16c3835", |
||||
"name": "NameOfMyKey" |
||||
} |
||||
``` |
||||
|
||||
This returns the key info in the same format as the result of GetKeyInfo. |
||||
|
||||
#### UpdateKey `POST /v1/key?id=<access key id>`
||||
|
||||
Updates information about the specified API access key. |
||||
|
||||
Request body format: |
||||
|
||||
```json |
||||
{ |
||||
"name": "NameOfMyKey", |
||||
"allow": { |
||||
"createBucket": true, |
||||
}, |
||||
"deny": {} |
||||
} |
||||
``` |
||||
|
||||
All fields (`name`, `allow` and `deny`) are optional. |
||||
If they are present, the corresponding modifications are applied to the key, otherwise nothing is changed. |
||||
The possible flags in `allow` and `deny` are: `createBucket`. |
||||
|
||||
This returns the key info in the same format as the result of GetKeyInfo. |
||||
|
||||
#### DeleteKey `DELETE /v1/key?id=<access key id>`
||||
|
||||
Deletes an API access key. |
||||
|
||||
|
||||
### Bucket operations |
||||
|
||||
#### ListBuckets `GET /v1/bucket` |
||||
|
||||
Returns all storage buckets in the cluster. |
||||
|
||||
Example response: |
||||
|
||||
```json |
||||
[ |
||||
{ |
||||
"id": "70dc3bed7fe83a75e46b66e7ddef7d56e65f3c02f9f80b6749fb97eccb5e1033", |
||||
"globalAliases": [ |
||||
"test2" |
||||
], |
||||
"localAliases": [] |
||||
}, |
||||
{ |
||||
"id": "96470e0df00ec28807138daf01915cfda2bee8eccc91dea9558c0b4855b5bf95", |
||||
"globalAliases": [ |
||||
"alex" |
||||
], |
||||
"localAliases": [] |
||||
}, |
||||
{ |
||||
"id": "d7452a935e663fc1914f3a5515163a6d3724010ce8dfd9e4743ca8be5974f995", |
||||
"globalAliases": [ |
||||
"test3" |
||||
], |
||||
"localAliases": [] |
||||
}, |
||||
{ |
||||
"id": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b", |
||||
"globalAliases": [], |
||||
"localAliases": [ |
||||
{ |
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e", |
||||
"alias": "test" |
||||
} |
||||
] |
||||
} |
||||
] |
||||
``` |
||||
|
||||
#### GetBucketInfo `GET /v1/bucket?id=<bucket id>` |
||||
#### GetBucketInfo `GET /v1/bucket?globalAlias=<alias>` |
||||
|
||||
Returns information about the requested storage bucket. |
||||
|
||||
If `id` is set, the bucket is looked up using its exact identifier. |
||||
If `globalAlias` is set, the bucket is looked up using its global alias. |
||||
(both are fast) |
||||
|
||||
Example response: |
||||
|
||||
```json |
||||
{ |
||||
"id": "afa8f0a22b40b1247ccd0affb869b0af5cff980924a20e4b5e0720a44deb8d39", |
||||
"globalAliases": [], |
||||
"websiteAccess": false, |
||||
"websiteConfig": null, |
||||
"keys": [ |
||||
{ |
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e", |
||||
"name": "Imported key", |
||||
"permissions": { |
||||
"read": true, |
||||
"write": true, |
||||
"owner": true |
||||
}, |
||||
"bucketLocalAliases": [ |
||||
"debug" |
||||
] |
||||
} |
||||
], |
||||
"objects": 14827, |
||||
"bytes": 13189855625, |
||||
"unfinishedUploads": 1, |
||||
"unfinishedMultipartUploads": 1, |
||||
"unfinishedMultipartUploadParts": 11, |
||||
"unfinishedMultipartUploadBytes": 41943040, |
||||
"quotas": { |
||||
"maxSize": null, |
||||
"maxObjects": null |
||||
} |
||||
} |
||||
``` |
||||
|
||||
#### CreateBucket `POST /v1/bucket` |
||||
|
||||
Creates a new storage bucket. |
||||
|
||||
Request body format: |
||||
|
||||
```json |
||||
{ |
||||
"globalAlias": "NameOfMyBucket" |
||||
} |
||||
``` |
||||
|
||||
OR |
||||
|
||||
```json |
||||
{ |
||||
"localAlias": { |
||||
"accessKeyId": "GK31c2f218a2e44f485b94239e", |
||||
"alias": "NameOfMyBucket", |
||||
"allow": { |
||||
"read": true, |
||||
"write": true, |
||||
"owner": false |
||||
} |
||||
} |
||||
} |
||||
``` |
||||
|
||||
OR |
||||
|
||||
```json |
||||
{} |
||||
``` |
||||
|
||||
Creates a new bucket, either with a global alias, a local one, |
||||
or no alias at all. |
||||
|
||||
Technically, you can also specify both `globalAlias` and `localAlias` and that would create |
||||
two aliases, but I don't see why you would want to do that. |

#### UpdateBucket `PUT /v1/bucket?id=<bucket id>`

Updates the configuration of the given bucket.

Request body format:

```json
{
  "websiteAccess": {
    "enabled": true,
    "indexDocument": "index.html",
    "errorDocument": "404.html"
  },
  "quotas": {
    "maxSize": 19029801,
    "maxObjects": null
  }
}
```

Both fields (`websiteAccess` and `quotas`) are optional.
If a field is present, the corresponding modification is applied to the bucket; otherwise nothing is changed.

In `websiteAccess`: if `enabled` is `true`, `indexDocument` must be specified.
The field `errorDocument` is optional; if no error document is set, a generic
error message is displayed when errors happen. Conversely, if `enabled` is
`false`, neither `indexDocument` nor `errorDocument` may be specified.

In `quotas`: new values of `maxSize` and `maxObjects` must both be specified, or set to `null`
to remove the quotas. An absent value is treated the same as `null`. It is not possible
to change only one of the two quotas.
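
For example, enabling a size quota while leaving the object-count quota unset, with placeholders as before; note that both quota fields are sent, as required above:

```python
import requests

resp = requests.put(
    "http://localhost:3903/v1/bucket",                # hypothetical endpoint
    params={"id": "<bucket id>"},
    headers={"Authorization": "Bearer <admin token>"},
    # Quotas are updated as a pair: an absent field counts as null.
    json={"quotas": {"maxSize": 10 * 1024**3, "maxObjects": None}},
)
resp.raise_for_status()
```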

#### DeleteBucket `DELETE /v1/bucket?id=<bucket id>`

Deletes a storage bucket. A bucket cannot be deleted if it is not empty.

Warning: this will delete all aliases associated with the bucket!
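
A deletion sketch that surfaces the failure case, with placeholders as before; the exact status code returned for a non-empty bucket is not specified here:

```python
import requests

resp = requests.delete(
    "http://localhost:3903/v1/bucket",                # hypothetical endpoint
    params={"id": "<bucket id>"},
    headers={"Authorization": "Bearer <admin token>"},
)
if not resp.ok:
    # Deletion is refused, among other reasons, when the bucket is not empty.
    print("deletion failed:", resp.status_code, resp.text)
```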

### Operations on permissions for keys on buckets

#### BucketAllowKey `POST /v1/bucket/allow`

Allows a key to do read/write/owner operations on a bucket.

Request body format:

```json
{
  "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
  "accessKeyId": "GK31c2f218a2e44f485b94239e",
  "permissions": {
    "read": true,
    "write": true,
    "owner": true
  }
}
```

Flags in `permissions` which have the value `true` will be activated.
Other flags will remain unchanged.

#### BucketDenyKey `POST /v1/bucket/deny`

Denies a key from doing read/write/owner operations on a bucket.

Request body format:

```json
{
  "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
  "accessKeyId": "GK31c2f218a2e44f485b94239e",
  "permissions": {
    "read": false,
    "write": false,
    "owner": true
  }
}
```

Flags in `permissions` which have the value `true` will be deactivated.
Other flags will remain unchanged.
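
A combined sketch of both endpoints, with placeholders as before: grant read and write access, then revoke only write, leaving the other flags untouched.

```python
import requests

BASE = "http://localhost:3903"                        # hypothetical endpoint
HEADERS = {"Authorization": "Bearer <admin token>"}   # placeholder token
IDS = {
    "bucketId": "e6a14cd6a27f48684579ec6b381c078ab11697e6bc8513b72b2f5307e25fff9b",
    "accessKeyId": "GK31c2f218a2e44f485b94239e",
}

# Activate read and write; owner is false, so it stays as it was.
grant = {**IDS, "permissions": {"read": True, "write": True, "owner": False}}
requests.post(f"{BASE}/v1/bucket/allow", headers=HEADERS, json=grant).raise_for_status()

# Deactivate write only; flags set to false remain unchanged.
revoke = {**IDS, "permissions": {"read": False, "write": True, "owner": False}}
requests.post(f"{BASE}/v1/bucket/deny", headers=HEADERS, json=revoke).raise_for_status()
```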

### Operations on bucket aliases

#### GlobalAliasBucket `PUT /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`

Empty body. Creates a global alias for a bucket.

#### GlobalUnaliasBucket `DELETE /v1/bucket/alias/global?id=<bucket id>&alias=<global alias>`

Removes a global alias for a bucket.

#### LocalAliasBucket `PUT /v1/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`

Empty body. Creates a local alias for a bucket in the namespace of a specific access key.

#### LocalUnaliasBucket `DELETE /v1/bucket/alias/local?id=<bucket id>&accessKeyId=<access key ID>&alias=<local alias>`

Removes a local alias for a bucket in the namespace of a specific access key.
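
As a final sketch, adding and then removing a global alias; both calls take an empty body, and the endpoint, token, and alias are placeholders as before:

```python
import requests

BASE = "http://localhost:3903"                        # hypothetical endpoint
HEADERS = {"Authorization": "Bearer <admin token>"}   # placeholder token
params = {"id": "<bucket id>", "alias": "my-alias"}

# PUT creates the global alias, DELETE removes it; neither sends a body.
requests.put(f"{BASE}/v1/bucket/alias/global", params=params, headers=HEADERS).raise_for_status()
requests.delete(f"{BASE}/v1/bucket/alias/global", params=params, headers=HEADERS).raise_for_status()
```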
@ -1,13 +0,0 @@
optimal_layout.aux
optimal_layout.log
optimal_layout.synctex.gz
optimal_layout.bbl
optimal_layout.blg

geodistrib.aux
geodistrib.bbl
geodistrib.blg
geodistrib.log
geodistrib.out
geodistrib.synctex.gz