Factor out garage-apply-layout-diff
The new code runs the equivalent functionality within the daemon's Go code. This required making Env immutable in order to prevent race conditions (which really should have been done from the beginning anyway).
parent 41e0b56617
commit 936ca8d48f
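The immutability change comes down to one pattern: Env is passed by value, and anything that used to mutate it through a pointer now returns an updated copy for the caller to re-assign, which is why so many signatures below change from *crypticnet.Env to crypticnet.Env plus an extra return value. A minimal, self-contained sketch of that pattern follows; the Env struct and withBootstrap function here are simplified stand-ins for illustration, not the real cryptic-net types.

package main

import "fmt"

// Env is a simplified stand-in for the daemon's environment value; the real
// crypticnet.Env carries much more state. Because it is passed by value,
// goroutines holding an older copy never observe a half-applied update.
type Env struct {
    BootstrapPath string
    Hosts         []string
}

// withBootstrap returns a new Env with the bootstrap applied, leaving the
// caller's copy untouched. This mirrors the signature changes in this commit,
// e.g. LoadBootstrap going from a pointer-receiver mutation to
// func (e Env) LoadBootstrap(path string) (Env, error).
func withBootstrap(e Env, path string) (Env, error) {
    e.BootstrapPath = path
    // copy the slice before appending so the old Env's backing array is
    // never written through
    e.Hosts = append(append([]string(nil), e.Hosts...), "host-from-"+path)
    return e, nil
}

func main() {
    env := Env{Hosts: []string{"seed"}}

    env, err := withBootstrap(env, "/tmp/bootstrap.tgz")
    if err != nil {
        panic(err)
    }

    fmt.Println(env.BootstrapPath, env.Hosts)
}

The signature changes throughout this diff (LoadBootstrap, mergeDaemonIntoBootstrap, reloadBootstrap, copyBootstrapToDataDirAndReload, and friends) all follow this same take-a-value, return-a-value shape.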
@@ -26,7 +26,6 @@ in rec {
    paths = [
      garage
      minioClient
      ./src
    ];
  };

@@ -1,24 +0,0 @@

set -e -o pipefail

tmp="$(mktemp -d -t cryptic-net-garage-apply-layout-diff-XXX)"

( trap "rm -rf '$tmp'" EXIT

  tar xzf "$_BOOTSTRAP_PATH" -C "$tmp" ./hosts

  thisHostName=$(tar xzf "$_BOOTSTRAP_PATH" --to-stdout ./hostname)
  thisHostIP=$(cat "$tmp"/hosts/"$thisHostName".yml | yq '.nebula.ip')

  firstRPCPort=$(cat "$_DAEMON_YML_PATH" | yq '.storage.allocations[0].rpc_port')

  firstPeerID=$(cryptic-net-main garage-peer-keygen -danger -ip "$thisHostIP" -port "$firstRPCPort")

  export GARAGE_RPC_HOST="$firstPeerID"@"$thisHostIP":"$firstRPCPort"
  export GARAGE_RPC_SECRET=$(tar -xzf "$_BOOTSTRAP_PATH" --to-stdout "./garage/rpc-secret.txt")

  garage layout show | cryptic-net-main garage-layout-diff | while read diffLine; do
    echo "> $diffLine"
    $diffLine
  done
)
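For orientation: the removed script above waited on garage via environment variables, then piped garage layout show through cryptic-net-main garage-layout-diff and executed each line it printed. In the daemon this becomes an in-process retry loop around an admin-API call. The sketch below is illustrative only: retryUntilDone mirrors the doOnce helper added later in this diff (with a short sleep added here so the example does not spin), and applyLayout is a placeholder for the real garageApplyLayout, not its implementation.

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// retryUntilDone keeps calling fn until it succeeds or ctx is canceled. It is
// modeled on the doOnce helper introduced later in this commit, with a short
// sleep between attempts added for this standalone example.
func retryUntilDone(ctx context.Context, fn func(context.Context) error) error {
    for {
        if err := fn(ctx); err == nil {
            return nil
        } else if ctxErr := ctx.Err(); ctxErr != nil {
            return ctxErr
        }
        time.Sleep(500 * time.Millisecond)
    }
}

// applyLayout is a placeholder standing in for the daemon's garageApplyLayout
// call; here it always fails, as if garage were not reachable yet.
func applyLayout(ctx context.Context) error {
    return errors.New("garage not ready yet")
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
    defer cancel()

    // keeps retrying until the context times out, then reports why it stopped
    fmt.Println(retryUntilDone(ctx, applyLayout))
}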
@@ -16,8 +16,6 @@ package main

import (
    "cryptic-net/cmd/entrypoint"
    garage_layout_diff "cryptic-net/cmd/garage-layout-diff"
    garage_peer_keygen "cryptic-net/cmd/garage-peer-keygen"
    nebula_entrypoint "cryptic-net/cmd/nebula-entrypoint"
    update_global_bucket "cryptic-net/cmd/update-global-bucket"
    "fmt"
@@ -31,8 +29,6 @@ type mainFn struct {

var mainFns = []mainFn{
    {"entrypoint", entrypoint.Main},
    {"garage-layout-diff", garage_layout_diff.Main},
    {"garage-peer-keygen", garage_peer_keygen.Main},
    {"nebula-entrypoint", nebula_entrypoint.Main},
    {"update-global-bucket", update_global_bucket.Main},
}
@@ -10,10 +10,8 @@ import (
    "encoding/hex"
    "errors"
    "fmt"
    "log"
    "net"
    "os"
    "strconv"
    "strings"

    "github.com/cryptic-io/pmux/pmuxlib"
@@ -48,63 +46,6 @@ func readAdmin(path string) (admin.Admin, error) {
    return admin.FromReader(f)
}

func garageInitializeGlobalBucket(
    ctx context.Context,
    adminClient *garage.AdminClient,
    globalBucketCreds garage.S3APICredentials,
) error {

    // first attempt to import the key
    err := adminClient.Do(ctx, nil, "POST", "/v0/key/import", map[string]string{
        "accessKeyId": globalBucketCreds.ID,
        "secretAccessKey": globalBucketCreds.Secret,
        "name": "shared-global-bucket-key",
    })

    if err != nil {
        return fmt.Errorf("importing global bucket key into garage: %w", err)
    }

    // create global bucket
    err = adminClient.Do(ctx, nil, "POST", "/v0/bucket", map[string]string{
        "globalAlias": garage.GlobalBucket,
    })

    if err != nil {
        return fmt.Errorf("creating global bucket: %w", err)
    }

    // retrieve newly created bucket's id
    var getBucketRes struct {
        ID string `json:"id"`
    }

    err = adminClient.Do(
        ctx, &getBucketRes,
        "GET", "/v0/bucket?globalAlias="+garage.GlobalBucket, nil,
    )

    if err != nil {
        return fmt.Errorf("fetching global bucket id: %w", err)
    }

    // allow shared global bucket key to perform all operations
    err = adminClient.Do(ctx, nil, "POST", "/v0/bucket/allow", map[string]interface{}{
        "bucketId": getBucketRes.ID,
        "accessKeyId": globalBucketCreds.ID,
        "permissions": map[string]bool{
            "read": true,
            "write": true,
        },
    })

    if err != nil {
        return fmt.Errorf("granting permissions to shared global bucket key: %w", err)
    }

    return nil
}

var subCmdAdminCreateNetwork = subCmd{
    name: "create-network",
    descr: "Creates a new cryptic-net network, outputting the resulting admin.tgz to stdout",
@@ -220,8 +161,7 @@ var subCmdAdminCreateNetwork = subCmd{
            GarageGlobalBucketS3APICredentials: garage.NewS3APICredentials(),
        }

        // this will also write the bootstrap file
        if err := mergeDaemonIntoBootstrap(env); err != nil {
        if env, err = mergeDaemonIntoBootstrap(env); err != nil {
            return fmt.Errorf("merging daemon.yml into bootstrap data: %w", err)
        }

@@ -250,7 +190,7 @@ var subCmdAdminCreateNetwork = subCmd{
        ctx, cancel := context.WithCancel(env.Context)
        pmuxDoneCh := make(chan struct{})

        log.Printf("starting child processes")
        fmt.Fprintln(os.Stderr, "starting child processes")
        go func() {
            pmuxlib.Run(ctx, pmuxConfig)
            close(pmuxDoneCh)
@@ -258,52 +198,22 @@ var subCmdAdminCreateNetwork = subCmd{

        defer func() {
            cancel()
            log.Printf("waiting for child processes to exit")
            fmt.Fprintln(os.Stderr, "waiting for child processes to exit")
            <-pmuxDoneCh
        }()

        var garageAdminClient *garage.AdminClient
        garageAdminClients := map[string]*garage.AdminClient{}

        for _, alloc := range daemon.Storage.Allocations {

            garageAdminAddr := net.JoinHostPort(ip.String(), strconv.Itoa(alloc.AdminPort))

            garageAdminClient = garage.NewAdminClient(
                garageAdminAddr,
                env.Bootstrap.GarageAdminToken,
            )

            garageAdminClients[garageAdminAddr] = garageAdminClient
        fmt.Fprintln(os.Stderr, "waiting for garage instances to come online")
        if err := waitForGarage(ctx, env); err != nil {
            return fmt.Errorf("waiting for garage to start up: %w", err)
        }

        log.Printf("waiting for garage instances to come online")
        for garageAdminAddr, garageAdminClient := range garageAdminClients {
            if err := garageAdminClient.Wait(ctx); err != nil {
                return fmt.Errorf("waiting for garage instance %q to start up: %w", garageAdminAddr, err)
            }
        }

        log.Printf("applying initial garage layout")
        err = garageApplyLayout(
            ctx,
            garageAdminClient,
            *hostName, ip.String(),
            daemon.Storage.Allocations,
        )

        if err != nil {
        fmt.Fprintln(os.Stderr, "applying initial garage layout")
        if err := garageApplyLayout(ctx, env); err != nil {
            return fmt.Errorf("applying initial garage layout: %w", err)
        }

        log.Printf("initializing garage shared global bucket")
        err = garageInitializeGlobalBucket(
            ctx,
            garageAdminClient,
            env.Bootstrap.GarageGlobalBucketS3APICredentials,
        )

        if err != nil {
        fmt.Fprintln(os.Stderr, "initializing garage shared global bucket")
        if err := garageInitializeGlobalBucket(ctx, env); err != nil {
            return fmt.Errorf("initializing garage shared global bucket: %w", err)
        }

@@ -312,13 +222,13 @@ var subCmdAdminCreateNetwork = subCmd{
            return fmt.Errorf("initializing garage shared global bucket client: %w", err)
        }

        log.Printf("writing data for this host into garage")
        fmt.Fprintln(os.Stderr, "writing data for this host into garage")
        err = bootstrap.PutGarageBoostrapHost(ctx, garageS3Client, env.Bootstrap.ThisHost())
        if err != nil {
            return fmt.Errorf("putting host data into garage: %w", err)
        }

        log.Printf("cluster initialized successfully, writing admin.tgz to stdout")
        fmt.Fprintln(os.Stderr, "cluster initialized successfully, writing admin.tgz to stdout")

        err = admin.Admin{
            CreationParams: adminCreationParams,
@@ -40,44 +40,44 @@ import (

// creates a new bootstrap file using available information from the network. If
// the new bootstrap file is different than the existing one, the existing one
// is overwritten, ReloadBootstrap is called on env, true is returned.
func reloadBootstrap(env *crypticnet.Env, s3Client garage.S3APIClient) (bool, error) {
// is overwritten, env's bootstrap is reloaded, true is returned.
func reloadBootstrap(env crypticnet.Env, s3Client garage.S3APIClient) (crypticnet.Env, bool, error) {

    newHosts, err := bootstrap.GetGarageBootstrapHosts(env.Context, s3Client)
    if err != nil {
        return false, fmt.Errorf("getting hosts from garage: %w", err)
        return crypticnet.Env{}, false, fmt.Errorf("getting hosts from garage: %w", err)
    }

    newHostsHash, err := bootstrap.HostsHash(newHosts)
    if err != nil {
        return false, fmt.Errorf("calculating hash of new hosts: %w", err)
        return crypticnet.Env{}, false, fmt.Errorf("calculating hash of new hosts: %w", err)
    }

    currHostsHash, err := bootstrap.HostsHash(env.Bootstrap.Hosts)
    if err != nil {
        return false, fmt.Errorf("calculating hash of current hosts: %w", err)
        return crypticnet.Env{}, false, fmt.Errorf("calculating hash of current hosts: %w", err)
    }

    if bytes.Equal(newHostsHash, currHostsHash) {
        return false, nil
        return crypticnet.Env{}, false, nil
    }

    buf := new(bytes.Buffer)
    if err := env.Bootstrap.WithHosts(newHosts).WriteTo(buf); err != nil {
        return false, fmt.Errorf("writing new bootstrap file to buffer: %w", err)
        return crypticnet.Env{}, false, fmt.Errorf("writing new bootstrap file to buffer: %w", err)
    }

    if err := copyBootstrapToDataDir(env, buf); err != nil {
        return false, fmt.Errorf("copying new bootstrap file to data dir: %w", err)
    if env, err = copyBootstrapToDataDirAndReload(env, buf); err != nil {
        return crypticnet.Env{}, false, fmt.Errorf("copying new bootstrap file to data dir: %w", err)
    }

    return true, nil
    return env, true, nil
}

// runs a single pmux process for the daemon, returning only once the env.Context
// has been canceled or bootstrap info has been changed. This will always block
// until the spawned pmux has returned.
func runDaemonPmuxOnce(env *crypticnet.Env, s3Client garage.S3APIClient) error {
func runDaemonPmuxOnce(env crypticnet.Env, s3Client garage.S3APIClient) error {

    thisHost := env.Bootstrap.ThisHost()
    thisDaemon := env.ThisDaemon()
@@ -100,7 +100,6 @@ func runDaemonPmuxOnce(env *crypticnet.Env, s3Client garage.S3APIClient) error {
        }

        pmuxProcConfigs = append(pmuxProcConfigs, garageChildrenPmuxProcConfigs...)
        pmuxProcConfigs = append(pmuxProcConfigs, garageApplyLayoutDiffPmuxProcConfig(env))
    }

    pmuxProcConfigs = append(pmuxProcConfigs, pmuxlib.ProcessConfig{
@@ -126,6 +125,26 @@ func runDaemonPmuxOnce(env *crypticnet.Env, s3Client garage.S3APIClient) error {
        pmuxlib.Run(ctx, pmuxConfig)
    }()

    if len(thisDaemon.Storage.Allocations) > 0 {
        wg.Add(1)
        go func() {
            defer wg.Done()

            if err := waitForGarage(ctx, env); err != nil {
                fmt.Fprintf(os.Stderr, "aborted waiting for garage instances to start: %v\n", err)
                return
            }

            err := doOnce(ctx, func(ctx context.Context) error {
                return garageApplyLayout(ctx, env)
            })

            if err != nil {
                fmt.Fprintf(os.Stderr, "aborted applying garage layout: %v\n", err)
            }
        }()
    }

    ticker := time.NewTicker(3 * time.Minute)
    defer ticker.Stop()

@@ -139,7 +158,12 @@ func runDaemonPmuxOnce(env *crypticnet.Env, s3Client garage.S3APIClient) error {

    fmt.Fprintln(os.Stderr, "checking for changes to bootstrap")

    if changed, err := reloadBootstrap(env, s3Client); err != nil {
    var (
        changed bool
        err error
    )

    if env, changed, err = reloadBootstrap(env, s3Client); err != nil {
        return fmt.Errorf("reloading bootstrap: %w", err)

    } else if changed {
@@ -226,7 +250,7 @@ var subCmdDaemon = subCmd{
        return fmt.Errorf("opening file %q: %w", env.BootstrapPath, err)
    }

    err = copyBootstrapToDataDir(env, f)
    env, err = copyBootstrapToDataDirAndReload(env, f)
    f.Close()

    if err != nil {
@@ -238,12 +262,14 @@ var subCmdDaemon = subCmd{
        return fmt.Errorf("merging and writing daemon.yml file: %w", err)
    }

    var err error

    // we update this Host's data using whatever configuration has been
    // provided by daemon.yml. This way the daemon has the most
    // up-to-date possible bootstrap. This updated bootstrap will later
    // get updated in garage using update-global-bucket, so other hosts
    // will see it as well.
    if err := mergeDaemonIntoBootstrap(env); err != nil {
    if env, err = mergeDaemonIntoBootstrap(env); err != nil {
        return fmt.Errorf("merging daemon.yml into bootstrap data: %w", err)
    }

@@ -2,6 +2,7 @@ package entrypoint

import (
    "bytes"
    "context"
    crypticnet "cryptic-net"
    "cryptic-net/bootstrap"
    "fmt"
@@ -12,35 +13,31 @@ import (
    "github.com/cryptic-io/pmux/pmuxlib"
)

func copyBootstrapToDataDir(env *crypticnet.Env, r io.Reader) error {
func copyBootstrapToDataDirAndReload(env crypticnet.Env, r io.Reader) (crypticnet.Env, error) {

    path := env.DataDirBootstrapPath()
    dirPath := filepath.Dir(path)

    if err := os.MkdirAll(dirPath, 0700); err != nil {
        return fmt.Errorf("creating directory %q: %w", dirPath, err)
        return crypticnet.Env{}, fmt.Errorf("creating directory %q: %w", dirPath, err)
    }

    f, err := os.Create(path)
    if err != nil {
        return fmt.Errorf("creating file %q: %w", path, err)
        return crypticnet.Env{}, fmt.Errorf("creating file %q: %w", path, err)
    }

    _, err = io.Copy(f, r)
    f.Close()

    if err != nil {
        return fmt.Errorf("copying bootstrap file to %q: %w", path, err)
        return crypticnet.Env{}, fmt.Errorf("copying bootstrap file to %q: %w", path, err)
    }

    if err := env.LoadBootstrap(path); err != nil {
        return fmt.Errorf("loading bootstrap from %q: %w", path, err)
    }

    return nil
    return env.LoadBootstrap(path)
}

func mergeDaemonIntoBootstrap(env *crypticnet.Env) error {
func mergeDaemonIntoBootstrap(env crypticnet.Env) (crypticnet.Env, error) {
    daemon := env.ThisDaemon()
    host := env.Bootstrap.ThisHost()

@@ -64,17 +61,23 @@ func mergeDaemonIntoBootstrap(env *crypticnet.Env) error {

    buf := new(bytes.Buffer)
    if err := env.Bootstrap.WithHosts(env.Bootstrap.Hosts).WriteTo(buf); err != nil {
        return fmt.Errorf("writing new bootstrap file to buffer: %w", err)
        return crypticnet.Env{}, fmt.Errorf("writing new bootstrap file to buffer: %w", err)
    }

    if err := copyBootstrapToDataDir(env, buf); err != nil {
        return fmt.Errorf("copying new bootstrap file to data dir: %w", err)
    }

    return nil
    return copyBootstrapToDataDirAndReload(env, buf)
}

func waitForNebulaArgs(env *crypticnet.Env, args ...string) []string {
func doOnce(ctx context.Context, fn func(context.Context) error) error {
    for {
        if err := fn(ctx); err == nil {
            return nil
        } else if ctxErr := ctx.Err(); ctxErr != nil {
            return ctxErr
        }
    }
}

func waitForNebulaArgs(env crypticnet.Env, args ...string) []string {
    thisHost := env.Bootstrap.ThisHost()
    return append([]string{"wait-for-ip", thisHost.Nebula.IP}, args...)
}
@@ -13,11 +13,11 @@ import (
    "gopkg.in/yaml.v3"
)

func builtinDaemonYmlPath(env *crypticnet.Env) string {
func builtinDaemonYmlPath(env crypticnet.Env) string {
    return filepath.Join(env.AppDirPath, "etc", "daemon.yml")
}

func writeBuiltinDaemonYml(env *crypticnet.Env, w io.Writer) error {
func writeBuiltinDaemonYml(env crypticnet.Env, w io.Writer) error {

    builtinDaemonYmlPath := builtinDaemonYmlPath(env)

@@ -33,7 +33,7 @@ func writeBuiltinDaemonYml(env *crypticnet.Env, w io.Writer) error {
    return nil
}

func writeMergedDaemonYml(env *crypticnet.Env, userDaemonYmlPath string) error {
func writeMergedDaemonYml(env crypticnet.Env, userDaemonYmlPath string) error {

    builtinDaemonYmlPath := builtinDaemonYmlPath(env)

@@ -14,7 +14,30 @@ import (
    "github.com/cryptic-io/pmux/pmuxlib"
)

func waitForGarageArgs(env *crypticnet.Env, args ...string) []string {
func waitForGarage(ctx context.Context, env crypticnet.Env) error {

    for _, alloc := range env.ThisDaemon().Storage.Allocations {

        adminAddr := net.JoinHostPort(
            env.Bootstrap.ThisHost().Nebula.IP,
            strconv.Itoa(alloc.AdminPort),
        )

        adminClient := garage.NewAdminClient(
            adminAddr,
            env.Bootstrap.GarageAdminToken,
        )

        if err := adminClient.Wait(ctx); err != nil {
            return fmt.Errorf("waiting for instance %q to start up: %w", adminAddr, err)
        }
    }

    return nil

}

func waitForGarageArgs(env crypticnet.Env, args ...string) []string {

    thisHost := env.Bootstrap.ThisHost()
    allocs := env.ThisDaemon().Storage.Allocations
@@ -38,7 +61,7 @@ func waitForGarageArgs(env *crypticnet.Env, args ...string) []string {
}

func garageWriteChildConf(
    env *crypticnet.Env,
    env crypticnet.Env,
    alloc crypticnet.DaemonYmlStorageAllocation,
) (
    string, error,
@@ -93,7 +116,7 @@ func garageWriteChildConf(
    return garageTomlPath, nil
}

func garageChildrenPmuxProcConfigs(env *crypticnet.Env) ([]pmuxlib.ProcessConfig, error) {
func garageChildrenPmuxProcConfigs(env crypticnet.Env) ([]pmuxlib.ProcessConfig, error) {

    var pmuxProcConfigs []pmuxlib.ProcessConfig

@@ -116,7 +139,7 @@ func garageChildrenPmuxProcConfigs(env *crypticnet.Env) ([]pmuxlib.ProcessConfig
    return pmuxProcConfigs, nil
}

func garageApplyLayoutDiffPmuxProcConfig(env *crypticnet.Env) pmuxlib.ProcessConfig {
func garageApplyLayoutDiffPmuxProcConfig(env crypticnet.Env) pmuxlib.ProcessConfig {
    return pmuxlib.ProcessConfig{
        Name: "garage-apply-layout-diff",
        Cmd: "bash",
@@ -125,12 +148,73 @@ func garageApplyLayoutDiffPmuxProcConfig(env *crypticnet.Env) pmuxlib.ProcessCon
    }
}

func garageApplyLayout(
    ctx context.Context,
    adminClient *garage.AdminClient,
    hostName, ipStr string,
    allocs []crypticnet.DaemonYmlStorageAllocation,
) error {
func garageInitializeGlobalBucket(ctx context.Context, env crypticnet.Env) error {

    var (
        adminClient = env.GarageAdminClient()
        globalBucketCreds = env.Bootstrap.GarageGlobalBucketS3APICredentials
    )

    // first attempt to import the key
    err := adminClient.Do(ctx, nil, "POST", "/v0/key/import", map[string]string{
        "accessKeyId": globalBucketCreds.ID,
        "secretAccessKey": globalBucketCreds.Secret,
        "name": "shared-global-bucket-key",
    })

    if err != nil {
        return fmt.Errorf("importing global bucket key into garage: %w", err)
    }

    // create global bucket
    err = adminClient.Do(ctx, nil, "POST", "/v0/bucket", map[string]string{
        "globalAlias": garage.GlobalBucket,
    })

    if err != nil {
        return fmt.Errorf("creating global bucket: %w", err)
    }

    // retrieve newly created bucket's id
    var getBucketRes struct {
        ID string `json:"id"`
    }

    err = adminClient.Do(
        ctx, &getBucketRes,
        "GET", "/v0/bucket?globalAlias="+garage.GlobalBucket, nil,
    )

    if err != nil {
        return fmt.Errorf("fetching global bucket id: %w", err)
    }

    // allow shared global bucket key to perform all operations
    err = adminClient.Do(ctx, nil, "POST", "/v0/bucket/allow", map[string]interface{}{
        "bucketId": getBucketRes.ID,
        "accessKeyId": globalBucketCreds.ID,
        "permissions": map[string]bool{
            "read": true,
            "write": true,
        },
    })

    if err != nil {
        return fmt.Errorf("granting permissions to shared global bucket key: %w", err)
    }

    return nil
}

func garageApplyLayout(ctx context.Context, env crypticnet.Env) error {

    var (
        adminClient = env.GarageAdminClient()
        thisHost = env.Bootstrap.ThisHost()
        hostName = thisHost.Name
        ip = thisHost.Nebula.IP
        allocs = env.ThisDaemon().Storage.Allocations
    )

    type peerLayout struct {
        Capacity int `json:"capacity"`
@@ -144,7 +228,7 @@ func garageApplyLayout(
    for _, alloc := range allocs {

        peer := garage.Peer{
            IP: ipStr,
            IP: ip,
            RPCPort: alloc.RPCPort,
            S3APIPort: alloc.S3APIPort,
        }

@@ -14,7 +14,7 @@ type subCmdCtx struct {
    subCmd subCmd // the subCmd itself
    args []string // command-line arguments, excluding the subCmd itself.
    subCmdNames []string // names of subCmds so far, including this one
    env *crypticnet.Env
    env crypticnet.Env
}

type subCmd struct {

@@ -1,256 +0,0 @@
package garage_layout_diff

// This binary accepts the output of `garage layout show` on its stdin, and
// it outputs a newline-delimited set of `garage layout $cmd` strings on
// stdout. The layout commands which are output will, if run, bring the current
// node's layout on the cluster up-to-date with what's in daemon.yml.

import (
    "bufio"
    "bytes"
    "errors"
    "fmt"
    "io"
    "os"
    "strconv"
    "strings"

    crypticnet "cryptic-net"
    "cryptic-net/garage"
)

type clusterNode struct {
    ID string
    Zone string
    Capacity int
}

type clusterNodes []clusterNode

func (n clusterNodes) get(id string) (clusterNode, bool) {

    var ok bool

    for _, node := range n {

        if len(node.ID) > len(id) {
            ok = strings.HasPrefix(node.ID, id)
        } else {
            ok = strings.HasPrefix(id, node.ID)
        }

        if ok {
            return node, true
        }
    }

    return clusterNode{}, false
}

var currClusterLayoutVersionB = []byte("Current cluster layout version:")

func readCurrNodes(r io.Reader) (clusterNodes, int, error) {

    input, err := io.ReadAll(r)

    if err != nil {
        return nil, 0, fmt.Errorf("reading stdin: %w", err)
    }

    // NOTE I'm not sure if this check should be turned on or not. It simplifies
    // things to turn it off and just say that no one should ever be manually
    // messing with the layout, but on the other hand maybe someone might?
    //
    //if i := bytes.Index(input, []byte("==== STAGED ROLE CHANGES ====")); i >= 0 {
    //    return nil, 0, errors.New("cluster layout has staged changes already, won't modify")
    //}

    /* The first section of input will always be something like this:

    ```
    ==== CURRENT CLUSTER LAYOUT ====
    ID Tags Zone Capacity
    AAA… ZZZ 1
    BBB… ZZZ 1
    CCC… ZZZ 1

    Current cluster layout version: N
    ```

    There may be more, depending on if the cluster already has changes staged,
    but this will definitely be first. */

    i := bytes.Index(input, currClusterLayoutVersionB)

    if i < 0 {
        return nil, 0, errors.New("no current cluster layout found in input")
    }

    input, tail := input[:i], input[i:]

    var currNodes clusterNodes

    for inputBuf := bufio.NewReader(bytes.NewBuffer(input)); ; {

        line, err := inputBuf.ReadString('\n')

        if errors.Is(err, io.EOF) {
            break
        } else if err != nil {
            return nil, 0, fmt.Errorf("reading input line by line from buffer: %w", err)
        }

        fields := strings.Fields(line)

        if len(fields) < 3 {
            continue
        }

        id := fields[0]

        // The ID will always be given ending in this fucked up ellipses
        if trimmedID := strings.TrimSuffix(id, "…"); id == trimmedID {
            continue
        } else {
            id = trimmedID
        }

        zone := fields[1]

        capacity, err := strconv.Atoi(fields[2])

        if err != nil {
            return nil, 0, fmt.Errorf("parsing capacity %q: %w", fields[2], err)
        }

        currNodes = append(currNodes, clusterNode{
            ID: id,
            Zone: zone,
            Capacity: capacity,
        })
    }

    // parse current cluster version from tail
    tail = bytes.TrimPrefix(tail, currClusterLayoutVersionB)

    if i := bytes.Index(tail, []byte("\n")); i > 0 {
        tail = tail[:i]
    }

    tail = bytes.TrimSpace(tail)

    version, err := strconv.Atoi(string(tail))

    if err != nil {
        return nil, 0, fmt.Errorf("parsing version string from %q: %w", tail, err)
    }

    return currNodes, version, nil
}

func readExpNodes(env *crypticnet.Env) clusterNodes {

    thisHost := env.Bootstrap.ThisHost()

    var expNodes clusterNodes

    for _, alloc := range env.ThisDaemon().Storage.Allocations {

        peer := garage.Peer{
            IP: thisHost.Nebula.IP,
            RPCPort: alloc.RPCPort,
            S3APIPort: alloc.S3APIPort,
        }

        id := peer.RPCPeerID()

        expNodes = append(expNodes, clusterNode{
            ID: id,
            Zone: env.Bootstrap.HostName,
            Capacity: alloc.Capacity / 100,
        })
    }

    return expNodes
}

// NOTE: The id formatting for currNodes and expNodes is different; expNodes has
// fully expanded ids, currNodes are abbreviated.

func diff(currNodes, expNodes clusterNodes) []string {

    var lines []string

    for _, node := range currNodes {

        if _, ok := expNodes.get(node.ID); !ok {
            lines = append(
                lines,
                fmt.Sprintf("garage layout remove %s", node.ID),
            )
        }
    }

    for _, expNode := range expNodes {

        currNode, ok := currNodes.get(expNode.ID)

        currNode.ID = expNode.ID // so that equality checking works

        if ok && currNode == expNode {
            continue
        }

        lines = append(
            lines,
            fmt.Sprintf(
                "garage layout assign %s -z %s -c %d",
                expNode.ID,
                expNode.Zone,
                expNode.Capacity,
            ),
        )
    }

    return lines
}

func Main() {

    env, err := crypticnet.ReadEnv()

    if err != nil {
        panic(fmt.Errorf("reading environment: %w", err))
    }

    currNodes, currVersion, err := readCurrNodes(os.Stdin)

    if err != nil {
        panic(fmt.Errorf("reading current layout from stdin: %w", err))
    }

    thisCurrNodes := make(clusterNodes, 0, len(currNodes))

    for _, node := range currNodes {

        if env.Bootstrap.HostName != node.Zone {
            continue
        }

        thisCurrNodes = append(thisCurrNodes, node)
    }

    expNodes := readExpNodes(env)

    lines := diff(thisCurrNodes, expNodes)

    if len(lines) == 0 {
        return
    }

    for _, line := range lines {
        fmt.Println(line)
    }

    fmt.Printf("garage layout apply --version %d\n", currVersion+1)
}
@@ -1,137 +0,0 @@
package garage_layout_diff

import (
    "bytes"
    "reflect"
    "strconv"
    "testing"
)

func TestReadCurrNodes(t *testing.T) {

    expNodes := clusterNodes{
        {
            ID: "AAA",
            Zone: "XXX",
            Capacity: 1,
        },
        {
            ID: "BBB",
            Zone: "YYY",
            Capacity: 2,
        },
        {
            ID: "CCC",
            Zone: "ZZZ",
            Capacity: 3,
        },
    }

    expVersion := 666

    tests := []struct {
        input string
        expNodes clusterNodes
        expVersion int
    }{
        {
            input: `
==== CURRENT CLUSTER LAYOUT ====
ID Tags Zone Capacity
AAA… XXX 1
BBB… YYY 2
CCC… ZZZ 3

Current cluster layout version: 666
`,
            expNodes: expNodes,
            expVersion: expVersion,
        },
    }

    for i, test := range tests {
        t.Run(strconv.Itoa(i), func(t *testing.T) {

            gotNodes, gotVersion, err := readCurrNodes(
                bytes.NewBufferString(test.input),
            )

            if err != nil {
                t.Fatal(err)
            }

            if gotVersion != test.expVersion {
                t.Fatalf(
                    "expected version %d, got %d",
                    test.expVersion,
                    gotVersion,
                )
            }

            if !reflect.DeepEqual(gotNodes, test.expNodes) {
                t.Fatalf(
                    "expected nodes: %#v,\ngot nodes: %#v",
                    gotNodes,
                    test.expNodes,
                )
            }

        })
    }
}

func TestDiff(t *testing.T) {

    currNodes := clusterNodes{
        {
            ID: "1",
            Zone: "zone",
            Capacity: 1,
        },
        {
            ID: "2",
            Zone: "zone",
            Capacity: 1,
        },
        {
            ID: "3",
            Zone: "zone",
            Capacity: 1,
        },
        {
            ID: "4",
            Zone: "zone",
            Capacity: 1,
        },
    }

    expNodes := clusterNodes{
        {
            ID: "111",
            Zone: "zone",
            Capacity: 1,
        },
        {
            ID: "222",
            Zone: "zone2",
            Capacity: 1,
        },
        {
            ID: "333",
            Zone: "zone",
            Capacity: 10,
        },
    }

    expLines := []string{
        `garage layout remove 4`,
        `garage layout assign 222 -z zone2 -c 1`,
        `garage layout assign 333 -z zone -c 10`,
    }

    gotLines := diff(currNodes, expNodes)

    if !reflect.DeepEqual(gotLines, expLines) {
        t.Fatalf("expected lines: %#v,\ngot lines: %#v", expLines, gotLines)
    }
}
@@ -1,65 +0,0 @@
package garage_peer_keygen

/*

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! !!
!! DANGER !!
!! !!
!! This script will deterministically produce public/private keys given some !!
!! arbitrary input. This is NEVER what you want. It's only being used in !!
!! cryptic-net for a very specific purpose for which I think it's ok and is !!
!! very necessary, and people are probably _still_ going to yell at me. !!
!! !!
!! DONT USE THIS. !!
!! !!
!! - Brian !!
!! !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

*/

import (
    "encoding/hex"
    "flag"
    "fmt"
    "io/ioutil"
    "os"

    "cryptic-net/garage"
)

func Main() {

    ip := flag.String("ip", "", "Internal IP address of the node to generate a key for")
    port := flag.Int("port", 0, "RPC port number for the garage instance to generate a key for")
    outPriv := flag.String("out-priv", "", "The path to the private key which should be created, if given.")
    outPub := flag.String("out-pub", "", "The path to the public key which should be created, if given.")
    danger := flag.Bool("danger", false, "Set this flag to indicate you understand WHY this binary should NEVER be used (see source code).")
    flag.Parse()

    if len(*ip) == 0 || *port == 0 || !*danger {
        panic("The arguments -ip, -port, and -danger are required")
    }

    peer := garage.Peer{
        IP: *ip,
        RPCPort: *port,
    }

    pubKey, privKey := peer.RPCPeerKey()

    fmt.Fprintln(os.Stdout, hex.EncodeToString(pubKey))

    if *outPub != "" {
        if err := ioutil.WriteFile(*outPub, pubKey, 0444); err != nil {
            panic(fmt.Errorf("writing public key to %q: %w", *outPub, err))
        }
    }

    if *outPriv != "" {
        if err := ioutil.WriteFile(*outPriv, privKey, 0400); err != nil {
            panic(fmt.Errorf("writing private key to %q: %w", *outPriv, err))
        }
    }
}
@@ -3,13 +3,16 @@ package crypticnet

import (
    "context"
    "cryptic-net/bootstrap"
    "cryptic-net/garage"
    "cryptic-net/yamlutil"
    "errors"
    "fmt"
    "io/fs"
    "net"
    "os"
    "os/signal"
    "path/filepath"
    "strconv"
    "sync"
    "syscall"

@@ -56,24 +59,24 @@ func getAppDirPath() string {
// If bootstrapOptional is true then NewEnv will first check if a bootstrap file
// can be found in the expected places, and if not then it will not populate
// BootstrapFS or any other fields based on it.
func NewEnv(bootstrapOptional bool) (*Env, error) {
func NewEnv(bootstrapOptional bool) (Env, error) {

    runtimeDirPath := filepath.Join(xdg.RuntimeDir, "cryptic-net")
    appDirPath := getAppDirPath()

    env := &Env{
    env := Env{
        AppDirPath: appDirPath,
        DaemonYmlPath: filepath.Join(runtimeDirPath, "daemon.yml"),
        RuntimeDirPath: runtimeDirPath,
        DataDirPath: filepath.Join(xdg.DataHome, "cryptic-net"),
    }

    return env, env.init(bootstrapOptional)
    return env.init(bootstrapOptional)
}

// ReadEnv reads an Env from the process's environment variables, rather than
// calculating like NewEnv does.
func ReadEnv() (*Env, error) {
func ReadEnv() (Env, error) {

    var err error

@@ -91,7 +94,7 @@ func ReadEnv() (*Env, error) {
        return val
    }

    env := &Env{
    env := Env{
        AppDirPath: getAppDirPath(),
        DaemonYmlPath: readEnv(DaemonYmlPathEnvVar),
        RuntimeDirPath: readEnv(RuntimeDirPathEnvVar),
@@ -99,35 +102,35 @@ func ReadEnv() (*Env, error) {
    }

    if err != nil {
        return nil, err
        return Env{}, err
    }

    return env, env.init(false)
    return env.init(false)
}

// DataDirBootstrapPath returns the path to the bootstrap file within the user's
// data dir. If the file does not exist there it will be found in the AppDirPath
// by ReloadBootstrap.
func (e *Env) DataDirBootstrapPath() string {
func (e Env) DataDirBootstrapPath() string {
    return filepath.Join(e.DataDirPath, "bootstrap.tgz")
}

// LoadBootstrap sets BootstrapPath to the given value, and loads BootstrapFS
// and all derived fields based on that.
func (e *Env) LoadBootstrap(path string) error {
// LoadBootstrap loads a Bootstrap from the given path, and returns a copy of
// the Env with that Bootstrap set along with the BootstrapPath (or an error).
func (e Env) LoadBootstrap(path string) (Env, error) {

    var err error

    if e.Bootstrap, err = bootstrap.FromFile(path); err != nil {
        return fmt.Errorf("parsing bootstrap.tgz at %q: %w", path, err)
        return Env{}, fmt.Errorf("parsing bootstrap.tgz at %q: %w", path, err)
    }

    e.BootstrapPath = path

    return nil
    return e, nil
}

func (e *Env) initBootstrap(bootstrapOptional bool) error {
func (e Env) initBootstrap(bootstrapOptional bool) (Env, error) {

    exists := func(path string) (bool, error) {
        if _, err := os.Stat(path); errors.Is(err, fs.ErrNotExist) {
@@ -145,7 +148,7 @@ func (e *Env) initBootstrap(bootstrapOptional bool) error {
    bootstrapPath := e.DataDirBootstrapPath()

    if exists, err := exists(bootstrapPath); err != nil {
        return fmt.Errorf("determining if %q exists: %w", bootstrapPath, err)
        return Env{}, fmt.Errorf("determining if %q exists: %w", bootstrapPath, err)

    } else if exists {
        return e.LoadBootstrap(bootstrapPath)
@@ -158,20 +161,20 @@ func (e *Env) initBootstrap(bootstrapOptional bool) error {
        bootstrapPath := filepath.Join(e.AppDirPath, "share/bootstrap.tgz")

        if exists, err := exists(bootstrapPath); err != nil {
            return fmt.Errorf("determining if %q exists: %w", bootstrapPath, err)
            return Env{}, fmt.Errorf("determining if %q exists: %w", bootstrapPath, err)

        } else if !exists && !bootstrapOptional {
            return fmt.Errorf("boostrap file not found at %q", bootstrapPath)
            return Env{}, fmt.Errorf("boostrap file not found at %q", bootstrapPath)

        } else if exists {
            return e.LoadBootstrap(bootstrapPath)
        }
    }

    return nil
    return e, nil
}

func (e *Env) init(bootstrapOptional bool) error {
func (e Env) init(bootstrapOptional bool) (Env, error) {

    var cancel context.CancelFunc
    e.Context, cancel = context.WithCancel(context.Background())
@@ -191,16 +194,12 @@ func (e *Env) init(bootstrapOptional bool) error {
        os.Exit(1)
    }()

    if err := e.initBootstrap(bootstrapOptional); err != nil {
        return fmt.Errorf("initializing bootstrap data: %w", err)
    }

    return nil
    return e.initBootstrap(bootstrapOptional)
}

// ToMap returns the Env as a map of key/value strings. If this map is set into
// a process's environment, then that process can read it back using ReadEnv.
func (e *Env) ToMap() map[string]string {
func (e Env) ToMap() map[string]string {
    return map[string]string{
        DaemonYmlPathEnvVar: e.DaemonYmlPath,
        BootstrapPathEnvVar: e.BootstrapPath,
@@ -211,7 +210,7 @@ func (e *Env) ToMap() map[string]string {

// ThisDaemon returns the DaemonYml (loaded from DaemonYmlPath) for the
// currently running process.
func (e *Env) ThisDaemon() DaemonYml {
func (e Env) ThisDaemon() DaemonYml {
    e.thisDaemonOnce.Do(func() {
        if err := yamlutil.LoadYamlFile(&e.thisDaemon, e.DaemonYmlPath); err != nil {
            panic(err)
@@ -221,6 +220,22 @@ func (e *Env) ThisDaemon() DaemonYml {
}

// BinPath returns the absolute path to a binary in the AppDir.
func (e *Env) BinPath(name string) string {
func (e Env) BinPath(name string) string {
    return filepath.Join(e.AppDirPath, "bin", name)
}

// GarageAdminClient will return an AdminClient for a local garage instance, or
// it will _panic_ if there is no local instance configured.
func (e Env) GarageAdminClient() *garage.AdminClient {

    thisHost := e.Bootstrap.ThisHost()
    thisDaemon := e.ThisDaemon()

    return garage.NewAdminClient(
        net.JoinHostPort(
            thisHost.Nebula.IP,
            strconv.Itoa(thisDaemon.Storage.Allocations[0].AdminPort),
        ),
        e.Bootstrap.GarageAdminToken,
    )
}