Compare commits

..

3 Commits

Author SHA1 Message Date
Brian Picciano
cf52cbff52 Update daemon process tree diagram 2022-10-16 15:54:51 +02:00
Brian Picciano
93bdd3ebd4 Update host's config in bootstrap using daemon.yml prior to starting pmux
Previously, if a host's `daemon.yml` was changed, the daemon would first
start up, load the new daemon.yml, persist the host's new configuration
to garage using `update-garage-host`, pull that config back down and
persist it to the bootstrap in `runDaemonPmuxOnce`, and then restart all
child processes so they picked up the new config.

Now, once `daemon.yml` is loaded in, we immediately merge it into this
host's bootstrap file and persist it, prior to ever starting child
processes. This removes the need to restart those processes at startup.

This change also allows the bootstrap file to be the sole repository of
information required to pick a garage node to connect to, since it is
presumably always as up-to-date as it can possibly be. This allows for
removing some more logic from `Env`.
2022-10-16 15:38:15 +02:00
Brian Picciano
51b2fbba36 Don't support legacy bootstrap format, we have to redo all bootstraps anyway 2022-10-16 15:11:49 +02:00
9 changed files with 143 additions and 201 deletions
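
The second commit message above describes the new flow: daemon.yml is folded into this host's bootstrap entry and persisted before any child process starts. Below is a condensed sketch of the block added to the daemon subcommand further down; the wrapper function name is hypothetical, the `env` and `bootstrap` identifiers come from that diff, error handling is abbreviated, and imports (`bytes`, `fmt`, the repo's `bootstrap` package) are assumed.

// Sketch only: condensed from the block added to the daemon subcommand below.
func mergeDaemonYmlIntoBootstrap(env *crypticnet.Env) error {

	daemon := env.ThisDaemon()       // merged daemon.yml; only valid after writeDaemonYml
	host := env.Bootstrap.ThisHost() // this host's entry in the bootstrap

	// Overwrite the bootstrap's view of this host with what daemon.yml says.
	host.Nebula.PublicAddr = daemon.VPN.PublicAddr
	host.Garage = nil
	if allocs := daemon.Storage.Allocations; len(allocs) > 0 {
		host.Garage = new(bootstrap.GarageHost)
		for _, alloc := range allocs {
			host.Garage.Instances = append(host.Garage.Instances, bootstrap.GarageHostInstance{
				RPCPort:   alloc.RPCPort,
				S3APIPort: alloc.S3APIPort,
				WebPort:   alloc.WebPort,
			})
		}
	}
	env.Bootstrap.Hosts[host.Name] = host

	// Persist the merged bootstrap before any child process is started, so
	// children never need to be restarted just to pick up daemon.yml changes.
	buf := new(bytes.Buffer)
	if err := env.Bootstrap.WithHosts(env.Bootstrap.Hosts).WriteTo(buf); err != nil {
		return fmt.Errorf("writing new bootstrap file to buffer: %w", err)
	}
	return copyBootstrapToDataDir(env, buf)
}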

View File

@@ -11,10 +11,12 @@ state AppDir {
 AppRun : * Set PATH to APPDIR/bin
 }
-state "./bin/entrypoint" as entrypoint {
+state "./bin/cryptic-net-main entrypoint daemon -c ./daemon.yml" as entrypoint {
+entrypoint : * Merge given and default daemon.yml files
 entrypoint : * Create runtime dir at $_RUNTIME_DIR_PATH
 entrypoint : * Lock runtime dir
-entrypoint : * Merge given and default daemon.yml files
+entrypoint : * Copy bootstrap.tgz into $_DATA_DIR_PATH, if it's not there
+entrypoint : * Merge daemon.yml config into bootstrap.tgz
 entrypoint : * Run child processes
 }
@@ -30,39 +32,36 @@ state AppDir {
 entrypoint --> dnsmasqEntrypoint : child
 dnsmasqEntrypoint --> dnsmasq : exec
-state "./bin/nebula-entrypoint" as nebulaEntrypoint {
+state "./bin/cryptic-net-main nebula-entrypoint" as nebulaEntrypoint {
 nebulaEntrypoint : * Create $_RUNTIME_DIR_PATH/nebula.yml
 }
 state "./bin/nebula -config $_RUNTIME_DIR_PATH/nebula.yml" as nebula
-state "./bin/nebula-update-global-bucket" as nebulaUpdateGlobalBucket {
-nebulaUpdateGlobalBucket : * Runs once then exits
-nebulaUpdateGlobalBucket : * Updates network topo data in garage global bucket (used for bootstrapping)
-}
 entrypoint --> nebulaEntrypoint : child
 nebulaEntrypoint --> nebula : exec
-nebulaEntrypoint --> nebulaUpdateGlobalBucket : child
-state "./bin/garage-entrypoint" as garageEntrypoint {
+state "./bin/cryptic-net-main garage-entrypoint" as garageEntrypoint {
 garageEntrypoint : * Create $_RUNTIME_DIR_PATH/garage-N.toml\n (one per storage allocation)
 garageEntrypoint : * Run child processes
 }
 state "./bin/garage -c $_RUNTIME_DIR_PATH/garage-N.toml server" as garage
-state "./bin/garage-apply-layout-diff" as garageApplyLayoutDiff {
+state "./bin/cryptic-net-main garage-apply-layout-diff" as garageApplyLayoutDiff {
 garageApplyLayoutDiff : * Runs once then exits
 garageApplyLayoutDiff : * Updates cluster topo
 }
-state "./bin/garage-update-global-bucket" as garageUpdateGlobalBucket {
-garageUpdateGlobalBucket : * Runs once then exits
-garageUpdateGlobalBucket : * Updates cluster topo data in garage global bucket (used for bootstrapping)
-}
 entrypoint --> garageEntrypoint : child (only if >1 storage allocation defined in daemon.yml)
 garageEntrypoint --> garage : child (one per storage allocation)
 garageEntrypoint --> garageApplyLayoutDiff : child
-garageEntrypoint --> garageUpdateGlobalBucket : child
+state "./bin/cryptic-net-main update-global-bucket" as updateGlobalBucket {
+updateGlobalBucket : * Runs once then exits
+updateGlobalBucket : * Updates the bootstrap data for the host in garage for other hosts to query
+}
+entrypoint --> updateGlobalBucket : child
 }
 @enduml

File diff suppressed because one or more lines are too long


View File

@@ -2,6 +2,7 @@ package bootstrap

 import (
 	"cryptic-net/garage"
+	"fmt"
 )

 // Paths within the bootstrap FS related to garage.
@@ -45,3 +46,41 @@ func (b Bootstrap) GarageRPCPeerAddrs() []string {
 	}

 	return addrs
 }
+
+// ChooseGaragePeer returns a Peer for a garage instance from the network. It
+// will prefer a garage instance on this particular host, if there is one, but
+// will otherwise return a random endpoint.
+func (b Bootstrap) ChooseGaragePeer() garage.Peer {
+
+	thisHost := b.ThisHost()
+
+	if thisHost.Garage != nil && len(thisHost.Garage.Instances) > 0 {
+		inst := thisHost.Garage.Instances[0]
+		return garage.Peer{
+			IP:        thisHost.Nebula.IP,
+			RPCPort:   inst.RPCPort,
+			S3APIPort: inst.S3APIPort,
+		}
+	}
+
+	for _, peer := range b.GaragePeers() {
+		return peer
+	}
+
+	panic("no garage instances configured")
+}
+
+// GlobalBucketS3APIClient returns an S3 client pre-configured with access to
+// the global bucket.
+func (b Bootstrap) GlobalBucketS3APIClient() (garage.S3APIClient, error) {
+
+	addr := b.ChooseGaragePeer().S3APIAddr()
+	creds := b.GarageGlobalBucketS3APICredentials
+
+	client, err := garage.NewS3APIClient(addr, creds)
+	if err != nil {
+		return nil, fmt.Errorf("connecting to garage S3 API At %q: %w", addr, err)
+	}
+
+	return client, err
+}
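
With these methods hanging off Bootstrap rather than Env, any caller holding a loaded bootstrap can reach garage without consulting daemon.yml. A minimal usage sketch, mirroring the call sites updated in the diffs below (a code fragment, assuming `env.Bootstrap` has already been loaded):

	// ChooseGaragePeer prefers a garage instance on this host, else any known peer.
	s3APIAddr := env.Bootstrap.ChooseGaragePeer().S3APIAddr()

	// GlobalBucketS3APIClient wraps the same peer selection plus the stored credentials.
	client, err := env.Bootstrap.GlobalBucketS3APIClient()
	if err != nil {
		return fmt.Errorf("creating client for global bucket: %w", err)
	}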

View File

@@ -1,7 +1,6 @@
 package bootstrap

 import (
-	"errors"
 	"fmt"
 	"io/fs"
 	"path/filepath"
@@ -42,62 +41,6 @@ type Host struct {
 	Garage *GarageHost `yaml:"garage,omitempty"`
 }

-func loadHostsLegacy(bootstrapFS fs.FS) (map[string]Host, error) {
-
-	hosts := map[string]Host{}
-
-	readAsYaml := func(into interface{}, path string) error {
-		b, err := fs.ReadFile(bootstrapFS, path)
-		if err != nil {
-			return fmt.Errorf("reading file from fs: %w", err)
-		}
-		return yaml.Unmarshal(b, into)
-	}
-
-	{
-		globPath := "nebula/hosts/*.yml"
-
-		nebulaHostFiles, err := fs.Glob(bootstrapFS, globPath)
-		if err != nil {
-			return nil, fmt.Errorf("listing nebula host files at %q in fs: %w", globPath, err)
-		}
-
-		for _, nebulaHostPath := range nebulaHostFiles {
-
-			hostName := filepath.Base(nebulaHostPath)
-			hostName = strings.TrimSuffix(hostName, filepath.Ext(hostName))
-
-			var nebulaHost NebulaHost
-			if err := readAsYaml(&nebulaHost, nebulaHostPath); err != nil {
-				return nil, fmt.Errorf("reading %q as yaml: %w", nebulaHostPath, err)
-			}
-
-			hosts[hostName] = Host{
-				Name:   hostName,
-				Nebula: nebulaHost,
-			}
-		}
-	}
-
-	for hostName, host := range hosts {
-
-		garageHostPath := filepath.Join("garage/hosts", hostName+".yml")
-
-		var garageHost GarageHost
-		if err := readAsYaml(&garageHost, garageHostPath); errors.Is(err, fs.ErrNotExist) {
-			continue
-		} else if err != nil {
-			return nil, fmt.Errorf("reading %q as yaml: %w", garageHostPath, err)
-		}
-
-		host.Garage = &garageHost
-		hosts[hostName] = host
-	}
-
-	return hosts, nil
-}
-
 func loadHosts(bootstrapFS fs.FS) (map[string]Host, error) {

 	hosts := map[string]Host{}
@@ -131,18 +74,6 @@ func loadHosts(bootstrapFS fs.FS) (map[string]Host, error) {
 		hosts[hostName] = host
 	}

-	if len(hosts) > 0 {
-		return hosts, nil
-	}
-
-	// We used to have the bootstrap file laid out differently. If no hosts were
-	// found then the bootstrap file is probably in that format.
-	hosts, err = loadHostsLegacy(bootstrapFS)
-	if err != nil {
-		return nil, fmt.Errorf("loading hosts in legacy layout from fs: %w", err)
-	}
-
 	if len(hosts) == 0 {
 		return nil, fmt.Errorf("failed to load any hosts from fs")
 	}

View File

@@ -36,6 +36,9 @@ import (
 // * Merges the user-provided daemon.yml file with the default, and writes the
 // result to the runtime dir.
 //
+// * Merges daemon.yml configuration into the bootstrap configuration, and
+// rewrites the bootstrap file.
+//
 // * Sets up environment variables that all other sub-processes then use, based
 // on the runtime dir.
 //
@@ -79,7 +82,7 @@ func writeDaemonYml(userDaemonYmlPath, builtinDaemonYmlPath, runtimeDirPath stri
 	return nil
 }

-func writeBootstrapToDataDir(env *crypticnet.Env, r io.Reader) error {
+func copyBootstrapToDataDir(env *crypticnet.Env, r io.Reader) error {

 	path := env.DataDirBootstrapPath()
 	dirPath := filepath.Dir(path)
@@ -97,7 +100,7 @@ func writeBootstrapToDataDir(env *crypticnet.Env, r io.Reader) error {
 	f.Close()

 	if err != nil {
-		return fmt.Errorf("writing new bootstrap file to %q: %w", path, err)
+		return fmt.Errorf("copying bootstrap file to %q: %w", path, err)
 	}

 	if err := env.LoadBootstrap(path); err != nil {
@@ -133,11 +136,11 @@ func reloadBootstrap(env *crypticnet.Env, s3Client garage.S3APIClient) (bool, er
 	buf := new(bytes.Buffer)

 	if err := env.Bootstrap.WithHosts(newHosts).WriteTo(buf); err != nil {
-		return false, fmt.Errorf("writing new bootstrap file: %w", err)
+		return false, fmt.Errorf("writing new bootstrap file to buffer: %w", err)
 	}

-	if err := writeBootstrapToDataDir(env, buf); err != nil {
-		return false, fmt.Errorf("writing new bootstrap file: %w", err)
+	if err := copyBootstrapToDataDir(env, buf); err != nil {
+		return false, fmt.Errorf("copying new bootstrap file to data dir: %w", err)
 	}

 	return true, nil
@@ -281,7 +284,7 @@ var subCmdDaemon = subCmd{
 		env := subCmdCtx.env

-		s3Client, err := env.GlobalBucketS3APIClient()
+		s3Client, err := env.Bootstrap.GlobalBucketS3APIClient()
 		if err != nil {
 			return fmt.Errorf("creating client for global bucket: %w", err)
 		}
@@ -349,7 +352,7 @@
 			return fmt.Errorf("opening file %q: %w", env.BootstrapPath, err)
 		}

-		err = writeBootstrapToDataDir(env, f)
+		err = copyBootstrapToDataDir(env, f)
 		f.Close()

 		if err != nil {
@@ -361,6 +364,46 @@ var subCmdDaemon = subCmd{
 			return fmt.Errorf("generating daemon.yml file: %w", err)
 		}

+		{
+			// we update this Host's data using whatever configuration has been
+			// provided by daemon.yml. This way the daemon has the most
+			// up-to-date possible bootstrap. This updated bootstrap will later
+			// get updated in garage using update-global-bucket, so other hosts
+			// will see it as well.
+
+			// ThisDaemon can only be called after writeDaemonYml.
+			daemon := env.ThisDaemon()
+			host := env.Bootstrap.ThisHost()
+
+			host.Nebula.PublicAddr = daemon.VPN.PublicAddr
+
+			host.Garage = nil
+
+			if allocs := daemon.Storage.Allocations; len(allocs) > 0 {
+
+				host.Garage = new(bootstrap.GarageHost)
+
+				for _, alloc := range allocs {
+					host.Garage.Instances = append(host.Garage.Instances, bootstrap.GarageHostInstance{
+						RPCPort:   alloc.RPCPort,
+						S3APIPort: alloc.S3APIPort,
+						WebPort:   alloc.WebPort,
+					})
+				}
+			}
+
+			env.Bootstrap.Hosts[host.Name] = host
+
+			buf := new(bytes.Buffer)
+
+			if err := env.Bootstrap.WithHosts(env.Bootstrap.Hosts).WriteTo(buf); err != nil {
+				return fmt.Errorf("writing new bootstrap file to buffer: %w", err)
+			}
+
+			if err := copyBootstrapToDataDir(env, buf); err != nil {
+				return fmt.Errorf("copying new bootstrap file to data dir: %w", err)
+			}
+		}
+
 		for key, val := range env.ToMap() {
 			if err := os.Setenv(key, val); err != nil {
 				return fmt.Errorf("failed to set %q to %q: %w", key, val, err)

View File

@@ -30,7 +30,7 @@ var subCmdGarageMC = subCmd{
 		env := subCmdCtx.env

-		s3APIAddr := env.ChooseGaragePeer().S3APIAddr()
+		s3APIAddr := env.Bootstrap.ChooseGaragePeer().S3APIAddr()

 		if *keyID == "" || *keySecret == "" {
@@ -90,7 +90,7 @@ var subCmdGarageCLI = subCmd{
 		args = append([]string{"garage"}, subCmdCtx.args...)
 		cliEnv = append(
 			os.Environ(),
-			"GARAGE_RPC_HOST="+env.ChooseGaragePeer().RPCAddr(),
+			"GARAGE_RPC_HOST="+env.Bootstrap.ChooseGaragePeer().RPCAddr(),
 			"GARAGE_RPC_SECRET="+env.Bootstrap.GarageRPCSecret,
 		)
 	)

View File

@@ -64,7 +64,7 @@ var subCmdHostsAdd = subCmd{
 		env := subCmdCtx.env

-		client, err := env.GlobalBucketS3APIClient()
+		client, err := env.Bootstrap.GlobalBucketS3APIClient()
 		if err != nil {
 			return fmt.Errorf("creating client for global bucket: %w", err)
 		}
@@ -88,7 +88,7 @@ var subCmdHostsList = subCmd{
 		env := subCmdCtx.env

-		client, err := env.GlobalBucketS3APIClient()
+		client, err := env.Bootstrap.GlobalBucketS3APIClient()
 		if err != nil {
 			return fmt.Errorf("creating client for global bucket: %w", err)
 		}
@@ -132,7 +132,7 @@ var subCmdHostsDelete = subCmd{
 		env := subCmdCtx.env

-		client, err := env.GlobalBucketS3APIClient()
+		client, err := env.Bootstrap.GlobalBucketS3APIClient()
 		if err != nil {
 			return fmt.Errorf("creating client for global bucket: %w", err)
 		}
@@ -195,7 +195,7 @@ var subCmdHostsMakeBootstrap = subCmd{
 			return fmt.Errorf("reading admin.tgz with --admin-path of %q: %w", *adminPath, err)
 		}

-		client, err := env.GlobalBucketS3APIClient()
+		client, err := env.Bootstrap.GlobalBucketS3APIClient()
 		if err != nil {
 			return fmt.Errorf("creating client for global bucket: %w", err)
 		}

View File

@@ -13,42 +13,18 @@ func Main() {
 		log.Fatalf("reading envvars: %v", err)
 	}

-	client, err := env.GlobalBucketS3APIClient()
+	client, err := env.Bootstrap.GlobalBucketS3APIClient()
 	if err != nil {
 		log.Fatalf("creating client for global bucket: %v", err)
 	}

-	host := env.Bootstrap.ThisHost()
-
-	// We update the Host for this host in place, prior to writing it via the
-	// bootstrap method. We want to ensure that any changes made via daemon are
-	// reflected into the bootstrap data which is pushed up.
-	//
-	// TODO it'd be better if this was done within the daemon command itself,
-	// prior to any sub-processes being started. This would help us avoid this
-	// weird logic here, and would prevent all sub-processes from needing to be
-	// restarted the first time the daemon is started after daemon.yml is
-	// modified.
-
-	daemon := env.ThisDaemon()
-
-	host.Nebula.PublicAddr = daemon.VPN.PublicAddr
-
-	host.Garage = nil
-
-	if allocs := daemon.Storage.Allocations; len(allocs) > 0 {
-		host.Garage = new(bootstrap.GarageHost)
-		for _, alloc := range allocs {
-			host.Garage.Instances = append(host.Garage.Instances, bootstrap.GarageHostInstance{
-				RPCPort:   alloc.RPCPort,
-				S3APIPort: alloc.S3APIPort,
-				WebPort:   alloc.WebPort,
-			})
-		}
-	}
-
-	if err := bootstrap.PutGarageBoostrapHost(env.Context, client, host); err != nil {
+	err = bootstrap.PutGarageBoostrapHost(
+		env.Context,
+		client,
+		env.Bootstrap.ThisHost(),
+	)
+
+	if err != nil {
 		log.Fatal(err)
 	}
 }

View File

@@ -1,44 +0,0 @@
-package crypticnet
-
-import (
-	"cryptic-net/garage"
-	"fmt"
-)
-
-// ChooseGaragePeer returns a Peer for a garage instance from the network. It
-// will prefer a garage instance on this particular host, if there is one, but
-// will otherwise return a random endpoint.
-func (env *Env) ChooseGaragePeer() garage.Peer {
-
-	// TODO this only works well within the daemon process, otherwise daemon.yml
-	// isn't available.
-
-	if allocs := env.ThisDaemon().Storage.Allocations; len(allocs) > 0 {
-		return garage.Peer{
-			IP:        env.Bootstrap.ThisHost().Nebula.IP,
-			RPCPort:   allocs[0].RPCPort,
-			S3APIPort: allocs[0].S3APIPort,
-		}
-	}
-
-	for _, peer := range env.Bootstrap.GaragePeers() {
-		return peer
-	}
-
-	panic("no garage instances configured")
-}
-
-// GlobalBucketS3APIClient returns an S3 client pre-configured with access to
-// the global bucket.
-func (env *Env) GlobalBucketS3APIClient() (garage.S3APIClient, error) {
-
-	addr := env.ChooseGaragePeer().S3APIAddr()
-	creds := env.Bootstrap.GarageGlobalBucketS3APICredentials
-
-	client, err := garage.NewS3APIClient(addr, creds)
-	if err != nil {
-		return nil, fmt.Errorf("connecting to garage S3 API At %q: %w", addr, err)
-	}
-
-	return client, err
-}