Compare commits
6 commits: 038a28bb02...8c3e6a2845

| SHA1 |
|---|
| 8c3e6a2845 |
| 86b2ba7bfa |
| a840d0e701 |
| ef86c1bbd1 |
| fed79c6ec7 |
| 8d3b17e1cb |
@@ -7,10 +7,13 @@ import (
    "encoding/json"
    "fmt"
    "isle/nebula"
    "isle/toolkit"
    "maps"
    "net/netip"
    "path/filepath"
    "sort"

    "dev.mediocregopher.com/mediocre-go-lib.git/mctx"
)

// StateDirPath returns the path within the user's state directory where the
@@ -33,6 +36,22 @@ type CreationParams struct {
    Domain string
}

// NewCreationParams instantiates and returns a CreationParams.
func NewCreationParams(name, domain string) CreationParams {
    return CreationParams{
        ID:     toolkit.RandStr(32),
        Name:   name,
        Domain: domain,
    }
}

// Annotate implements the mctx.Annotator interface.
func (p CreationParams) Annotate(aa mctx.Annotations) {
    aa["networkID"] = p.ID
    aa["networkName"] = p.Name
    aa["networkDomain"] = p.Domain
}

// Bootstrap contains all information which is needed by a host daemon to join a
// network on boot.
type Bootstrap struct {
@@ -2,13 +2,13 @@ package main

import (
    "fmt"
    "isle/daemon"
    "isle/bootstrap"
)

func (ctx subCmdCtx) getHosts() (daemon.GetHostsResult, error) {
func (ctx subCmdCtx) getHosts() ([]bootstrap.Host, error) {
    res, err := ctx.daemonRPC.GetHosts(ctx)
    if err != nil {
        return daemon.GetHostsResult{}, fmt.Errorf("calling GetHosts: %w", err)
        return nil, fmt.Errorf("calling GetHosts: %w", err)
    }
    return res, nil
}
@@ -6,6 +6,7 @@ import (
    "os"

    "isle/daemon"
    "isle/daemon/daecommon"

    "dev.mediocregopher.com/mediocre-go-lib.git/mlog"
)
@@ -41,7 +42,7 @@ var subCmdDaemon = subCmd{
    }

    if *dumpConfig {
        return daemon.CopyDefaultConfig(os.Stdout, envAppDirPath)
        return daecommon.CopyDefaultConfig(os.Stdout, envAppDirPath)
    }

    logLevel := mlog.LevelFromString(*logLevelStr)
@@ -55,12 +56,12 @@ var subCmdDaemon = subCmd{
    // required linux capabilities are set.
    // TODO check that the tun module is loaded (for nebula).

    daemonConfig, err := daemon.LoadConfig(envAppDirPath, *daemonConfigPath)
    daemonConfig, err := daecommon.LoadConfig(envAppDirPath, *daemonConfigPath)
    if err != nil {
        return fmt.Errorf("loading daemon config: %w", err)
    }

    daemonInst, err := daemon.NewDaemon(
    daemonInst, err := daemon.New(
        ctx, logger, daemonConfig, envBinDirPath, nil,
    )
    if err != nil {
@@ -77,7 +78,7 @@ var subCmdDaemon = subCmd{
    {
        logger := logger.WithNamespace("http")
        httpSrv, err := newHTTPServer(
            ctx, logger, daemon.NewRPC(daemonInst),
            ctx, logger, daemonInst,
        )
        if err != nil {
            return fmt.Errorf("starting HTTP server: %w", err)
@@ -6,7 +6,6 @@ import (
    "fmt"
    "io/fs"
    "isle/daemon"
    "isle/daemon/jsonrpc2"
    "net"
    "net/http"
    "os"
@@ -18,7 +17,7 @@ import (
const daemonHTTPRPCPath = "/rpc/v0.json"

func newHTTPServer(
    ctx context.Context, logger *mlog.Logger, rpc daemon.RPC,
    ctx context.Context, logger *mlog.Logger, daemonInst *daemon.Daemon,
) (
    *http.Server, error,
) {
@@ -51,16 +50,8 @@ func newHTTPServer(
    }

    logger.Info(ctx, "HTTP server socket created")

    rpcHandler := jsonrpc2.Chain(
        jsonrpc2.NewMLogMiddleware(logger.WithNamespace("rpc")),
        jsonrpc2.ExposeServerSideErrorsMiddleware,
    )(
        jsonrpc2.NewDispatchHandler(&rpc),
    )

    httpMux := http.NewServeMux()
    httpMux.Handle(daemonHTTPRPCPath, jsonrpc2.NewHTTPHandler(rpcHandler))
    httpMux.Handle(daemonHTTPRPCPath, daemonInst.HTTPRPCHandler())

    srv := &http.Server{Handler: httpMux}
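Note: the practical effect of this hunk is that the HTTP layer no longer needs to know about jsonrpc2 middleware at all; the daemon hands back a ready-made http.Handler. A hedged sketch of that caller-side shape follows (mountRPC is a hypothetical helper name, not something in the diff).

```go
// Sketch only, assuming daemonHTTPRPCPath and *daemon.Daemon as shown above.
func mountRPC(daemonInst *daemon.Daemon) *http.Server {
	// The logging and error-exposure middleware is now assembled inside the
	// daemon package, behind HTTPRPCHandler().
	httpMux := http.NewServeMux()
	httpMux.Handle(daemonHTTPRPCPath, daemonInst.HTTPRPCHandler())
	return &http.Server{Handler: httpMux}
}
```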
@@ -3,6 +3,7 @@ package main
import (
    "errors"
    "fmt"
    "isle/daemon/daecommon"
    "os"
    "path/filepath"
    "syscall"
@@ -12,9 +13,9 @@ import (
// information which may or may not be useful. Unfortunately when it initializes
// this directory it likes to print some annoying logs, so we pre-initialize in
// order to prevent it from doing so.
func initMCConfigDir() (string, error) {
func initMCConfigDir(envVars daecommon.EnvVars) (string, error) {
    var (
        path = filepath.Join(daemonEnvVars.StateDirPath, "mc")
        path = filepath.Join(envVars.StateDir.Path, "mc")
        sharePath = filepath.Join(path, "share")
        configJSONPath = filepath.Join(path, "config.json")
    )
@@ -72,7 +73,9 @@ var subCmdGarageMC = subCmd{
    args = args[i:]
    }

    configDir, err := initMCConfigDir()
    envVars := daecommon.GetEnvVars()

    configDir, err := initMCConfigDir(envVars)
    if err != nil {
        return fmt.Errorf("initializing minio-client config directory: %w", err)
    }
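Note: the general shape of this change is that helpers now receive the environment-derived directories explicitly via daecommon.EnvVars instead of reading a package-level global. A hedged sketch under that assumption (someHelper and run are illustrative names only):

```go
package main

import (
	"fmt"
	"isle/daemon/daecommon"
	"path/filepath"
)

// someHelper is hypothetical; it shows the new "pass EnvVars in" style, which
// also makes such helpers easy to test with a fabricated EnvVars value.
func someHelper(envVars daecommon.EnvVars) string {
	// StateDir is a toolkit.Dir; its Path field holds the resolved directory.
	return filepath.Join(envVars.StateDir.Path, "some-subdir")
}

func run() error {
	envVars := daecommon.GetEnvVars() // memoized; derived from STATE_DIRECTORY etc.
	fmt.Println("using", someHelper(envVars))
	return nil
}
```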
@@ -5,7 +5,7 @@ import (
    "errors"
    "fmt"
    "isle/bootstrap"
    "isle/daemon"
    "isle/daemon/network"
    "isle/jsonutil"
    "os"
    "sort"
@@ -43,8 +43,12 @@ var subCmdHostCreate = subCmd{
    return errors.New("--hostname is required")
    }

    res, err := ctx.daemonRPC.CreateHost(
        ctx, hostName.V, daemon.CreateHostOpts{
    var (
        res network.JoiningBootstrap
        err error
    )
    res, err = ctx.daemonRPC.CreateHost(
        ctx, hostName.V, network.CreateHostOpts{
            IP: ip.V,
            CanCreateHosts: *canCreateHosts,
        },
@@ -53,7 +57,7 @@ var subCmdHostCreate = subCmd{
        return fmt.Errorf("calling CreateHost: %w", err)
    }

    return json.NewEncoder(os.Stdout).Encode(res.JoiningBootstrap)
    return json.NewEncoder(os.Stdout).Encode(res)
    },
}

@@ -74,8 +78,8 @@ var subCmdHostList = subCmd{
        Storage bootstrap.GarageHost `json:",omitempty"`
    }

    hosts := make([]host, 0, len(hostsRes.Hosts))
    for _, h := range hostsRes.Hosts {
    hosts := make([]host, 0, len(hostsRes))
    for _, h := range hostsRes {

        host := host{
            Name: string(h.Name),
@@ -116,8 +120,7 @@ var subCmdHostRemove = subCmd{
    return errors.New("--hostname is required")
    }

    _, err := ctx.daemonRPC.RemoveHost(ctx, hostName.V)
    if err != nil {
    if err := ctx.daemonRPC.RemoveHost(ctx, hostName.V); err != nil {
        return fmt.Errorf("calling RemoveHost: %w", err)
    }
@@ -2,7 +2,6 @@ package main

import (
    "context"
    "isle/daemon"
    "os"
    "os/signal"
    "path/filepath"
@@ -21,7 +20,6 @@ func getAppDirPath() string {
}

var (
    daemonEnvVars = daemon.GetEnvVars()
    envAppDirPath = getAppDirPath()
    envBinDirPath = filepath.Join(envAppDirPath, "bin")
)
@@ -53,7 +53,7 @@ var subCmdNebulaCreateCert = subCmd{
    return fmt.Errorf("calling CreateNebulaCertificate: %w", err)
    }

    nebulaHostCertPEM, err := res.HostNebulaCertificate.Unwrap().MarshalToPEM()
    nebulaHostCertPEM, err := res.Unwrap().MarshalToPEM()
    if err != nil {
        return fmt.Errorf("marshaling cert to PEM: %w", err)
    }
@@ -112,7 +112,7 @@ var subCmdNebulaShow = subCmd{
        SubnetCIDR: subnet.String(),
    }

    for _, h := range hosts.Hosts {
    for _, h := range hosts {
        if h.Nebula.PublicAddr == "" {
            continue
        }
@@ -3,7 +3,7 @@ package main

import (
    "errors"
    "fmt"
    "isle/daemon"
    "isle/daemon/network"
    "isle/jsonutil"
)

@@ -51,7 +51,7 @@ var subCmdNetworkCreate = subCmd{
    return errors.New("--name, --domain, --ip-net, and --hostname are required")
    }

    _, err := ctx.daemonRPC.CreateNetwork(
    err := ctx.daemonRPC.CreateNetwork(
        ctx, *name, *domain, ipNet.V, hostName.V,
    )
    if err != nil {
@@ -81,15 +81,14 @@ var subCmdNetworkJoin = subCmd{
    return errors.New("--bootstrap-path is required")
    }

    var newBootstrap daemon.JoiningBootstrap
    var newBootstrap network.JoiningBootstrap
    if err := jsonutil.LoadFile(&newBootstrap, *bootstrapPath); err != nil {
        return fmt.Errorf(
            "loading bootstrap from %q: %w", *bootstrapPath, err,
        )
    }

    _, err := ctx.daemonRPC.JoinNetwork(ctx, newBootstrap)
    return err
    return ctx.daemonRPC.JoinNetwork(ctx, newBootstrap)
    },
}

@@ -130,7 +130,9 @@ func (ctx subCmdCtx) doSubCmd(subCmds ...subCmd) error {
    }

    daemonRPC := daemon.RPCFromClient(
        jsonrpc2.NewUnixHTTPClient(daemon.HTTPSocketPath(), daemonHTTPRPCPath),
        jsonrpc2.NewUnixHTTPClient(
            daemon.HTTPSocketPath(), daemonHTTPRPCPath,
        ),
    )

    err := subCmd.do(subCmdCtx{
@@ -1,16 +1,47 @@
package daemon
// Package children manages the creation, lifetime, and shutdown of child
// processes created by the daemon.
package children

import (
    "context"
    "errors"
    "fmt"
    "isle/bootstrap"
    "isle/secrets"
    "io"
    "os"

    "code.betamike.com/micropelago/pmux/pmuxlib"
    "dev.mediocregopher.com/mediocre-go-lib.git/mlog"

    "isle/bootstrap"
    "isle/daemon/daecommon"
    "isle/secrets"
    "isle/toolkit"
)

// Opts are optional parameters which can be passed in when initializing a new
// Children instance. A nil Opts is equivalent to a zero value.
type Opts struct {
    // Stdout and Stderr are what the associated outputs from child processes
    // will be directed to.
    Stdout, Stderr io.Writer
}

func (o *Opts) withDefaults() *Opts {
    if o == nil {
        o = new(Opts)
    }

    if o.Stdout == nil {
        o.Stdout = os.Stdout
    }

    if o.Stderr == nil {
        o.Stderr = os.Stderr
    }

    return o
}

// Children manages all child processes of a network. Child processes are
// comprised of:
//   - nebula
@@ -18,20 +49,22 @@ import (
//   - garage (0 or more, depending on configured storage allocations)
type Children struct {
    logger *mlog.Logger
    daemonConfig Config
    daemonConfig daecommon.Config
    runtimeDir toolkit.Dir
    opts Opts

    pmux *pmuxlib.Pmux
}

// NewChildren initialized and returns a Children instance. If initialization
// fails an error is returned.
func NewChildren(
// New initializes and returns a Children instance. If initialization fails an
// error is returned.
func New(
    ctx context.Context,
    logger *mlog.Logger,
    binDirPath string,
    secretsStore secrets.Store,
    daemonConfig Config,
    daemonConfig daecommon.Config,
    runtimeDir toolkit.Dir,
    garageAdminToken string,
    hostBootstrap bootstrap.Bootstrap,
    opts *Opts,
@@ -41,7 +74,7 @@ func NewChildren(
    opts = opts.withDefaults()

    logger.Info(ctx, "Loading secrets")
    garageRPCSecret, err := getGarageRPCSecret(ctx, secretsStore)
    garageRPCSecret, err := daecommon.GetGarageRPCSecret(ctx, secretsStore)
    if err != nil && !errors.Is(err, secrets.ErrNotFound) {
        return nil, fmt.Errorf("loading garage RPC secret: %w", err)
    }
@@ -49,6 +82,7 @@ func NewChildren(
    c := &Children{
        logger: logger,
        daemonConfig: daemonConfig,
        runtimeDir: runtimeDir,
        opts: *opts,
    }

@@ -79,9 +113,12 @@ func NewChildren(
}

// RestartDNSMasq rewrites the dnsmasq config and restarts the process.
//
// TODO block until process has been confirmed to have come back up
// successfully.
func (c *Children) RestartDNSMasq(hostBootstrap bootstrap.Bootstrap) error {
    _, err := dnsmasqWriteConfig(
        c.opts.EnvVars.RuntimeDirPath, c.daemonConfig, hostBootstrap,
        c.runtimeDir.Path, c.daemonConfig, hostBootstrap,
    )
    if err != nil {
        return fmt.Errorf("writing new dnsmasq config: %w", err)
@@ -92,9 +129,12 @@ func (c *Children) RestartDNSMasq(hostBootstrap bootstrap.Bootstrap) error {
}

// RestartNebula rewrites the nebula config and restarts the process.
//
// TODO block until process has been confirmed to have come back up
// successfully.
func (c *Children) RestartNebula(hostBootstrap bootstrap.Bootstrap) error {
    _, err := nebulaWriteConfig(
        c.opts.EnvVars.RuntimeDirPath, c.daemonConfig, hostBootstrap,
        c.runtimeDir.Path, c.daemonConfig, hostBootstrap,
    )
    if err != nil {
        return fmt.Errorf("writing a new nebula config: %w", err)
@@ -104,6 +144,30 @@ func (c *Children) RestartNebula(hostBootstrap bootstrap.Bootstrap) error {
    return nil
}

// Reload applies a ReloadDiff to the Children, using the given bootstrap which
// must be the same one which was passed to CalculateReloadDiff.
func (c *Children) Reload(
    ctx context.Context, newBootstrap bootstrap.Bootstrap, diff ReloadDiff,
) error {
    var errs []error

    if diff.DNSChanged {
        c.logger.Info(ctx, "Restarting dnsmasq to account for bootstrap changes")
        if err := c.RestartDNSMasq(newBootstrap); err != nil {
            errs = append(errs, fmt.Errorf("restarting dnsmasq: %w", err))
        }
    }

    if diff.NebulaChanged {
        c.logger.Info(ctx, "Restarting nebula to account for bootstrap changes")
        if err := c.RestartNebula(newBootstrap); err != nil {
            errs = append(errs, fmt.Errorf("restarting nebula: %w", err))
        }
    }

    return errors.Join(errs...)
}

// Shutdown blocks until all child processes have gracefully shut themselves
// down.
func (c *Children) Shutdown() {
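Note: a hedged sketch of how the daemon side might construct the relocated type after this refactor. The surrounding variables (logger, binDirPath, secretsStore, daemonConfig, runtimeDir, garageAdminToken, hostBootstrap) are assumed to exist in the caller; the sketch only mirrors the New signature shown above, not any specific call site in this compare.

```go
// Sketch only: constructing Children from the daemon package.
childrenInst, err := children.New(
	ctx,
	logger.WithNamespace("children"),
	binDirPath,
	secretsStore,
	daemonConfig, // now a daecommon.Config
	runtimeDir,   // a toolkit.Dir, e.g. derived from daecommon.GetEnvVars()
	garageAdminToken,
	hostBootstrap,
	nil, // nil *Opts: child output defaults to os.Stdout / os.Stderr
)
if err != nil {
	return fmt.Errorf("starting child processes: %w", err)
}
defer childrenInst.Shutdown()
```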
go/daemon/children/diff.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package children

import (
    "errors"
    "fmt"
    "isle/bootstrap"
    "isle/daemon/daecommon"
    "reflect"
)

// ReloadDiff describes which children had their configurations changed as part
// of a change in the bootstrap.
type ReloadDiff struct {
    NebulaChanged bool
    DNSChanged    bool
}

// CalculateReloadDiff calculates a ReloadDiff based on an old and new
// bootstrap.
func CalculateReloadDiff(
    daemonConfig daecommon.Config,
    prevBootstrap, nextBootstrap bootstrap.Bootstrap,
) (
    diff ReloadDiff, err error,
) {
    {
        prevNebulaConfig, prevErr := nebulaConfig(daemonConfig, prevBootstrap)
        nextNebulaConfig, nextErr := nebulaConfig(daemonConfig, nextBootstrap)
        if err = errors.Join(prevErr, nextErr); err != nil {
            err = fmt.Errorf("calculating nebula config: %w", err)
            return
        }

        diff.NebulaChanged = !reflect.DeepEqual(
            prevNebulaConfig, nextNebulaConfig,
        )
    }

    {
        diff.DNSChanged = !reflect.DeepEqual(
            dnsmasqConfig(daemonConfig, prevBootstrap),
            dnsmasqConfig(daemonConfig, nextBootstrap),
        )
    }

    return
}
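Note: CalculateReloadDiff and Children.Reload are designed to be used as a pair, with the same "next" bootstrap passed to both. A hedged sketch of that pairing (the wrapper function and variable names are assumptions, not code from this compare):

```go
// Sketch only: recompute the per-child config diff and apply it.
func reloadChildren(
	ctx context.Context,
	childrenInst *children.Children,
	daemonConfig daecommon.Config,
	prevBootstrap, nextBootstrap bootstrap.Bootstrap,
) error {
	diff, err := children.CalculateReloadDiff(daemonConfig, prevBootstrap, nextBootstrap)
	if err != nil {
		return fmt.Errorf("calculating reload diff: %w", err)
	}

	// Reload only restarts the children whose rendered configs actually changed.
	return childrenInst.Reload(ctx, nextBootstrap, diff)
}
```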
@@ -1,9 +1,10 @@
package daemon
package children

import (
    "context"
    "fmt"
    "isle/bootstrap"
    "isle/daemon/daecommon"
    "isle/dnsmasq"
    "path/filepath"
    "sort"
@@ -13,7 +14,7 @@ import (
)

func dnsmasqConfig(
    daemonConfig Config, hostBootstrap bootstrap.Bootstrap,
    daemonConfig daecommon.Config, hostBootstrap bootstrap.Bootstrap,
) dnsmasq.ConfData {
    hostsSlice := make([]dnsmasq.ConfDataHost, 0, len(hostBootstrap.Hosts))
    for _, host := range hostBootstrap.Hosts {
@@ -37,7 +38,7 @@ func dnsmasqConfig(

func dnsmasqWriteConfig(
    runtimeDirPath string,
    daemonConfig Config,
    daemonConfig daecommon.Config,
    hostBootstrap bootstrap.Bootstrap,
) (
    string, error,
@@ -57,7 +58,7 @@ func dnsmasqWriteConfig(
func dnsmasqPmuxProcConfig(
    logger *mlog.Logger,
    runtimeDirPath, binDirPath string,
    daemonConfig Config,
    daemonConfig daecommon.Config,
    hostBootstrap bootstrap.Bootstrap,
) (
    pmuxlib.ProcessConfig, error,
@ -1,9 +1,10 @@
|
||||
package daemon
|
||||
package children
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"isle/bootstrap"
|
||||
"isle/daemon/daecommon"
|
||||
"isle/garage"
|
||||
"isle/garage/garagesrv"
|
||||
"net"
|
||||
@ -19,31 +20,10 @@ func garageAdminClientLogger(logger *mlog.Logger) *mlog.Logger {
|
||||
return logger.WithNamespace("garageAdminClient")
|
||||
}
|
||||
|
||||
// newGarageAdminClient will return an AdminClient for a local garage instance,
|
||||
// or it will _panic_ if there is no local instance configured.
|
||||
func newGarageAdminClient(
|
||||
logger *mlog.Logger,
|
||||
daemonConfig Config,
|
||||
adminToken string,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) *garage.AdminClient {
|
||||
|
||||
thisHost := hostBootstrap.ThisHost()
|
||||
|
||||
return garage.NewAdminClient(
|
||||
garageAdminClientLogger(logger),
|
||||
net.JoinHostPort(
|
||||
thisHost.IP().String(),
|
||||
strconv.Itoa(daemonConfig.Storage.Allocations[0].AdminPort),
|
||||
),
|
||||
adminToken,
|
||||
)
|
||||
}
|
||||
|
||||
func waitForGarage(
|
||||
ctx context.Context,
|
||||
logger *mlog.Logger,
|
||||
daemonConfig Config,
|
||||
daemonConfig daecommon.Config,
|
||||
adminToken string,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) error {
|
||||
@ -81,35 +61,16 @@ func waitForGarage(
|
||||
|
||||
}
|
||||
|
||||
// bootstrapGarageHostForAlloc returns the bootstrap.GarageHostInstance which
|
||||
// corresponds with the given alloc from the daemon config. This will panic if
|
||||
// no associated instance can be found.
|
||||
//
|
||||
// This assumes that coalesceDaemonConfigAndBootstrap has already been called.
|
||||
func bootstrapGarageHostForAlloc(
|
||||
host bootstrap.Host,
|
||||
alloc ConfigStorageAllocation,
|
||||
) bootstrap.GarageHostInstance {
|
||||
|
||||
for _, inst := range host.Garage.Instances {
|
||||
if inst.RPCPort == alloc.RPCPort {
|
||||
return inst
|
||||
}
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("could not find alloc %+v in the bootstrap data", alloc))
|
||||
}
|
||||
|
||||
func garageWriteChildConfig(
|
||||
rpcSecret, runtimeDirPath, adminToken string,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
alloc ConfigStorageAllocation,
|
||||
alloc daecommon.ConfigStorageAllocation,
|
||||
) (
|
||||
string, error,
|
||||
) {
|
||||
|
||||
thisHost := hostBootstrap.ThisHost()
|
||||
id := bootstrapGarageHostForAlloc(thisHost, alloc).ID
|
||||
id := daecommon.BootstrapGarageHostForAlloc(thisHost, alloc).ID
|
||||
|
||||
peer := garage.LocalPeer{
|
||||
RemotePeer: garage.RemotePeer{
|
||||
@ -147,7 +108,7 @@ func garagePmuxProcConfigs(
|
||||
ctx context.Context,
|
||||
logger *mlog.Logger,
|
||||
rpcSecret, runtimeDirPath, binDirPath string,
|
||||
daemonConfig Config,
|
||||
daemonConfig daecommon.Config,
|
||||
adminToken string,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
@ -184,41 +145,3 @@ func garagePmuxProcConfigs(
|
||||
|
||||
return pmuxProcConfigs, nil
|
||||
}
|
||||
|
||||
func garageApplyLayout(
|
||||
ctx context.Context,
|
||||
logger *mlog.Logger,
|
||||
daemonConfig Config,
|
||||
adminToken string,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) error {
|
||||
|
||||
var (
|
||||
adminClient = newGarageAdminClient(
|
||||
logger, daemonConfig, adminToken, hostBootstrap,
|
||||
)
|
||||
thisHost = hostBootstrap.ThisHost()
|
||||
hostName = thisHost.Name
|
||||
allocs = daemonConfig.Storage.Allocations
|
||||
peers = make([]garage.PeerLayout, len(allocs))
|
||||
)
|
||||
|
||||
for i, alloc := range allocs {
|
||||
|
||||
id := bootstrapGarageHostForAlloc(thisHost, alloc).ID
|
||||
|
||||
zone := string(hostName)
|
||||
if alloc.Zone != "" {
|
||||
zone = alloc.Zone
|
||||
}
|
||||
|
||||
peers[i] = garage.PeerLayout{
|
||||
ID: id,
|
||||
Capacity: alloc.Capacity * 1_000_000_000,
|
||||
Zone: zone,
|
||||
Tags: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
return adminClient.ApplyLayout(ctx, peers)
|
||||
}
|
go/daemon/children/jigs.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package children

import (
    "context"
    "time"

    "dev.mediocregopher.com/mediocre-go-lib.git/mlog"
)

// until keeps trying fn until it returns nil, returning true. If the context is
// canceled then until returns false.
func until(
    ctx context.Context,
    logger *mlog.Logger,
    descr string,
    fn func(context.Context) error,
) bool {
    for {
        logger.Info(ctx, descr)
        err := fn(ctx)
        if err == nil {
            return true
        } else if ctxErr := ctx.Err(); ctxErr != nil {
            return false
        }

        logger.Warn(ctx, descr+" failed, retrying in one second", err)
        time.Sleep(1 * time.Second)
    }
}
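Note: since until is unexported, its callers live inside the children package. A hedged usage sketch (checkHealth is a hypothetical func(context.Context) error, not something in this compare):

```go
// Sketch only: poll a child process until it reports healthy, or give up when
// the context is canceled.
if !until(ctx, logger, "Waiting for service to become healthy", checkHealth) {
	// until returned false, so the context must have been canceled.
	return ctx.Err()
}
```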
@ -1,9 +1,10 @@
|
||||
package daemon
|
||||
package children
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"isle/bootstrap"
|
||||
"isle/daemon/daecommon"
|
||||
"isle/yamlutil"
|
||||
"net"
|
||||
"path/filepath"
|
||||
@ -47,7 +48,7 @@ func waitForNebula(
|
||||
}
|
||||
|
||||
func nebulaConfig(
|
||||
daemonConfig Config,
|
||||
daemonConfig daecommon.Config,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
map[string]any, error,
|
||||
@ -136,7 +137,7 @@ func nebulaConfig(
|
||||
|
||||
func nebulaWriteConfig(
|
||||
runtimeDirPath string,
|
||||
daemonConfig Config,
|
||||
daemonConfig daecommon.Config,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
string, error,
|
||||
@ -157,7 +158,7 @@ func nebulaWriteConfig(
|
||||
|
||||
func nebulaPmuxProcConfig(
|
||||
runtimeDirPath, binDirPath string,
|
||||
daemonConfig Config,
|
||||
daemonConfig daecommon.Config,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
pmuxlib.ProcessConfig, error,
|
@ -1,9 +1,10 @@
|
||||
package daemon
|
||||
package children
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"isle/bootstrap"
|
||||
"isle/daemon/daecommon"
|
||||
|
||||
"code.betamike.com/micropelago/pmux/pmuxlib"
|
||||
)
|
||||
@ -11,14 +12,14 @@ import (
|
||||
func (c *Children) newPmuxConfig(
|
||||
ctx context.Context,
|
||||
garageRPCSecret, binDirPath string,
|
||||
daemonConfig Config,
|
||||
daemonConfig daecommon.Config,
|
||||
garageAdminToken string,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
pmuxlib.Config, error,
|
||||
) {
|
||||
nebulaPmuxProcConfig, err := nebulaPmuxProcConfig(
|
||||
c.opts.EnvVars.RuntimeDirPath,
|
||||
c.runtimeDir.Path,
|
||||
binDirPath,
|
||||
daemonConfig,
|
||||
hostBootstrap,
|
||||
@ -29,7 +30,7 @@ func (c *Children) newPmuxConfig(
|
||||
|
||||
dnsmasqPmuxProcConfig, err := dnsmasqPmuxProcConfig(
|
||||
c.logger,
|
||||
c.opts.EnvVars.RuntimeDirPath,
|
||||
c.runtimeDir.Path,
|
||||
binDirPath,
|
||||
daemonConfig,
|
||||
hostBootstrap,
|
||||
@ -44,7 +45,7 @@ func (c *Children) newPmuxConfig(
|
||||
ctx,
|
||||
c.logger,
|
||||
garageRPCSecret,
|
||||
c.opts.EnvVars.RuntimeDirPath,
|
||||
c.runtimeDir.Path,
|
||||
binDirPath,
|
||||
daemonConfig,
|
||||
garageAdminToken,
|
||||
@ -67,7 +68,7 @@ func (c *Children) newPmuxConfig(
|
||||
|
||||
func (c *Children) postPmuxInit(
|
||||
ctx context.Context,
|
||||
daemonConfig Config,
|
||||
daemonConfig daecommon.Config,
|
||||
garageAdminToken string,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) error {
|
@ -8,7 +8,9 @@ package daemon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"isle/bootstrap"
|
||||
"isle/daemon/jsonrpc2"
|
||||
"isle/daemon/network"
|
||||
"isle/nebula"
|
||||
)
|
||||
|
||||
@ -22,33 +24,32 @@ func RPCFromClient(client jsonrpc2.Client) RPC {
|
||||
return &rpcClient{client}
|
||||
}
|
||||
|
||||
func (c *rpcClient) CreateHost(ctx context.Context, hostName nebula.HostName, opts CreateHostOpts) (c2 CreateHostResult, err error) {
|
||||
func (c *rpcClient) CreateHost(ctx context.Context, h1 nebula.HostName, c2 network.CreateHostOpts) (j1 network.JoiningBootstrap, err error) {
|
||||
err = c.client.Call(
|
||||
ctx,
|
||||
&c2,
|
||||
&j1,
|
||||
"CreateHost",
|
||||
hostName,
|
||||
opts,
|
||||
h1,
|
||||
c2,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *rpcClient) CreateNebulaCertificate(ctx context.Context, hostName nebula.HostName, hostEncryptingPublicKey nebula.EncryptingPublicKey) (c2 CreateNebulaCertificateResult, err error) {
|
||||
func (c *rpcClient) CreateNebulaCertificate(ctx context.Context, h1 nebula.HostName, e1 nebula.EncryptingPublicKey) (c2 nebula.Certificate, err error) {
|
||||
err = c.client.Call(
|
||||
ctx,
|
||||
&c2,
|
||||
"CreateNebulaCertificate",
|
||||
hostName,
|
||||
hostEncryptingPublicKey,
|
||||
h1,
|
||||
e1,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *rpcClient) CreateNetwork(ctx context.Context, name string, domain string, ipNet nebula.IPNet, hostName nebula.HostName) (st1 struct {
|
||||
}, err error) {
|
||||
func (c *rpcClient) CreateNetwork(ctx context.Context, name string, domain string, ipNet nebula.IPNet, hostName nebula.HostName) (err error) {
|
||||
err = c.client.Call(
|
||||
ctx,
|
||||
&st1,
|
||||
nil,
|
||||
"CreateNetwork",
|
||||
name,
|
||||
domain,
|
||||
@ -58,7 +59,7 @@ func (c *rpcClient) CreateNetwork(ctx context.Context, name string, domain strin
|
||||
return
|
||||
}
|
||||
|
||||
func (c *rpcClient) GetGarageClientParams(ctx context.Context) (g1 GarageClientParams, err error) {
|
||||
func (c *rpcClient) GetGarageClientParams(ctx context.Context) (g1 network.GarageClientParams, err error) {
|
||||
err = c.client.Call(
|
||||
ctx,
|
||||
&g1,
|
||||
@ -67,10 +68,10 @@ func (c *rpcClient) GetGarageClientParams(ctx context.Context) (g1 GarageClientP
|
||||
return
|
||||
}
|
||||
|
||||
func (c *rpcClient) GetHosts(ctx context.Context) (g1 GetHostsResult, err error) {
|
||||
func (c *rpcClient) GetHosts(ctx context.Context) (ha1 []bootstrap.Host, err error) {
|
||||
err = c.client.Call(
|
||||
ctx,
|
||||
&g1,
|
||||
&ha1,
|
||||
"GetHosts",
|
||||
)
|
||||
return
|
||||
@ -85,22 +86,20 @@ func (c *rpcClient) GetNebulaCAPublicCredentials(ctx context.Context) (c2 nebula
|
||||
return
|
||||
}
|
||||
|
||||
func (c *rpcClient) JoinNetwork(ctx context.Context, req JoiningBootstrap) (st1 struct {
|
||||
}, err error) {
|
||||
func (c *rpcClient) JoinNetwork(ctx context.Context, j1 network.JoiningBootstrap) (err error) {
|
||||
err = c.client.Call(
|
||||
ctx,
|
||||
&st1,
|
||||
nil,
|
||||
"JoinNetwork",
|
||||
req,
|
||||
j1,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *rpcClient) RemoveHost(ctx context.Context, hostName nebula.HostName) (st1 struct {
|
||||
}, err error) {
|
||||
func (c *rpcClient) RemoveHost(ctx context.Context, hostName nebula.HostName) (err error) {
|
||||
err = c.client.Call(
|
||||
ctx,
|
||||
&st1,
|
||||
nil,
|
||||
"RemoveHost",
|
||||
hostName,
|
||||
)
|
||||
|
@ -1,183 +1,81 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"isle/yamlutil"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/imdario/mergo"
|
||||
"gopkg.in/yaml.v3"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func defaultConfigPath(appDirPath string) string {
|
||||
return filepath.Join(appDirPath, "etc", "daemon.yml")
|
||||
func getDefaultHTTPSocketDirPath() string {
|
||||
path, err := firstExistingDir(
|
||||
"/tmp",
|
||||
|
||||
// TODO it's possible the daemon process can't actually write to these
|
||||
"/run",
|
||||
"/var/run",
|
||||
"/dev/shm",
|
||||
)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to find directory for HTTP socket: %v", err))
|
||||
}
|
||||
|
||||
type ConfigTun struct {
|
||||
Device string `yaml:"device"`
|
||||
return path
|
||||
}
|
||||
|
||||
type ConfigFirewall struct {
|
||||
Conntrack ConfigConntrack `yaml:"conntrack"`
|
||||
Outbound []ConfigFirewallRule `yaml:"outbound"`
|
||||
Inbound []ConfigFirewallRule `yaml:"inbound"`
|
||||
}
|
||||
|
||||
type ConfigConntrack struct {
|
||||
TCPTimeout string `yaml:"tcp_timeout"`
|
||||
UDPTimeout string `yaml:"udp_timeout"`
|
||||
DefaultTimeout string `yaml:"default_timeout"`
|
||||
MaxConnections int `yaml:"max_connections"`
|
||||
}
|
||||
|
||||
type ConfigFirewallRule struct {
|
||||
Port string `yaml:"port,omitempty"`
|
||||
Code string `yaml:"code,omitempty"`
|
||||
Proto string `yaml:"proto,omitempty"`
|
||||
Host string `yaml:"host,omitempty"`
|
||||
Group string `yaml:"group,omitempty"`
|
||||
Groups []string `yaml:"groups,omitempty"`
|
||||
CIDR string `yaml:"cidr,omitempty"`
|
||||
CASha string `yaml:"ca_sha,omitempty"`
|
||||
CAName string `yaml:"ca_name,omitempty"`
|
||||
}
|
||||
|
||||
// ConfigStorageAllocation describes the structure of each storage allocation
|
||||
// within the daemon config file.
|
||||
type ConfigStorageAllocation struct {
|
||||
DataPath string `yaml:"data_path"`
|
||||
MetaPath string `yaml:"meta_path"`
|
||||
Capacity int `yaml:"capacity"`
|
||||
S3APIPort int `yaml:"s3_api_port"`
|
||||
RPCPort int `yaml:"rpc_port"`
|
||||
AdminPort int `yaml:"admin_port"`
|
||||
|
||||
// Zone is a secret option which makes it easier to test garage bugs, but
|
||||
// which we don't want users to otherwise know about.
|
||||
Zone string `yaml:"zone"`
|
||||
}
|
||||
|
||||
// Config describes the structure of the daemon config file.
|
||||
type Config struct {
|
||||
DNS struct {
|
||||
Resolvers []string `yaml:"resolvers"`
|
||||
} `yaml:"dns"`
|
||||
VPN struct {
|
||||
PublicAddr string `yaml:"public_addr"`
|
||||
Firewall ConfigFirewall `yaml:"firewall"`
|
||||
Tun ConfigTun `yaml:"tun"`
|
||||
} `yaml:"vpn"`
|
||||
Storage struct {
|
||||
Allocations []ConfigStorageAllocation
|
||||
} `yaml:"storage"`
|
||||
}
|
||||
|
||||
func (c *Config) fillDefaults() {
|
||||
|
||||
var firewallGarageInbound []ConfigFirewallRule
|
||||
|
||||
for i := range c.Storage.Allocations {
|
||||
if c.Storage.Allocations[i].RPCPort == 0 {
|
||||
c.Storage.Allocations[i].RPCPort = 3900 + (i * 10)
|
||||
}
|
||||
|
||||
if c.Storage.Allocations[i].S3APIPort == 0 {
|
||||
c.Storage.Allocations[i].S3APIPort = 3901 + (i * 10)
|
||||
}
|
||||
|
||||
if c.Storage.Allocations[i].AdminPort == 0 {
|
||||
c.Storage.Allocations[i].AdminPort = 3902 + (i * 10)
|
||||
}
|
||||
|
||||
alloc := c.Storage.Allocations[i]
|
||||
|
||||
firewallGarageInbound = append(
|
||||
firewallGarageInbound,
|
||||
ConfigFirewallRule{
|
||||
Port: strconv.Itoa(alloc.S3APIPort),
|
||||
Proto: "tcp",
|
||||
Host: "any",
|
||||
},
|
||||
ConfigFirewallRule{
|
||||
Port: strconv.Itoa(alloc.RPCPort),
|
||||
Proto: "tcp",
|
||||
Host: "any",
|
||||
// HTTPSocketPath returns the path to the daemon's HTTP socket which is used for
|
||||
// RPC and other functionality.
|
||||
var HTTPSocketPath = sync.OnceValue(func() string {
|
||||
return envOr(
|
||||
"ISLE_DAEMON_HTTP_SOCKET_PATH",
|
||||
func() string {
|
||||
return filepath.Join(
|
||||
getDefaultHTTPSocketDirPath(), "isle-daemon.sock",
|
||||
)
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Jigs
|
||||
|
||||
func envOr(name string, fallback func() string) string {
|
||||
if v := os.Getenv(name); v != "" {
|
||||
return v
|
||||
}
|
||||
return fallback()
|
||||
}
|
||||
|
||||
c.VPN.Firewall.Inbound = append(
|
||||
c.VPN.Firewall.Inbound,
|
||||
firewallGarageInbound...,
|
||||
func firstExistingDir(paths ...string) (string, error) {
|
||||
var errs []error
|
||||
for _, path := range paths {
|
||||
stat, err := os.Stat(path)
|
||||
switch {
|
||||
case errors.Is(err, fs.ErrExist):
|
||||
continue
|
||||
case err != nil:
|
||||
errs = append(
|
||||
errs, fmt.Errorf("checking if path %q exists: %w", path, err),
|
||||
)
|
||||
}
|
||||
|
||||
// CopyDefaultConfig copies the daemon config file embedded in the AppDir into
|
||||
// the given io.Writer.
|
||||
func CopyDefaultConfig(into io.Writer, appDirPath string) error {
|
||||
|
||||
defaultConfigPath := defaultConfigPath(appDirPath)
|
||||
|
||||
f, err := os.Open(defaultConfigPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening daemon config at %q: %w", defaultConfigPath, err)
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
|
||||
if _, err := io.Copy(into, f); err != nil {
|
||||
return fmt.Errorf("copying daemon config from %q: %w", defaultConfigPath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadConfig loads the daemon config from userConfigPath, merges it with
|
||||
// the default found in the appDirPath, and returns the result.
|
||||
//
|
||||
// If userConfigPath is not given then the default is loaded and returned.
|
||||
func LoadConfig(
|
||||
appDirPath, userConfigPath string,
|
||||
) (
|
||||
Config, error,
|
||||
) {
|
||||
|
||||
defaultConfigPath := defaultConfigPath(appDirPath)
|
||||
|
||||
var fullDaemon map[string]interface{}
|
||||
|
||||
if err := yamlutil.LoadYamlFile(&fullDaemon, defaultConfigPath); err != nil {
|
||||
return Config{}, fmt.Errorf("parsing default daemon config file: %w", err)
|
||||
}
|
||||
|
||||
if userConfigPath != "" {
|
||||
|
||||
var daemonConfig map[string]interface{}
|
||||
if err := yamlutil.LoadYamlFile(&daemonConfig, userConfigPath); err != nil {
|
||||
return Config{}, fmt.Errorf("parsing %q: %w", userConfigPath, err)
|
||||
}
|
||||
|
||||
err := mergo.Merge(&fullDaemon, daemonConfig, mergo.WithOverride)
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("merging contents of file %q: %w", userConfigPath, err)
|
||||
case !stat.IsDir():
|
||||
errs = append(
|
||||
errs, fmt.Errorf("path %q exists but is not a directory", path),
|
||||
)
|
||||
default:
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
|
||||
fullDaemonB, err := yaml.Marshal(fullDaemon)
|
||||
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("yaml marshaling: %w", err)
|
||||
err := fmt.Errorf(
|
||||
"no directory found at any of the following paths: %s",
|
||||
strings.Join(paths, ", "),
|
||||
)
|
||||
if len(errs) > 0 {
|
||||
err = errors.Join(slices.Insert(errs, 0, err)...)
|
||||
}
|
||||
|
||||
var config Config
|
||||
if err := yaml.Unmarshal(fullDaemonB, &config); err != nil {
|
||||
return Config{}, fmt.Errorf("yaml unmarshaling back into Config struct: %w", err)
|
||||
}
|
||||
|
||||
config.fillDefaults()
|
||||
|
||||
return config, nil
|
||||
return "", err
|
||||
}
|
||||
|
go/daemon/daecommon/config.go (new file, 201 lines)
@ -0,0 +1,201 @@
|
||||
package daecommon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"isle/bootstrap"
|
||||
"isle/yamlutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/imdario/mergo"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func defaultConfigPath(appDirPath string) string {
|
||||
return filepath.Join(appDirPath, "etc", "daemon.yml")
|
||||
}
|
||||
|
||||
type ConfigTun struct {
|
||||
Device string `yaml:"device"`
|
||||
}
|
||||
|
||||
type ConfigFirewall struct {
|
||||
Conntrack ConfigConntrack `yaml:"conntrack"`
|
||||
Outbound []ConfigFirewallRule `yaml:"outbound"`
|
||||
Inbound []ConfigFirewallRule `yaml:"inbound"`
|
||||
}
|
||||
|
||||
type ConfigConntrack struct {
|
||||
TCPTimeout string `yaml:"tcp_timeout"`
|
||||
UDPTimeout string `yaml:"udp_timeout"`
|
||||
DefaultTimeout string `yaml:"default_timeout"`
|
||||
MaxConnections int `yaml:"max_connections"`
|
||||
}
|
||||
|
||||
type ConfigFirewallRule struct {
|
||||
Port string `yaml:"port,omitempty"`
|
||||
Code string `yaml:"code,omitempty"`
|
||||
Proto string `yaml:"proto,omitempty"`
|
||||
Host string `yaml:"host,omitempty"`
|
||||
Group string `yaml:"group,omitempty"`
|
||||
Groups []string `yaml:"groups,omitempty"`
|
||||
CIDR string `yaml:"cidr,omitempty"`
|
||||
CASha string `yaml:"ca_sha,omitempty"`
|
||||
CAName string `yaml:"ca_name,omitempty"`
|
||||
}
|
||||
|
||||
// ConfigStorageAllocation describes the structure of each storage allocation
|
||||
// within the daemon config file.
|
||||
type ConfigStorageAllocation struct {
|
||||
DataPath string `yaml:"data_path"`
|
||||
MetaPath string `yaml:"meta_path"`
|
||||
Capacity int `yaml:"capacity"`
|
||||
S3APIPort int `yaml:"s3_api_port"`
|
||||
RPCPort int `yaml:"rpc_port"`
|
||||
AdminPort int `yaml:"admin_port"`
|
||||
|
||||
// Zone is a secret option which makes it easier to test garage bugs, but
|
||||
// which we don't want users to otherwise know about.
|
||||
Zone string `yaml:"zone"`
|
||||
}
|
||||
|
||||
// Config describes the structure of the daemon config file.
|
||||
type Config struct {
|
||||
DNS struct {
|
||||
Resolvers []string `yaml:"resolvers"`
|
||||
} `yaml:"dns"`
|
||||
VPN struct {
|
||||
PublicAddr string `yaml:"public_addr"`
|
||||
Firewall ConfigFirewall `yaml:"firewall"`
|
||||
Tun ConfigTun `yaml:"tun"`
|
||||
} `yaml:"vpn"`
|
||||
Storage struct {
|
||||
Allocations []ConfigStorageAllocation
|
||||
} `yaml:"storage"`
|
||||
}
|
||||
|
||||
func (c *Config) fillDefaults() {
|
||||
|
||||
var firewallGarageInbound []ConfigFirewallRule
|
||||
|
||||
for i := range c.Storage.Allocations {
|
||||
if c.Storage.Allocations[i].RPCPort == 0 {
|
||||
c.Storage.Allocations[i].RPCPort = 3900 + (i * 10)
|
||||
}
|
||||
|
||||
if c.Storage.Allocations[i].S3APIPort == 0 {
|
||||
c.Storage.Allocations[i].S3APIPort = 3901 + (i * 10)
|
||||
}
|
||||
|
||||
if c.Storage.Allocations[i].AdminPort == 0 {
|
||||
c.Storage.Allocations[i].AdminPort = 3902 + (i * 10)
|
||||
}
|
||||
|
||||
alloc := c.Storage.Allocations[i]
|
||||
|
||||
firewallGarageInbound = append(
|
||||
firewallGarageInbound,
|
||||
ConfigFirewallRule{
|
||||
Port: strconv.Itoa(alloc.S3APIPort),
|
||||
Proto: "tcp",
|
||||
Host: "any",
|
||||
},
|
||||
ConfigFirewallRule{
|
||||
Port: strconv.Itoa(alloc.RPCPort),
|
||||
Proto: "tcp",
|
||||
Host: "any",
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
c.VPN.Firewall.Inbound = append(
|
||||
c.VPN.Firewall.Inbound,
|
||||
firewallGarageInbound...,
|
||||
)
|
||||
}
|
||||
|
||||
// CopyDefaultConfig copies the daemon config file embedded in the AppDir into
|
||||
// the given io.Writer.
|
||||
func CopyDefaultConfig(into io.Writer, appDirPath string) error {
|
||||
|
||||
defaultConfigPath := defaultConfigPath(appDirPath)
|
||||
|
||||
f, err := os.Open(defaultConfigPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening daemon config at %q: %w", defaultConfigPath, err)
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
|
||||
if _, err := io.Copy(into, f); err != nil {
|
||||
return fmt.Errorf("copying daemon config from %q: %w", defaultConfigPath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadConfig loads the daemon config from userConfigPath, merges it with
|
||||
// the default found in the appDirPath, and returns the result.
|
||||
//
|
||||
// If userConfigPath is not given then the default is loaded and returned.
|
||||
func LoadConfig(
|
||||
appDirPath, userConfigPath string,
|
||||
) (
|
||||
Config, error,
|
||||
) {
|
||||
|
||||
defaultConfigPath := defaultConfigPath(appDirPath)
|
||||
|
||||
var fullDaemon map[string]interface{}
|
||||
|
||||
if err := yamlutil.LoadYamlFile(&fullDaemon, defaultConfigPath); err != nil {
|
||||
return Config{}, fmt.Errorf("parsing default daemon config file: %w", err)
|
||||
}
|
||||
|
||||
if userConfigPath != "" {
|
||||
|
||||
var daemonConfig map[string]interface{}
|
||||
if err := yamlutil.LoadYamlFile(&daemonConfig, userConfigPath); err != nil {
|
||||
return Config{}, fmt.Errorf("parsing %q: %w", userConfigPath, err)
|
||||
}
|
||||
|
||||
err := mergo.Merge(&fullDaemon, daemonConfig, mergo.WithOverride)
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("merging contents of file %q: %w", userConfigPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
fullDaemonB, err := yaml.Marshal(fullDaemon)
|
||||
|
||||
if err != nil {
|
||||
return Config{}, fmt.Errorf("yaml marshaling: %w", err)
|
||||
}
|
||||
|
||||
var config Config
|
||||
if err := yaml.Unmarshal(fullDaemonB, &config); err != nil {
|
||||
return Config{}, fmt.Errorf("yaml unmarshaling back into Config struct: %w", err)
|
||||
}
|
||||
|
||||
config.fillDefaults()
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// BootstrapGarageHostForAlloc returns the bootstrap.GarageHostInstance which
|
||||
// corresponds with the given alloc from the daemon config. This will panic if
|
||||
// no associated instance can be found.
|
||||
func BootstrapGarageHostForAlloc(
|
||||
host bootstrap.Host,
|
||||
alloc ConfigStorageAllocation,
|
||||
) bootstrap.GarageHostInstance {
|
||||
|
||||
for _, inst := range host.Garage.Instances {
|
||||
if inst.RPCPort == alloc.RPCPort {
|
||||
return inst
|
||||
}
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("could not find alloc %+v in the bootstrap data", alloc))
|
||||
}
|
go/daemon/daecommon/daecommon.go (new file, 3 lines)
@@ -0,0 +1,3 @@
// Package daecommon holds types and functionality which are required the daemon
// package and other of its subpackages.
package daecommon
go/daemon/daecommon/env.go (new file, 58 lines)
@ -0,0 +1,58 @@
|
||||
package daecommon
|
||||
|
||||
import (
|
||||
"isle/toolkit"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/adrg/xdg"
|
||||
)
|
||||
|
||||
// EnvVars are variables which are derived based on the environment which the
|
||||
// process is running in.
|
||||
type EnvVars struct {
|
||||
StateDir toolkit.Dir
|
||||
RuntimeDir toolkit.Dir
|
||||
}
|
||||
|
||||
// GetEnvVars will return the EnvVars of the current processes, as determined by
|
||||
// the process's environment.
|
||||
var GetEnvVars = sync.OnceValue(func() EnvVars {
|
||||
// RUNTIME_DIRECTORY/STATE_DIRECTORY are used by the systemd service in
|
||||
// conjunction with the RuntimeDirectory/StateDirectory directives.
|
||||
|
||||
var (
|
||||
res EnvVars
|
||||
|
||||
stateDirPath = envOr(
|
||||
"STATE_DIRECTORY",
|
||||
func() string { return filepath.Join(xdg.StateHome, "isle") },
|
||||
)
|
||||
|
||||
runtimeDirPath = envOr(
|
||||
"RUNTIME_DIRECTORY",
|
||||
func() string { return filepath.Join(xdg.RuntimeDir, "isle") },
|
||||
)
|
||||
)
|
||||
|
||||
h := new(toolkit.MkDirHelper)
|
||||
res.StateDir, _ = h.Maybe(toolkit.MkDir(stateDirPath, true))
|
||||
res.RuntimeDir, _ = h.Maybe(toolkit.MkDir(runtimeDirPath, true))
|
||||
|
||||
if err := h.Err(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return res
|
||||
})
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Jigs
|
||||
|
||||
func envOr(name string, fallback func() string) string {
|
||||
if v := os.Getenv(name); v != "" {
|
||||
return v
|
||||
}
|
||||
return fallback()
|
||||
}
|
go/daemon/daecommon/errors.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package daecommon

// Registrationg of error code ranges for different namespaces (ie packages)
// within daemon. Each range covers 1000 available error codes.
const (
    ErrorCodeRangeDaemon  = 0    // daemon package
    ErrorCodeRangeNetwork = 1000 // daemon/network package
)
go/daemon/daecommon/secrets.go (new file, 51 lines)
@ -0,0 +1,51 @@
|
||||
package daecommon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"isle/garage"
|
||||
"isle/nebula"
|
||||
"isle/secrets"
|
||||
)
|
||||
|
||||
const (
|
||||
secretsNSNebula = "nebula"
|
||||
secretsNSGarage = "garage"
|
||||
)
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Nebula-related secrets
|
||||
|
||||
// IDs and Get/Set functions for nebula-related secrets.
|
||||
var (
|
||||
NebulaCASigningPrivateKeySecretID = secrets.NewID(secretsNSNebula, "ca-signing-private-key")
|
||||
|
||||
GetNebulaCASigningPrivateKey, SetNebulaCASigningPrivateKey = secrets.GetSetFunctions[nebula.SigningPrivateKey](
|
||||
NebulaCASigningPrivateKeySecretID,
|
||||
)
|
||||
)
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Garage-related secrets
|
||||
|
||||
func garageS3APIBucketCredentialsSecretID(credsName string) secrets.ID {
|
||||
return secrets.NewID(
|
||||
secretsNSGarage, fmt.Sprintf("s3-api-bucket-credentials-%s", credsName),
|
||||
)
|
||||
}
|
||||
|
||||
// IDs and Get/Set functions for garage-related secrets.
|
||||
var (
|
||||
GarageRPCSecretSecretID = secrets.NewID(secretsNSGarage, "rpc-secret")
|
||||
GarageS3APIGlobalBucketCredentialsSecretID = garageS3APIBucketCredentialsSecretID(
|
||||
garage.GlobalBucketS3APICredentialsName,
|
||||
)
|
||||
|
||||
GetGarageRPCSecret, SetGarageRPCSecret = secrets.GetSetFunctions[string](
|
||||
GarageRPCSecretSecretID,
|
||||
)
|
||||
|
||||
GetGarageS3APIGlobalBucketCredentials,
|
||||
SetGarageS3APIGlobalBucketCredentials = secrets.GetSetFunctions[garage.S3APICredentials](
|
||||
GarageS3APIGlobalBucketCredentialsSecretID,
|
||||
)
|
||||
)
|
File diff suppressed because it is too large.

go/daemon/env.go (deleted, 130 lines)
@ -1,130 +0,0 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/adrg/xdg"
|
||||
)
|
||||
|
||||
// EnvVars are variables which are derived based on the environment which the
|
||||
// process is running in.
|
||||
type EnvVars struct {
|
||||
RuntimeDirPath string
|
||||
StateDirPath string
|
||||
}
|
||||
|
||||
func (e EnvVars) init() error {
|
||||
var errs []error
|
||||
if err := mkDir(e.RuntimeDirPath); err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"creating runtime directory %q: %w",
|
||||
e.RuntimeDirPath,
|
||||
err,
|
||||
))
|
||||
}
|
||||
|
||||
if err := mkDir(e.StateDirPath); err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"creating state directory %q: %w",
|
||||
e.StateDirPath,
|
||||
err,
|
||||
))
|
||||
}
|
||||
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
func getDefaultHTTPSocketDirPath() string {
|
||||
path, err := firstExistingDir(
|
||||
"/tmp",
|
||||
|
||||
// TODO it's possible the daemon process can't actually write to these
|
||||
"/run",
|
||||
"/var/run",
|
||||
"/dev/shm",
|
||||
)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to find directory for HTTP socket: %v", err))
|
||||
}
|
||||
|
||||
return path
|
||||
}
|
||||
|
||||
// HTTPSocketPath returns the path to the daemon's HTTP socket which is used for
|
||||
// RPC and other functionality.
|
||||
var HTTPSocketPath = sync.OnceValue(func() string {
|
||||
return envOr(
|
||||
"ISLE_DAEMON_HTTP_SOCKET_PATH",
|
||||
func() string {
|
||||
return filepath.Join(
|
||||
getDefaultHTTPSocketDirPath(), "isle-daemon.sock",
|
||||
)
|
||||
},
|
||||
)
|
||||
})
|
||||
|
||||
// GetEnvVars will return the EnvVars of the current processes, as determined by
|
||||
// the process's environment.
|
||||
var GetEnvVars = sync.OnceValue(func() (v EnvVars) {
|
||||
// RUNTIME_DIRECTORY/STATE_DIRECTORY are used by the systemd service in
|
||||
// conjunction with the RuntimeDirectory/StateDirectory directives.
|
||||
|
||||
v.RuntimeDirPath = envOr(
|
||||
"RUNTIME_DIRECTORY",
|
||||
func() string { return filepath.Join(xdg.RuntimeDir, "isle") },
|
||||
)
|
||||
|
||||
v.StateDirPath = envOr(
|
||||
"STATE_DIRECTORY",
|
||||
func() string { return filepath.Join(xdg.StateHome, "isle") },
|
||||
)
|
||||
|
||||
return
|
||||
})
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Jigs
|
||||
|
||||
func envOr(name string, fallback func() string) string {
|
||||
if v := os.Getenv(name); v != "" {
|
||||
return v
|
||||
}
|
||||
return fallback()
|
||||
}
|
||||
|
||||
func firstExistingDir(paths ...string) (string, error) {
|
||||
var errs []error
|
||||
for _, path := range paths {
|
||||
stat, err := os.Stat(path)
|
||||
switch {
|
||||
case errors.Is(err, fs.ErrExist):
|
||||
continue
|
||||
case err != nil:
|
||||
errs = append(
|
||||
errs, fmt.Errorf("checking if path %q exists: %w", path, err),
|
||||
)
|
||||
case !stat.IsDir():
|
||||
errs = append(
|
||||
errs, fmt.Errorf("path %q exists but is not a directory", path),
|
||||
)
|
||||
default:
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
|
||||
err := fmt.Errorf(
|
||||
"no directory found at any of the following paths: %s",
|
||||
strings.Join(paths, ", "),
|
||||
)
|
||||
if len(errs) > 0 {
|
||||
err = errors.Join(slices.Insert(errs, 0, err)...)
|
||||
}
|
||||
return "", err
|
||||
}
|
@@ -1,27 +1,21 @@
package daemon

import "isle/daemon/jsonrpc2"
import (
    "isle/daemon/daecommon"
    "isle/daemon/jsonrpc2"
)

const (
    errCodeNoNetwork = daecommon.ErrorCodeRangeDaemon + iota
    errCodeAlreadyJoined
)

var (
    // ErrNoNetwork is returned when the daemon has never been configured with a
    // network.
    ErrNoNetwork = jsonrpc2.NewError(1, "No network configured")

    // ErrInitializing is returned when a network is unavailable due to still
    // being initialized.
    ErrInitializing = jsonrpc2.NewError(2, "Network is being initialized")
    ErrNoNetwork = jsonrpc2.NewError(errCodeNoNetwork, "No network configured")

    // ErrAlreadyJoined is returned when the daemon is instructed to create or
    // join a new network, but it is already joined to a network.
    ErrAlreadyJoined = jsonrpc2.NewError(4, "Already joined to a network")

    // ErrInvalidConfig is returned when the daemon's configuration is invalid
    // for an operation being attempted.
    //
    // The Data field will be a string containing further details.
    ErrInvalidConfig = jsonrpc2.NewError(5, "Invalid daemon config")

    // ErrHostNotFound is returned when performing an operation which expected a
    // host to exist in the network, but that host wasn't found.
    ErrHostNotFound = jsonrpc2.NewError(6, "Host not found")
    ErrAlreadyJoined = jsonrpc2.NewError(errCodeAlreadyJoined, "Already joined to a network")
)
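Note: with the error codes now anchored to daecommon's per-package ranges, other sub-packages can claim codes without colliding with the daemon package. A hedged sketch of that pattern (the constant and error names below are illustrative; the real daemon/network error values are not shown in this compare):

```go
// Sketch only: mirroring the iota-over-a-range pattern above, but for the
// daemon/network package's 1000-1999 range.
const (
	errCodeSomethingInvalid = daecommon.ErrorCodeRangeNetwork + iota // 1000
	errCodeSomethingMissing                                          // 1001
)

var (
	ErrSomethingInvalid = jsonrpc2.NewError(errCodeSomethingInvalid, "Something is invalid")
	ErrSomethingMissing = jsonrpc2.NewError(errCodeSomethingMissing, "Something is missing")
)
```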
@ -1,52 +0,0 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"isle/bootstrap"
|
||||
"isle/garage"
|
||||
"isle/secrets"
|
||||
)
|
||||
|
||||
// GarageClientParams contains all the data needed to instantiate garage
|
||||
// clients.
|
||||
type GarageClientParams struct {
|
||||
Peer garage.RemotePeer
|
||||
GlobalBucketS3APICredentials garage.S3APICredentials
|
||||
|
||||
// RPCSecret may be empty, if the secret is not available on the host.
|
||||
RPCSecret string
|
||||
}
|
||||
|
||||
func (d *daemon) getGarageClientParams(
|
||||
ctx context.Context, currBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
GarageClientParams, error,
|
||||
) {
|
||||
creds, err := getGarageS3APIGlobalBucketCredentials(ctx, d.secretsStore)
|
||||
if err != nil {
|
||||
return GarageClientParams{}, fmt.Errorf("getting garage global bucket creds: %w", err)
|
||||
}
|
||||
|
||||
rpcSecret, err := getGarageRPCSecret(ctx, d.secretsStore)
|
||||
if err != nil && !errors.Is(err, secrets.ErrNotFound) {
|
||||
return GarageClientParams{}, fmt.Errorf("getting garage rpc secret: %w", err)
|
||||
}
|
||||
|
||||
return GarageClientParams{
|
||||
Peer: currBootstrap.ChooseGaragePeer(),
|
||||
GlobalBucketS3APICredentials: creds,
|
||||
RPCSecret: rpcSecret,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GlobalBucketS3APIClient returns an S3 client pre-configured with access to
|
||||
// the global bucket.
|
||||
func (p GarageClientParams) GlobalBucketS3APIClient() garage.S3APIClient {
|
||||
var (
|
||||
addr = p.Peer.S3APIAddr()
|
||||
creds = p.GlobalBucketS3APICredentials
|
||||
)
|
||||
return garage.NewS3APIClient(addr, creds)
|
||||
}
|
@ -1,76 +0,0 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
|
||||
)
|
||||
|
||||
// until keeps trying fn until it returns nil, returning true. If the context is
|
||||
// canceled then until returns false.
|
||||
func until(
|
||||
ctx context.Context,
|
||||
logger *mlog.Logger,
|
||||
descr string,
|
||||
fn func(context.Context) error,
|
||||
) bool {
|
||||
for {
|
||||
logger.Info(ctx, descr)
|
||||
err := fn(ctx)
|
||||
if err == nil {
|
||||
return true
|
||||
} else if ctxErr := ctx.Err(); ctxErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Warn(ctx, descr+" failed, retrying in one second", err)
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func randStr(l int) string {
|
||||
b := make([]byte, l)
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return hex.EncodeToString(b)
|
||||
}
|
||||
|
||||
// mkDir is like os.Mkdir but it returns better error messages. If the directory
|
||||
// already exists then nil is returned.
|
||||
func mkDir(path string) error {
|
||||
{
|
||||
parentPath := filepath.Dir(path)
|
||||
parentInfo, err := os.Stat(parentPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking fs node of parent %q: %w", parentPath, err)
|
||||
} else if !parentInfo.IsDir() {
|
||||
return fmt.Errorf("%q is not a directory", parentPath)
|
||||
}
|
||||
}
|
||||
|
||||
info, err := os.Stat(path)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// fine
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("checking fs node: %w", err)
|
||||
} else if !info.IsDir() {
|
||||
return fmt.Errorf("exists but is not a directory")
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := os.Mkdir(path, 0700); err != nil {
|
||||
return fmt.Errorf("creating directory: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@@ -17,11 +17,21 @@ func {{.Interface.Name}}FromClient(client jsonrpc2.Client) {{.Interface.Name}} {
{{range $method := .Interface.Methods}}
func (c *{{$t}}) {{$method.Declaration}} {
    {{- $ctx := (index $method.Params 0).Name}}
    {{- $rcv := (index $method.Results 0).Name}}
    {{- $err := (index $method.Results 1).Name}}

    {{- $rcv := ""}}
    {{- $err := ""}}

    {{- if (eq (len $method.Results) 1)}}
        {{- $rcv = "nil" }}
        {{- $err = (index $method.Results 0).Name}}
    {{- else}}
        {{- $rcv = printf "&%s" (index $method.Results 0).Name}}
        {{- $err = (index $method.Results 1).Name}}
    {{- end}}

    {{- $err}} = c.client.Call(
        {{$ctx}},
        &{{$rcv}},
        {{$rcv}},
        "{{$method.Name}}",
        {{- range $param := (slice $method.Params 1)}}
        {{$param.Name}},
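Note: the template now distinguishes methods with a single error result from the usual (ResponseType, error) pair, passing nil as the receiver argument in the former case. Roughly, the generated client method for an error-only declaration comes out like the following (this mirrors the regenerated RemoveHost client method shown earlier in this compare; exact whitespace of generated code may differ):

```go
// Generated-code sketch for: RemoveHost(ctx context.Context, hostName nebula.HostName) error
func (c *rpcClient) RemoveHost(ctx context.Context, hostName nebula.HostName) (err error) {
	err = c.client.Call(
		ctx,
		nil, // no result value to decode into
		"RemoveHost",
		hostName,
	)
	return
}
```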
@ -49,15 +49,25 @@ func newMethodDispatchFunc(
|
||||
|
||||
var (
|
||||
callResV = method.Call(callVals)
|
||||
resV = callResV[0]
|
||||
errV = callResV[1]
|
||||
resV, errV reflect.Value
|
||||
)
|
||||
|
||||
if errV.IsNil() {
|
||||
if len(callResV) == 1 {
|
||||
errV = callResV[0]
|
||||
} else {
|
||||
resV = callResV[0]
|
||||
errV = callResV[1]
|
||||
}
|
||||
|
||||
if !errV.IsNil() {
|
||||
return nil, errV.Interface().(error)
|
||||
}
|
||||
|
||||
if resV.IsValid() {
|
||||
return resV.Interface(), nil
|
||||
}
|
||||
|
||||
return nil, errV.Interface().(error)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -69,7 +79,8 @@ type dispatcher struct {
|
||||
// value to dispatch RPC calls. The passed in value must be a pointer. All
|
||||
// exported methods which look like:
|
||||
//
|
||||
// MethodName(context.Context, ParamType) (ResponseType, error)
|
||||
// MethodName(context.Context, ...ParamType) (ResponseType, error)
|
||||
// MethodName(context.Context, ...ParamType) error
|
||||
//
|
||||
// will be available via RPC calls.
|
||||
func NewDispatchHandler(i any) Handler {
|
||||
@ -96,8 +107,9 @@ func NewDispatchHandler(i any) Handler {
|
||||
if !method.IsExported() ||
|
||||
methodT.NumIn() < 1 ||
|
||||
methodT.In(0) != ctxT ||
|
||||
methodT.NumOut() != 2 ||
|
||||
methodT.Out(1) != errT {
|
||||
(methodT.NumOut() == 1 && methodT.Out(0) != errT) ||
|
||||
(methodT.NumOut() == 2 && methodT.Out(1) != errT) ||
|
||||
methodT.NumOut() > 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
|
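The dispatcher hunk above now accepts handler methods that return either (ResponseType, error) or only error. As a rough illustration of that normalization, here is a minimal standalone sketch using only the standard reflect package; none of these names come from the jsonrpc2 package itself.

package main

import (
	"context"
	"fmt"
	"reflect"
)

type svc struct{}

func (svc) One(ctx context.Context) (int, error) { return 1, nil }
func (svc) Noop(ctx context.Context) error       { return nil }

// callMethod invokes a method by name and normalizes its results into
// (result, error), treating a single return value as the error.
func callMethod(v any, name string, ctx context.Context) (any, error) {
	method := reflect.ValueOf(v).MethodByName(name)
	out := method.Call([]reflect.Value{reflect.ValueOf(ctx)})

	var resV, errV reflect.Value
	if len(out) == 1 {
		errV = out[0]
	} else {
		resV, errV = out[0], out[1]
	}

	if !errV.IsNil() {
		return nil, errV.Interface().(error)
	}
	if resV.IsValid() {
		return resV.Interface(), nil
	}
	return nil, nil
}

func main() {
	res, err := callMethod(svc{}, "One", context.Background())
	fmt.Println(res, err) // 1 <nil>

	res, err = callMethod(svc{}, "Noop", context.Background())
	fmt.Println(res, err) // <nil> <nil>
}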
@ -36,12 +36,16 @@ func (dividerImpl) Divide2(ctx context.Context, top, bottom int) (int, error) {
|
||||
return top / bottom, nil
|
||||
}
|
||||
|
||||
func (i dividerImpl) Noop(ctx context.Context) (int, error) {
|
||||
func (i dividerImpl) Divide(ctx context.Context, p DivideParams) (int, error) {
|
||||
return i.Divide2(ctx, p.Top, p.Bottom)
|
||||
}
|
||||
|
||||
func (i dividerImpl) One(ctx context.Context) (int, error) {
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
func (i dividerImpl) Divide(ctx context.Context, p DivideParams) (int, error) {
|
||||
return i.Divide2(ctx, p.Top, p.Bottom)
|
||||
func (i dividerImpl) Noop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dividerImpl) Hidden(ctx context.Context, p struct{}) (int, error) {
|
||||
@ -49,9 +53,10 @@ func (dividerImpl) Hidden(ctx context.Context, p struct{}) (int, error) {
|
||||
}
|
||||
|
||||
type divider interface {
|
||||
Noop(ctx context.Context) (int, error)
|
||||
Divide2(ctx context.Context, top, bottom int) (int, error)
|
||||
Divide(ctx context.Context, p DivideParams) (int, error)
|
||||
One(ctx context.Context) (int, error)
|
||||
Noop(ctx context.Context) error
|
||||
}
|
||||
|
||||
var testHandler = func() Handler {
|
||||
@ -104,7 +109,7 @@ func testClient(t *testing.T, client Client) {
|
||||
|
||||
t.Run("success/no_params", func(t *testing.T) {
|
||||
var res int
|
||||
err := client.Call(ctx, &res, "Noop")
|
||||
err := client.Call(ctx, &res, "One")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if res != 1 {
|
||||
@ -112,6 +117,13 @@ func testClient(t *testing.T, client Client) {
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("success/no_results", func(t *testing.T) {
|
||||
err := client.Call(ctx, nil, "Noop")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("err/application", func(t *testing.T) {
|
||||
err := client.Call(ctx, nil, "Divide", DivideParams{})
|
||||
if !errors.Is(err, ErrDivideByZero) {
|
||||
|
73
go/daemon/network.go
Normal file
@ -0,0 +1,73 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"isle/bootstrap"
|
||||
"isle/daemon/network"
|
||||
"isle/toolkit"
|
||||
)
|
||||
|
||||
func networkStateDir(
|
||||
networksStateDir toolkit.Dir, networkID string, mayExist bool,
|
||||
) (
|
||||
toolkit.Dir, error,
|
||||
) {
|
||||
return networksStateDir.MkChildDir(networkID, mayExist)
|
||||
}
|
||||
|
||||
func networkRuntimeDir(
|
||||
networksRuntimeDir toolkit.Dir, networkID string, mayExist bool,
|
||||
) (
|
||||
toolkit.Dir, error,
|
||||
) {
|
||||
return networksRuntimeDir.MkChildDir(networkID, mayExist)
|
||||
}
|
||||
|
||||
func networkDirs(
|
||||
networksStateDir, networksRuntimeDir toolkit.Dir,
|
||||
networkID string,
|
||||
mayExist bool,
|
||||
) (
|
||||
stateDir, runtimeDir toolkit.Dir, err error,
|
||||
) {
|
||||
h := new(toolkit.MkDirHelper)
|
||||
stateDir, _ = h.Maybe(
|
||||
networkStateDir(networksStateDir, networkID, mayExist),
|
||||
)
|
||||
runtimeDir, _ = h.Maybe(
|
||||
networkRuntimeDir(networksRuntimeDir, networkID, mayExist),
|
||||
)
|
||||
err = h.Err()
|
||||
return
|
||||
}
|
||||
|
||||
// LoadableNetworks returns the CreationParams for each Network which is able to
|
||||
// be loaded.
|
||||
func LoadableNetworks(
|
||||
networksStateDir toolkit.Dir,
|
||||
) (
|
||||
[]bootstrap.CreationParams, error,
|
||||
) {
|
||||
networkStateDirs, err := networksStateDir.ChildDirs()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"listing children of %q: %w", networksStateDir.Path, err,
|
||||
)
|
||||
}
|
||||
|
||||
creationParams := make([]bootstrap.CreationParams, 0, len(networkStateDirs))
|
||||
|
||||
for _, networkStateDir := range networkStateDirs {
|
||||
thisCreationParams, err := network.LoadCreationParams(networkStateDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"loading creation params from %q: %w",
|
||||
networkStateDir.Path,
|
||||
err,
|
||||
)
|
||||
}
|
||||
creationParams = append(creationParams, thisCreationParams)
|
||||
}
|
||||
|
||||
return creationParams, nil
|
||||
}
|
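A rough caller-side sketch of how a process might enumerate loadable networks at startup using the helpers above. The state directory path is a placeholder and error handling is reduced to panics for brevity.

package main

import (
	"fmt"

	"isle/daemon"
	"isle/toolkit"
)

func main() {
	// Placeholder path; mayExist is true because the networks state directory
	// normally already exists on a previously-initialized host.
	networksStateDir, err := toolkit.MkDir("/tmp/isle/networks", true)
	if err != nil {
		panic(err)
	}

	creationParams, err := daemon.LoadableNetworks(networksStateDir)
	if err != nil {
		panic(err)
	}

	for _, params := range creationParams {
		fmt.Printf("loadable network: %+v\n", params)
	}
}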
@ -1,27 +1,15 @@
|
||||
package daemon
|
||||
package network
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
|
||||
"isle/bootstrap"
|
||||
"isle/daemon/daecommon"
|
||||
"isle/garage/garagesrv"
|
||||
"isle/jsonutil"
|
||||
"isle/secrets"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// JoiningBootstrap wraps a normal Bootstrap to include extra data which a host
|
||||
// might need while joining a network.
|
||||
type JoiningBootstrap struct {
|
||||
Bootstrap bootstrap.Bootstrap
|
||||
Secrets map[secrets.ID]json.RawMessage
|
||||
}
|
||||
|
||||
func writeBootstrapToStateDir(
|
||||
stateDirPath string, hostBootstrap bootstrap.Bootstrap,
|
||||
) error {
|
||||
@ -42,7 +30,7 @@ func writeBootstrapToStateDir(
|
||||
}
|
||||
|
||||
func coalesceDaemonConfigAndBootstrap(
|
||||
daemonConfig Config, hostBootstrap bootstrap.Bootstrap,
|
||||
daemonConfig daecommon.Config, hostBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
bootstrap.Bootstrap, error,
|
||||
) {
|
||||
@ -80,49 +68,3 @@ func coalesceDaemonConfigAndBootstrap(
|
||||
|
||||
return hostBootstrap, nil
|
||||
}
|
||||
|
||||
type bootstrapDiff struct {
|
||||
hostsChanged bool
|
||||
nebulaChanged bool
|
||||
dnsChanged bool
|
||||
}
|
||||
|
||||
func calcBootstrapDiff(
|
||||
daemonConfig Config,
|
||||
prevBootstrap, nextBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
diff bootstrapDiff, err error,
|
||||
) {
|
||||
{
|
||||
prevHash, prevErr := bootstrap.HostsHash(prevBootstrap.Hosts)
|
||||
nextHash, nextErr := bootstrap.HostsHash(nextBootstrap.Hosts)
|
||||
if err = errors.Join(prevErr, nextErr); err != nil {
|
||||
err = fmt.Errorf("calculating host hashes: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
diff.hostsChanged = !bytes.Equal(prevHash, nextHash)
|
||||
}
|
||||
|
||||
{
|
||||
prevNebulaConfig, prevErr := nebulaConfig(daemonConfig, prevBootstrap)
|
||||
nextNebulaConfig, nextErr := nebulaConfig(daemonConfig, nextBootstrap)
|
||||
if err = errors.Join(prevErr, nextErr); err != nil {
|
||||
err = fmt.Errorf("calculating nebula config: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
diff.nebulaChanged = !reflect.DeepEqual(
|
||||
prevNebulaConfig, nextNebulaConfig,
|
||||
)
|
||||
}
|
||||
|
||||
{
|
||||
diff.dnsChanged = !reflect.DeepEqual(
|
||||
dnsmasqConfig(daemonConfig, prevBootstrap),
|
||||
dnsmasqConfig(daemonConfig, nextBootstrap),
|
||||
)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
28
go/daemon/network/errors.go
Normal file
@ -0,0 +1,28 @@
package network

import (
	"isle/daemon/daecommon"
	"isle/daemon/jsonrpc2"
)

const (
	errCodeInitializing = daecommon.ErrorCodeRangeNetwork + iota
	errCodeInvalidConfig
	errCodeHostNotFound
)

var (
	// ErrInitializing is returned when a network is unavailable due to still
	// being initialized.
	ErrInitializing = jsonrpc2.NewError(errCodeInitializing, "Network is being initialized")

	// ErrInvalidConfig is returned when the daemon's configuration is invalid
	// for an operation being attempted.
	//
	// The Data field will be a string containing further details.
	ErrInvalidConfig = jsonrpc2.NewError(errCodeInvalidConfig, "Invalid daemon config")

	// ErrHostNotFound is returned when performing an operation which expected a
	// host to exist in the network, but that host wasn't found.
	ErrHostNotFound = jsonrpc2.NewError(errCodeHostNotFound, "Host not found")
)
|
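For context, this is how the rest of the changeset consumes these sentinels: Create returns ErrInvalidConfig.WithData(...) when fewer than three storage allocations are configured, and callers can match the sentinels with errors.Is, the same pattern the jsonrpc2 tests use. The helpers below are a hypothetical sketch, not code from this changeset, and the errors.Is matching assumes the jsonrpc2 error type supports it as its tests suggest.

package network

import (
	"errors"

	"isle/daemon/daecommon"
)

// checkConfig is a hypothetical helper mirroring the validation done in
// Create later in this changeset: it rejects daemon configs with fewer than
// three storage allocations.
func checkConfig(daemonConfig daecommon.Config) error {
	if len(daemonConfig.Storage.Allocations) < 3 {
		return ErrInvalidConfig.WithData(
			"At least three storage allocations are required.",
		)
	}
	return nil
}

// isMissingHost reports whether an error corresponds to ErrHostNotFound.
func isMissingHost(err error) bool {
	return errors.Is(err, ErrHostNotFound)
}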
@ -1,14 +1,19 @@
|
||||
package daemon
|
||||
package network
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"isle/bootstrap"
|
||||
"isle/daemon/daecommon"
|
||||
"isle/garage"
|
||||
"isle/nebula"
|
||||
"isle/secrets"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"dev.mediocregopher.com/mediocre-go-lib.git/mctx"
|
||||
"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
|
||||
@ -20,10 +25,97 @@ const (
|
||||
garageGlobalBucketBootstrapHostsDirPath = "bootstrap/hosts"
|
||||
)
|
||||
|
||||
func (n *network) getGarageClientParams(
|
||||
ctx context.Context, currBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
GarageClientParams, error,
|
||||
) {
|
||||
creds, err := daecommon.GetGarageS3APIGlobalBucketCredentials(
|
||||
ctx, n.secretsStore,
|
||||
)
|
||||
if err != nil {
|
||||
return GarageClientParams{}, fmt.Errorf("getting garage global bucket creds: %w", err)
|
||||
}
|
||||
|
||||
rpcSecret, err := daecommon.GetGarageRPCSecret(ctx, n.secretsStore)
|
||||
if err != nil && !errors.Is(err, secrets.ErrNotFound) {
|
||||
return GarageClientParams{}, fmt.Errorf("getting garage rpc secret: %w", err)
|
||||
}
|
||||
|
||||
return GarageClientParams{
|
||||
Peer: currBootstrap.ChooseGaragePeer(),
|
||||
GlobalBucketS3APICredentials: creds,
|
||||
RPCSecret: rpcSecret,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func garageAdminClientLogger(logger *mlog.Logger) *mlog.Logger {
|
||||
return logger.WithNamespace("garageAdminClient")
|
||||
}
|
||||
|
||||
// newGarageAdminClient will return an AdminClient for a local garage instance,
|
||||
// or it will _panic_ if there is no local instance configured.
|
||||
func newGarageAdminClient(
|
||||
logger *mlog.Logger,
|
||||
daemonConfig daecommon.Config,
|
||||
adminToken string,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) *garage.AdminClient {
|
||||
|
||||
thisHost := hostBootstrap.ThisHost()
|
||||
|
||||
return garage.NewAdminClient(
|
||||
garageAdminClientLogger(logger),
|
||||
net.JoinHostPort(
|
||||
thisHost.IP().String(),
|
||||
strconv.Itoa(daemonConfig.Storage.Allocations[0].AdminPort),
|
||||
),
|
||||
adminToken,
|
||||
)
|
||||
}
|
||||
|
||||
func garageApplyLayout(
|
||||
ctx context.Context,
|
||||
logger *mlog.Logger,
|
||||
daemonConfig daecommon.Config,
|
||||
adminToken string,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) error {
|
||||
|
||||
var (
|
||||
adminClient = newGarageAdminClient(
|
||||
logger, daemonConfig, adminToken, hostBootstrap,
|
||||
)
|
||||
thisHost = hostBootstrap.ThisHost()
|
||||
hostName = thisHost.Name
|
||||
allocs = daemonConfig.Storage.Allocations
|
||||
peers = make([]garage.PeerLayout, len(allocs))
|
||||
)
|
||||
|
||||
for i, alloc := range allocs {
|
||||
|
||||
id := daecommon.BootstrapGarageHostForAlloc(thisHost, alloc).ID
|
||||
|
||||
zone := string(hostName)
|
||||
if alloc.Zone != "" {
|
||||
zone = alloc.Zone
|
||||
}
|
||||
|
||||
peers[i] = garage.PeerLayout{
|
||||
ID: id,
|
||||
Capacity: alloc.Capacity * 1_000_000_000,
|
||||
Zone: zone,
|
||||
Tags: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
return adminClient.ApplyLayout(ctx, peers)
|
||||
}
|
||||
|
||||
func garageInitializeGlobalBucket(
|
||||
ctx context.Context,
|
||||
logger *mlog.Logger,
|
||||
daemonConfig Config,
|
||||
daemonConfig daecommon.Config,
|
||||
adminToken string,
|
||||
hostBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
@ -60,13 +152,73 @@ func garageInitializeGlobalBucket(
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
func (n *network) getGarageBootstrapHosts(
|
||||
ctx context.Context, currBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
map[nebula.HostName]bootstrap.Host, error,
|
||||
) {
|
||||
garageClientParams, err := n.getGarageClientParams(ctx, currBootstrap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting garage client params: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
client = garageClientParams.GlobalBucketS3APIClient()
|
||||
hosts = map[nebula.HostName]bootstrap.Host{}
|
||||
|
||||
objInfoCh = client.ListObjects(
|
||||
ctx, garage.GlobalBucket,
|
||||
minio.ListObjectsOptions{
|
||||
Prefix: garageGlobalBucketBootstrapHostsDirPath,
|
||||
Recursive: true,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
for objInfo := range objInfoCh {
|
||||
|
||||
ctx := mctx.Annotate(ctx, "objectKey", objInfo.Key)
|
||||
|
||||
if objInfo.Err != nil {
|
||||
return nil, fmt.Errorf("listing objects: %w", objInfo.Err)
|
||||
}
|
||||
|
||||
obj, err := client.GetObject(
|
||||
ctx, garage.GlobalBucket, objInfo.Key, minio.GetObjectOptions{},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retrieving object %q: %w", objInfo.Key, err)
|
||||
}
|
||||
|
||||
var authedHost bootstrap.AuthenticatedHost
|
||||
|
||||
err = json.NewDecoder(obj).Decode(&authedHost)
|
||||
obj.Close()
|
||||
|
||||
if err != nil {
|
||||
n.logger.Warn(ctx, "Object contains invalid json", err)
|
||||
continue
|
||||
}
|
||||
|
||||
host, err := authedHost.Unwrap(currBootstrap.CAPublicCredentials)
|
||||
if err != nil {
|
||||
n.logger.Warn(ctx, "Host could not be authenticated", err)
|
||||
}
|
||||
|
||||
hosts[host.Name] = host
|
||||
}
|
||||
|
||||
return hosts, nil
|
||||
}
|
||||
|
||||
// putGarageBoostrapHost places the <hostname>.json.signed file for this host
|
||||
// into garage so that other hosts are able to see relevant configuration for
|
||||
// it.
|
||||
func (d *daemon) putGarageBoostrapHost(
|
||||
func (n *network) putGarageBoostrapHost(
|
||||
ctx context.Context, currBootstrap bootstrap.Bootstrap,
|
||||
) error {
|
||||
garageClientParams, err := d.getGarageClientParams(ctx, currBootstrap)
|
||||
garageClientParams, err := n.getGarageClientParams(ctx, currBootstrap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting garage client params: %w", err)
|
||||
}
|
||||
@ -112,66 +264,6 @@ func (d *daemon) putGarageBoostrapHost(
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *daemon) getGarageBootstrapHosts(
|
||||
ctx context.Context, currBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
map[nebula.HostName]bootstrap.Host, error,
|
||||
) {
|
||||
garageClientParams, err := d.getGarageClientParams(ctx, currBootstrap)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting garage client params: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
client = garageClientParams.GlobalBucketS3APIClient()
|
||||
hosts = map[nebula.HostName]bootstrap.Host{}
|
||||
|
||||
objInfoCh = client.ListObjects(
|
||||
ctx, garage.GlobalBucket,
|
||||
minio.ListObjectsOptions{
|
||||
Prefix: garageGlobalBucketBootstrapHostsDirPath,
|
||||
Recursive: true,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
for objInfo := range objInfoCh {
|
||||
|
||||
ctx := mctx.Annotate(ctx, "objectKey", objInfo.Key)
|
||||
|
||||
if objInfo.Err != nil {
|
||||
return nil, fmt.Errorf("listing objects: %w", objInfo.Err)
|
||||
}
|
||||
|
||||
obj, err := client.GetObject(
|
||||
ctx, garage.GlobalBucket, objInfo.Key, minio.GetObjectOptions{},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retrieving object %q: %w", objInfo.Key, err)
|
||||
}
|
||||
|
||||
var authedHost bootstrap.AuthenticatedHost
|
||||
|
||||
err = json.NewDecoder(obj).Decode(&authedHost)
|
||||
obj.Close()
|
||||
|
||||
if err != nil {
|
||||
d.logger.Warn(ctx, "Object contains invalid json", err)
|
||||
continue
|
||||
}
|
||||
|
||||
host, err := authedHost.Unwrap(currBootstrap.CAPublicCredentials)
|
||||
if err != nil {
|
||||
d.logger.Warn(ctx, "Host could not be authenticated", err)
|
||||
}
|
||||
|
||||
hosts[host.Name] = host
|
||||
}
|
||||
|
||||
return hosts, nil
|
||||
}
|
||||
|
||||
func removeGarageBootstrapHost(
|
||||
ctx context.Context, client garage.S3APIClient, hostName nebula.HostName,
|
||||
) error {
|
14
go/daemon/network/jigs.go
Normal file
@ -0,0 +1,14 @@
package network

import (
	"crypto/rand"
	"encoding/hex"
)

func randStr(l int) string {
	b := make([]byte, l)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}
900
go/daemon/network/network.go
Normal file
@ -0,0 +1,900 @@
|
||||
// Package network implements the Network type, which manages the daemon's
|
||||
// membership in a single network.
|
||||
package network
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"isle/bootstrap"
|
||||
"isle/daemon/children"
|
||||
"isle/daemon/daecommon"
|
||||
"isle/garage"
|
||||
"isle/jsonutil"
|
||||
"isle/nebula"
|
||||
"isle/secrets"
|
||||
"isle/toolkit"
|
||||
"log"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
// GarageClientParams contains all the data needed to instantiate garage
|
||||
// clients.
|
||||
type GarageClientParams struct {
|
||||
Peer garage.RemotePeer
|
||||
GlobalBucketS3APICredentials garage.S3APICredentials
|
||||
|
||||
// RPCSecret may be empty, if the secret is not available on the host.
|
||||
RPCSecret string
|
||||
}
|
||||
|
||||
// GlobalBucketS3APIClient returns an S3 client pre-configured with access to
|
||||
// the global bucket.
|
||||
func (p GarageClientParams) GlobalBucketS3APIClient() garage.S3APIClient {
|
||||
var (
|
||||
addr = p.Peer.S3APIAddr()
|
||||
creds = p.GlobalBucketS3APICredentials
|
||||
)
|
||||
return garage.NewS3APIClient(addr, creds)
|
||||
}
|
||||
|
||||
// CreateHostOpts are optional parameters to the CreateHost method.
|
||||
type CreateHostOpts struct {
|
||||
// IP address of the new host. An IP address will be randomly chosen if one
|
||||
// is not given here.
|
||||
IP netip.Addr
|
||||
|
||||
// CanCreateHosts indicates that the bootstrap produced by CreateHost should
|
||||
// give the new host the ability to create new hosts as well.
|
||||
CanCreateHosts bool
|
||||
|
||||
// TODO add nebula cert tags
|
||||
}
|
||||
|
||||
// JoiningBootstrap wraps a normal Bootstrap to include extra data which a host
|
||||
// might need while joining a network.
|
||||
type JoiningBootstrap struct {
|
||||
Bootstrap bootstrap.Bootstrap
|
||||
Secrets map[secrets.ID]json.RawMessage
|
||||
}
|
||||
|
||||
// RPC defines the methods related to a single network which are available over
|
||||
// the daemon's RPC interface.
|
||||
type RPC interface {
|
||||
// GetHosts returns all hosts known to the network, sorted by their name.
|
||||
GetHosts(context.Context) ([]bootstrap.Host, error)
|
||||
|
||||
// GetGarageClientParams returns a GarageClientParams for the current
|
||||
// network state.
|
||||
GetGarageClientParams(context.Context) (GarageClientParams, error)
|
||||
|
||||
// GetNebulaCAPublicCredentials returns the CAPublicCredentials for the
|
||||
// network.
|
||||
GetNebulaCAPublicCredentials(
|
||||
context.Context,
|
||||
) (
|
||||
nebula.CAPublicCredentials, error,
|
||||
)
|
||||
|
||||
// RemoveHost removes the host of the given name from the network.
|
||||
RemoveHost(ctx context.Context, hostName nebula.HostName) error
|
||||
|
||||
// CreateHost creates a bootstrap for a new host with the given name and IP
|
||||
// address.
|
||||
CreateHost(
|
||||
context.Context, nebula.HostName, CreateHostOpts,
|
||||
) (
|
||||
JoiningBootstrap, error,
|
||||
)
|
||||
|
||||
// CreateNebulaCertificate creates and signs a new nebula certificate for an
|
||||
// existing host, given the public key for that host. This is currently
|
||||
// mostly useful for creating certs for mobile devices.
|
||||
//
|
||||
// TODO replace this with CreateHostBootstrap, and the
|
||||
// CreateNebulaCertificate RPC method can just pull cert out of that.
|
||||
//
|
||||
// Errors:
|
||||
// - ErrHostNotFound
|
||||
CreateNebulaCertificate(
|
||||
context.Context, nebula.HostName, nebula.EncryptingPublicKey,
|
||||
) (
|
||||
nebula.Certificate, error,
|
||||
)
|
||||
}
|
||||
|
||||
// Network manages membership in a single micropelago network. Each Network
|
||||
// is comprised of a unique IP subnet, hosts connected together on that subnet
|
||||
// via a VPN, an S3 storage layer only accessible to those hosts, plus other
|
||||
// services built on this foundation.
|
||||
//
|
||||
// A single daemon (isle server) can manage multiple networks. Each network is
|
||||
// expected to be independent of the others, i.e. they should not share any
|
||||
// resources.
|
||||
type Network interface {
|
||||
RPC
|
||||
|
||||
// GetNetworkCreationParams returns the CreationParams that the Network was
|
||||
// originally created with.
|
||||
GetNetworkCreationParams(context.Context) (bootstrap.CreationParams, error)
|
||||
|
||||
// Shutdown blocks until all resources held or created by the Network,
|
||||
// including child processes it has started, have been cleaned up.
|
||||
//
|
||||
// If this returns an error then it's possible that child processes are
|
||||
// still running and are no longer managed.
|
||||
Shutdown() error
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Network implementation
|
||||
|
||||
// Opts are optional parameters which can be passed in when initializing a new
|
||||
// Network instance. A nil Opts is equivalent to a zero value.
|
||||
type Opts struct {
|
||||
ChildrenOpts *children.Opts
|
||||
}
|
||||
|
||||
func (o *Opts) withDefaults() *Opts {
|
||||
if o == nil {
|
||||
o = new(Opts)
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
type network struct {
|
||||
logger *mlog.Logger
|
||||
daemonConfig daecommon.Config
|
||||
|
||||
envBinDirPath string
|
||||
stateDir toolkit.Dir
|
||||
runtimeDir toolkit.Dir
|
||||
|
||||
opts *Opts
|
||||
|
||||
secretsStore secrets.Store
|
||||
garageAdminToken string
|
||||
|
||||
l sync.RWMutex
|
||||
children *children.Children
|
||||
currBootstrap bootstrap.Bootstrap
|
||||
|
||||
shutdownCh chan struct{}
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// instatiateNetwork returns an instantiated *network instance which has not yet
|
||||
// been initialized.
|
||||
func instatiateNetwork(
|
||||
logger *mlog.Logger,
|
||||
networkID string,
|
||||
daemonConfig daecommon.Config,
|
||||
envBinDirPath string,
|
||||
stateDir toolkit.Dir,
|
||||
runtimeDir toolkit.Dir,
|
||||
opts *Opts,
|
||||
) *network {
|
||||
log.Printf("DEBUG: network stateDir:%+v runtimeDir:%+v", stateDir, runtimeDir)
|
||||
return &network{
|
||||
logger: logger,
|
||||
daemonConfig: daemonConfig,
|
||||
envBinDirPath: envBinDirPath,
|
||||
stateDir: stateDir,
|
||||
runtimeDir: runtimeDir,
|
||||
opts: opts.withDefaults(),
|
||||
garageAdminToken: randStr(32),
|
||||
shutdownCh: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// LoadCreationParams returns the CreationParams of a Network which was
|
||||
// Created/Joined with the given state directory.
|
||||
func LoadCreationParams(
|
||||
stateDir toolkit.Dir,
|
||||
) (
|
||||
bootstrap.CreationParams, error,
|
||||
) {
|
||||
var (
|
||||
// TODO store/load the creation params separately from the rest of
|
||||
// the bootstrap, since the bootstrap contains potentially the
|
||||
// entire host list of a network, which could be pretty bulky.
|
||||
bootstrapFilePath = bootstrap.StateDirPath(stateDir.Path)
|
||||
bs bootstrap.Bootstrap
|
||||
)
|
||||
|
||||
if err := jsonutil.LoadFile(&bs, bootstrapFilePath); err != nil {
|
||||
return bootstrap.CreationParams{}, fmt.Errorf(
|
||||
"loading bootstrap from %q: %w", bootstrapFilePath, err,
|
||||
)
|
||||
}
|
||||
|
||||
return bs.NetworkCreationParams, nil
|
||||
}
|
||||
|
||||
// Load initializes and returns a Network instance for a network which was
|
||||
// previously joined or created, and which has the given ID.
|
||||
func Load(
|
||||
ctx context.Context,
|
||||
logger *mlog.Logger,
|
||||
networkID string,
|
||||
daemonConfig daecommon.Config,
|
||||
envBinDirPath string,
|
||||
stateDir toolkit.Dir,
|
||||
runtimeDir toolkit.Dir,
|
||||
opts *Opts,
|
||||
) (
|
||||
Network, error,
|
||||
) {
|
||||
n := instatiateNetwork(
|
||||
logger,
|
||||
networkID,
|
||||
daemonConfig,
|
||||
envBinDirPath,
|
||||
stateDir,
|
||||
runtimeDir,
|
||||
opts,
|
||||
)
|
||||
|
||||
if err := n.initializeDirs(true); err != nil {
|
||||
return nil, fmt.Errorf("initializing directories: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
currBootstrap bootstrap.Bootstrap
|
||||
bootstrapFilePath = bootstrap.StateDirPath(n.stateDir.Path)
|
||||
)
|
||||
|
||||
if err := jsonutil.LoadFile(&currBootstrap, bootstrapFilePath); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"loading bootstrap from %q: %w", bootstrapFilePath, err,
|
||||
)
|
||||
} else if err := n.initialize(ctx, currBootstrap); err != nil {
|
||||
return nil, fmt.Errorf("initializing with bootstrap: %w", err)
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Join initializes and returns a Network instance for an existing network which
|
||||
// was not previously joined to on this host. Once Join has been called for a
|
||||
// particular network it will error on subsequent calls for that same network;
|
||||
// Load should be used instead.
|
||||
func Join(
|
||||
ctx context.Context,
|
||||
logger *mlog.Logger,
|
||||
daemonConfig daecommon.Config,
|
||||
joiningBootstrap JoiningBootstrap,
|
||||
envBinDirPath string,
|
||||
stateDir toolkit.Dir,
|
||||
runtimeDir toolkit.Dir,
|
||||
opts *Opts,
|
||||
) (
|
||||
Network, error,
|
||||
) {
|
||||
n := instatiateNetwork(
|
||||
logger,
|
||||
joiningBootstrap.Bootstrap.NetworkCreationParams.ID,
|
||||
daemonConfig,
|
||||
envBinDirPath,
|
||||
stateDir,
|
||||
runtimeDir,
|
||||
opts,
|
||||
)
|
||||
|
||||
if err := n.initializeDirs(false); err != nil {
|
||||
return nil, fmt.Errorf("initializing directories: %w", err)
|
||||
}
|
||||
|
||||
if err := secrets.Import(
|
||||
ctx, n.secretsStore, joiningBootstrap.Secrets,
|
||||
); err != nil {
|
||||
return nil, fmt.Errorf("importing secrets: %w", err)
|
||||
}
|
||||
|
||||
if err := n.initialize(ctx, joiningBootstrap.Bootstrap); err != nil {
|
||||
return nil, fmt.Errorf("initializing with bootstrap: %w", err)
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Create initializes and returns a Network for a brand new network which uses
|
||||
// the given creation parameters.
|
||||
//
|
||||
// - name: Human-readable name of the network.
|
||||
// - domain: Primary domain name that network services are served under.
|
||||
// - ipNet: An IP subnet, in CIDR form, which will be the overall range of
|
||||
// possible IPs in the network. The first IP in this network range will
|
||||
// become this first host's IP.
|
||||
// - hostName: The name of this first host in the network.
|
||||
//
|
||||
// Errors:
|
||||
// - ErrInvalidConfig - if daemonConfig doesn't have 3 storage allocations
|
||||
// configured.
|
||||
func Create(
|
||||
ctx context.Context,
|
||||
logger *mlog.Logger,
|
||||
daemonConfig daecommon.Config,
|
||||
envBinDirPath string,
|
||||
stateDir toolkit.Dir,
|
||||
runtimeDir toolkit.Dir,
|
||||
creationParams bootstrap.CreationParams,
|
||||
ipNet nebula.IPNet, // TODO should this go in CreationParams?
|
||||
hostName nebula.HostName,
|
||||
opts *Opts,
|
||||
) (
|
||||
Network, error,
|
||||
) {
|
||||
if len(daemonConfig.Storage.Allocations) < 3 {
|
||||
return nil, ErrInvalidConfig.WithData(
|
||||
"At least three storage allocations are required.",
|
||||
)
|
||||
}
|
||||
|
||||
nebulaCACreds, err := nebula.NewCACredentials(creationParams.Domain, ipNet)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating nebula CA cert: %w", err)
|
||||
}
|
||||
|
||||
garageRPCSecret := randStr(32)
|
||||
|
||||
n := instatiateNetwork(
|
||||
logger,
|
||||
creationParams.ID,
|
||||
daemonConfig,
|
||||
envBinDirPath,
|
||||
stateDir,
|
||||
runtimeDir,
|
||||
opts,
|
||||
)
|
||||
|
||||
if err := n.initializeDirs(false); err != nil {
|
||||
return nil, fmt.Errorf("initializing directories: %w", err)
|
||||
}
|
||||
|
||||
err = daecommon.SetGarageRPCSecret(ctx, n.secretsStore, garageRPCSecret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting garage RPC secret: %w", err)
|
||||
}
|
||||
|
||||
err = daecommon.SetNebulaCASigningPrivateKey(
|
||||
ctx, n.secretsStore, nebulaCACreds.SigningPrivateKey,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("setting nebula CA signing key secret: %w", err)
|
||||
}
|
||||
|
||||
hostBootstrap, err := bootstrap.New(
|
||||
nebulaCACreds,
|
||||
creationParams,
|
||||
map[nebula.HostName]bootstrap.Host{},
|
||||
hostName,
|
||||
ipNet.FirstAddr(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("initializing bootstrap data: %w", err)
|
||||
}
|
||||
|
||||
if err := n.initialize(ctx, hostBootstrap); err != nil {
|
||||
return nil, fmt.Errorf("initializing with bootstrap: %w", err)
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (n *network) initializeDirs(mayExist bool) error {
|
||||
secretsDir, err := n.stateDir.MkChildDir("secrets", mayExist)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating secrets dir: %w", err)
|
||||
}
|
||||
|
||||
n.secretsStore = secrets.NewFSStore(secretsDir.Path)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) initialize(
|
||||
ctx context.Context, currBootstrap bootstrap.Bootstrap,
|
||||
) error {
|
||||
// we update this Host's data using whatever configuration has been provided
|
||||
// by the daemon config. This way the network has the most up-to-date
|
||||
// possible bootstrap. This updated bootstrap will later get updated in
|
||||
// garage as a background task, so other hosts will see it as well.
|
||||
currBootstrap, err := coalesceDaemonConfigAndBootstrap(
|
||||
n.daemonConfig, currBootstrap,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("combining configuration into bootstrap: %w", err)
|
||||
}
|
||||
|
||||
err = writeBootstrapToStateDir(n.stateDir.Path, currBootstrap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing bootstrap to state dir: %w", err)
|
||||
}
|
||||
|
||||
n.currBootstrap = currBootstrap
|
||||
|
||||
n.logger.Info(ctx, "Creating child processes")
|
||||
n.children, err = children.New(
|
||||
ctx,
|
||||
n.logger.WithNamespace("children"),
|
||||
n.envBinDirPath,
|
||||
n.secretsStore,
|
||||
n.daemonConfig,
|
||||
n.runtimeDir,
|
||||
n.garageAdminToken,
|
||||
currBootstrap,
|
||||
n.opts.ChildrenOpts,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating child processes: %w", err)
|
||||
}
|
||||
|
||||
n.logger.Info(ctx, "Child processes created")
|
||||
|
||||
if err := n.postInit(ctx); err != nil {
|
||||
n.logger.Error(ctx, "Post-initialization failed, stopping child processes", err)
|
||||
n.children.Shutdown()
|
||||
return fmt.Errorf("performing post-initialization: %w", err)
|
||||
}
|
||||
|
||||
// TODO annotate this context with creation params
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
n.wg.Add(1)
|
||||
go func() {
|
||||
defer n.wg.Done()
|
||||
<-n.shutdownCh
|
||||
cancel()
|
||||
}()
|
||||
|
||||
n.wg.Add(1)
|
||||
go func() {
|
||||
defer n.wg.Done()
|
||||
n.reloadLoop(ctx)
|
||||
n.logger.Debug(ctx, "Daemon restart loop stopped")
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) postInit(ctx context.Context) error {
|
||||
if len(n.daemonConfig.Storage.Allocations) > 0 {
|
||||
n.logger.Info(ctx, "Applying garage layout")
|
||||
if err := garageApplyLayout(
|
||||
ctx, n.logger, n.daemonConfig, n.garageAdminToken, n.currBootstrap,
|
||||
); err != nil {
|
||||
return fmt.Errorf("applying garage layout: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// This is only necessary during network creation, otherwise the bootstrap
|
||||
// should already have these credentials built in.
|
||||
//
|
||||
// TODO this is pretty hacky, but there doesn't seem to be a better way to
|
||||
// manage it at the moment.
|
||||
_, err := daecommon.GetGarageS3APIGlobalBucketCredentials(
|
||||
ctx, n.secretsStore,
|
||||
)
|
||||
if errors.Is(err, secrets.ErrNotFound) {
|
||||
n.logger.Info(ctx, "Initializing garage shared global bucket")
|
||||
garageGlobalBucketCreds, err := garageInitializeGlobalBucket(
|
||||
ctx,
|
||||
n.logger,
|
||||
n.daemonConfig,
|
||||
n.garageAdminToken,
|
||||
n.currBootstrap,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("initializing global bucket: %w", err)
|
||||
}
|
||||
|
||||
err = daecommon.SetGarageS3APIGlobalBucketCredentials(
|
||||
ctx, n.secretsStore, garageGlobalBucketCreds,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("storing global bucket creds: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
n.logger.Info(ctx, "Updating host info in garage")
|
||||
err = n.putGarageBoostrapHost(ctx, n.currBootstrap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("updating host info in garage: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *network) reloadLoop(ctx context.Context) {
|
||||
ticker := time.NewTicker(3 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
|
||||
case <-ticker.C:
|
||||
n.l.RLock()
|
||||
currBootstrap := n.currBootstrap
|
||||
n.l.RUnlock()
|
||||
|
||||
n.logger.Info(ctx, "Checking for bootstrap changes")
|
||||
newHosts, err := n.getGarageBootstrapHosts(ctx, currBootstrap)
|
||||
if err != nil {
|
||||
n.logger.Error(ctx, "Failed to get hosts from garage", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO there's some potential race conditions here, where
|
||||
// CreateHost could be called at this point, write the new host to
|
||||
// garage and the bootstrap, but then this reload call removes the
|
||||
// host from this bootstrap/children until the next reload.
|
||||
|
||||
if err := n.reload(ctx, currBootstrap, newHosts); err != nil {
|
||||
n.logger.Error(ctx, "Reloading with new host data failed", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// reload will check the existing hosts data from currBootstrap against a
|
||||
// potentially updated set of hosts data, and if there are any differences will
|
||||
// perform whatever changes are necessary.
|
||||
func (n *network) reload(
|
||||
ctx context.Context,
|
||||
currBootstrap bootstrap.Bootstrap,
|
||||
newHosts map[nebula.HostName]bootstrap.Host,
|
||||
) error {
|
||||
var (
|
||||
newBootstrap = currBootstrap
|
||||
thisHost = currBootstrap.ThisHost()
|
||||
)
|
||||
|
||||
newBootstrap.Hosts = newHosts
|
||||
|
||||
// the daemon's view of this host's bootstrap info takes precedence over
|
||||
// whatever is in garage
|
||||
newBootstrap.Hosts[thisHost.Name] = thisHost
|
||||
|
||||
diff, err := children.CalculateReloadDiff(
|
||||
n.daemonConfig, currBootstrap, newBootstrap,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("calculating diff between bootstraps: %w", err)
|
||||
} else if diff == (children.ReloadDiff{}) {
|
||||
n.logger.Info(ctx, "No changes to bootstrap detected")
|
||||
return nil
|
||||
}
|
||||
|
||||
n.logger.Info(ctx, "Bootstrap has changed, storing new bootstrap")
|
||||
n.l.Lock()
|
||||
n.currBootstrap = newBootstrap
|
||||
n.l.Unlock()
|
||||
|
||||
if err := n.children.Reload(ctx, newBootstrap, diff); err != nil {
|
||||
return fmt.Errorf("reloading child processes (diff:%+v): %w", diff, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func withCurrBootstrap[Res any](
|
||||
n *network, fn func(bootstrap.Bootstrap) (Res, error),
|
||||
) (Res, error) {
|
||||
n.l.RLock()
|
||||
defer n.l.RUnlock()
|
||||
|
||||
currBootstrap := n.currBootstrap
|
||||
return fn(currBootstrap)
|
||||
}
|
||||
|
||||
func (n *network) getBootstrap(
|
||||
ctx context.Context,
|
||||
) (
|
||||
bootstrap.Bootstrap, error,
|
||||
) {
|
||||
return withCurrBootstrap(n, func(
|
||||
currBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
bootstrap.Bootstrap, error,
|
||||
) {
|
||||
return currBootstrap, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (n *network) GetHosts(ctx context.Context) ([]bootstrap.Host, error) {
|
||||
b, err := n.getBootstrap(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retrieving bootstrap: %w", err)
|
||||
}
|
||||
|
||||
hosts := maps.Values(b.Hosts)
|
||||
slices.SortFunc(hosts, func(a, b bootstrap.Host) int {
|
||||
return cmp.Compare(a.Name, b.Name)
|
||||
})
|
||||
|
||||
return hosts, nil
|
||||
}
|
||||
|
||||
func (n *network) GetGarageClientParams(
|
||||
ctx context.Context,
|
||||
) (
|
||||
GarageClientParams, error,
|
||||
) {
|
||||
return withCurrBootstrap(n, func(
|
||||
currBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
GarageClientParams, error,
|
||||
) {
|
||||
return n.getGarageClientParams(ctx, currBootstrap)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *network) GetNebulaCAPublicCredentials(
|
||||
ctx context.Context,
|
||||
) (
|
||||
nebula.CAPublicCredentials, error,
|
||||
) {
|
||||
b, err := n.getBootstrap(ctx)
|
||||
if err != nil {
|
||||
return nebula.CAPublicCredentials{}, fmt.Errorf(
|
||||
"retrieving bootstrap: %w", err,
|
||||
)
|
||||
}
|
||||
|
||||
return b.CAPublicCredentials, nil
|
||||
}
|
||||
|
||||
func (n *network) RemoveHost(ctx context.Context, hostName nebula.HostName) error {
|
||||
// TODO RemoveHost should publish a certificate revocation for the host
|
||||
// being removed.
|
||||
_, err := withCurrBootstrap(n, func(
|
||||
currBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
struct{}, error,
|
||||
) {
|
||||
garageClientParams, err := n.getGarageClientParams(ctx, currBootstrap)
|
||||
if err != nil {
|
||||
return struct{}{}, fmt.Errorf("get garage client params: %w", err)
|
||||
}
|
||||
|
||||
client := garageClientParams.GlobalBucketS3APIClient()
|
||||
return struct{}{}, removeGarageBootstrapHost(ctx, client, hostName)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func makeCACreds(
|
||||
currBootstrap bootstrap.Bootstrap,
|
||||
caSigningPrivateKey nebula.SigningPrivateKey,
|
||||
) nebula.CACredentials {
|
||||
return nebula.CACredentials{
|
||||
Public: currBootstrap.CAPublicCredentials,
|
||||
SigningPrivateKey: caSigningPrivateKey,
|
||||
}
|
||||
}
|
||||
|
||||
func chooseAvailableIP(b bootstrap.Bootstrap) (netip.Addr, error) {
|
||||
var (
|
||||
cidrIPNet = b.CAPublicCredentials.Cert.Unwrap().Details.Subnets[0]
|
||||
cidrMask = cidrIPNet.Mask
|
||||
cidrIPB = cidrIPNet.IP
|
||||
|
||||
cidr = netip.MustParsePrefix(cidrIPNet.String())
|
||||
cidrIP = cidr.Addr()
|
||||
cidrSuffixBits = cidrIP.BitLen() - cidr.Bits()
|
||||
|
||||
inUseIPs = make(map[netip.Addr]struct{}, len(b.Hosts))
|
||||
)
|
||||
|
||||
for _, host := range b.Hosts {
|
||||
inUseIPs[host.IP()] = struct{}{}
|
||||
}
|
||||
|
||||
// first check that there are any addresses at all. We can determine the
|
||||
// number of possible addresses using the network CIDR. The first IP in a
|
||||
// subnet is the network identifier, and is reserved. The last IP is the
|
||||
// broadcast IP, and is also reserved. Hence, the -2.
|
||||
usableIPs := (1 << cidrSuffixBits) - 2
|
||||
if len(inUseIPs) >= usableIPs {
|
||||
return netip.Addr{}, errors.New("no available IPs")
|
||||
}
|
||||
|
||||
// We need to know the subnet broadcast address, so we don't accidentally
|
||||
// produce it.
|
||||
cidrBCastIPB := bytes.Clone(cidrIPB)
|
||||
for i := range cidrBCastIPB {
|
||||
cidrBCastIPB[i] |= ^cidrMask[i]
|
||||
}
|
||||
cidrBCastIP, ok := netip.AddrFromSlice(cidrBCastIPB)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("invalid broadcast ip calculated: %x", cidrBCastIP))
|
||||
}
|
||||
|
||||
// Try a handful of times to pick an IP at random. This is preferred, as it
|
||||
// leaves less room for two different CreateHost calls to choose the same
|
||||
// IP.
|
||||
for range 20 {
|
||||
b := make([]byte, len(cidrIPB))
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
return netip.Addr{}, fmt.Errorf("reading random bytes: %w", err)
|
||||
}
|
||||
|
||||
for i := range b {
|
||||
b[i] = cidrIPB[i] | (b[i] & ^cidrMask[i])
|
||||
}
|
||||
|
||||
ip, ok := netip.AddrFromSlice(b)
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("generated invalid IP: %x", b))
|
||||
} else if !cidr.Contains(ip) {
|
||||
panic(fmt.Sprintf(
|
||||
"generated IP %v which is not in cidr %v", ip, cidr,
|
||||
))
|
||||
}
|
||||
|
||||
if ip == cidrIP || ip == cidrBCastIP {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, inUse := inUseIPs[ip]; !inUse {
|
||||
return ip, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If randomly picking fails then just go through IPs one by one until the
|
||||
// free one is found.
|
||||
for ip := cidrIP.Next(); ip != cidrBCastIP; ip = ip.Next() {
|
||||
if _, inUse := inUseIPs[ip]; !inUse {
|
||||
return ip, nil
|
||||
}
|
||||
}
|
||||
|
||||
panic("All ips are in-use, but somehow that wasn't determined earlier")
|
||||
}
|
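// Worked example for the arithmetic above (illustrative, not part of the
// original file): for a /24 subnet such as 10.10.10.0/24, cidrSuffixBits is
// 32 - 24 = 8, so usableIPs = (1 << 8) - 2 = 254. The two excluded addresses
// are the network identifier 10.10.10.0 and the broadcast address
// 10.10.10.255; once 254 hosts exist, chooseAvailableIP reports that no IPs
// are available.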
||||
|
||||
func (n *network) CreateHost(
|
||||
ctx context.Context,
|
||||
hostName nebula.HostName,
|
||||
opts CreateHostOpts,
|
||||
) (
|
||||
JoiningBootstrap, error,
|
||||
) {
|
||||
n.l.RLock()
|
||||
currBootstrap := n.currBootstrap
|
||||
n.l.RUnlock()
|
||||
|
||||
ip := opts.IP
|
||||
if ip == (netip.Addr{}) {
|
||||
var err error
|
||||
if ip, err = chooseAvailableIP(currBootstrap); err != nil {
|
||||
return JoiningBootstrap{}, fmt.Errorf(
|
||||
"choosing available IP: %w", err,
|
||||
)
|
||||
}
|
||||
}
|
||||
// TODO if the ip is given, check that it's not already in use.
|
||||
|
||||
caSigningPrivateKey, err := daecommon.GetNebulaCASigningPrivateKey(
|
||||
ctx, n.secretsStore,
|
||||
)
|
||||
if err != nil {
|
||||
return JoiningBootstrap{}, fmt.Errorf("getting CA signing key: %w", err)
|
||||
}
|
||||
|
||||
var joiningBootstrap JoiningBootstrap
|
||||
joiningBootstrap.Bootstrap, err = bootstrap.New(
|
||||
makeCACreds(currBootstrap, caSigningPrivateKey),
|
||||
currBootstrap.NetworkCreationParams,
|
||||
currBootstrap.Hosts,
|
||||
hostName,
|
||||
ip,
|
||||
)
|
||||
if err != nil {
|
||||
return JoiningBootstrap{}, fmt.Errorf(
|
||||
"initializing bootstrap data: %w", err,
|
||||
)
|
||||
}
|
||||
|
||||
secretsIDs := []secrets.ID{
|
||||
daecommon.GarageRPCSecretSecretID,
|
||||
daecommon.GarageS3APIGlobalBucketCredentialsSecretID,
|
||||
}
|
||||
|
||||
if opts.CanCreateHosts {
|
||||
secretsIDs = append(
|
||||
secretsIDs, daecommon.NebulaCASigningPrivateKeySecretID,
|
||||
)
|
||||
}
|
||||
|
||||
if joiningBootstrap.Secrets, err = secrets.Export(
|
||||
ctx, n.secretsStore, secretsIDs,
|
||||
); err != nil {
|
||||
return JoiningBootstrap{}, fmt.Errorf("exporting secrets: %w", err)
|
||||
}
|
||||
|
||||
n.logger.Info(ctx, "Putting new host in garage")
|
||||
err = n.putGarageBoostrapHost(ctx, joiningBootstrap.Bootstrap)
|
||||
if err != nil {
|
||||
return JoiningBootstrap{}, fmt.Errorf("putting new host in garage: %w", err)
|
||||
}
|
||||
|
||||
// the new bootstrap will have been initialized with both all existing hosts
|
||||
// (based on currBootstrap) and the host being created.
|
||||
newHosts := joiningBootstrap.Bootstrap.Hosts
|
||||
|
||||
n.logger.Info(ctx, "Reloading local state with new host")
|
||||
if err := n.reload(ctx, currBootstrap, newHosts); err != nil {
|
||||
return JoiningBootstrap{}, fmt.Errorf("reloading child processes: %w", err)
|
||||
}
|
||||
|
||||
return joiningBootstrap, nil
|
||||
}
|
||||
|
||||
func (n *network) CreateNebulaCertificate(
|
||||
ctx context.Context,
|
||||
hostName nebula.HostName,
|
||||
hostPubKey nebula.EncryptingPublicKey,
|
||||
) (
|
||||
nebula.Certificate, error,
|
||||
) {
|
||||
return withCurrBootstrap(n, func(
|
||||
currBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
nebula.Certificate, error,
|
||||
) {
|
||||
host, ok := currBootstrap.Hosts[hostName]
|
||||
if !ok {
|
||||
return nebula.Certificate{}, ErrHostNotFound
|
||||
}
|
||||
ip := host.IP()
|
||||
|
||||
caSigningPrivateKey, err := daecommon.GetNebulaCASigningPrivateKey(
|
||||
ctx, n.secretsStore,
|
||||
)
|
||||
if err != nil {
|
||||
return nebula.Certificate{}, fmt.Errorf("getting CA signing key: %w", err)
|
||||
}
|
||||
|
||||
caCreds := makeCACreds(currBootstrap, caSigningPrivateKey)
|
||||
|
||||
return nebula.NewHostCert(caCreds, hostPubKey, hostName, ip)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *network) GetNetworkCreationParams(
|
||||
ctx context.Context,
|
||||
) (
|
||||
bootstrap.CreationParams, error,
|
||||
) {
|
||||
|
||||
return withCurrBootstrap(n, func(
|
||||
currBootstrap bootstrap.Bootstrap,
|
||||
) (
|
||||
bootstrap.CreationParams, error,
|
||||
) {
|
||||
return currBootstrap.NetworkCreationParams, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (n *network) Shutdown() error {
|
||||
close(n.shutdownCh)
|
||||
n.wg.Wait()
|
||||
|
||||
if n.children != nil {
|
||||
n.children.Shutdown()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
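To make the new type's lifecycle concrete, here is a hedged end-to-end sketch of creating and then shutting down a Network using the constructors above. The directory paths and the envBinDirPath are placeholders, and the config, creation params, subnet, and host name are assumed to be supplied by the caller; this is not code from the changeset itself.

package example

import (
	"context"
	"fmt"

	"isle/bootstrap"
	"isle/daemon/daecommon"
	"isle/daemon/network"
	"isle/nebula"
	"isle/toolkit"

	"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
)

func createNetwork(
	ctx context.Context,
	logger *mlog.Logger,
	daemonConfig daecommon.Config,
	creationParams bootstrap.CreationParams,
	ipNet nebula.IPNet,
	hostName nebula.HostName,
) error {
	// Placeholder directories; in the daemon these come from its own state
	// and runtime directories.
	stateDir, err := toolkit.MkDir("/tmp/isle-example/state", false)
	if err != nil {
		return fmt.Errorf("creating state dir: %w", err)
	}

	runtimeDir, err := toolkit.MkDir("/tmp/isle-example/runtime", false)
	if err != nil {
		return fmt.Errorf("creating runtime dir: %w", err)
	}

	n, err := network.Create(
		ctx, logger, daemonConfig, "/tmp/isle-example/bin",
		stateDir, runtimeDir, creationParams, ipNet, hostName, nil,
	)
	if err != nil {
		return fmt.Errorf("creating network: %w", err)
	}

	hosts, err := n.GetHosts(ctx)
	if err != nil {
		return fmt.Errorf("getting hosts: %w", err)
	}
	fmt.Println("hosts in network:", len(hosts))

	return n.Shutdown()
}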
212
go/daemon/rpc.go
@ -1,203 +1,47 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"fmt"
|
||||
"isle/bootstrap"
|
||||
"isle/daemon/jsonrpc2"
|
||||
"isle/daemon/network"
|
||||
"isle/nebula"
|
||||
"slices"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// GetHostsResult wraps the results from the GetHosts RPC method.
|
||||
type GetHostsResult struct {
|
||||
Hosts []bootstrap.Host
|
||||
}
|
||||
|
||||
// CreateHostResult wraps the results from the CreateHost RPC method.
|
||||
type CreateHostResult struct {
|
||||
JoiningBootstrap JoiningBootstrap
|
||||
}
|
||||
|
||||
// CreateNebulaCertificateResult wraps the results from the
|
||||
// CreateNebulaCertificate RPC method.
|
||||
type CreateNebulaCertificateResult struct {
|
||||
HostNebulaCertificate nebula.Certificate
|
||||
}
|
||||
|
||||
// RPC exposes all RPC methods which are available to be called over the RPC
|
||||
// interface.
|
||||
// RPC defines the methods which the Daemon exposes over RPC (via the RPCHandler
|
||||
// or HTTPHandler methods). Method documentation can be found on the Daemon
|
||||
// type.
|
||||
type RPC interface {
|
||||
// CreateNetwork passes through to the Daemon method of the same name.
|
||||
//
|
||||
// name: Human-readable name of the network.
|
||||
// domain: Primary domain name that network services are served under.
|
||||
// ipNet:
|
||||
// An IP subnet, in CIDR form, which will be the overall range of
|
||||
// possible IPs in the network. The first IP in this network range will
|
||||
// become this first host's IP.
|
||||
// hostName: The name of this first host in the network.
|
||||
CreateNetwork(
|
||||
ctx context.Context,
|
||||
name string,
|
||||
domain string,
|
||||
ipNet nebula.IPNet,
|
||||
hostName nebula.HostName,
|
||||
) error
|
||||
|
||||
JoinNetwork(context.Context, network.JoiningBootstrap) error
|
||||
|
||||
// All network.RPC methods are automatically implemented by Daemon using the
|
||||
// currently joined network. If no network is joined then any call to these
|
||||
// methods will return ErrNoNetwork.
|
||||
network.RPC
|
||||
}
|
||||
|
||||
// RPCHandler returns a jsonrpc2.Handler which will use the Daemon to serve all
|
||||
// methods defined on the RPC interface.
|
||||
func (d *Daemon) RPCHandler() jsonrpc2.Handler {
|
||||
rpc := RPC(d)
|
||||
return jsonrpc2.Chain(
|
||||
jsonrpc2.NewMLogMiddleware(d.logger.WithNamespace("rpc")),
|
||||
jsonrpc2.ExposeServerSideErrorsMiddleware,
|
||||
)(
|
||||
struct{}, error,
|
||||
)
|
||||
|
||||
// JoinNetwork passes through to the Daemon method of the same name.
|
||||
JoinNetwork(
|
||||
ctx context.Context, req JoiningBootstrap,
|
||||
) (
|
||||
struct{}, error,
|
||||
)
|
||||
|
||||
// GetHosts returns all hosts known to the network, sorted by their name.
|
||||
GetHosts(ctx context.Context) (GetHostsResult, error)
|
||||
|
||||
// GetGarageClientParams passes the call through to the Daemon method of the
|
||||
// same name.
|
||||
GetGarageClientParams(ctx context.Context) (GarageClientParams, error)
|
||||
|
||||
// GetNebulaCAPublicCredentials returns the CAPublicCredentials for the
|
||||
// network.
|
||||
GetNebulaCAPublicCredentials(
|
||||
ctx context.Context,
|
||||
) (
|
||||
nebula.CAPublicCredentials, error,
|
||||
)
|
||||
|
||||
// RemoveHost passes the call through to the Daemon method of the same name.
|
||||
RemoveHost(
|
||||
ctx context.Context, hostName nebula.HostName,
|
||||
) (
|
||||
struct{}, error,
|
||||
)
|
||||
|
||||
// CreateHost passes the call through to the Daemon method of the same name.
|
||||
CreateHost(
|
||||
ctx context.Context, hostName nebula.HostName, opts CreateHostOpts,
|
||||
) (
|
||||
CreateHostResult, error,
|
||||
)
|
||||
|
||||
// CreateNebulaCertificate passes the call through to the Daemon method of
|
||||
// the same name.
|
||||
CreateNebulaCertificate(
|
||||
ctx context.Context,
|
||||
hostName nebula.HostName,
|
||||
hostEncryptingPublicKey nebula.EncryptingPublicKey,
|
||||
) (
|
||||
CreateNebulaCertificateResult, error,
|
||||
jsonrpc2.NewDispatchHandler(&rpc),
|
||||
)
|
||||
}
|
||||
|
||||
type rpcImpl struct {
|
||||
daemon Daemon
|
||||
}
|
||||
|
||||
// NewRPC initializes and returns an RPC instance.
|
||||
func NewRPC(daemon Daemon) RPC {
|
||||
return &rpcImpl{daemon}
|
||||
}
|
||||
|
||||
func (r *rpcImpl) CreateNetwork(
|
||||
ctx context.Context,
|
||||
name string,
|
||||
domain string,
|
||||
ipNet nebula.IPNet,
|
||||
hostName nebula.HostName,
|
||||
) (
|
||||
struct{}, error,
|
||||
) {
|
||||
return struct{}{}, r.daemon.CreateNetwork(
|
||||
ctx, name, domain, ipNet, hostName,
|
||||
)
|
||||
}
|
||||
|
||||
func (r *rpcImpl) JoinNetwork(
|
||||
ctx context.Context, req JoiningBootstrap,
|
||||
) (
|
||||
struct{}, error,
|
||||
) {
|
||||
return struct{}{}, r.daemon.JoinNetwork(ctx, req)
|
||||
}
|
||||
|
||||
func (r *rpcImpl) GetHosts(ctx context.Context) (GetHostsResult, error) {
|
||||
b, err := r.daemon.GetBootstrap(ctx)
|
||||
if err != nil {
|
||||
return GetHostsResult{}, fmt.Errorf("retrieving bootstrap: %w", err)
|
||||
}
|
||||
|
||||
hosts := maps.Values(b.Hosts)
|
||||
slices.SortFunc(hosts, func(a, b bootstrap.Host) int {
|
||||
return cmp.Compare(a.Name, b.Name)
|
||||
})
|
||||
|
||||
return GetHostsResult{hosts}, nil
|
||||
}
|
||||
|
||||
func (r *rpcImpl) GetGarageClientParams(
|
||||
ctx context.Context,
|
||||
) (
|
||||
GarageClientParams, error,
|
||||
) {
|
||||
return r.daemon.GetGarageClientParams(ctx)
|
||||
}
|
||||
|
||||
func (r *rpcImpl) GetNebulaCAPublicCredentials(
|
||||
ctx context.Context,
|
||||
) (
|
||||
nebula.CAPublicCredentials, error,
|
||||
) {
|
||||
b, err := r.daemon.GetBootstrap(ctx)
|
||||
if err != nil {
|
||||
return nebula.CAPublicCredentials{}, fmt.Errorf(
|
||||
"retrieving bootstrap: %w", err,
|
||||
)
|
||||
}
|
||||
|
||||
return b.CAPublicCredentials, nil
|
||||
}
|
||||
|
||||
func (r *rpcImpl) RemoveHost(
|
||||
ctx context.Context, hostName nebula.HostName,
|
||||
) (
|
||||
struct{}, error,
|
||||
) {
|
||||
return struct{}{}, r.daemon.RemoveHost(ctx, hostName)
|
||||
}
|
||||
|
||||
func (r *rpcImpl) CreateHost(
|
||||
ctx context.Context, hostName nebula.HostName, opts CreateHostOpts,
|
||||
) (
|
||||
CreateHostResult, error,
|
||||
) {
|
||||
joiningBootstrap, err := r.daemon.CreateHost(ctx, hostName, opts)
|
||||
if err != nil {
|
||||
return CreateHostResult{}, err
|
||||
}
|
||||
|
||||
return CreateHostResult{JoiningBootstrap: joiningBootstrap}, nil
|
||||
}
|
||||
|
||||
func (r *rpcImpl) CreateNebulaCertificate(
|
||||
ctx context.Context,
|
||||
hostName nebula.HostName,
|
||||
hostEncryptingPublicKey nebula.EncryptingPublicKey,
|
||||
) (
|
||||
CreateNebulaCertificateResult, error,
|
||||
) {
|
||||
cert, err := r.daemon.CreateNebulaCertificate(
|
||||
ctx, hostName, hostEncryptingPublicKey,
|
||||
)
|
||||
if err != nil {
|
||||
return CreateNebulaCertificateResult{}, err
|
||||
}
|
||||
|
||||
return CreateNebulaCertificateResult{HostNebulaCertificate: cert}, nil
|
||||
// HTTPRPCHandler returns an http.Handler which will use the Daemon to serve all
|
||||
// methods defined on the RPC interface via the JSONRPC2 protocol.
|
||||
func (d *Daemon) HTTPRPCHandler() http.Handler {
|
||||
return jsonrpc2.NewHTTPHandler(d.RPCHandler())
|
||||
}
|
||||
|
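A small sketch of wiring the handler above into an HTTP server. The daemon value is assumed to come from the daemon package's constructor, and the mount path and listen address are illustrative placeholders rather than the ones the CLI actually uses.

package example

import (
	"net/http"

	"isle/daemon"
)

// serveRPC mounts the daemon's JSONRPC2-over-HTTP handler on a mux and serves
// it. Path and address are placeholders.
func serveRPC(d *daemon.Daemon) error {
	mux := http.NewServeMux()
	mux.Handle("/rpc", d.HTTPRPCHandler())
	return http.ListenAndServe("127.0.0.1:4000", mux)
}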
@ -1,52 +0,0 @@
|
||||
package daemon
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"isle/garage"
|
||||
"isle/nebula"
|
||||
"isle/secrets"
|
||||
)
|
||||
|
||||
const (
|
||||
secretsNSNebula = "nebula"
|
||||
secretsNSGarage = "garage"
|
||||
)
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Nebula-related secrets
|
||||
|
||||
var (
|
||||
nebulaCASigningPrivateKeySecretID = secrets.NewID(secretsNSNebula, "ca-signing-private-key")
|
||||
)
|
||||
|
||||
var getNebulaCASigningPrivateKey, setNebulaCASigningPrivateKey = secrets.GetSetFunctions[nebula.SigningPrivateKey](
|
||||
nebulaCASigningPrivateKeySecretID,
|
||||
)
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Garage-related secrets
|
||||
|
||||
func garageS3APIBucketCredentialsSecretID(credsName string) secrets.ID {
|
||||
return secrets.NewID(
|
||||
secretsNSGarage, fmt.Sprintf("s3-api-bucket-credentials-%s", credsName),
|
||||
)
|
||||
}
|
||||
|
||||
var (
|
||||
garageRPCSecretSecretID = secrets.NewID(secretsNSGarage, "rpc-secret")
|
||||
garageS3APIGlobalBucketCredentialsSecretID = garageS3APIBucketCredentialsSecretID(
|
||||
garage.GlobalBucketS3APICredentialsName,
|
||||
)
|
||||
)
|
||||
|
||||
// Get/Set functions for garage-related secrets.
|
||||
var (
|
||||
getGarageRPCSecret, setGarageRPCSecret = secrets.GetSetFunctions[string](
|
||||
garageRPCSecretSecretID,
|
||||
)
|
||||
|
||||
getGarageS3APIGlobalBucketCredentials,
|
||||
setGarageS3APIGlobalBucketCredentials = secrets.GetSetFunctions[garage.S3APICredentials](
|
||||
garageS3APIGlobalBucketCredentialsSecretID,
|
||||
)
|
||||
)
|
@ -20,12 +20,9 @@ type fsStorePayload[Body any] struct {
|
||||
}
|
||||
|
||||
// NewFSStore returns a Store which will store secrets to the given directory.
|
||||
func NewFSStore(dirPath string) (Store, error) {
|
||||
err := os.Mkdir(dirPath, 0700)
|
||||
if err != nil && !errors.Is(err, fs.ErrExist) {
|
||||
return nil, fmt.Errorf("making directory: %w", err)
|
||||
}
|
||||
return &fsStore{dirPath}, nil
|
||||
// The directory must already exist.
|
||||
func NewFSStore(dirPath string) Store {
|
||||
return &fsStore{dirPath}
|
||||
}
|
||||
|
||||
func (s *fsStore) path(id ID) string {
|
||||
|
@ -13,15 +13,10 @@ func Test_fsStore(t *testing.T) {
|
||||
|
||||
var (
|
||||
ctx = context.Background()
|
||||
dir = t.TempDir()
|
||||
store = NewFSStore(t.TempDir())
|
||||
id = NewID("testing", "a")
|
||||
)
|
||||
|
||||
store, err := NewFSStore(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var got payload
|
||||
if err := store.Get(ctx, &got, id); !errors.Is(err, ErrNotFound) {
|
||||
t.Fatalf("expected %v, got: %v", ErrNotFound, err)
|
||||
|
121
go/toolkit/dir.go
Normal file
@ -0,0 +1,121 @@
|
||||
package toolkit
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Dir is a type which makes it possible to statically assert that a directory
|
||||
// has already been created.
|
||||
type Dir struct {
|
||||
Path string
|
||||
}
|
||||
|
||||
// MkDir creates a Dir at the given path.
|
||||
//
|
||||
// If the directory already exists, and mayExist is true, then Dir is returned
|
||||
// successfully; otherwise an error is returned.
|
||||
func MkDir(path string, mayExist bool) (Dir, error) {
|
||||
if path == "" {
|
||||
panic("Empty path passed to MkDir")
|
||||
}
|
||||
|
||||
{
|
||||
parentPath := filepath.Dir(path)
|
||||
parentInfo, err := os.Stat(parentPath)
|
||||
if err != nil {
|
||||
return Dir{}, fmt.Errorf(
|
||||
"checking fs node of parent %q: %w", parentPath, err,
|
||||
)
|
||||
} else if !parentInfo.IsDir() {
|
||||
return Dir{}, fmt.Errorf(
|
||||
"parent %q is not a directory", parentPath,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
info, err := os.Stat(path)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// fine
|
||||
} else if err != nil {
|
||||
return Dir{}, fmt.Errorf("checking fs node: %w", err)
|
||||
} else if !info.IsDir() {
|
||||
return Dir{}, fmt.Errorf("exists but is not a directory")
|
||||
} else {
|
||||
if !mayExist {
|
||||
return Dir{}, errors.New("directory already exists")
|
||||
}
|
||||
return Dir{path}, nil
|
||||
}
|
||||
|
||||
if err := os.Mkdir(path, 0700); err != nil {
|
||||
return Dir{}, fmt.Errorf("creating directory: %w", err)
|
||||
}
|
||||
|
||||
return Dir{path}, nil
|
||||
}
|
||||
|
||||
// MkChildDir is a helper for joining Dir's path to the given name and calling
|
||||
// MkDir with the result.
|
||||
func (d Dir) MkChildDir(name string, mayExist bool) (Dir, error) {
|
||||
childPath := filepath.Join(d.Path, name)
|
||||
d, err := MkDir(childPath, mayExist)
|
||||
if err != nil {
|
||||
return Dir{}, fmt.Errorf(
|
||||
"creating child directory %q: %w", childPath, err,
|
||||
)
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// ChildDirs returns a Dir for every child directory found under this Dir.
|
||||
func (d Dir) ChildDirs() ([]Dir, error) {
|
||||
entries, err := os.ReadDir(d.Path)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("listing contents: %w", err)
|
||||
}
|
||||
|
||||
dirs := make([]Dir, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
if !entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
dirs = append(dirs, Dir{Path: filepath.Join(d.Path, entry.Name())})
|
||||
}
|
||||
|
||||
return dirs, nil
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// MkDirHelper is a helper type for creating a set of directories. It will
|
||||
// collect errors as they occur, allowing error handling to be defered.
|
||||
type MkDirHelper struct {
|
||||
errs []error
|
||||
}
|
||||
|
||||
// Maybe returns the given Dir and true if err == nil. If err != nil then false
|
||||
// is returned, and the error is collected internally such that it will be
|
||||
// returned as part of the Err() output.
|
||||
//
|
||||
// This method is designed to be used in conjunction with a MkDir
|
||||
// function/method, e.g:
|
||||
//
|
||||
// d, ok := h.Maybe(MkDir("some/path", true))
|
||||
func (m *MkDirHelper) Maybe(d Dir, err error) (Dir, bool) {
|
||||
if err != nil {
|
||||
m.errs = append(m.errs, err)
|
||||
return Dir{}, false
|
||||
}
|
||||
return d, true
|
||||
}
|
||||
|
||||
// Err returns a join of all errors which have occurred during its lifetime.
|
||||
func (m *MkDirHelper) Err() error {
|
||||
return errors.Join(m.errs...)
|
||||
}
|
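To illustrate how Dir, MkChildDir, and MkDirHelper compose, here is a hypothetical sketch of building a small directory tree with deferred error handling. The function name and directory layout are assumptions for illustration, not code from this changeset.

// Hypothetical usage of the toolkit directory helpers introduced above.
package direxample

import "isle/toolkit"

func mkStateDirs(rootPath string) (secretsDir, networksDir toolkit.Dir, err error) {
	var h toolkit.MkDirHelper
	// Only descend into children if the root could be created/reused; all
	// errors are collected by the helper and joined at the end.
	if root, ok := h.Maybe(toolkit.MkDir(rootPath, true)); ok {
		secretsDir, _ = h.Maybe(root.MkChildDir("secrets", true))
		networksDir, _ = h.Maybe(root.MkChildDir("networks", true))
	}
	return secretsDir, networksDir, h.Err()
}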
15 go/toolkit/rand.go Normal file
@ -0,0 +1,15 @@
package toolkit

import (
	"crypto/rand"
	"encoding/hex"
)

// RandStr returns a random hex string generated from the given number of
// bytes of entropy.
func RandStr(l int) string {
	b := make([]byte, l)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}
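As a quick usage note (not part of the diff): each byte of entropy encodes to two hex characters, so RandStr(32) yields a 64-character string.

// Minimal illustration of the length relationship described above.
package main

import (
	"fmt"

	"isle/toolkit"
)

func main() {
	id := toolkit.RandStr(32) // 32 bytes of entropy
	fmt.Println(len(id))      // prints 64
}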
3 go/toolkit/toolkit.go Normal file
@ -0,0 +1,3 @@
// Package toolkit contains useful utilities which are not specific to any
// particular part of isle.
package toolkit
@ -1,7 +1,6 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

adminBS="$XDG_STATE_HOME"/isle/bootstrap.json
bs="$secondus_bootstrap" # set in with-1-data-1-empty-node-network.sh

[ "$(jq -r <"$bs" '.Bootstrap.NetworkCreationParams.Domain')" = "shared.test" ]
@ -9,7 +8,7 @@ bs="$secondus_bootstrap" # set in with-1-data-1-empty-node-network.sh
[ "$(jq -r <"$bs" '.Bootstrap.SignedHostAssigned.Body.Name')" = "secondus" ]

[ "$(jq -r <"$bs" '.Bootstrap.Hosts.primus.PublicCredentials')" \
	= "$(jq -r <"$adminBS" '.SignedHostAssigned.Body.PublicCredentials')" ]
	= "$(jq -r <"$BOOTSTRAP_FILE" '.SignedHostAssigned.Body.PublicCredentials')" ]

[ "$(jq <"$bs" '.Bootstrap.Hosts.primus.Garage.Instances|length')" = "3" ]

@ -9,7 +9,7 @@ cat pubkey
	--hostname non-esiste \
	--public-key-path pubkey \
	2>&1 || true \
) | grep '\[6\] Host not found'
) | grep '\[1002\] Host not found'

isle nebula create-cert \
	--hostname primus \
@ -13,6 +13,6 @@ export TMPDIR="$TMPDIR"
export XDG_RUNTIME_DIR="$XDG_RUNTIME_DIR"
export XDG_STATE_HOME="$XDG_STATE_HOME"
export ISLE_DAEMON_HTTP_SOCKET_PATH="$ROOT_TMPDIR/$base-daemon.sock"
BOOTSTRAP_FILE="$XDG_STATE_HOME/isle/bootstrap.json"
BOOTSTRAP_FILE="$XDG_STATE_HOME/isle/networks/$NETWORK_ID/bootstrap.json"
cd "$TMPDIR"
EOF
@ -97,3 +97,9 @@ secondus_ip="$(
	| cut -d/ -f1
)"

NETWORK_ID="$(jq -r '.Bootstrap.NetworkCreationParams.ID' "$secondus_bootstrap")"
export NETWORK_ID

# shared-daemon-env.sh depends on NETWORK_ID, so we re-call as_primus in order
# to fully populate the envvars we need.
as_primus