Compare commits

..

4 Commits

18 changed files with 487 additions and 351 deletions

View File

@ -32,7 +32,7 @@ func main() {
logger := mlog.NewLogger(&mlog.LoggerOpts{ logger := mlog.NewLogger(&mlog.LoggerOpts{
MessageHandler: newLogMsgHandler(), MessageHandler: newLogMsgHandler(),
MaxLevel: mlog.LevelInfo.Int(), MaxLevel: mlog.LevelInfo.Int(), // TODO make this configurable
}) })
defer logger.Close() defer logger.Close()

View File

@ -6,8 +6,6 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"io"
"os"
"code.betamike.com/micropelago/pmux/pmuxlib" "code.betamike.com/micropelago/pmux/pmuxlib"
"dev.mediocregopher.com/mediocre-go-lib.git/mctx" "dev.mediocregopher.com/mediocre-go-lib.git/mctx"
@ -21,25 +19,13 @@ import (
// Opts are optional parameters which can be passed in when initializing a new // Opts are optional parameters which can be passed in when initializing a new
// Children instance. A nil Opts is equivalent to a zero value. // Children instance. A nil Opts is equivalent to a zero value.
type Opts struct { type Opts struct{}
// Stdout and Stderr are what the associated outputs from child processes
// will be directed to.
Stdout, Stderr io.Writer
}
func (o *Opts) withDefaults() *Opts { func (o *Opts) withDefaults() *Opts {
if o == nil { if o == nil {
o = new(Opts) o = new(Opts)
} }
if o.Stdout == nil {
o.Stdout = os.Stdout
}
if o.Stderr == nil {
o.Stderr = os.Stderr
}
return o return o
} }
@ -50,13 +36,16 @@ func (o *Opts) withDefaults() *Opts {
// - garage (0 or more, depending on configured storage allocations) // - garage (0 or more, depending on configured storage allocations)
type Children struct { type Children struct {
logger *mlog.Logger logger *mlog.Logger
binDirPath string
runtimeDir toolkit.Dir runtimeDir toolkit.Dir
garageAdminToken string garageAdminToken string
opts Opts opts Opts
garageRPCSecret string garageRPCSecret string
pmux *pmuxlib.Pmux nebulaProc *pmuxlib.Process
dnsmasqProc *pmuxlib.Process
garageProcs map[string]*pmuxlib.Process
} }
// New initializes and returns a Children instance. If initialization fails an // New initializes and returns a Children instance. If initialization fails an
@ -84,33 +73,66 @@ func New(
c := &Children{ c := &Children{
logger: logger, logger: logger,
binDirPath: binDirPath,
runtimeDir: runtimeDir, runtimeDir: runtimeDir,
garageAdminToken: garageAdminToken, garageAdminToken: garageAdminToken,
opts: *opts, opts: *opts,
garageRPCSecret: garageRPCSecret, garageRPCSecret: garageRPCSecret,
} }
pmuxConfig, err := c.newPmuxConfig( if c.nebulaProc, err = nebulaPmuxProc(
ctx, ctx,
c.logger,
c.runtimeDir.Path,
c.binDirPath,
networkConfig,
hostBootstrap,
); err != nil {
return nil, fmt.Errorf("starting nebula: %w", err)
}
if err := waitForNebula(ctx, c.logger, hostBootstrap); err != nil {
logger.Warn(ctx, "Failed waiting for nebula to initialize, shutting down child processes", err)
c.Shutdown()
return nil, fmt.Errorf("waiting for nebula to start: %w", err)
}
if c.dnsmasqProc, err = dnsmasqPmuxProc(
ctx,
c.logger,
c.runtimeDir.Path,
c.binDirPath,
networkConfig,
hostBootstrap,
); err != nil {
logger.Warn(ctx, "Failed to start dnsmasq, shutting down child processes", err)
c.Shutdown()
return nil, fmt.Errorf("starting dnsmasq: %w", err)
}
// TODO wait for dnsmasq to come up
if c.garageProcs, err = garagePmuxProcs(
ctx,
c.logger,
garageRPCSecret, garageRPCSecret,
binDirPath, c.runtimeDir.Path,
c.binDirPath,
networkConfig, networkConfig,
garageAdminToken, garageAdminToken,
hostBootstrap, hostBootstrap,
) ); err != nil {
if err != nil { logger.Warn(ctx, "Failed to start garage processes, shutting down child processes", err)
return nil, fmt.Errorf("generating pmux config: %w", err) c.Shutdown()
return nil, fmt.Errorf("starting garage processes: %w", err)
} }
c.pmux = pmuxlib.NewPmux(pmuxConfig, c.opts.Stdout, c.opts.Stderr) if err := waitForGarage(
ctx, c.logger, networkConfig, garageAdminToken, hostBootstrap,
initErr := c.postPmuxInit( ); err != nil {
ctx, networkConfig, garageAdminToken, hostBootstrap, logger.Warn(ctx, "Failed waiting for garage processes to initialize, shutting down child processes", err)
)
if initErr != nil {
logger.Warn(ctx, "failed to initialize Children, shutting down child processes", err)
c.Shutdown() c.Shutdown()
return nil, initErr return nil, fmt.Errorf("waiting for garage processes to initialize: %w", err)
} }
return c, nil return c, nil
@ -133,7 +155,7 @@ func (c *Children) reloadDNSMasq(
} }
c.logger.Info(ctx, "dnsmasq config file has changed, restarting process") c.logger.Info(ctx, "dnsmasq config file has changed, restarting process")
c.pmux.Restart("dnsmasq") c.dnsmasqProc.Restart()
return nil return nil
} }
@ -153,7 +175,7 @@ func (c *Children) reloadNebula(
} }
c.logger.Info(ctx, "nebula config file has changed, restarting process") c.logger.Info(ctx, "nebula config file has changed, restarting process")
c.pmux.Restart("nebula") c.nebulaProc.Restart()
if err := waitForNebula(ctx, c.logger, hostBootstrap); err != nil { if err := waitForNebula(ctx, c.logger, hostBootstrap); err != nil {
return fmt.Errorf("waiting for nebula to start: %w", err) return fmt.Errorf("waiting for nebula to start: %w", err)
@ -184,7 +206,7 @@ func (c *Children) reloadGarage(
) )
) )
_, changed, err := garageWriteChildConfig( childConfigPath, changed, err := garageWriteChildConfig(
ctx, ctx,
c.logger, c.logger,
c.garageRPCSecret, c.garageRPCSecret,
@ -202,8 +224,16 @@ func (c *Children) reloadGarage(
anyChanged = true anyChanged = true
if proc, ok := c.garageProcs[procName]; ok {
c.logger.Info(ctx, "garage config has changed, restarting process") c.logger.Info(ctx, "garage config has changed, restarting process")
c.pmux.Restart(procName) proc.Restart()
continue
}
c.logger.Info(ctx, "garage config has been added, creating process")
c.garageProcs[procName] = garagePmuxProc(
ctx, c.logger, c.binDirPath, procName, childConfigPath,
)
} }
if anyChanged { if anyChanged {
@ -244,5 +274,15 @@ func (c *Children) Reload(
// Shutdown blocks until all child processes have gracefully shut themselves // Shutdown blocks until all child processes have gracefully shut themselves
// down. // down.
func (c *Children) Shutdown() { func (c *Children) Shutdown() {
c.pmux.Stop() for _, proc := range c.garageProcs {
proc.Stop()
}
if c.dnsmasqProc != nil {
c.dnsmasqProc.Stop()
}
if c.nebulaProc != nil {
c.nebulaProc.Stop()
}
} }

View File

@ -49,37 +49,36 @@ func dnsmasqWriteConfig(
return confPath, changed, nil return confPath, changed, nil
} }
func dnsmasqPmuxProcConfig( // TODO consider a shared dnsmasq across all the daemon's networks.
// This would have a few benefits:
// - Less processes, less problems
// - Less configuration for the user in the case of more than one network.
// - Can listen on 127.0.0.x:53, rather than on the nebula address. This
// allows DNS to come up before nebula, which is helpful when nebula depends
// on DNS.
func dnsmasqPmuxProc(
ctx context.Context, ctx context.Context,
logger *mlog.Logger, logger *mlog.Logger,
runtimeDirPath, binDirPath string, runtimeDirPath, binDirPath string,
networkConfig daecommon.NetworkConfig, networkConfig daecommon.NetworkConfig,
hostBootstrap bootstrap.Bootstrap, hostBootstrap bootstrap.Bootstrap,
) ( ) (
pmuxlib.ProcessConfig, error, *pmuxlib.Process, error,
) { ) {
confPath, _, err := dnsmasqWriteConfig( confPath, _, err := dnsmasqWriteConfig(
ctx, logger, runtimeDirPath, networkConfig, hostBootstrap, ctx, logger, runtimeDirPath, networkConfig, hostBootstrap,
) )
if err != nil { if err != nil {
return pmuxlib.ProcessConfig{}, fmt.Errorf( return nil, fmt.Errorf(
"writing dnsmasq config: %w", err, "writing dnsmasq config: %w", err,
) )
} }
return pmuxlib.ProcessConfig{ cfg := pmuxlib.ProcessConfig{
Cmd: filepath.Join(binDirPath, "dnsmasq"), Cmd: filepath.Join(binDirPath, "dnsmasq"),
Args: []string{"-d", "-C", confPath}, Args: []string{"-d", "-C", confPath},
StartAfterFunc: func(ctx context.Context) error { }
// TODO consider a shared dnsmasq across all the daemon's networks. cfg = withPmuxLoggers(ctx, logger, "dnsmasq", cfg)
// This would have a few benefits:
// - Less processes, less problems return pmuxlib.NewProcess(cfg), nil
// - Less configuration for the user in the case of more than one
// network.
// - Can listen on 127.0.0.x:53, rather than on the nebula address.
// This allows DNS to come up before nebula, which is helpful when
// nebula depends on DNS.
return waitForNebula(ctx, logger, hostBootstrap)
},
}, nil
} }

View File

@ -120,7 +120,24 @@ func garagePmuxProcName(alloc daecommon.ConfigStorageAllocation) string {
return fmt.Sprintf("garage-%d", alloc.RPCPort) return fmt.Sprintf("garage-%d", alloc.RPCPort)
} }
func garagePmuxProcConfigs( func garagePmuxProc(
ctx context.Context,
logger *mlog.Logger,
binDirPath string,
procName string,
childConfigPath string,
) *pmuxlib.Process {
cfg := pmuxlib.ProcessConfig{
Cmd: filepath.Join(binDirPath, "garage"),
Args: []string{"-c", childConfigPath, "server"},
}
cfg = withPmuxLoggers(ctx, logger, procName, cfg)
return pmuxlib.NewProcess(cfg)
}
func garagePmuxProcs(
ctx context.Context, ctx context.Context,
logger *mlog.Logger, logger *mlog.Logger,
rpcSecret, runtimeDirPath, binDirPath string, rpcSecret, runtimeDirPath, binDirPath string,
@ -128,10 +145,10 @@ func garagePmuxProcConfigs(
adminToken string, adminToken string,
hostBootstrap bootstrap.Bootstrap, hostBootstrap bootstrap.Bootstrap,
) ( ) (
map[string]pmuxlib.ProcessConfig, error, map[string]*pmuxlib.Process, error,
) { ) {
var ( var (
pmuxProcConfigs = map[string]pmuxlib.ProcessConfig{} pmuxProcs = map[string]*pmuxlib.Process{}
allocs = networkConfig.Storage.Allocations allocs = networkConfig.Storage.Allocations
) )
@ -152,14 +169,10 @@ func garagePmuxProcConfigs(
} }
procName := garagePmuxProcName(alloc) procName := garagePmuxProcName(alloc)
pmuxProcConfigs[procName] = pmuxlib.ProcessConfig{ pmuxProcs[procName] = garagePmuxProc(
Cmd: filepath.Join(binDirPath, "garage"), ctx, logger, binDirPath, procName, childConfigPath,
Args: []string{"-c", childConfigPath, "server"}, )
StartAfterFunc: func(ctx context.Context) error {
return waitForNebula(ctx, logger, hostBootstrap)
},
}
} }
return pmuxProcConfigs, nil return pmuxProcs, nil
} }

View File

@ -0,0 +1,59 @@
package children
import (
"context"
"fmt"
"isle/toolkit"
"code.betamike.com/micropelago/pmux/pmuxlib"
"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
)
type pmuxLogger struct {
ctx context.Context
logger *mlog.Logger
}
func newPmuxStdoutLogger(
ctx context.Context, logger *mlog.Logger, name string,
) pmuxlib.Logger {
return &pmuxLogger{ctx, logger.WithNamespace(name).WithNamespace("out")}
}
func newPmuxStderrLogger(
ctx context.Context, logger *mlog.Logger, name string,
) pmuxlib.Logger {
return &pmuxLogger{ctx, logger.WithNamespace(name).WithNamespace("err")}
}
func newPmuxSysLogger(
ctx context.Context, logger *mlog.Logger, name string,
) pmuxlib.Logger {
return &pmuxLogger{ctx, logger.WithNamespace(name).WithNamespace("sys")}
}
func (l *pmuxLogger) Println(line string) {
l.logger.Log(mlog.Message{
Context: l.ctx,
Level: toolkit.LogLevelChild,
Description: line,
})
}
func (l *pmuxLogger) Printf(format string, args ...any) {
l.Println(fmt.Sprintf(format, args...))
}
////////////////////////////////////////////////////////////////////////////////
func withPmuxLoggers(
ctx context.Context,
logger *mlog.Logger,
name string,
cfg pmuxlib.ProcessConfig,
) pmuxlib.ProcessConfig {
cfg.StdoutLogger = newPmuxStdoutLogger(ctx, logger, name)
cfg.StderrLogger = newPmuxStderrLogger(ctx, logger, name)
cfg.SysLogger = newPmuxSysLogger(ctx, logger, name)
return cfg
}

View File

@ -174,27 +174,27 @@ func nebulaWriteConfig(
return nebulaYmlPath, changed, nil return nebulaYmlPath, changed, nil
} }
func nebulaPmuxProcConfig( func nebulaPmuxProc(
ctx context.Context, ctx context.Context,
logger *mlog.Logger, logger *mlog.Logger,
runtimeDirPath, binDirPath string, runtimeDirPath, binDirPath string,
networkConfig daecommon.NetworkConfig, networkConfig daecommon.NetworkConfig,
hostBootstrap bootstrap.Bootstrap, hostBootstrap bootstrap.Bootstrap,
) ( ) (
pmuxlib.ProcessConfig, error, *pmuxlib.Process, error,
) { ) {
nebulaYmlPath, _, err := nebulaWriteConfig( nebulaYmlPath, _, err := nebulaWriteConfig(
ctx, logger, runtimeDirPath, networkConfig, hostBootstrap, ctx, logger, runtimeDirPath, networkConfig, hostBootstrap,
) )
if err != nil { if err != nil {
return pmuxlib.ProcessConfig{}, fmt.Errorf( return nil, fmt.Errorf("writing nebula config: %w", err)
"writing nebula config: %w", err,
)
} }
return pmuxlib.ProcessConfig{ cfg := pmuxlib.ProcessConfig{
Cmd: filepath.Join(binDirPath, "nebula"), Cmd: filepath.Join(binDirPath, "nebula"),
Args: []string{"-config", nebulaYmlPath}, Args: []string{"-config", nebulaYmlPath},
Group: -1, // Make sure nebula is shut down last. }
}, nil cfg = withPmuxLoggers(ctx, logger, "nebula", cfg)
return pmuxlib.NewProcess(cfg), nil
} }

View File

@ -1,90 +0,0 @@
package children
import (
"context"
"fmt"
"isle/bootstrap"
"isle/daemon/daecommon"
"code.betamike.com/micropelago/pmux/pmuxlib"
)
func (c *Children) newPmuxConfig(
ctx context.Context,
garageRPCSecret, binDirPath string,
networkConfig daecommon.NetworkConfig,
garageAdminToken string,
hostBootstrap bootstrap.Bootstrap,
) (
pmuxlib.Config, error,
) {
nebulaPmuxProcConfig, err := nebulaPmuxProcConfig(
ctx,
c.logger,
c.runtimeDir.Path,
binDirPath,
networkConfig,
hostBootstrap,
)
if err != nil {
return pmuxlib.Config{}, fmt.Errorf("generating nebula config: %w", err)
}
dnsmasqPmuxProcConfig, err := dnsmasqPmuxProcConfig(
ctx,
c.logger,
c.runtimeDir.Path,
binDirPath,
networkConfig,
hostBootstrap,
)
if err != nil {
return pmuxlib.Config{}, fmt.Errorf(
"generating dnsmasq config: %w", err,
)
}
garagePmuxProcConfigs, err := garagePmuxProcConfigs(
ctx,
c.logger,
garageRPCSecret,
c.runtimeDir.Path,
binDirPath,
networkConfig,
garageAdminToken,
hostBootstrap,
)
if err != nil {
return pmuxlib.Config{}, fmt.Errorf(
"generating garage children configs: %w", err,
)
}
pmuxProcConfigs := garagePmuxProcConfigs
pmuxProcConfigs["nebula"] = nebulaPmuxProcConfig
pmuxProcConfigs["dnsmasq"] = dnsmasqPmuxProcConfig
return pmuxlib.Config{
Processes: pmuxProcConfigs,
}, nil
}
func (c *Children) postPmuxInit(
ctx context.Context,
networkConfig daecommon.NetworkConfig,
garageAdminToken string,
hostBootstrap bootstrap.Bootstrap,
) error {
if err := waitForNebula(ctx, c.logger, hostBootstrap); err != nil {
return fmt.Errorf("waiting for nebula to start: %w", err)
}
err := waitForGarage(
ctx, c.logger, networkConfig, garageAdminToken, hostBootstrap,
)
if err != nil {
return fmt.Errorf("waiting for garage to start: %w", err)
}
return nil
}

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"errors" "errors"
"io" "io"
"isle/toolkit"
"net" "net"
"net/http/httptest" "net/http/httptest"
"path/filepath" "path/filepath"
@ -11,8 +12,6 @@ import (
"sync" "sync"
"testing" "testing"
"time" "time"
"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
) )
type DivideParams struct { type DivideParams struct {
@ -72,7 +71,7 @@ type divider interface {
func testHandler(t *testing.T) Handler { func testHandler(t *testing.T) Handler {
var ( var (
logger = mlog.NewTestLogger(t) logger = toolkit.NewTestLogger(t)
d = divider(dividerImpl{}) d = divider(dividerImpl{})
) )

View File

@ -6,6 +6,7 @@ import (
"isle/daemon/daecommon" "isle/daemon/daecommon"
"isle/garage/garagesrv" "isle/garage/garagesrv"
"isle/jsonutil" "isle/jsonutil"
"maps"
"os" "os"
"path/filepath" "path/filepath"
) )
@ -54,16 +55,20 @@ func coalesceNetworkConfigAndBootstrap(
) )
} }
host.Garage.Instances = append(host.Garage.Instances, bootstrap.GarageHostInstance{ host.Garage.Instances = append(
host.Garage.Instances,
bootstrap.GarageHostInstance{
ID: id, ID: id,
RPCPort: rpcPort, RPCPort: rpcPort,
S3APIPort: alloc.S3APIPort, S3APIPort: alloc.S3APIPort,
}) },
)
allocs[i].RPCPort = rpcPort allocs[i].RPCPort = rpcPort
} }
} }
hostBootstrap.Hosts = maps.Clone(hostBootstrap.Hosts)
hostBootstrap.Hosts[host.Name] = host hostBootstrap.Hosts[host.Name] = host
return hostBootstrap, nil return hostBootstrap, nil

View File

@ -61,15 +61,12 @@ func newGarageAdminClient(
logger *mlog.Logger, logger *mlog.Logger,
networkConfig daecommon.NetworkConfig, networkConfig daecommon.NetworkConfig,
adminToken string, adminToken string,
hostBootstrap bootstrap.Bootstrap, host bootstrap.Host,
) *garage.AdminClient { ) *garage.AdminClient {
thisHost := hostBootstrap.ThisHost()
return garage.NewAdminClient( return garage.NewAdminClient(
garageAdminClientLogger(logger), garageAdminClientLogger(logger),
net.JoinHostPort( net.JoinHostPort(
thisHost.IP().String(), host.IP().String(),
strconv.Itoa(networkConfig.Storage.Allocations[0].AdminPort), strconv.Itoa(networkConfig.Storage.Allocations[0].AdminPort),
), ),
adminToken, adminToken,
@ -81,24 +78,26 @@ func garageApplyLayout(
logger *mlog.Logger, logger *mlog.Logger,
networkConfig daecommon.NetworkConfig, networkConfig daecommon.NetworkConfig,
adminToken string, adminToken string,
hostBootstrap bootstrap.Bootstrap, prevHost, currHost bootstrap.Host,
) error { ) error {
var ( var (
adminClient = newGarageAdminClient( adminClient = newGarageAdminClient(
logger, networkConfig, adminToken, hostBootstrap, logger, networkConfig, adminToken, currHost,
) )
thisHost = hostBootstrap.ThisHost() hostName = currHost.Name
hostName = thisHost.Name
allocs = networkConfig.Storage.Allocations allocs = networkConfig.Storage.Allocations
peers = make([]garage.PeerLayout, len(allocs)) peers = make([]garage.PeerLayout, len(allocs))
peerIDs = map[string]struct{}{}
idsToRemove = make([]string, 0, len(prevHost.Garage.Instances))
) )
defer adminClient.Close() defer adminClient.Close()
for i, alloc := range allocs { for i, alloc := range allocs {
id := daecommon.BootstrapGarageHostForAlloc(currHost, alloc).ID
id := daecommon.BootstrapGarageHostForAlloc(thisHost, alloc).ID peerIDs[id] = struct{}{}
zone := string(hostName) zone := string(hostName)
if alloc.Zone != "" { if alloc.Zone != "" {
@ -113,7 +112,13 @@ func garageApplyLayout(
} }
} }
return adminClient.ApplyLayout(ctx, peers) for _, prevInst := range prevHost.Garage.Instances {
if _, ok := peerIDs[prevInst.ID]; !ok {
idsToRemove = append(idsToRemove, prevInst.ID)
}
}
return adminClient.ApplyLayout(ctx, peers, idsToRemove)
} }
func garageInitializeGlobalBucket( func garageInitializeGlobalBucket(
@ -121,13 +126,11 @@ func garageInitializeGlobalBucket(
logger *mlog.Logger, logger *mlog.Logger,
networkConfig daecommon.NetworkConfig, networkConfig daecommon.NetworkConfig,
adminToken string, adminToken string,
hostBootstrap bootstrap.Bootstrap, host bootstrap.Host,
) ( ) (
garage.S3APICredentials, error, garage.S3APICredentials, error,
) { ) {
adminClient := newGarageAdminClient( adminClient := newGarageAdminClient(logger, networkConfig, adminToken, host)
logger, networkConfig, adminToken, hostBootstrap,
)
defer adminClient.Close() defer adminClient.Close()
creds, err := adminClient.CreateS3APICredentials( creds, err := adminClient.CreateS3APICredentials(

View File

@ -335,7 +335,7 @@ func Create(
stateDir toolkit.Dir, stateDir toolkit.Dir,
runtimeDir toolkit.Dir, runtimeDir toolkit.Dir,
creationParams bootstrap.CreationParams, creationParams bootstrap.CreationParams,
ipNet nebula.IPNet, // TODO should this go in CreationParams? ipNet nebula.IPNet,
hostName nebula.HostName, hostName nebula.HostName,
opts *Opts, opts *Opts,
) ( ) (
@ -408,19 +408,22 @@ func (n *network) initializeDirs(mayExist bool) error {
} }
func (n *network) initialize( func (n *network) initialize(
ctx context.Context, currBootstrap bootstrap.Bootstrap, ctx context.Context, prevBootstrap bootstrap.Bootstrap,
) error { ) error {
prevThisHost := prevBootstrap.ThisHost()
// we update this Host's data using whatever configuration has been provided // we update this Host's data using whatever configuration has been provided
// by the daemon config. This way the network has the most up-to-date // by the daemon config. This way the network has the most up-to-date
// possible bootstrap. This updated bootstrap will later get updated in // possible bootstrap. This updated bootstrap will later get updated in
// garage as a background task, so other hosts will see it as well. // garage as a background task, so other hosts will see it as well.
currBootstrap, err := coalesceNetworkConfigAndBootstrap( currBootstrap, err := coalesceNetworkConfigAndBootstrap(
n.networkConfig, currBootstrap, n.networkConfig, prevBootstrap,
) )
if err != nil { if err != nil {
return fmt.Errorf("combining configuration into bootstrap: %w", err) return fmt.Errorf("combining configuration into bootstrap: %w", err)
} }
n.logger.Info(ctx, "Writing updated bootstrap to state dir")
err = writeBootstrapToStateDir(n.stateDir.Path, currBootstrap) err = writeBootstrapToStateDir(n.stateDir.Path, currBootstrap)
if err != nil { if err != nil {
return fmt.Errorf("writing bootstrap to state dir: %w", err) return fmt.Errorf("writing bootstrap to state dir: %w", err)
@ -446,7 +449,7 @@ func (n *network) initialize(
n.logger.Info(ctx, "Child processes created") n.logger.Info(ctx, "Child processes created")
if err := n.postInit(ctx); err != nil { if err := n.postInit(ctx, prevThisHost); err != nil {
n.logger.Error(ctx, "Post-initialization failed, stopping child processes", err) n.logger.Error(ctx, "Post-initialization failed, stopping child processes", err)
n.children.Shutdown() n.children.Shutdown()
return fmt.Errorf("performing post-initialization: %w", err) return fmt.Errorf("performing post-initialization: %w", err)
@ -479,15 +482,22 @@ func (n *network) initialize(
return nil return nil
} }
func (n *network) postInit(ctx context.Context) error { func (n *network) postInit(
if len(n.networkConfig.Storage.Allocations) > 0 { ctx context.Context, prevThisHost bootstrap.Host,
) error {
n.l.RLock()
defer n.l.RUnlock()
thisHost := n.currBootstrap.ThisHost()
if len(prevThisHost.Garage.Instances)+len(thisHost.Garage.Instances) > 0 {
n.logger.Info(ctx, "Applying garage layout") n.logger.Info(ctx, "Applying garage layout")
if err := garageApplyLayout( if err := garageApplyLayout(
ctx, ctx,
n.logger, n.logger,
n.networkConfig, n.networkConfig,
n.opts.GarageAdminToken, n.opts.GarageAdminToken,
n.currBootstrap, prevThisHost, thisHost,
); err != nil { ); err != nil {
return fmt.Errorf("applying garage layout: %w", err) return fmt.Errorf("applying garage layout: %w", err)
} }
@ -508,7 +518,7 @@ func (n *network) postInit(ctx context.Context) error {
n.logger, n.logger,
n.networkConfig, n.networkConfig,
n.opts.GarageAdminToken, n.opts.GarageAdminToken,
n.currBootstrap, thisHost,
) )
if err != nil { if err != nil {
return fmt.Errorf("initializing global bucket: %w", err) return fmt.Errorf("initializing global bucket: %w", err)
@ -533,7 +543,6 @@ func (n *network) postInit(ctx context.Context) error {
func (n *network) reloadHosts(ctx context.Context) error { func (n *network) reloadHosts(ctx context.Context) error {
n.l.RLock() n.l.RLock()
networkConfig := n.networkConfig
currBootstrap := n.currBootstrap currBootstrap := n.currBootstrap
n.l.RUnlock() n.l.RUnlock()
@ -553,8 +562,13 @@ func (n *network) reloadHosts(ctx context.Context) error {
newBootstrap := currBootstrap newBootstrap := currBootstrap
newBootstrap.Hosts = newHosts newBootstrap.Hosts = newHosts
err = n.reload(ctx, networkConfig, newBootstrap) // the daemon's view of this host's bootstrap info takes precedence over
if err != nil { // whatever is in garage. The garage version lacks the private credentials
// which must be stored locally.
thisHost := currBootstrap.ThisHost()
newBootstrap.Hosts[thisHost.Name] = thisHost
if _, err = n.reload(ctx, nil, &newBootstrap); err != nil {
return fmt.Errorf("reloading with new host data: %w", err) return fmt.Errorf("reloading with new host data: %w", err)
} }
@ -580,39 +594,53 @@ func (n *network) reloadLoop(ctx context.Context) {
} }
} }
// reload will check the existing hosts data from currBootstrap against // returns the bootstrap prior to the reload being applied.
// a potentially updated set of hosts data, and if there are any differences
// will perform whatever changes are necessary.
func (n *network) reload( func (n *network) reload(
ctx context.Context, ctx context.Context,
newNetworkConfig daecommon.NetworkConfig, newNetworkConfig *daecommon.NetworkConfig,
newBootstrap bootstrap.Bootstrap, newBootstrap *bootstrap.Bootstrap,
) error { ) (
bootstrap.Bootstrap, error,
) {
n.l.Lock() n.l.Lock()
defer n.l.Unlock() defer n.l.Unlock()
// the daemon's view of this host's bootstrap info takes precedence over prevBootstrap := n.currBootstrap
// whatever is in garage. The garage version lacks the private credentials
// which must be stored locally. if newBootstrap != nil {
thisHost := n.currBootstrap.ThisHost() n.currBootstrap = *newBootstrap
newBootstrap.Hosts[thisHost.Name] = thisHost }
if newNetworkConfig != nil {
n.networkConfig = *newNetworkConfig
}
var err error
if n.currBootstrap, err = coalesceNetworkConfigAndBootstrap(
n.networkConfig, n.currBootstrap,
); err != nil {
return bootstrap.Bootstrap{}, fmt.Errorf(
"combining configuration into bootstrap: %w", err,
)
}
n.logger.Info(ctx, "Writing updated bootstrap to state dir") n.logger.Info(ctx, "Writing updated bootstrap to state dir")
err := writeBootstrapToStateDir(n.stateDir.Path, newBootstrap) err = writeBootstrapToStateDir(n.stateDir.Path, n.currBootstrap)
if err != nil { if err != nil {
return fmt.Errorf("writing bootstrap to state dir: %w", err) return bootstrap.Bootstrap{}, fmt.Errorf(
"writing bootstrap to state dir: %w", err,
)
} }
n.networkConfig = newNetworkConfig
n.currBootstrap = newBootstrap
n.logger.Info(ctx, "Reloading child processes") n.logger.Info(ctx, "Reloading child processes")
err = n.children.Reload(ctx, newNetworkConfig, newBootstrap) err = n.children.Reload(ctx, n.networkConfig, n.currBootstrap)
if err != nil { if err != nil {
return fmt.Errorf("reloading child processes: %w", err) return bootstrap.Bootstrap{}, fmt.Errorf(
"reloading child processes: %w", err,
)
} }
return nil return prevBootstrap, nil
} }
func withCurrBootstrap[Res any]( func withCurrBootstrap[Res any](
@ -800,7 +828,6 @@ func (n *network) CreateHost(
JoiningBootstrap, error, JoiningBootstrap, error,
) { ) {
n.l.RLock() n.l.RLock()
networkConfig := n.networkConfig
currBootstrap := n.currBootstrap currBootstrap := n.currBootstrap
n.l.RUnlock() n.l.RUnlock()
@ -865,8 +892,7 @@ func (n *network) CreateHost(
newBootstrap.Hosts = joiningBootstrap.Bootstrap.Hosts newBootstrap.Hosts = joiningBootstrap.Bootstrap.Hosts
n.logger.Info(ctx, "Reloading local state with new host") n.logger.Info(ctx, "Reloading local state with new host")
err = n.reload(ctx, networkConfig, newBootstrap) if _, err = n.reload(ctx, nil, &newBootstrap); err != nil {
if err != nil {
return JoiningBootstrap{}, fmt.Errorf("reloading child processes: %w", err) return JoiningBootstrap{}, fmt.Errorf("reloading child processes: %w", err)
} }
@ -913,46 +939,12 @@ func (n *network) GetConfig(context.Context) (daecommon.NetworkConfig, error) {
func (n *network) SetConfig( func (n *network) SetConfig(
ctx context.Context, config daecommon.NetworkConfig, ctx context.Context, config daecommon.NetworkConfig,
) error { ) error {
newBootstrap, err := coalesceNetworkConfigAndBootstrap( prevBootstrap, err := n.reload(ctx, &config, nil)
config, n.currBootstrap,
)
if err != nil { if err != nil {
return fmt.Errorf("combining configuration into bootstrap: %w", err) return fmt.Errorf("reloading config: %w", err)
} }
n.l.Lock() if err := n.postInit(ctx, prevBootstrap.ThisHost()); err != nil {
defer n.l.Unlock()
n.logger.Info(ctx, "Shutting down children")
n.children.Shutdown()
err = writeBootstrapToStateDir(n.stateDir.Path, newBootstrap)
if err != nil {
return fmt.Errorf("writing bootstrap to state dir: %w", err)
}
n.networkConfig = config
n.currBootstrap = newBootstrap
n.logger.Info(ctx, "Creating child processes")
n.children, err = children.New(
ctx,
n.logger.WithNamespace("children"),
n.envBinDirPath,
n.secretsStore,
n.networkConfig,
n.runtimeDir,
n.opts.GarageAdminToken,
n.currBootstrap,
n.opts.ChildrenOpts,
)
if err != nil {
return fmt.Errorf("creating child processes: %w", err)
}
n.logger.Info(ctx, "Child processes re-created")
if err := n.postInit(ctx); err != nil {
return fmt.Errorf("performing post-initialization: %w", err) return fmt.Errorf("performing post-initialization: %w", err)
} }

View File

@ -19,7 +19,9 @@ func TestCreate(t *testing.T) {
gotCreationParams, err := LoadCreationParams(network.stateDir) gotCreationParams, err := LoadCreationParams(network.stateDir)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, gotCreationParams, network.creationParams) assert.Equal(
t, gotCreationParams, network.getBootstrap(t).NetworkCreationParams,
)
} }
func TestLoad(t *testing.T) { func TestLoad(t *testing.T) {
@ -37,7 +39,7 @@ func TestLoad(t *testing.T) {
loadedNetwork, err := Load( loadedNetwork, err := Load(
h.ctx, h.ctx,
h.logger.WithNamespace("loadedNetwork"), h.logger.WithNamespace("loadedNetwork"),
network.networkConfig, network.getConfig(t),
getEnvBinDirPath(), getEnvBinDirPath(),
network.stateDir, network.stateDir,
h.mkDir(t, "runtime"), h.mkDir(t, "runtime"),
@ -76,18 +78,34 @@ func TestNetwork_GetConfig(t *testing.T) {
config, err := network.GetConfig(h.ctx) config, err := network.GetConfig(h.ctx)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, config, network.networkConfig) assert.Equal(t, config, network.getConfig(t))
} }
func TestNetwork_SetConfig(t *testing.T) { func TestNetwork_SetConfig(t *testing.T) {
t.Run("adding storage alloc", func(t *testing.T) { allocsToPeerLayouts := func(
hostName nebula.HostName, allocs []bootstrap.GarageHostInstance,
) []garage.PeerLayout {
peers := make([]garage.PeerLayout, len(allocs))
for i := range allocs {
peers[i] = garage.PeerLayout{
ID: allocs[i].ID,
Capacity: 1_000_000_000,
Zone: string(hostName),
Tags: []string{},
}
}
return peers
}
t.Run("add storage alloc", func(t *testing.T) {
var ( var (
h = newIntegrationHarness(t) h = newIntegrationHarness(t)
network = h.createNetwork(t, "primus", nil) network = h.createNetwork(t, "primus", nil)
networkConfig = network.getConfig(t)
) )
network.networkConfig.Storage.Allocations = append( networkConfig.Storage.Allocations = append(
network.networkConfig.Storage.Allocations, networkConfig.Storage.Allocations,
daecommon.ConfigStorageAllocation{ daecommon.ConfigStorageAllocation{
DataPath: h.mkDir(t, "data").Path, DataPath: h.mkDir(t, "data").Path,
MetaPath: h.mkDir(t, "meta").Path, MetaPath: h.mkDir(t, "meta").Path,
@ -98,17 +116,10 @@ func TestNetwork_SetConfig(t *testing.T) {
}, },
) )
assert.NoError(t, network.SetConfig(h.ctx, network.networkConfig)) assert.NoError(t, network.SetConfig(h.ctx, networkConfig))
// Check that the Host information was updated
newHosts, err := network.GetHosts(h.ctx)
assert.NoError(t, err)
newHostsByName := map[nebula.HostName]bootstrap.Host{}
for _, h := range newHosts {
newHostsByName[h.Name] = h
}
t.Log("Checking that the Host information was updated")
newHostsByName := network.getHostsByName(t)
newHost, ok := newHostsByName[network.hostName] newHost, ok := newHostsByName[network.hostName]
assert.True(t, ok) assert.True(t, ok)
@ -123,34 +134,60 @@ func TestNetwork_SetConfig(t *testing.T) {
RPCPort: 4901, RPCPort: 4901,
}, newAlloc) }, newAlloc)
// Check that the bootstrap file was written with the new host config t.Log("Checking that the bootstrap file was written with the new host config")
var storedBootstrap bootstrap.Bootstrap var storedBootstrap bootstrap.Bootstrap
assert.NoError(t, jsonutil.LoadFile( assert.NoError(t, jsonutil.LoadFile(
&storedBootstrap, bootstrap.StateDirPath(network.stateDir.Path), &storedBootstrap, bootstrap.StateDirPath(network.stateDir.Path),
)) ))
assert.Equal(t, newHostsByName, storedBootstrap.Hosts) assert.Equal(t, newHostsByName, storedBootstrap.Hosts)
// Check that garage layout contains the new allocation t.Log("Checking that garage layout contains the new allocation")
garageAdminClient := newGarageAdminClient( expPeers := allocsToPeerLayouts(network.hostName, allocs)
h.logger, layout, err := network.garageAdminClient(t).GetLayout(h.ctx)
network.networkConfig,
network.opts.GarageAdminToken,
storedBootstrap,
)
layout, err := garageAdminClient.GetLayout(h.ctx)
assert.NoError(t, err) assert.NoError(t, err)
expPeers := make([]garage.PeerLayout, len(allocs))
for i := range allocs {
expPeers[i] = garage.PeerLayout{
ID: allocs[i].ID,
Capacity: 1_000_000_000,
Zone: string(network.hostName),
Tags: []string{},
}
}
assert.ElementsMatch(t, expPeers, layout.Peers) assert.ElementsMatch(t, expPeers, layout.Peers)
}) })
t.Run("remove storage alloc", func(t *testing.T) {
	var (
		h = newIntegrationHarness(t)
		// Create the network with 4 allocations so that one can be removed
		// below.
		network = h.createNetwork(t, "primus", &createNetworkOpts{
			numStorageAllocs: 4,
		})
		networkConfig = network.getConfig(t)
		prevHost      = network.getHostsByName(t)[network.hostName]

		// The final (4th) allocation is the one which will be removed, along
		// with the garage instance which corresponds with it.
		removedAlloc      = networkConfig.Storage.Allocations[3]
		removedGarageInst = daecommon.BootstrapGarageHostForAlloc(
			prevHost, removedAlloc,
		)
	)

	// Drop the last allocation from the config and apply the change.
	networkConfig.Storage.Allocations = networkConfig.Storage.Allocations[:3]
	assert.NoError(t, network.SetConfig(h.ctx, networkConfig))

	t.Log("Checking that the Host information was updated")
	newHostsByName := network.getHostsByName(t)
	newHost, ok := newHostsByName[network.hostName]
	assert.True(t, ok)

	allocs := newHost.HostConfigured.Garage.Instances
	assert.Len(t, allocs, 3)
	assert.NotContains(t, allocs, removedGarageInst)

	t.Log("Checking that the bootstrap file was written with the new host config")
	var storedBootstrap bootstrap.Bootstrap
	assert.NoError(t, jsonutil.LoadFile(
		&storedBootstrap, bootstrap.StateDirPath(network.stateDir.Path),
	))
	assert.Equal(t, newHostsByName, storedBootstrap.Hosts)

	t.Log("Checking that garage layout contains the new allocation")
	expPeers := allocsToPeerLayouts(network.hostName, allocs)

	layout, err := network.garageAdminClient(t).GetLayout(h.ctx)
	assert.NoError(t, err)
	assert.ElementsMatch(t, expPeers, layout.Peers)
})
// TODO a host having allocs but removing all of them
} }

View File

@ -4,10 +4,9 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"isle/bootstrap" "isle/bootstrap"
"isle/daemon/children"
"isle/daemon/daecommon" "isle/daemon/daecommon"
"isle/garage"
"isle/nebula" "isle/nebula"
"isle/toolkit" "isle/toolkit"
"os" "os"
@ -101,9 +100,7 @@ func newIntegrationHarness(t *testing.T) *integrationHarness {
return &integrationHarness{ return &integrationHarness{
ctx: context.Background(), ctx: context.Background(),
logger: mlog.NewLogger(&mlog.LoggerOpts{ logger: toolkit.NewTestLogger(t),
MessageHandler: mlog.NewTestMessageHandler(t),
}),
rootDir: toolkit.Dir{Path: rootDir}, rootDir: toolkit.Dir{Path: rootDir},
} }
} }
@ -170,39 +167,10 @@ func (h *integrationHarness) mkNetworkConfig(
) )
} }
func (h *integrationHarness) mkChildrenOpts(
t *testing.T, runtimeDir toolkit.Dir,
) *children.Opts {
var (
childrenLogFilePath = filepath.Join(runtimeDir.Path, "children.log")
childrenOpts children.Opts
)
childrenLogFile, err := os.Create(childrenLogFilePath)
require.NoError(t, err)
t.Cleanup(func() {
assert.NoError(t, err)
})
if os.Getenv("ISLE_INTEGRATION_TEST_CHILDREN_LOG_STDOUT") == "" {
childrenOpts = children.Opts{
Stdout: childrenLogFile,
Stderr: childrenLogFile,
}
} else {
childrenOpts = children.Opts{
Stdout: io.MultiWriter(os.Stdout, childrenLogFile),
Stderr: io.MultiWriter(os.Stdout, childrenLogFile),
}
}
return &childrenOpts
}
type createNetworkOpts struct { type createNetworkOpts struct {
creationParams bootstrap.CreationParams creationParams bootstrap.CreationParams
manualShutdown bool manualShutdown bool
numStorageAllocs int
} }
func (o *createNetworkOpts) withDefaults() *createNetworkOpts { func (o *createNetworkOpts) withDefaults() *createNetworkOpts {
@ -214,15 +182,19 @@ func (o *createNetworkOpts) withDefaults() *createNetworkOpts {
o.creationParams = bootstrap.NewCreationParams("test", "test.localnet") o.creationParams = bootstrap.NewCreationParams("test", "test.localnet")
} }
if o.numStorageAllocs == 0 {
o.numStorageAllocs = 3
}
return o return o
} }
type integrationHarnessNetwork struct { type integrationHarnessNetwork struct {
Network Network
ctx context.Context
logger *mlog.Logger
hostName nebula.HostName hostName nebula.HostName
creationParams bootstrap.CreationParams
networkConfig daecommon.NetworkConfig
stateDir, runtimeDir toolkit.Dir stateDir, runtimeDir toolkit.Dir
opts *Opts opts *Opts
} }
@ -236,9 +208,10 @@ func (h *integrationHarness) createNetwork(
opts = opts.withDefaults() opts = opts.withDefaults()
var ( var (
logger = h.logger.WithNamespace("network").WithNamespace(hostNameStr)
networkConfig = h.mkNetworkConfig(t, &networkConfigOpts{ networkConfig = h.mkNetworkConfig(t, &networkConfigOpts{
hasPublicAddr: true, hasPublicAddr: true,
numStorageAllocs: 3, numStorageAllocs: opts.numStorageAllocs,
}) })
stateDir = h.mkDir(t, "state") stateDir = h.mkDir(t, "state")
@ -248,14 +221,13 @@ func (h *integrationHarness) createNetwork(
hostName = nebula.HostName(hostNameStr) hostName = nebula.HostName(hostNameStr)
networkOpts = &Opts{ networkOpts = &Opts{
ChildrenOpts: h.mkChildrenOpts(t, runtimeDir),
GarageAdminToken: "admin_token", GarageAdminToken: "admin_token",
} }
) )
network, err := Create( network, err := Create(
h.ctx, h.ctx,
h.logger.WithNamespace("network").WithNamespace(hostNameStr), logger,
networkConfig, networkConfig,
getEnvBinDirPath(), getEnvBinDirPath(),
stateDir, stateDir,
@ -280,9 +252,9 @@ func (h *integrationHarness) createNetwork(
return integrationHarnessNetwork{ return integrationHarnessNetwork{
network, network,
h.ctx,
logger,
hostName, hostName,
opts.creationParams,
networkConfig,
stateDir, stateDir,
runtimeDir, runtimeDir,
networkOpts, networkOpts,
@ -323,11 +295,11 @@ func (h *integrationHarness) joinNetwork(
} }
var ( var (
logger = h.logger.WithNamespace("network").WithNamespace(hostNameStr)
networkConfig = h.mkNetworkConfig(t, opts.networkConfigOpts) networkConfig = h.mkNetworkConfig(t, opts.networkConfigOpts)
stateDir = h.mkDir(t, "state") stateDir = h.mkDir(t, "state")
runtimeDir = h.mkDir(t, "runtime") runtimeDir = h.mkDir(t, "runtime")
networkOpts = &Opts{ networkOpts = &Opts{
ChildrenOpts: h.mkChildrenOpts(t, runtimeDir),
GarageAdminToken: "admin_token", GarageAdminToken: "admin_token",
} }
) )
@ -335,7 +307,7 @@ func (h *integrationHarness) joinNetwork(
t.Logf("Joining as %q", hostNameStr) t.Logf("Joining as %q", hostNameStr)
joinedNetwork, err := Join( joinedNetwork, err := Join(
h.ctx, h.ctx,
h.logger.WithNamespace("network").WithNamespace(hostNameStr), logger,
networkConfig, networkConfig,
joiningBootstrap, joiningBootstrap,
getEnvBinDirPath(), getEnvBinDirPath(),
@ -358,11 +330,52 @@ func (h *integrationHarness) joinNetwork(
return integrationHarnessNetwork{ return integrationHarnessNetwork{
joinedNetwork, joinedNetwork,
h.ctx,
logger,
hostName, hostName,
network.creationParams,
networkConfig,
stateDir, stateDir,
runtimeDir, runtimeDir,
networkOpts, networkOpts,
} }
} }
// getConfig fetches the network's currently active config, failing the test
// on error.
func (nh *integrationHarnessNetwork) getConfig(t *testing.T) daecommon.NetworkConfig {
	config, err := nh.Network.GetConfig(nh.ctx)
	require.NoError(t, err)
	return config
}
// getBootstrap fetches the bootstrap currently held by the network, failing
// the test on error.
func (nh *integrationHarnessNetwork) getBootstrap(
	t *testing.T,
) bootstrap.Bootstrap {
	b, err := nh.Network.(*network).getBootstrap()
	require.NoError(t, err)
	return b
}
// garageAdminClient returns an AdminClient created from the network's current
// config and bootstrap. The client will be closed automatically when the test
// finishes.
func (nh *integrationHarnessNetwork) garageAdminClient(
	t *testing.T,
) *garage.AdminClient {
	client := newGarageAdminClient(
		nh.logger,
		nh.getConfig(t),
		nh.opts.GarageAdminToken,
		nh.getBootstrap(t).ThisHost(),
	)
	t.Cleanup(func() {
		assert.NoError(t, client.Close())
	})
	return client
}
// getHostsByName fetches all Hosts known to the network, keyed by their
// HostName, failing the test on error.
func (nh *integrationHarnessNetwork) getHostsByName(
	t *testing.T,
) map[nebula.HostName]bootstrap.Host {
	hosts, err := nh.Network.GetHosts(nh.ctx)
	require.NoError(t, err)

	byName := make(map[nebula.HostName]bootstrap.Host, len(hosts))
	for _, host := range hosts {
		byName[host.Name] = host
	}
	return byName
}

View File

@ -153,7 +153,7 @@ func (c *AdminClient) do(
} }
// Wait will block until the instance connected to can see at least // Wait will block until the instance connected to can see at least
// ReplicationFactor-1 other garage instances. If the context is canceled it // ReplicationFactor other garage instances. If the context is canceled it
// will return the context error. // will return the context error.
func (c *AdminClient) Wait(ctx context.Context) error { func (c *AdminClient) Wait(ctx context.Context) error {
@ -193,7 +193,7 @@ func (c *AdminClient) Wait(ctx context.Context) error {
"numUp", numUp, "numUp", numUp,
) )
if numUp >= ReplicationFactor-1 { if numUp >= ReplicationFactor {
c.logger.Debug(ctx, "instance appears to be online") c.logger.Debug(ctx, "instance appears to be online")
return nil return nil
} }
@ -293,10 +293,23 @@ func (c *AdminClient) GetLayout(ctx context.Context) (ClusterLayout, error) {
} }
// ApplyLayout modifies the layout of the garage cluster. Only layout of the // ApplyLayout modifies the layout of the garage cluster. Only layout of the
// given peers will be modified/created, other peers are not affected. // given peers will be modified/created/removed, other peers are not affected.
func (c *AdminClient) ApplyLayout( func (c *AdminClient) ApplyLayout(
ctx context.Context, peers []PeerLayout, ctx context.Context, addModifyPeers []PeerLayout, removePeerIDs []string,
) error { ) error {
type removePeer struct {
ID string `json:"id"`
Remove bool `json:"remove"`
}
peers := make([]any, 0, len(addModifyPeers)+len(removePeerIDs))
for _, p := range addModifyPeers {
peers = append(peers, p)
}
for _, id := range removePeerIDs {
peers = append(peers, removePeer{ID: id, Remove: true})
}
{ {
// https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html#tag/Layout/operation/ApplyLayout // https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html#tag/Layout/operation/ApplyLayout
err := c.do(ctx, nil, "POST", "/v1/layout", peers) err := c.do(ctx, nil, "POST", "/v1/layout", peers)

View File

@ -3,7 +3,7 @@ module isle
go 1.22 go 1.22
require ( require (
code.betamike.com/micropelago/pmux v0.0.0-20240719134913-f5fce902e8c4 code.betamike.com/micropelago/pmux v0.0.0-20241029124408-55bc7097f7b8
dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233 dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233
github.com/adrg/xdg v0.4.0 github.com/adrg/xdg v0.4.0
github.com/jxskiss/base62 v1.1.0 github.com/jxskiss/base62 v1.1.0

View File

@ -1,5 +1,7 @@
code.betamike.com/micropelago/pmux v0.0.0-20240719134913-f5fce902e8c4 h1:n4pGP12kgdH5kCTF4TZYpOjWuAR/zZLpM9j3neeVMEk= code.betamike.com/micropelago/pmux v0.0.0-20240719134913-f5fce902e8c4 h1:n4pGP12kgdH5kCTF4TZYpOjWuAR/zZLpM9j3neeVMEk=
code.betamike.com/micropelago/pmux v0.0.0-20240719134913-f5fce902e8c4/go.mod h1:WlEWacLREVfIQl1IlBjKzuDgL56DFRvyl7YiL5gGP4w= code.betamike.com/micropelago/pmux v0.0.0-20240719134913-f5fce902e8c4/go.mod h1:WlEWacLREVfIQl1IlBjKzuDgL56DFRvyl7YiL5gGP4w=
code.betamike.com/micropelago/pmux v0.0.0-20241029124408-55bc7097f7b8 h1:DTAMr60/y9ZtpMORg50LYGpxyvA9+3UFKVW4mb4CwAE=
code.betamike.com/micropelago/pmux v0.0.0-20241029124408-55bc7097f7b8/go.mod h1:WlEWacLREVfIQl1IlBjKzuDgL56DFRvyl7YiL5gGP4w=
dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233 h1:Ea4HixNfDNDPh7zMngPpEeDf8gpociSPEROBFBedqIY= dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233 h1:Ea4HixNfDNDPh7zMngPpEeDf8gpociSPEROBFBedqIY=
dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233/go.mod h1:nP+AtQWrc3k5qq5y3ABiBLkOfUPlk/FO9fpTFpF+jgs= dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233/go.mod h1:nP+AtQWrc3k5qq5y3ABiBLkOfUPlk/FO9fpTFpF+jgs=
github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls=

32
go/toolkit/logging.go Normal file
View File

@ -0,0 +1,32 @@
package toolkit
import (
"strings"
"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
)
// logLevel is a simple implementation of mlog.Level, used for declaring
// custom log levels beyond those which mlog provides out of the box.
type logLevel struct {
	level int
	name  string
}

// Int returns the numeric value of the level.
func (ll logLevel) Int() int { return ll.level }

// String returns the name of the level.
func (ll logLevel) String() string { return ll.name }
var (
	// LogLevelChild is used for logging out the stdout, stderr, and system logs
	// (from pmux) related to child processes. Its integer level is one above
	// mlog.LevelInfo's.
	LogLevelChild mlog.Level = logLevel{mlog.LevelInfo.Int() + 1, "CHILD"}
)
// LogLevelFromString parses a string as a log level, taking into account
// custom log levels introduced in Isle. Strings which don't match a custom
// level are delegated to mlog.LevelFromString.
func LogLevelFromString(str string) mlog.Level {
	normalized := strings.TrimSpace(strings.ToUpper(str))
	if normalized == LogLevelChild.String() {
		return LogLevelChild
	}
	return mlog.LevelFromString(str)
}

View File

@ -1,8 +1,11 @@
package toolkit package toolkit
import ( import (
"fmt"
"os" "os"
"testing" "testing"
"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
) )
// MarkIntegrationTest marks a test as being an integration test. It will be // MarkIntegrationTest marks a test as being an integration test. It will be
@ -12,3 +15,19 @@ func MarkIntegrationTest(t *testing.T) {
t.Skip("Skipped because ISLE_INTEGRATION_TEST isn't set") t.Skip("Skipped because ISLE_INTEGRATION_TEST isn't set")
} }
} }
// NewTestLogger returns a Logger which should be used for testing purposes. The
// log level of the Logger can be adjusted using the ISLE_LOG_LEVEL envvar,
// defaulting to mlog.LevelInfo when the envvar is unset.
//
// An unparseable ISLE_LOG_LEVEL results in a panic.
func NewTestLogger(t *testing.T) *mlog.Logger {
	// Mark this function as a test helper so that failure/log line attribution
	// points at the caller rather than at this function.
	t.Helper()

	level := mlog.LevelInfo
	if levelStr := os.Getenv("ISLE_LOG_LEVEL"); levelStr != "" {
		if level = LogLevelFromString(levelStr); level == nil {
			panic(fmt.Sprintf("invalid log level: %q", levelStr))
		}
	}

	return mlog.NewLogger(&mlog.LoggerOpts{
		MessageHandler: mlog.NewTestMessageHandler(t),
		MaxLevel:       level.Int(),
	})
}