Compare commits


No commits in common. "6ac473edcb54500e5a6731fcd2452d1935c25de2" and "b7c097ef63512c5ec8a2a3f6389bc9e61fcf7f69" have entirely different histories.

18 changed files with 349 additions and 485 deletions

View File

@@ -32,7 +32,7 @@ func main() {
 	logger := mlog.NewLogger(&mlog.LoggerOpts{
 		MessageHandler: newLogMsgHandler(),
-		MaxLevel:       mlog.LevelInfo.Int(), // TODO make this configurable
+		MaxLevel:       mlog.LevelInfo.Int(),
 	})
 	defer logger.Close()
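The dropped TODO asked for the max log level to be configurable. For reference, a minimal sketch of what that could look like inside main() above, reusing only calls that appear elsewhere in this compare (mlog.LevelFromString and the ISLE_LOG_LEVEL variable are both borrowed from the toolkit.NewTestLogger helper deleted near the end of this compare; newLogMsgHandler is the repo's own handler shown above):

	level := mlog.LevelInfo
	if levelStr := os.Getenv("ISLE_LOG_LEVEL"); levelStr != "" {
		// mlog.LevelFromString returns nil for unrecognized names, per the
		// nil check in the deleted toolkit.NewTestLogger.
		if level = mlog.LevelFromString(levelStr); level == nil {
			panic(fmt.Sprintf("invalid log level: %q", levelStr))
		}
	}

	logger := mlog.NewLogger(&mlog.LoggerOpts{
		MessageHandler: newLogMsgHandler(),
		MaxLevel:       level.Int(),
	})
	defer logger.Close()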

View File

@@ -6,6 +6,8 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"io"
+	"os"

 	"code.betamike.com/micropelago/pmux/pmuxlib"
 	"dev.mediocregopher.com/mediocre-go-lib.git/mctx"
@@ -19,13 +21,25 @@ import (
 // Opts are optional parameters which can be passed in when initializing a new
 // Children instance. A nil Opts is equivalent to a zero value.
-type Opts struct{}
+type Opts struct {
+	// Stdout and Stderr are what the associated outputs from child processes
+	// will be directed to.
+	Stdout, Stderr io.Writer
+}

 func (o *Opts) withDefaults() *Opts {
 	if o == nil {
 		o = new(Opts)
 	}

+	if o.Stdout == nil {
+		o.Stdout = os.Stdout
+	}
+
+	if o.Stderr == nil {
+		o.Stderr = os.Stderr
+	}
+
 	return o
 }
@@ -36,16 +50,13 @@ func (o *Opts) withDefaults() *Opts {
 //   - garage (0 or more, depending on configured storage allocations)
 type Children struct {
 	logger           *mlog.Logger
-	binDirPath       string
 	runtimeDir       toolkit.Dir
 	garageAdminToken string

 	opts Opts

 	garageRPCSecret string

-	nebulaProc  *pmuxlib.Process
-	dnsmasqProc *pmuxlib.Process
-	garageProcs map[string]*pmuxlib.Process
+	pmux *pmuxlib.Pmux
 }
@@ -73,66 +84,33 @@ func New(
 	c := &Children{
 		logger:           logger,
-		binDirPath:       binDirPath,
 		runtimeDir:       runtimeDir,
 		garageAdminToken: garageAdminToken,
 		opts:             *opts,
 		garageRPCSecret:  garageRPCSecret,
 	}

-	if c.nebulaProc, err = nebulaPmuxProc(
+	pmuxConfig, err := c.newPmuxConfig(
 		ctx,
-		c.logger,
-		c.runtimeDir.Path,
-		c.binDirPath,
-		networkConfig,
-		hostBootstrap,
-	); err != nil {
-		return nil, fmt.Errorf("starting nebula: %w", err)
-	}
-
-	if err := waitForNebula(ctx, c.logger, hostBootstrap); err != nil {
-		logger.Warn(ctx, "Failed waiting for nebula to initialize, shutting down child processes", err)
-		c.Shutdown()
-		return nil, fmt.Errorf("waiting for nebula to start: %w", err)
-	}
-
-	if c.dnsmasqProc, err = dnsmasqPmuxProc(
-		ctx,
-		c.logger,
-		c.runtimeDir.Path,
-		c.binDirPath,
-		networkConfig,
-		hostBootstrap,
-	); err != nil {
-		logger.Warn(ctx, "Failed to start dnsmasq, shutting down child processes", err)
-		c.Shutdown()
-		return nil, fmt.Errorf("starting dnsmasq: %w", err)
-	}
-
-	// TODO wait for dnsmasq to come up
-
-	if c.garageProcs, err = garagePmuxProcs(
-		ctx,
-		c.logger,
 		garageRPCSecret,
-		c.runtimeDir.Path,
-		c.binDirPath,
+		binDirPath,
 		networkConfig,
 		garageAdminToken,
 		hostBootstrap,
-	); err != nil {
-		logger.Warn(ctx, "Failed to start garage processes, shutting down child processes", err)
-		c.Shutdown()
-		return nil, fmt.Errorf("starting garage processes: %w", err)
+	)
+	if err != nil {
+		return nil, fmt.Errorf("generating pmux config: %w", err)
 	}

-	if err := waitForGarage(
-		ctx, c.logger, networkConfig, garageAdminToken, hostBootstrap,
-	); err != nil {
-		logger.Warn(ctx, "Failed waiting for garage processes to initialize, shutting down child processes", err)
+	c.pmux = pmuxlib.NewPmux(pmuxConfig, c.opts.Stdout, c.opts.Stderr)
+
+	initErr := c.postPmuxInit(
+		ctx, networkConfig, garageAdminToken, hostBootstrap,
+	)
+	if initErr != nil {
+		logger.Warn(ctx, "failed to initialize Children, shutting down child processes", err)
 		c.Shutdown()
-		return nil, fmt.Errorf("waiting for garage processes to initialize: %w", err)
+		return nil, initErr
 	}

 	return c, nil
@@ -155,7 +133,7 @@ func (c *Children) reloadDNSMasq(
 	}

 	c.logger.Info(ctx, "dnsmasq config file has changed, restarting process")
-	c.dnsmasqProc.Restart()
+	c.pmux.Restart("dnsmasq")

 	return nil
 }
@@ -175,7 +153,7 @@ func (c *Children) reloadNebula(
 	}

 	c.logger.Info(ctx, "nebula config file has changed, restarting process")
-	c.nebulaProc.Restart()
+	c.pmux.Restart("nebula")

 	if err := waitForNebula(ctx, c.logger, hostBootstrap); err != nil {
 		return fmt.Errorf("waiting for nebula to start: %w", err)
@@ -206,7 +184,7 @@ func (c *Children) reloadGarage(
 		)
 	)

-	childConfigPath, changed, err := garageWriteChildConfig(
+	_, changed, err := garageWriteChildConfig(
 		ctx,
 		c.logger,
 		c.garageRPCSecret,
@@ -224,16 +202,8 @@ func (c *Children) reloadGarage(
 		anyChanged = true

-		if proc, ok := c.garageProcs[procName]; ok {
-			c.logger.Info(ctx, "garage config has changed, restarting process")
-			proc.Restart()
-			continue
-		}
-
-		c.logger.Info(ctx, "garage config has been added, creating process")
-		c.garageProcs[procName] = garagePmuxProc(
-			ctx, c.logger, c.binDirPath, procName, childConfigPath,
-		)
+		c.logger.Info(ctx, "garage config has changed, restarting process")
+		c.pmux.Restart(procName)
 	}

 	if anyChanged {
@@ -274,15 +244,5 @@ func (c *Children) Reload(
 // Shutdown blocks until all child processes have gracefully shut themselves
 // down.
 func (c *Children) Shutdown() {
-	for _, proc := range c.garageProcs {
-		proc.Stop()
-	}
-
-	if c.dnsmasqProc != nil {
-		c.dnsmasqProc.Stop()
-	}
-
-	if c.nebulaProc != nil {
-		c.nebulaProc.Stop()
-	}
+	c.pmux.Stop()
 }
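The net effect of this file: the per-process handles (nebulaProc, dnsmasqProc, garageProcs) collapse into one pmuxlib.Pmux addressed by process name. A standalone sketch of that pattern, using only the pmuxlib API as it appears in this diff (NewPmux, Restart, Stop); the exact semantics are an assumption here, not documented behavior:

package main

import (
	"os"

	"code.betamike.com/micropelago/pmux/pmuxlib"
)

func main() {
	// All children are declared up front, keyed by name.
	pmux := pmuxlib.NewPmux(pmuxlib.Config{
		Processes: map[string]pmuxlib.ProcessConfig{
			"sleeper": {Cmd: "sleep", Args: []string{"60"}},
		},
	}, os.Stdout, os.Stderr) // children inherit these writers, per the new Opts

	pmux.Restart("sleeper") // restart one child by name, as reloadDNSMasq now does
	pmux.Stop()             // block until all children have shut down
}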

View File

@@ -49,36 +49,37 @@ func dnsmasqWriteConfig(
 	return confPath, changed, nil
 }

-// TODO consider a shared dnsmasq across all the daemon's networks.
-// This would have a few benefits:
-// - Less processes, less problems
-// - Less configuration for the user in the case of more than one network.
-// - Can listen on 127.0.0.x:53, rather than on the nebula address. This
-//   allows DNS to come up before nebula, which is helpful when nebula depends
-//   on DNS.
-func dnsmasqPmuxProc(
+func dnsmasqPmuxProcConfig(
 	ctx context.Context,
 	logger *mlog.Logger,
 	runtimeDirPath, binDirPath string,
 	networkConfig daecommon.NetworkConfig,
 	hostBootstrap bootstrap.Bootstrap,
 ) (
-	*pmuxlib.Process, error,
+	pmuxlib.ProcessConfig, error,
 ) {
 	confPath, _, err := dnsmasqWriteConfig(
 		ctx, logger, runtimeDirPath, networkConfig, hostBootstrap,
 	)
 	if err != nil {
-		return nil, fmt.Errorf(
+		return pmuxlib.ProcessConfig{}, fmt.Errorf(
 			"writing dnsmasq config: %w", err,
 		)
 	}

-	cfg := pmuxlib.ProcessConfig{
+	return pmuxlib.ProcessConfig{
 		Cmd:  filepath.Join(binDirPath, "dnsmasq"),
 		Args: []string{"-d", "-C", confPath},
-	}
-	cfg = withPmuxLoggers(ctx, logger, "dnsmasq", cfg)
-
-	return pmuxlib.NewProcess(cfg), nil
+		StartAfterFunc: func(ctx context.Context) error {
+			// TODO consider a shared dnsmasq across all the daemon's networks.
+			// This would have a few benefits:
+			// - Less processes, less problems
+			// - Less configuration for the user in the case of more than one
+			//   network.
+			// - Can listen on 127.0.0.x:53, rather than on the nebula address.
+			//   This allows DNS to come up before nebula, which is helpful when
+			//   nebula depends on DNS.
+			return waitForNebula(ctx, logger, hostBootstrap)
+		},
+	}, nil
 }
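Folding the nebula wait into StartAfterFunc hands start ordering to pmux: the config file is still written eagerly, but the process itself is only launched once the hook returns nil. A hedged sketch of the same gating pattern (assuming, per this diff, that StartAfterFunc runs before the child is started and that a non-nil error aborts the start; the command and config path are placeholders):

package example

import (
	"context"

	"code.betamike.com/micropelago/pmux/pmuxlib"
)

// gatedProcConfig returns a config whose process pmux will only start once
// readyCh is closed, or abort if ctx is canceled first.
func gatedProcConfig(readyCh <-chan struct{}) pmuxlib.ProcessConfig {
	return pmuxlib.ProcessConfig{
		Cmd:  "dnsmasq",
		Args: []string{"-d", "-C", "/tmp/dnsmasq.conf"},
		StartAfterFunc: func(ctx context.Context) error {
			select {
			case <-readyCh:
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
		},
	}
}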

View File

@@ -120,24 +120,7 @@ func garagePmuxProcName(alloc daecommon.ConfigStorageAllocation) string {
 	return fmt.Sprintf("garage-%d", alloc.RPCPort)
 }

-func garagePmuxProc(
-	ctx context.Context,
-	logger *mlog.Logger,
-	binDirPath string,
-	procName string,
-	childConfigPath string,
-) *pmuxlib.Process {
-	cfg := pmuxlib.ProcessConfig{
-		Cmd:  filepath.Join(binDirPath, "garage"),
-		Args: []string{"-c", childConfigPath, "server"},
-	}
-	cfg = withPmuxLoggers(ctx, logger, procName, cfg)
-	return pmuxlib.NewProcess(cfg)
-}
-
-func garagePmuxProcs(
+func garagePmuxProcConfigs(
 	ctx context.Context,
 	logger *mlog.Logger,
 	rpcSecret, runtimeDirPath, binDirPath string,
@@ -145,11 +128,11 @@ func garagePmuxProcs(
 	adminToken string,
 	hostBootstrap bootstrap.Bootstrap,
 ) (
-	map[string]*pmuxlib.Process, error,
+	map[string]pmuxlib.ProcessConfig, error,
 ) {
 	var (
-		pmuxProcs = map[string]*pmuxlib.Process{}
+		pmuxProcConfigs = map[string]pmuxlib.ProcessConfig{}
 		allocs    = networkConfig.Storage.Allocations
 	)

 	if len(allocs) > 0 && rpcSecret == "" {
@@ -169,10 +152,14 @@ func garagePmuxProcs(
 		}

 		procName := garagePmuxProcName(alloc)
-		pmuxProcs[procName] = garagePmuxProc(
-			ctx, logger, binDirPath, procName, childConfigPath,
-		)
+		pmuxProcConfigs[procName] = pmuxlib.ProcessConfig{
+			Cmd:  filepath.Join(binDirPath, "garage"),
+			Args: []string{"-c", childConfigPath, "server"},
+			StartAfterFunc: func(ctx context.Context) error {
+				return waitForNebula(ctx, logger, hostBootstrap)
+			},
+		}
 	}

 	return pmuxProcConfigs, nil
 }

View File

@@ -1,59 +0,0 @@
-package children
-
-import (
-	"context"
-	"fmt"
-	"isle/toolkit"
-
-	"code.betamike.com/micropelago/pmux/pmuxlib"
-	"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
-)
-
-type pmuxLogger struct {
-	ctx    context.Context
-	logger *mlog.Logger
-}
-
-func newPmuxStdoutLogger(
-	ctx context.Context, logger *mlog.Logger, name string,
-) pmuxlib.Logger {
-	return &pmuxLogger{ctx, logger.WithNamespace(name).WithNamespace("out")}
-}
-
-func newPmuxStderrLogger(
-	ctx context.Context, logger *mlog.Logger, name string,
-) pmuxlib.Logger {
-	return &pmuxLogger{ctx, logger.WithNamespace(name).WithNamespace("err")}
-}
-
-func newPmuxSysLogger(
-	ctx context.Context, logger *mlog.Logger, name string,
-) pmuxlib.Logger {
-	return &pmuxLogger{ctx, logger.WithNamespace(name).WithNamespace("sys")}
-}
-
-func (l *pmuxLogger) Println(line string) {
-	l.logger.Log(mlog.Message{
-		Context:     l.ctx,
-		Level:       toolkit.LogLevelChild,
-		Description: line,
-	})
-}
-
-func (l *pmuxLogger) Printf(format string, args ...any) {
-	l.Println(fmt.Sprintf(format, args...))
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-func withPmuxLoggers(
-	ctx context.Context,
-	logger *mlog.Logger,
-	name string,
-	cfg pmuxlib.ProcessConfig,
-) pmuxlib.ProcessConfig {
-	cfg.StdoutLogger = newPmuxStdoutLogger(ctx, logger, name)
-	cfg.StderrLogger = newPmuxStderrLogger(ctx, logger, name)
-	cfg.SysLogger = newPmuxSysLogger(ctx, logger, name)
-	return cfg
-}

View File

@@ -174,27 +174,27 @@ func nebulaWriteConfig(
 	return nebulaYmlPath, changed, nil
 }

-func nebulaPmuxProc(
+func nebulaPmuxProcConfig(
 	ctx context.Context,
 	logger *mlog.Logger,
 	runtimeDirPath, binDirPath string,
 	networkConfig daecommon.NetworkConfig,
 	hostBootstrap bootstrap.Bootstrap,
 ) (
-	*pmuxlib.Process, error,
+	pmuxlib.ProcessConfig, error,
 ) {
 	nebulaYmlPath, _, err := nebulaWriteConfig(
 		ctx, logger, runtimeDirPath, networkConfig, hostBootstrap,
 	)
 	if err != nil {
-		return nil, fmt.Errorf("writing nebula config: %w", err)
+		return pmuxlib.ProcessConfig{}, fmt.Errorf(
+			"writing nebula config: %w", err,
+		)
 	}

-	cfg := pmuxlib.ProcessConfig{
+	return pmuxlib.ProcessConfig{
 		Cmd:  filepath.Join(binDirPath, "nebula"),
 		Args: []string{"-config", nebulaYmlPath},
-	}
-	cfg = withPmuxLoggers(ctx, logger, "nebula", cfg)
-
-	return pmuxlib.NewProcess(cfg), nil
+		Group: -1, // Make sure nebula is shut down last.
+	}, nil
 }
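With Shutdown() reduced to a single pmux.Stop(), stop ordering has to be declared rather than coded, which is what Group does here: the comment indicates group -1 is stopped after the default group, keeping the VPN up while its dependents exit. A sketch of that declaration, with the ordering semantics taken on faith from the comment above and placeholder commands:

package example

import "code.betamike.com/micropelago/pmux/pmuxlib"

var cfg = pmuxlib.Config{
	Processes: map[string]pmuxlib.ProcessConfig{
		// Default group: stopped first on shutdown.
		"dnsmasq": {Cmd: "dnsmasq", Args: []string{"-d"}},
		// Group -1: stopped last, so nebula outlives the processes that
		// depend on it.
		"nebula": {Cmd: "nebula", Args: []string{"-config", "nebula.yml"}, Group: -1},
	},
}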

View File

@@ -0,0 +1,90 @@
+package children
+
+import (
+	"context"
+	"fmt"
+	"isle/bootstrap"
+	"isle/daemon/daecommon"
+
+	"code.betamike.com/micropelago/pmux/pmuxlib"
+)
+
+func (c *Children) newPmuxConfig(
+	ctx context.Context,
+	garageRPCSecret, binDirPath string,
+	networkConfig daecommon.NetworkConfig,
+	garageAdminToken string,
+	hostBootstrap bootstrap.Bootstrap,
+) (
+	pmuxlib.Config, error,
+) {
+	nebulaPmuxProcConfig, err := nebulaPmuxProcConfig(
+		ctx,
+		c.logger,
+		c.runtimeDir.Path,
+		binDirPath,
+		networkConfig,
+		hostBootstrap,
+	)
+	if err != nil {
+		return pmuxlib.Config{}, fmt.Errorf("generating nebula config: %w", err)
+	}
+
+	dnsmasqPmuxProcConfig, err := dnsmasqPmuxProcConfig(
+		ctx,
+		c.logger,
+		c.runtimeDir.Path,
+		binDirPath,
+		networkConfig,
+		hostBootstrap,
+	)
+	if err != nil {
+		return pmuxlib.Config{}, fmt.Errorf(
+			"generating dnsmasq config: %w", err,
+		)
+	}
+
+	garagePmuxProcConfigs, err := garagePmuxProcConfigs(
+		ctx,
+		c.logger,
+		garageRPCSecret,
+		c.runtimeDir.Path,
+		binDirPath,
+		networkConfig,
+		garageAdminToken,
+		hostBootstrap,
+	)
+	if err != nil {
+		return pmuxlib.Config{}, fmt.Errorf(
+			"generating garage children configs: %w", err,
+		)
+	}
+
+	pmuxProcConfigs := garagePmuxProcConfigs
+	pmuxProcConfigs["nebula"] = nebulaPmuxProcConfig
+	pmuxProcConfigs["dnsmasq"] = dnsmasqPmuxProcConfig
+
+	return pmuxlib.Config{
+		Processes: pmuxProcConfigs,
+	}, nil
+}
+
+func (c *Children) postPmuxInit(
+	ctx context.Context,
+	networkConfig daecommon.NetworkConfig,
+	garageAdminToken string,
+	hostBootstrap bootstrap.Bootstrap,
+) error {
+	if err := waitForNebula(ctx, c.logger, hostBootstrap); err != nil {
+		return fmt.Errorf("waiting for nebula to start: %w", err)
+	}
+
+	err := waitForGarage(
+		ctx, c.logger, networkConfig, garageAdminToken, hostBootstrap,
+	)
+	if err != nil {
+		return fmt.Errorf("waiting for garage to start: %w", err)
+	}
+
+	return nil
+}
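One subtlety in newPmuxConfig: `pmuxProcConfigs := garagePmuxProcConfigs` copies a map reference, not the map, so the nebula and dnsmasq entries are written into the garage map itself. Harmless here because that map is never reused, but worth noting when reading the function. A runnable demonstration (values shortened to strings):

package main

import "fmt"

func main() {
	garageConfigs := map[string]string{"garage-3900": "..."}

	procConfigs := garageConfigs // aliases the same map, no copy
	procConfigs["nebula"] = "..."
	procConfigs["dnsmasq"] = "..."

	fmt.Println(len(garageConfigs)) // prints 3: the original map was mutated
}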

View File

@@ -4,7 +4,6 @@ import (
 	"context"
 	"errors"
 	"io"
-	"isle/toolkit"
 	"net"
 	"net/http/httptest"
 	"path/filepath"
@@ -12,6 +11,8 @@ import (
 	"sync"
 	"testing"
 	"time"
+
+	"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
 )

 type DivideParams struct {
@@ -71,7 +72,7 @@ type divider interface {
 func testHandler(t *testing.T) Handler {
 	var (
-		logger = toolkit.NewTestLogger(t)
+		logger = mlog.NewTestLogger(t)
 		d      = divider(dividerImpl{})
 	)

View File

@@ -6,7 +6,6 @@ import (
 	"isle/daemon/daecommon"
 	"isle/garage/garagesrv"
 	"isle/jsonutil"
-	"maps"
 	"os"
 	"path/filepath"
 )
@@ -55,20 +54,16 @@ func coalesceNetworkConfigAndBootstrap(
 				)
 			}

-			host.Garage.Instances = append(
-				host.Garage.Instances,
-				bootstrap.GarageHostInstance{
-					ID:        id,
-					RPCPort:   rpcPort,
-					S3APIPort: alloc.S3APIPort,
-				},
-			)
+			host.Garage.Instances = append(host.Garage.Instances, bootstrap.GarageHostInstance{
+				ID:        id,
+				RPCPort:   rpcPort,
+				S3APIPort: alloc.S3APIPort,
+			})

 			allocs[i].RPCPort = rpcPort
 		}
 	}

-	hostBootstrap.Hosts = maps.Clone(hostBootstrap.Hosts)
 	hostBootstrap.Hosts[host.Name] = host

 	return hostBootstrap, nil

View File

@@ -61,12 +61,15 @@ func newGarageAdminClient(
 	logger *mlog.Logger,
 	networkConfig daecommon.NetworkConfig,
 	adminToken string,
-	host bootstrap.Host,
+	hostBootstrap bootstrap.Bootstrap,
 ) *garage.AdminClient {
+	thisHost := hostBootstrap.ThisHost()
+
 	return garage.NewAdminClient(
 		garageAdminClientLogger(logger),
 		net.JoinHostPort(
-			host.IP().String(),
+			thisHost.IP().String(),
 			strconv.Itoa(networkConfig.Storage.Allocations[0].AdminPort),
 		),
 		adminToken,
@@ -78,26 +81,24 @@ func garageApplyLayout(
 	logger *mlog.Logger,
 	networkConfig daecommon.NetworkConfig,
 	adminToken string,
-	prevHost, currHost bootstrap.Host,
+	hostBootstrap bootstrap.Bootstrap,
 ) error {
 	var (
 		adminClient = newGarageAdminClient(
-			logger, networkConfig, adminToken, currHost,
+			logger, networkConfig, adminToken, hostBootstrap,
 		)
-		hostName    = currHost.Name
+		thisHost = hostBootstrap.ThisHost()
+		hostName = thisHost.Name
 		allocs   = networkConfig.Storage.Allocations
 		peers    = make([]garage.PeerLayout, len(allocs))
-		peerIDs     = map[string]struct{}{}
-		idsToRemove = make([]string, 0, len(prevHost.Garage.Instances))
 	)
 	defer adminClient.Close()

 	for i, alloc := range allocs {
-		id := daecommon.BootstrapGarageHostForAlloc(currHost, alloc).ID
-		peerIDs[id] = struct{}{}
+		id := daecommon.BootstrapGarageHostForAlloc(thisHost, alloc).ID

 		zone := string(hostName)
 		if alloc.Zone != "" {
@@ -112,13 +113,7 @@ func garageApplyLayout(
 		}
 	}

-	for _, prevInst := range prevHost.Garage.Instances {
-		if _, ok := peerIDs[prevInst.ID]; !ok {
-			idsToRemove = append(idsToRemove, prevInst.ID)
-		}
-	}
-
-	return adminClient.ApplyLayout(ctx, peers, idsToRemove)
+	return adminClient.ApplyLayout(ctx, peers)
 }
@@ -126,11 +121,13 @@ func garageInitializeGlobalBucket(
 	logger *mlog.Logger,
 	networkConfig daecommon.NetworkConfig,
 	adminToken string,
-	host bootstrap.Host,
+	hostBootstrap bootstrap.Bootstrap,
 ) (
 	garage.S3APICredentials, error,
 ) {
-	adminClient := newGarageAdminClient(logger, networkConfig, adminToken, host)
+	adminClient := newGarageAdminClient(
+		logger, networkConfig, adminToken, hostBootstrap,
+	)
 	defer adminClient.Close()

 	creds, err := adminClient.CreateS3APICredentials(

View File

@@ -335,7 +335,7 @@ func Create(
 	stateDir toolkit.Dir,
 	runtimeDir toolkit.Dir,
 	creationParams bootstrap.CreationParams,
-	ipNet nebula.IPNet,
+	ipNet nebula.IPNet, // TODO should this go in CreationParams?
 	hostName nebula.HostName,
 	opts *Opts,
 ) (
@@ -408,22 +408,19 @@ func (n *network) initializeDirs(mayExist bool) error {
 }

 func (n *network) initialize(
-	ctx context.Context, prevBootstrap bootstrap.Bootstrap,
+	ctx context.Context, currBootstrap bootstrap.Bootstrap,
 ) error {
-	prevThisHost := prevBootstrap.ThisHost()
-
 	// we update this Host's data using whatever configuration has been provided
 	// by the daemon config. This way the network has the most up-to-date
 	// possible bootstrap. This updated bootstrap will later get updated in
 	// garage as a background task, so other hosts will see it as well.
 	currBootstrap, err := coalesceNetworkConfigAndBootstrap(
-		n.networkConfig, prevBootstrap,
+		n.networkConfig, currBootstrap,
 	)
 	if err != nil {
 		return fmt.Errorf("combining configuration into bootstrap: %w", err)
 	}

-	n.logger.Info(ctx, "Writing updated bootstrap to state dir")
 	err = writeBootstrapToStateDir(n.stateDir.Path, currBootstrap)
 	if err != nil {
 		return fmt.Errorf("writing bootstrap to state dir: %w", err)
@@ -449,7 +446,7 @@ func (n *network) initialize(
 	n.logger.Info(ctx, "Child processes created")

-	if err := n.postInit(ctx, prevThisHost); err != nil {
+	if err := n.postInit(ctx); err != nil {
 		n.logger.Error(ctx, "Post-initialization failed, stopping child processes", err)
 		n.children.Shutdown()
 		return fmt.Errorf("performing post-initialization: %w", err)
@@ -482,22 +479,15 @@ func (n *network) initialize(
 	return nil
 }

-func (n *network) postInit(
-	ctx context.Context, prevThisHost bootstrap.Host,
-) error {
-	n.l.RLock()
-	defer n.l.RUnlock()
-
-	thisHost := n.currBootstrap.ThisHost()
-
-	if len(prevThisHost.Garage.Instances)+len(thisHost.Garage.Instances) > 0 {
+func (n *network) postInit(ctx context.Context) error {
+	if len(n.networkConfig.Storage.Allocations) > 0 {
 		n.logger.Info(ctx, "Applying garage layout")
 		if err := garageApplyLayout(
 			ctx,
 			n.logger,
 			n.networkConfig,
 			n.opts.GarageAdminToken,
-			prevThisHost, thisHost,
+			n.currBootstrap,
 		); err != nil {
 			return fmt.Errorf("applying garage layout: %w", err)
 		}
@@ -518,7 +508,7 @@ func (n *network) postInit(
 		n.logger,
 		n.networkConfig,
 		n.opts.GarageAdminToken,
-		thisHost,
+		n.currBootstrap,
 	)
 	if err != nil {
 		return fmt.Errorf("initializing global bucket: %w", err)
@@ -543,6 +533,7 @@ func (n *network) postInit(
 func (n *network) reloadHosts(ctx context.Context) error {
 	n.l.RLock()
+	networkConfig := n.networkConfig
 	currBootstrap := n.currBootstrap
 	n.l.RUnlock()
@@ -562,13 +553,8 @@ func (n *network) reloadHosts(ctx context.Context) error {
 	newBootstrap := currBootstrap
 	newBootstrap.Hosts = newHosts

-	// the daemon's view of this host's bootstrap info takes precedence over
-	// whatever is in garage. The garage version lacks the private credentials
-	// which must be stored locally.
-	thisHost := currBootstrap.ThisHost()
-	newBootstrap.Hosts[thisHost.Name] = thisHost
-
-	if _, err = n.reload(ctx, nil, &newBootstrap); err != nil {
+	err = n.reload(ctx, networkConfig, newBootstrap)
+	if err != nil {
 		return fmt.Errorf("reloading with new host data: %w", err)
 	}
@@ -594,53 +580,39 @@ func (n *network) reloadLoop(ctx context.Context) {
 	}
 }

-// returns the bootstrap prior to the reload being applied.
+// reload will check the existing hosts data from currBootstrap against
+// a potentially updated set of hosts data, and if there are any differences
+// will perform whatever changes are necessary.
 func (n *network) reload(
 	ctx context.Context,
-	newNetworkConfig *daecommon.NetworkConfig,
-	newBootstrap *bootstrap.Bootstrap,
-) (
-	bootstrap.Bootstrap, error,
-) {
+	newNetworkConfig daecommon.NetworkConfig,
+	newBootstrap bootstrap.Bootstrap,
+) error {
 	n.l.Lock()
 	defer n.l.Unlock()

-	prevBootstrap := n.currBootstrap
-
-	if newBootstrap != nil {
-		n.currBootstrap = *newBootstrap
-	}
-
-	if newNetworkConfig != nil {
-		n.networkConfig = *newNetworkConfig
-	}
-
-	var err error
-	if n.currBootstrap, err = coalesceNetworkConfigAndBootstrap(
-		n.networkConfig, n.currBootstrap,
-	); err != nil {
-		return bootstrap.Bootstrap{}, fmt.Errorf(
-			"combining configuration into bootstrap: %w", err,
-		)
-	}
+	// the daemon's view of this host's bootstrap info takes precedence over
+	// whatever is in garage. The garage version lacks the private credentials
+	// which must be stored locally.
+	thisHost := n.currBootstrap.ThisHost()
+	newBootstrap.Hosts[thisHost.Name] = thisHost

 	n.logger.Info(ctx, "Writing updated bootstrap to state dir")
-	err = writeBootstrapToStateDir(n.stateDir.Path, n.currBootstrap)
+	err := writeBootstrapToStateDir(n.stateDir.Path, newBootstrap)
 	if err != nil {
-		return bootstrap.Bootstrap{}, fmt.Errorf(
-			"writing bootstrap to state dir: %w", err,
-		)
+		return fmt.Errorf("writing bootstrap to state dir: %w", err)
 	}

+	n.networkConfig = newNetworkConfig
+	n.currBootstrap = newBootstrap
+
 	n.logger.Info(ctx, "Reloading child processes")
-	err = n.children.Reload(ctx, n.networkConfig, n.currBootstrap)
+	err = n.children.Reload(ctx, newNetworkConfig, newBootstrap)
 	if err != nil {
-		return bootstrap.Bootstrap{}, fmt.Errorf(
-			"reloading child processes: %w", err,
-		)
+		return fmt.Errorf("reloading child processes: %w", err)
 	}

-	return prevBootstrap, nil
+	return nil
 }
@@ -828,6 +800,7 @@ func (n *network) CreateHost(
 	JoiningBootstrap, error,
 ) {
 	n.l.RLock()
+	networkConfig := n.networkConfig
 	currBootstrap := n.currBootstrap
 	n.l.RUnlock()
@@ -892,7 +865,8 @@ func (n *network) CreateHost(
 	newBootstrap.Hosts = joiningBootstrap.Bootstrap.Hosts

 	n.logger.Info(ctx, "Reloading local state with new host")
-	if _, err = n.reload(ctx, nil, &newBootstrap); err != nil {
+	err = n.reload(ctx, networkConfig, newBootstrap)
+	if err != nil {
 		return JoiningBootstrap{}, fmt.Errorf("reloading child processes: %w", err)
 	}
@@ -939,12 +913,46 @@ func (n *network) GetConfig(context.Context) (daecommon.NetworkConfig, error) {
 func (n *network) SetConfig(
 	ctx context.Context, config daecommon.NetworkConfig,
 ) error {
-	prevBootstrap, err := n.reload(ctx, &config, nil)
+	newBootstrap, err := coalesceNetworkConfigAndBootstrap(
+		config, n.currBootstrap,
+	)
 	if err != nil {
-		return fmt.Errorf("reloading config: %w", err)
+		return fmt.Errorf("combining configuration into bootstrap: %w", err)
 	}

-	if err := n.postInit(ctx, prevBootstrap.ThisHost()); err != nil {
+	n.l.Lock()
+	defer n.l.Unlock()
+
+	n.logger.Info(ctx, "Shutting down children")
+	n.children.Shutdown()
+
+	err = writeBootstrapToStateDir(n.stateDir.Path, newBootstrap)
+	if err != nil {
+		return fmt.Errorf("writing bootstrap to state dir: %w", err)
+	}
+
+	n.networkConfig = config
+	n.currBootstrap = newBootstrap
+
+	n.logger.Info(ctx, "Creating child processes")
+	n.children, err = children.New(
+		ctx,
+		n.logger.WithNamespace("children"),
+		n.envBinDirPath,
+		n.secretsStore,
+		n.networkConfig,
+		n.runtimeDir,
+		n.opts.GarageAdminToken,
+		n.currBootstrap,
+		n.opts.ChildrenOpts,
+	)
+	if err != nil {
+		return fmt.Errorf("creating child processes: %w", err)
+	}
+	n.logger.Info(ctx, "Child processes re-created")
+
+	if err := n.postInit(ctx); err != nil {
 		return fmt.Errorf("performing post-initialization: %w", err)
 	}

View File

@@ -19,9 +19,7 @@ func TestCreate(t *testing.T) {
 	gotCreationParams, err := LoadCreationParams(network.stateDir)
 	assert.NoError(t, err)
-	assert.Equal(
-		t, gotCreationParams, network.getBootstrap(t).NetworkCreationParams,
-	)
+	assert.Equal(t, gotCreationParams, network.creationParams)
 }

 func TestLoad(t *testing.T) {
@@ -39,7 +37,7 @@ func TestLoad(t *testing.T) {
 	loadedNetwork, err := Load(
 		h.ctx,
 		h.logger.WithNamespace("loadedNetwork"),
-		network.getConfig(t),
+		network.networkConfig,
 		getEnvBinDirPath(),
 		network.stateDir,
 		h.mkDir(t, "runtime"),
@@ -78,34 +76,18 @@ func TestNetwork_GetConfig(t *testing.T) {
 	config, err := network.GetConfig(h.ctx)
 	assert.NoError(t, err)
-	assert.Equal(t, config, network.getConfig(t))
+	assert.Equal(t, config, network.networkConfig)
 }

 func TestNetwork_SetConfig(t *testing.T) {
-	allocsToPeerLayouts := func(
-		hostName nebula.HostName, allocs []bootstrap.GarageHostInstance,
-	) []garage.PeerLayout {
-		peers := make([]garage.PeerLayout, len(allocs))
-		for i := range allocs {
-			peers[i] = garage.PeerLayout{
-				ID:       allocs[i].ID,
-				Capacity: 1_000_000_000,
-				Zone:     string(hostName),
-				Tags:     []string{},
-			}
-		}
-		return peers
-	}
-
-	t.Run("add storage alloc", func(t *testing.T) {
+	t.Run("adding storage alloc", func(t *testing.T) {
 		var (
 			h       = newIntegrationHarness(t)
 			network = h.createNetwork(t, "primus", nil)
-			networkConfig = network.getConfig(t)
 		)

-		networkConfig.Storage.Allocations = append(
-			networkConfig.Storage.Allocations,
+		network.networkConfig.Storage.Allocations = append(
+			network.networkConfig.Storage.Allocations,
 			daecommon.ConfigStorageAllocation{
 				DataPath: h.mkDir(t, "data").Path,
 				MetaPath: h.mkDir(t, "meta").Path,
@@ -116,10 +98,17 @@ func TestNetwork_SetConfig(t *testing.T) {
 			},
 		)

-		assert.NoError(t, network.SetConfig(h.ctx, networkConfig))
+		assert.NoError(t, network.SetConfig(h.ctx, network.networkConfig))

-		t.Log("Checking that the Host information was updated")
-		newHostsByName := network.getHostsByName(t)
+		// Check that the Host information was updated
+		newHosts, err := network.GetHosts(h.ctx)
+		assert.NoError(t, err)
+		newHostsByName := map[nebula.HostName]bootstrap.Host{}
+		for _, h := range newHosts {
+			newHostsByName[h.Name] = h
+		}
+
 		newHost, ok := newHostsByName[network.hostName]
 		assert.True(t, ok)
@@ -134,60 +123,34 @@ func TestNetwork_SetConfig(t *testing.T) {
 			RPCPort:   4901,
 		}, newAlloc)

-		t.Log("Checking that the bootstrap file was written with the new host config")
+		// Check that the bootstrap file was written with the new host config
 		var storedBootstrap bootstrap.Bootstrap
 		assert.NoError(t, jsonutil.LoadFile(
 			&storedBootstrap, bootstrap.StateDirPath(network.stateDir.Path),
 		))
 		assert.Equal(t, newHostsByName, storedBootstrap.Hosts)

-		t.Log("Checking that garage layout contains the new allocation")
-		expPeers := allocsToPeerLayouts(network.hostName, allocs)
-		layout, err := network.garageAdminClient(t).GetLayout(h.ctx)
-		assert.NoError(t, err)
-		assert.ElementsMatch(t, expPeers, layout.Peers)
-	})
-
-	t.Run("remove storage alloc", func(t *testing.T) {
-		var (
-			h       = newIntegrationHarness(t)
-			network = h.createNetwork(t, "primus", &createNetworkOpts{
-				numStorageAllocs: 4,
-			})
-			networkConfig     = network.getConfig(t)
-			prevHost          = network.getHostsByName(t)[network.hostName]
-			removedAlloc      = networkConfig.Storage.Allocations[3]
-			removedGarageInst = daecommon.BootstrapGarageHostForAlloc(
-				prevHost, removedAlloc,
-			)
-		)
-
-		networkConfig.Storage.Allocations = networkConfig.Storage.Allocations[:3]
-		assert.NoError(t, network.SetConfig(h.ctx, networkConfig))
-
-		t.Log("Checking that the Host information was updated")
-		newHostsByName := network.getHostsByName(t)
-		newHost, ok := newHostsByName[network.hostName]
-		assert.True(t, ok)
-
-		allocs := newHost.HostConfigured.Garage.Instances
-		assert.Len(t, allocs, 3)
-		assert.NotContains(t, allocs, removedGarageInst)
-
-		t.Log("Checking that the bootstrap file was written with the new host config")
-		var storedBootstrap bootstrap.Bootstrap
-		assert.NoError(t, jsonutil.LoadFile(
-			&storedBootstrap, bootstrap.StateDirPath(network.stateDir.Path),
-		))
-		assert.Equal(t, newHostsByName, storedBootstrap.Hosts)
-
-		t.Log("Checking that garage layout contains the new allocation")
-		expPeers := allocsToPeerLayouts(network.hostName, allocs)
-		layout, err := network.garageAdminClient(t).GetLayout(h.ctx)
+		// Check that garage layout contains the new allocation
+		garageAdminClient := newGarageAdminClient(
+			h.logger,
+			network.networkConfig,
+			network.opts.GarageAdminToken,
+			storedBootstrap,
+		)
+		layout, err := garageAdminClient.GetLayout(h.ctx)
 		assert.NoError(t, err)
+
+		expPeers := make([]garage.PeerLayout, len(allocs))
+		for i := range allocs {
+			expPeers[i] = garage.PeerLayout{
+				ID:       allocs[i].ID,
+				Capacity: 1_000_000_000,
+				Zone:     string(network.hostName),
+				Tags:     []string{},
+			}
+		}
 		assert.ElementsMatch(t, expPeers, layout.Peers)
 	})
-
-	// TODO a host having allocs but removing all of them
 }

View File

@@ -4,9 +4,10 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"io"
 	"isle/bootstrap"
+	"isle/daemon/children"
 	"isle/daemon/daecommon"
-	"isle/garage"
 	"isle/nebula"
 	"isle/toolkit"
 	"os"
@@ -99,8 +100,10 @@ func newIntegrationHarness(t *testing.T) *integrationHarness {
 	})

 	return &integrationHarness{
 		ctx: context.Background(),
-		logger:  toolkit.NewTestLogger(t),
+		logger: mlog.NewLogger(&mlog.LoggerOpts{
+			MessageHandler: mlog.NewTestMessageHandler(t),
+		}),
 		rootDir: toolkit.Dir{Path: rootDir},
 	}
 }
@@ -167,10 +170,39 @@ func (h *integrationHarness) mkNetworkConfig(
 	)
 }

+func (h *integrationHarness) mkChildrenOpts(
+	t *testing.T, runtimeDir toolkit.Dir,
+) *children.Opts {
+	var (
+		childrenLogFilePath = filepath.Join(runtimeDir.Path, "children.log")
+		childrenOpts        children.Opts
+	)
+
+	childrenLogFile, err := os.Create(childrenLogFilePath)
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		assert.NoError(t, err)
+	})
+
+	if os.Getenv("ISLE_INTEGRATION_TEST_CHILDREN_LOG_STDOUT") == "" {
+		childrenOpts = children.Opts{
+			Stdout: childrenLogFile,
+			Stderr: childrenLogFile,
+		}
+	} else {
+		childrenOpts = children.Opts{
+			Stdout: io.MultiWriter(os.Stdout, childrenLogFile),
+			Stderr: io.MultiWriter(os.Stdout, childrenLogFile),
+		}
+	}
+
+	return &childrenOpts
+}
+
 type createNetworkOpts struct {
 	creationParams bootstrap.CreationParams
 	manualShutdown bool
-	numStorageAllocs int
 }
@@ -182,19 +214,15 @@ func (o *createNetworkOpts) withDefaults() *createNetworkOpts {
 		o.creationParams = bootstrap.NewCreationParams("test", "test.localnet")
 	}

-	if o.numStorageAllocs == 0 {
-		o.numStorageAllocs = 3
-	}
-
 	return o
 }

 type integrationHarnessNetwork struct {
 	Network

-	ctx      context.Context
-	logger   *mlog.Logger
 	hostName nebula.HostName
+	creationParams bootstrap.CreationParams
+	networkConfig  daecommon.NetworkConfig

 	stateDir, runtimeDir toolkit.Dir
 	opts                 *Opts
 }
@@ -208,10 +236,9 @@ func (h *integrationHarness) createNetwork(
 	opts = opts.withDefaults()

 	var (
-		logger        = h.logger.WithNamespace("network").WithNamespace(hostNameStr)
 		networkConfig = h.mkNetworkConfig(t, &networkConfigOpts{
 			hasPublicAddr:    true,
-			numStorageAllocs: opts.numStorageAllocs,
+			numStorageAllocs: 3,
 		})

 		stateDir = h.mkDir(t, "state")
@@ -221,13 +248,14 @@
 		hostName = nebula.HostName(hostNameStr)

 		networkOpts = &Opts{
+			ChildrenOpts:     h.mkChildrenOpts(t, runtimeDir),
 			GarageAdminToken: "admin_token",
 		}
 	)

 	network, err := Create(
 		h.ctx,
-		logger,
+		h.logger.WithNamespace("network").WithNamespace(hostNameStr),
 		networkConfig,
 		getEnvBinDirPath(),
 		stateDir,
@@ -252,9 +280,9 @@
 	return integrationHarnessNetwork{
 		network,
-		h.ctx,
-		logger,
 		hostName,
+		opts.creationParams,
+		networkConfig,
 		stateDir,
 		runtimeDir,
 		networkOpts,
@@ -295,11 +323,11 @@ func (h *integrationHarness) joinNetwork(
 	}

 	var (
-		logger        = h.logger.WithNamespace("network").WithNamespace(hostNameStr)
 		networkConfig = h.mkNetworkConfig(t, opts.networkConfigOpts)
 		stateDir      = h.mkDir(t, "state")
 		runtimeDir    = h.mkDir(t, "runtime")
 		networkOpts   = &Opts{
+			ChildrenOpts:     h.mkChildrenOpts(t, runtimeDir),
 			GarageAdminToken: "admin_token",
 		}
 	)
@@ -307,7 +335,7 @@
 	t.Logf("Joining as %q", hostNameStr)
 	joinedNetwork, err := Join(
 		h.ctx,
-		logger,
+		h.logger.WithNamespace("network").WithNamespace(hostNameStr),
 		networkConfig,
 		joiningBootstrap,
 		getEnvBinDirPath(),
@@ -330,52 +358,11 @@
 	return integrationHarnessNetwork{
 		joinedNetwork,
-		h.ctx,
-		logger,
 		hostName,
+		network.creationParams,
+		networkConfig,
 		stateDir,
 		runtimeDir,
 		networkOpts,
 	}
 }
-
-func (nh *integrationHarnessNetwork) getConfig(t *testing.T) daecommon.NetworkConfig {
-	networkConfig, err := nh.Network.GetConfig(nh.ctx)
-	require.NoError(t, err)
-	return networkConfig
-}
-
-func (nh *integrationHarnessNetwork) getBootstrap(
-	t *testing.T,
-) bootstrap.Bootstrap {
-	currBootstrap, err := nh.Network.(*network).getBootstrap()
-	require.NoError(t, err)
-	return currBootstrap
-}
-
-func (nh *integrationHarnessNetwork) garageAdminClient(
-	t *testing.T,
-) *garage.AdminClient {
-	c := newGarageAdminClient(
-		nh.logger,
-		nh.getConfig(t),
-		nh.opts.GarageAdminToken,
-		nh.getBootstrap(t).ThisHost(),
-	)
-	t.Cleanup(func() { assert.NoError(t, c.Close()) })
-	return c
-}
-
-func (nh *integrationHarnessNetwork) getHostsByName(
-	t *testing.T,
-) map[nebula.HostName]bootstrap.Host {
-	hosts, err := nh.Network.GetHosts(nh.ctx)
-	require.NoError(t, err)
-
-	hostsByName := map[nebula.HostName]bootstrap.Host{}
-	for _, h := range hosts {
-		hostsByName[h.Name] = h
-	}
-
-	return hostsByName
-}
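mkChildrenOpts is the test-side consumer of the new children.Opts writers: everything the children print lands in children.log, optionally teed to the terminal. The tee itself is plain stdlib; a runnable distillation:

package main

import (
	"io"
	"os"
)

func main() {
	logFile, err := os.Create("children.log")
	if err != nil {
		panic(err)
	}
	defer logFile.Close()

	// Default: capture to the file only. With the env var set, mirror to
	// stdout as well, as mkChildrenOpts does above.
	var out io.Writer = logFile
	if os.Getenv("ISLE_INTEGRATION_TEST_CHILDREN_LOG_STDOUT") != "" {
		out = io.MultiWriter(os.Stdout, logFile)
	}

	io.WriteString(out, "child output would be teed here\n")
}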

View File

@@ -153,7 +153,7 @@ func (c *AdminClient) do(
 }

 // Wait will block until the instance connected to can see at least
-// ReplicationFactor other garage instances. If the context is canceled it
+// ReplicationFactor-1 other garage instances. If the context is canceled it
 // will return the context error.
 func (c *AdminClient) Wait(ctx context.Context) error {
@@ -193,7 +193,7 @@ func (c *AdminClient) Wait(ctx context.Context) error {
 			"numUp", numUp,
 		)

-		if numUp >= ReplicationFactor {
+		if numUp >= ReplicationFactor-1 {
 			c.logger.Debug(ctx, "instance appears to be online")
 			return nil
 		}
@@ -293,23 +293,10 @@ func (c *AdminClient) GetLayout(ctx context.Context) (ClusterLayout, error) {
 }

 // ApplyLayout modifies the layout of the garage cluster. Only layout of the
-// given peers will be modified/created/removed, other peers are not affected.
+// given peers will be modified/created, other peers are not affected.
 func (c *AdminClient) ApplyLayout(
-	ctx context.Context, addModifyPeers []PeerLayout, removePeerIDs []string,
+	ctx context.Context, peers []PeerLayout,
 ) error {
-	type removePeer struct {
-		ID     string `json:"id"`
-		Remove bool   `json:"remove"`
-	}
-
-	peers := make([]any, 0, len(addModifyPeers)+len(removePeerIDs))
-	for _, p := range addModifyPeers {
-		peers = append(peers, p)
-	}
-	for _, id := range removePeerIDs {
-		peers = append(peers, removePeer{ID: id, Remove: true})
-	}
-
 	{
 		// https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html#tag/Layout/operation/ApplyLayout
 		err := c.do(ctx, nil, "POST", "/v1/layout", peers)
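For context on the wire format: garage's ApplyLayout endpoint (linked above) takes add/modify entries and removal tombstones in one JSON array, which is why the removed code mixed PeerLayout values with {"id": ..., "remove": true} objects. A runnable illustration of that payload shape (field names per the removed struct; the IDs are made up):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	type removePeer struct {
		ID     string `json:"id"`
		Remove bool   `json:"remove"`
	}

	payload := []any{
		// Add/modify a peer...
		map[string]any{
			"id": "aaaa1111", "zone": "host1",
			"capacity": 1_000_000_000, "tags": []string{},
		},
		// ...and flag another for removal, in the same request body.
		removePeer{ID: "bbbb2222", Remove: true},
	}

	b, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(b)) // body for POST /v1/layout
}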

View File

@@ -3,7 +3,7 @@ module isle
 go 1.22

 require (
-	code.betamike.com/micropelago/pmux v0.0.0-20241029124408-55bc7097f7b8
+	code.betamike.com/micropelago/pmux v0.0.0-20240719134913-f5fce902e8c4
 	dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233
 	github.com/adrg/xdg v0.4.0
 	github.com/jxskiss/base62 v1.1.0

View File

@ -1,7 +1,5 @@
code.betamike.com/micropelago/pmux v0.0.0-20240719134913-f5fce902e8c4 h1:n4pGP12kgdH5kCTF4TZYpOjWuAR/zZLpM9j3neeVMEk= code.betamike.com/micropelago/pmux v0.0.0-20240719134913-f5fce902e8c4 h1:n4pGP12kgdH5kCTF4TZYpOjWuAR/zZLpM9j3neeVMEk=
code.betamike.com/micropelago/pmux v0.0.0-20240719134913-f5fce902e8c4/go.mod h1:WlEWacLREVfIQl1IlBjKzuDgL56DFRvyl7YiL5gGP4w= code.betamike.com/micropelago/pmux v0.0.0-20240719134913-f5fce902e8c4/go.mod h1:WlEWacLREVfIQl1IlBjKzuDgL56DFRvyl7YiL5gGP4w=
code.betamike.com/micropelago/pmux v0.0.0-20241029124408-55bc7097f7b8 h1:DTAMr60/y9ZtpMORg50LYGpxyvA9+3UFKVW4mb4CwAE=
code.betamike.com/micropelago/pmux v0.0.0-20241029124408-55bc7097f7b8/go.mod h1:WlEWacLREVfIQl1IlBjKzuDgL56DFRvyl7YiL5gGP4w=
dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233 h1:Ea4HixNfDNDPh7zMngPpEeDf8gpociSPEROBFBedqIY= dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233 h1:Ea4HixNfDNDPh7zMngPpEeDf8gpociSPEROBFBedqIY=
dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233/go.mod h1:nP+AtQWrc3k5qq5y3ABiBLkOfUPlk/FO9fpTFpF+jgs= dev.mediocregopher.com/mediocre-go-lib.git v0.0.0-20241023182613-55984cdf5233/go.mod h1:nP+AtQWrc3k5qq5y3ABiBLkOfUPlk/FO9fpTFpF+jgs=
github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls=

View File

@@ -1,32 +0,0 @@
-package toolkit
-
-import (
-	"strings"
-
-	"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
-)
-
-type logLevel struct {
-	level int
-	name  string
-}
-
-func (l logLevel) Int() int       { return l.level }
-func (l logLevel) String() string { return l.name }
-
-var (
-	// LogLevelChild is used for logging out the stdout, stderr, and system logs
-	// (from pmux) related to child processes.
-	LogLevelChild mlog.Level = logLevel{mlog.LevelInfo.Int() + 1, "CHILD"}
-)
-
-// LogLevelFromString parses a string as a log level, taking into account custom
-// log levels introduced in Isle.
-func LogLevelFromString(str string) mlog.Level {
-	switch strings.TrimSpace(strings.ToUpper(str)) {
-	case LogLevelChild.String():
-		return LogLevelChild
-	default:
-		return mlog.LevelFromString(str)
-	}
-}

View File

@@ -1,11 +1,8 @@
 package toolkit

 import (
-	"fmt"
 	"os"
 	"testing"
-
-	"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
 )

 // MarkIntegrationTest marks a test as being an integration test. It will be
@@ -15,19 +12,3 @@ func MarkIntegrationTest(t *testing.T) {
 		t.Skip("Skipped because ISLE_INTEGRATION_TEST isn't set")
 	}
 }
-
-// NewTestLogger returns a Logger which should be used for testing purposes. The
-// log level of the Logger can be adjusted using the ISLE_LOG_LEVEL envvar.
-func NewTestLogger(t *testing.T) *mlog.Logger {
-	level := mlog.LevelInfo
-	if levelStr := os.Getenv("ISLE_LOG_LEVEL"); levelStr != "" {
-		if level = LogLevelFromString(levelStr); level == nil {
-			panic(fmt.Sprintf("invalid log level: %q", levelStr))
-		}
-	}
-
-	return mlog.NewLogger(&mlog.LoggerOpts{
-		MessageHandler: mlog.NewTestMessageHandler(t),
-		MaxLevel:       level.Int(),
-	})
-}