Compare commits
No commits in common. "93301b9f4c8b883abf3a6a3ed78d6814322c6a84" and "c645a8c7675fa4bf162c247fe386d933c6f540ce" have entirely different histories.
93301b9f4c ... c645a8c767

default.nix (12 lines changed)
@@ -157,18 +157,16 @@ in rec {
unset SOURCE_DATE_EPOCH
appimagetool ./isle.AppDir
mv Isle-* "$out"
mkdir -p "$out"/bin
chmod +w "$out" -R
mv Isle-* "$out"/bin/isle
'';
};
appImageBin = pkgs.runCommand "isle-AppImage-bin" {} ''
mkdir -p "$out"/bin
cp ${appImage} "$out"/bin/isle
'';
tests = pkgs.writeScript "isle-tests" ''
export PATH=${pkgs.lib.makeBinPath [
appImageBin
appImage
pkgs.busybox
pkgs.yq-go
pkgs.jq
dist/linux/arch/default.nix (vendored, 86 lines removed)
@@ -1,86 +0,0 @@
{
pkgs,
buildSystem,
releaseName,
appImage,
}: let
cpuArch = (pkgs.lib.systems.parse.mkSystemFromString buildSystem).cpu.name;
pkgbuild = pkgs.writeText "isle-arch-PKGBUILD-${releaseName}-${cpuArch}" ''
pkgname=isle
pkgver=${releaseName}
pkgrel=0
pkgdesc="The foundation for an autonomous community cloud infrastructure."
arch=('${cpuArch}')
url="https://code.betamike.com/micropelago/isle"
license=('AGPL-3.0-or-later')
depends=(
'fuse2'
)
# The appImage is deliberately kept separate from the src.tar.zst. For some
# reason including the appImage within the archive results in a large part
# of the binary being stripped away and some weird skeleton appImage comes
# out the other end.
source=('isle' 'src.tar.zst')
md5sums=('SKIP' 'SKIP')
noextract=('isle')
package() {
cp -r etc "$pkgdir"/etc
cp -r usr "$pkgdir"/usr
mkdir -p "$pkgdir"/usr/bin/
cp isle "$pkgdir"/usr/bin/
}
'';
in
pkgs.stdenv.mkDerivation {
name = "isle-arch-pkg-${releaseName}-${cpuArch}";
nativeBuildInputs = [
pkgs.zstd
pkgs.pacman
pkgs.fakeroot
pkgs.libarchive
];
inherit pkgbuild;
src = appImage;
appDir = ../../../AppDir;
systemdService = ../isle.service;
dontUnpack = true;
buildPhase = ''
mkdir -p root/etc/isle/
cp "$appDir"/etc/daemon.yml root/etc/isle/daemon.yml
mkdir -p root/usr/lib/sysusers.d/
cat >root/usr/lib/sysusers.d/isle.conf <<EOF
u isle - "isle Daemon"
EOF
mkdir -p root/usr/lib/systemd/system
cp "$systemdService" root/usr/lib/systemd/system/isle.service
cp $pkgbuild PKGBUILD
tar -cf src.tar.zst --zstd --mode=a+rX,u+w -C root .
cp "$src" isle
PKGEXT=".pkg.tar.zst" makepkg \
--nodeps \
--config ${pkgs.pacman}/etc/makepkg.conf
'';
installPhase = ''
mkdir -p $out
cp *.pkg.tar.zst $out/
'';
# NOTE if https://github.com/NixOS/nixpkgs/issues/241911 is ever addressed
# it'd be nice to add an automatic check using namcap here.
}
dist/linux/isle.service (vendored, 16 lines removed)
@@ -1,16 +0,0 @@
[Unit]
Description=Isle
Requires=network.target
After=network.target

[Service]
User=isle
ExecStart=/usr/bin/isle daemon -c /etc/isle/daemon.yml
RuntimeDirectory=isle
RuntimeDirectoryMode=0700
StateDirectory=isle
StateDirectoryMode=0700
AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE

[Install]
WantedBy=multi-user.target
@@ -7,30 +7,34 @@ state AppDir {
note "All relative paths are relative to the root of the AppDir" as N1
state "./bin/entrypoint daemon -c ./daemon.yml" as entrypoint {
entrypoint : * Create runtime dir
entrypoint : * Lock runtime dir
entrypoint : * Merge given and default daemon.yml files
entrypoint : * Copy bootstrap.json into state directory, if it's not there
entrypoint : * Merge daemon.yml config into bootstrap.json
entrypoint : * Create $RUNTIME_DIRECTORY/dnsmasq.conf
entrypoint : * Create $RUNTIME_DIRECTORY/nebula.yml
entrypoint : * Create $RUNTIME_DIRECTORY/garage-N.toml\n (one per storage allocation)
entrypoint : * Spawn child processes
entrypoint : * Wait for nebula & garage to initialize
entrypoint : * Updates garage cluster layout
entrypoint : * Stores host info in global bucket, based on latest bootstrap.json
state "./AppRun" as AppRun {
AppRun : * Set PATH to APPDIR/bin
}
init --> entrypoint : exec
state "./bin/entrypoint daemon -c ./daemon.yml" as entrypoint {
entrypoint : * Create runtime dir at $_RUNTIME_DIR_PATH
entrypoint : * Lock runtime dir
entrypoint : * Merge given and default daemon.yml files
entrypoint : * Copy bootstrap.json into $_DATA_DIR_PATH, if it's not there
entrypoint : * Merge daemon.yml config into bootstrap.json
entrypoint : * Create $_RUNTIME_DIR_PATH/dnsmasq.conf
entrypoint : * Create $_RUNTIME_DIR_PATH/nebula.yml
entrypoint : * Create $_RUNTIME_DIR_PATH/garage-N.toml\n (one per storage allocation)
entrypoint : * Run child processes
entrypoint : * (in the background) Updates garage cluster layout
entrypoint : * (in the background) Stores host info in global bucket
}
state "./bin/dnsmasq -d -C $RUNTIME_DIRECTORY/dnsmasq.conf" as dnsmasq
init --> AppRun : exec
AppRun --> entrypoint : exec
state "./bin/dnsmasq -d -C $_RUNTIME_DIR_PATH/dnsmasq.conf" as dnsmasq
entrypoint --> dnsmasq : child
state "./bin/nebula -config $RUNTIME_DIRECTORY/nebula.yml" as nebula
state "./bin/nebula -config $_RUNTIME_DIR_PATH/nebula.yml" as nebula
entrypoint --> nebula : child
state "./bin/garage -c $RUNTIME_DIRECTORY/garage-N.toml server" as garage
state "./bin/garage -c $_RUNTIME_DIR_PATH/garage-N.toml server" as garage
entrypoint --> garage : child (one per storage allocation)
}
File diff suppressed because one or more lines are too long
(image changed: 8.9 KiB before, 9.7 KiB after)
@@ -16,9 +16,9 @@ import (
"sort"
)
// StateDirPath returns the path within the user's state directory where the
// DataDirPath returns the path within the user's data directory where the
// bootstrap file is stored.
func StateDirPath(dataDirPath string) string {
func DataDirPath(dataDirPath string) string {
return filepath.Join(dataDirPath, "bootstrap.json")
}
@@ -1,6 +1,7 @@
package main
import (
"context"
"crypto/rand"
"encoding/hex"
"errors"
@@ -14,6 +15,7 @@ import (
"os"
"strings"
"code.betamike.com/micropelago/pmux/pmuxlib"
"github.com/mediocregopher/mediocre-go-lib/v2/mlog"
)
@@ -166,26 +168,50 @@ var subCmdAdminCreateNetwork = subCmd{
return fmt.Errorf("merging daemon config into bootstrap data: %w", err)
}
daemonInst, err := daemon.New(
ctx,
logger.WithNamespace("daemon"),
daemonConfig,
hostBootstrap,
envRuntimeDirPath,
envBinDirPath,
&daemon.Opts{
// SkipHostBootstrapPush is required, because the global bucket
// hasn't actually been initialized yet, so there's nowhere to
// push to.
SkipHostBootstrapPush: true,
// NOTE both stdout and stderr are sent to stderr, so that the
// user can pipe the resulting admin.json to stdout.
Stdout: os.Stderr,
},
)
nebulaPmuxProcConfig, err := nebulaPmuxProcConfig(hostBootstrap, daemonConfig)
if err != nil {
return fmt.Errorf("initializing daemon: %w", err)
return fmt.Errorf("generating nebula config: %w", err)
}
garagePmuxProcConfigs, err := garagePmuxProcConfigs(hostBootstrap, daemonConfig)
if err != nil {
return fmt.Errorf("generating garage configs: %w", err)
}
pmuxConfig := pmuxlib.Config{
Processes: append(
[]pmuxlib.ProcessConfig{
nebulaPmuxProcConfig,
},
garagePmuxProcConfigs...,
),
}
ctx, cancel := context.WithCancel(ctx)
pmuxDoneCh := make(chan struct{})
logger.Info(ctx, "starting child processes")
go func() {
// NOTE both stdout and stderr are sent to stderr, so that the user
// can pipe the resulting admin.json to stdout.
pmuxlib.Run(ctx, os.Stderr, os.Stderr, pmuxConfig)
close(pmuxDoneCh)
}()
defer func() {
cancel()
logger.Info(ctx, "waiting for child processes to exit")
<-pmuxDoneCh
}()
logger.Info(ctx, "waiting for garage instances to come online")
if err := waitForGarageAndNebula(ctx, logger, hostBootstrap, daemonConfig); err != nil {
return fmt.Errorf("waiting for garage to start up: %w", err)
}
logger.Info(ctx, "applying initial garage layout")
if err := garageApplyLayout(ctx, logger, hostBootstrap, daemonConfig); err != nil {
return fmt.Errorf("applying initial garage layout: %w", err)
}
logger.Info(ctx, "initializing garage shared global bucket")
@@ -193,17 +219,6 @@ var subCmdAdminCreateNetwork = subCmd{
ctx, logger, hostBootstrap, daemonConfig,
)
if cErr := (garage.AdminClientError{}); errors.As(err, &cErr) && cErr.StatusCode == 409 {
return fmt.Errorf("shared global bucket has already been created, are the storage allocations from a previously initialized isle being used?")
} else if err != nil {
return fmt.Errorf("initializing garage shared global bucket: %w", err)
}
if err := daemonInst.Shutdown(ctx); err != nil {
return fmt.Errorf("shutting down daemon: %w (this can mean there are zombie children leftover)", err)
}
hostBootstrap.Garage.GlobalBucketS3APICredentials = garageGlobalBucketCreds
// rewrite the bootstrap now that the global bucket creds have been
@@ -212,6 +227,13 @@ var subCmdAdminCreateNetwork = subCmd{
return fmt.Errorf("writing bootstrap file: %w", err)
}
if cErr := (garage.AdminClientError{}); errors.As(err, &cErr) && cErr.StatusCode == 409 {
return fmt.Errorf("shared global bucket has already been created, are the storage allocations from a previously initialized isle being used?")
} else if err != nil {
return fmt.Errorf("initializing garage shared global bucket: %w", err)
}
logger.Info(ctx, "cluster initialized successfully, writing admin.json to stdout")
adm := admin.Admin{
@@ -11,17 +11,17 @@ import (
func loadHostBootstrap() (bootstrap.Bootstrap, error) {
stateDirPath := bootstrap.StateDirPath(envStateDirPath)
dataDirPath := bootstrap.DataDirPath(envDataDirPath)
hostBootstrap, err := bootstrap.FromFile(stateDirPath)
hostBootstrap, err := bootstrap.FromFile(dataDirPath)
if errors.Is(err, fs.ErrNotExist) {
return bootstrap.Bootstrap{}, fmt.Errorf(
"%q not found, has the daemon ever been run?",
stateDirPath,
dataDirPath,
)
} else if err != nil {
return bootstrap.Bootstrap{}, fmt.Errorf("loading %q: %w", stateDirPath, err)
return bootstrap.Bootstrap{}, fmt.Errorf("loading %q: %w", dataDirPath, err)
}
return hostBootstrap, nil
@@ -29,7 +29,7 @@ func loadHostBootstrap() (bootstrap.Bootstrap, error) {
func writeBootstrapToDataDir(hostBootstrap bootstrap.Bootstrap) error {
path := bootstrap.StateDirPath(envStateDirPath)
path := bootstrap.DataDirPath(envDataDirPath)
dirPath := filepath.Dir(path)
if err := os.MkdirAll(dirPath, 0700); err != nil {
@@ -7,11 +7,13 @@ import (
"fmt"
"io/fs"
"os"
"sync"
"time"
"isle/bootstrap"
"isle/daemon"
"code.betamike.com/micropelago/pmux/pmuxlib"
"github.com/mediocregopher/mediocre-go-lib/v2/mctx"
"github.com/mediocregopher/mediocre-go-lib/v2/mlog"
)
@@ -92,24 +94,77 @@ func runDaemonPmuxOnce(
) (
bootstrap.Bootstrap, error,
) {
daemonInst, err := daemon.New(
ctx,
logger.WithNamespace("daemon"),
daemonConfig,
hostBootstrap,
envRuntimeDirPath,
envBinDirPath,
nil,
)
nebulaPmuxProcConfig, err := nebulaPmuxProcConfig(hostBootstrap, daemonConfig)
if err != nil {
return bootstrap.Bootstrap{}, fmt.Errorf("initializing daemon: %w", err)
return bootstrap.Bootstrap{}, fmt.Errorf("generating nebula config: %w", err)
}
defer func() {
if err := daemonInst.Shutdown(ctx); err != nil {
logger.Error(ctx, "failed to cleanly shutdown daemon", err)
}
dnsmasqPmuxProcConfig, err := dnsmasqPmuxProcConfig(hostBootstrap, daemonConfig)
if err != nil {
return bootstrap.Bootstrap{}, fmt.Errorf("generating dnsmasq config: %w", err)
}
garagePmuxProcConfigs, err := garagePmuxProcConfigs(hostBootstrap, daemonConfig)
if err != nil {
return bootstrap.Bootstrap{}, fmt.Errorf("generating garage children configs: %w", err)
}
pmuxConfig := pmuxlib.Config{
Processes: append(
[]pmuxlib.ProcessConfig{
nebulaPmuxProcConfig,
dnsmasqPmuxProcConfig,
},
garagePmuxProcConfigs...,
),
}
var wg sync.WaitGroup
defer wg.Wait()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wg.Add(1)
go func() {
defer wg.Done()
pmuxlib.Run(ctx, os.Stdout, os.Stderr, pmuxConfig)
}()
if err := waitForGarageAndNebula(ctx, logger, hostBootstrap, daemonConfig); err != nil {
return bootstrap.Bootstrap{}, fmt.Errorf("waiting for nebula/garage to start up: %w", err)
}
if len(daemonConfig.Storage.Allocations) > 0 {
err := doOnce(ctx, func(ctx context.Context) error {
if err := garageApplyLayout(ctx, logger, hostBootstrap, daemonConfig); err != nil {
logger.Error(ctx, "applying garage layout", err)
return err
}
return nil
})
if err != nil {
return bootstrap.Bootstrap{}, fmt.Errorf("applying garage layout: %w", err)
}
}
err = doOnce(ctx, func(ctx context.Context) error {
if err := hostBootstrap.PutGarageBoostrapHost(ctx); err != nil {
logger.Error(ctx, "updating host info in garage", err)
return err
}
return nil
})
if err != nil {
return bootstrap.Bootstrap{}, fmt.Errorf("updating host info in garage: %w", err)
}
ticker := time.NewTicker(3 * time.Minute)
defer ticker.Stop()
@@ -121,7 +176,7 @@ func runDaemonPmuxOnce(
case <-ticker.C:
logger.Info(ctx, "checking for changes to bootstrap")
fmt.Fprintln(os.Stderr, "checking for changes to bootstrap")
var (
changed bool
@@ -190,21 +245,20 @@ var subCmdDaemon = subCmd{
defer runtimeDirCleanup()
var (
bootstrapStateDirPath = bootstrap.StateDirPath(envStateDirPath)
bootstrapAppDirPath = bootstrap.AppDirPath(envAppDirPath)
bootstrapDataDirPath = bootstrap.DataDirPath(envDataDirPath)
bootstrapAppDirPath = bootstrap.AppDirPath(envAppDirPath)
hostBootstrapPath string
hostBootstrap bootstrap.Bootstrap
)
tryLoadBootstrap := func(path string) bool {
ctx := mctx.Annotate(ctx, "bootstrapFilePath", path)
if err != nil {
return false
} else if hostBootstrap, err = bootstrap.FromFile(path); errors.Is(err, fs.ErrNotExist) {
logger.WarnString(ctx, "bootstrap file not found")
fmt.Fprintf(os.Stderr, "bootstrap file not found at %q\n", path)
err = nil
return false
@@ -213,14 +267,17 @@ var subCmdDaemon = subCmd{
return false
}
logger.Info(ctx, "bootstrap file found")
logger.Info(
mctx.Annotate(ctx, "bootstrapFilePath", path),
"bootstrap file found",
)
hostBootstrapPath = path
return true
}
switch {
case tryLoadBootstrap(bootstrapStateDirPath):
case tryLoadBootstrap(bootstrapDataDirPath):
case *bootstrapPath != "" && tryLoadBootstrap(*bootstrapPath):
case tryLoadBootstrap(bootstrapAppDirPath):
case err != nil:
@@ -229,7 +286,7 @@ var subCmdDaemon = subCmd{
return errors.New("No bootstrap.json file could be found, and one is not provided with --bootstrap-path")
}
if hostBootstrapPath != bootstrapStateDirPath {
if hostBootstrapPath != bootstrapDataDirPath {
// If the bootstrap file is not being stored in the data dir, copy
// it there, so it can be loaded from there next time.
@@ -246,8 +303,8 @@ var subCmdDaemon = subCmd{
// we update this Host's data using whatever configuration has been
// provided by the daemon config. This way the daemon has the most
// up-to-date possible bootstrap. This updated bootstrap will later get
// updated in garage as a background daemon task, so other hosts will
// see it as well.
// updated in garage using bootstrap.PutGarageBoostrapHost, so other
// hosts will see it as well.
if hostBootstrap, daemonConfig, err = coalesceDaemonConfigAndBootstrap(hostBootstrap, daemonConfig); err != nil {
return fmt.Errorf("merging daemon config into bootstrap data: %w", err)
}
@@ -1,10 +1,12 @@
package main
import (
"context"
"fmt"
"isle/bootstrap"
"isle/daemon"
"isle/garage/garagesrv"
"time"
)
func coalesceDaemonConfigAndBootstrap(
@@ -50,3 +52,15 @@ func coalesceDaemonConfigAndBootstrap(
return hostBootstrap, daemonConfig, nil
}
func doOnce(ctx context.Context, fn func(context.Context) error) error {
for {
if err := fn(ctx); err == nil {
return nil
} else if ctxErr := ctx.Err(); ctxErr != nil {
return ctxErr
}
time.Sleep(1 * time.Second)
}
}
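The doOnce helper added above retries its callback once per second until the callback succeeds or the context is canceled (despite the name, it may invoke the callback many times). A minimal, self-contained sketch of how a helper with this signature behaves in isolation; the failing-then-succeeding callback and the attempt counter are hypothetical and only illustrate the retry loop:

// Standalone sketch (not part of the diff); doOnce is copied from the change above.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func doOnce(ctx context.Context, fn func(context.Context) error) error {
	for {
		if err := fn(ctx); err == nil {
			return nil
		} else if ctxErr := ctx.Err(); ctxErr != nil {
			return ctxErr
		}
		time.Sleep(1 * time.Second)
	}
}

func main() {
	// Give up after 5 seconds even if the callback never succeeds.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	attempts := 0
	err := doOnce(ctx, func(context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet") // fails twice, then succeeds
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}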
@@ -1,9 +1,10 @@
package daemon
package main
import (
"fmt"
"isle/bootstrap"
"isle/daemon"
"isle/dnsmasq"
"fmt"
"path/filepath"
"sort"
@@ -11,13 +12,13 @@ import (
)
func dnsmasqPmuxProcConfig(
runtimeDirPath, binDirPath string,
hostBootstrap bootstrap.Bootstrap,
daemonConfig Config,
daemonConfig daemon.Config,
) (
pmuxlib.ProcessConfig, error,
) {
confPath := filepath.Join(runtimeDirPath, "dnsmasq.conf")
confPath := filepath.Join(envRuntimeDirPath, "dnsmasq.conf")
hostsSlice := make([]dnsmasq.ConfDataHost, 0, len(hostBootstrap.Hosts))
for _, host := range hostBootstrap.Hosts {
@@ -44,7 +45,7 @@ func dnsmasqPmuxProcConfig(
return pmuxlib.ProcessConfig{
Name: "dnsmasq",
Cmd: filepath.Join(binDirPath, "dnsmasq"),
Cmd: binPath("dnsmasq"),
Args: []string{"-d", "-C", confPath},
}, nil
}
@@ -13,7 +13,7 @@ import (
// order to prevent it from doing so.
func initMCConfigDir() (string, error) {
var (
path = filepath.Join(envStateDirPath, "mc")
path = filepath.Join(envDataDirPath, "mc")
sharePath = filepath.Join(path, "share")
configJSONPath = filepath.Join(path, "config.json")
)
@@ -6,10 +6,177 @@ import (
"isle/bootstrap"
"isle/daemon"
"isle/garage"
"isle/garage/garagesrv"
"net"
"path/filepath"
"strconv"
"code.betamike.com/micropelago/pmux/pmuxlib"
"github.com/mediocregopher/mediocre-go-lib/v2/mctx"
"github.com/mediocregopher/mediocre-go-lib/v2/mlog"
)
func garageAdminClientLogger(logger *mlog.Logger) *mlog.Logger {
return logger.WithNamespace("garageAdminClient")
}
// newGarageAdminClient will return an AdminClient for a local garage instance,
// or it will _panic_ if there is no local instance configured.
func newGarageAdminClient(
logger *mlog.Logger,
hostBootstrap bootstrap.Bootstrap,
daemonConfig daemon.Config,
) *garage.AdminClient {
thisHost := hostBootstrap.ThisHost()
return garage.NewAdminClient(
garageAdminClientLogger(logger),
net.JoinHostPort(
thisHost.IP().String(),
strconv.Itoa(daemonConfig.Storage.Allocations[0].AdminPort),
),
hostBootstrap.Garage.AdminToken,
)
}
func waitForGarageAndNebula(
ctx context.Context,
logger *mlog.Logger,
hostBootstrap bootstrap.Bootstrap,
daemonConfig daemon.Config,
) error {
if err := waitForNebula(ctx, hostBootstrap); err != nil {
return fmt.Errorf("waiting for nebula to start: %w", err)
}
allocs := daemonConfig.Storage.Allocations
// if this host doesn't have any allocations specified then fall back to
// waiting for nebula
if len(allocs) == 0 {
return nil
}
adminClientLogger := garageAdminClientLogger(logger)
for _, alloc := range allocs {
adminAddr := net.JoinHostPort(
hostBootstrap.ThisHost().IP().String(),
strconv.Itoa(alloc.AdminPort),
)
adminClient := garage.NewAdminClient(
adminClientLogger,
adminAddr,
hostBootstrap.Garage.AdminToken,
)
ctx := mctx.Annotate(ctx, "garageAdminAddr", adminAddr)
logger.Debug(ctx, "wating for garage instance to start")
if err := adminClient.Wait(ctx); err != nil {
return fmt.Errorf("waiting for garage instance %q to start up: %w", adminAddr, err)
}
}
return nil
}
// bootstrapGarageHostForAlloc returns the bootstrap.GarageHostInstance which
// corresponds with the given alloc from the daemon config. This will panic if
// no associated instance can be found.
//
// This assumes that coalesceDaemonConfigAndBootstrap has already been called.
func bootstrapGarageHostForAlloc(
host bootstrap.Host,
alloc daemon.ConfigStorageAllocation,
) bootstrap.GarageHostInstance {
for _, inst := range host.Garage.Instances {
if inst.RPCPort == alloc.RPCPort {
return inst
}
}
panic(fmt.Sprintf("could not find alloc %+v in the bootstrap data", alloc))
}
func garageWriteChildConfig(
hostBootstrap bootstrap.Bootstrap,
alloc daemon.ConfigStorageAllocation,
) (
string, error,
) {
thisHost := hostBootstrap.ThisHost()
id := bootstrapGarageHostForAlloc(thisHost, alloc).ID
peer := garage.LocalPeer{
RemotePeer: garage.RemotePeer{
ID: id,
IP: thisHost.IP().String(),
RPCPort: alloc.RPCPort,
S3APIPort: alloc.S3APIPort,
},
AdminPort: alloc.AdminPort,
}
garageTomlPath := filepath.Join(
envRuntimeDirPath, fmt.Sprintf("garage-%d.toml", alloc.RPCPort),
)
err := garagesrv.WriteGarageTomlFile(garageTomlPath, garagesrv.GarageTomlData{
MetaPath: alloc.MetaPath,
DataPath: alloc.DataPath,
RPCSecret: hostBootstrap.Garage.RPCSecret,
AdminToken: hostBootstrap.Garage.AdminToken,
LocalPeer: peer,
BootstrapPeers: hostBootstrap.GaragePeers(),
})
if err != nil {
return "", fmt.Errorf("creating garage.toml file at %q: %w", garageTomlPath, err)
}
return garageTomlPath, nil
}
func garagePmuxProcConfigs(
hostBootstrap bootstrap.Bootstrap,
daemonConfig daemon.Config,
) (
[]pmuxlib.ProcessConfig, error,
) {
var pmuxProcConfigs []pmuxlib.ProcessConfig
for _, alloc := range daemonConfig.Storage.Allocations {
childConfigPath, err := garageWriteChildConfig(hostBootstrap, alloc)
if err != nil {
return nil, fmt.Errorf("writing child config file for alloc %+v: %w", alloc, err)
}
pmuxProcConfigs = append(pmuxProcConfigs, pmuxlib.ProcessConfig{
Name: fmt.Sprintf("garage-%d", alloc.RPCPort),
Cmd: binPath("garage"),
Args: []string{"-c", childConfigPath, "server"},
StartAfterFunc: func(ctx context.Context) error {
return waitForNebula(ctx, hostBootstrap)
},
})
}
return pmuxProcConfigs, nil
}
func garageInitializeGlobalBucket(
ctx context.Context,
logger *mlog.Logger,
@@ -18,9 +185,7 @@ func garageInitializeGlobalBucket(
) (
garage.S3APICredentials, error,
) {
adminClient := daemon.NewGarageAdminClient(
logger, hostBootstrap, daemonConfig,
)
adminClient := newGarageAdminClient(logger, hostBootstrap, daemonConfig)
creds, err := adminClient.CreateS3APICredentials(
ctx, garage.GlobalBucketS3APICredentialsName,
@@ -48,3 +213,38 @@ func garageInitializeGlobalBucket(
return creds, nil
}
func garageApplyLayout(
ctx context.Context,
logger *mlog.Logger,
hostBootstrap bootstrap.Bootstrap,
daemonConfig daemon.Config,
) error {
var (
adminClient = newGarageAdminClient(logger, hostBootstrap, daemonConfig)
thisHost = hostBootstrap.ThisHost()
hostName = thisHost.Name
allocs = daemonConfig.Storage.Allocations
peers = make([]garage.PeerLayout, len(allocs))
)
for i, alloc := range allocs {
id := bootstrapGarageHostForAlloc(thisHost, alloc).ID
zone := hostName
if alloc.Zone != "" {
zone = alloc.Zone
}
peers[i] = garage.PeerLayout{
ID: id,
Capacity: alloc.Capacity * 1_000_000_000,
Zone: zone,
Tags: []string{},
}
}
return adminClient.ApplyLayout(ctx, peers)
}
@@ -25,30 +25,14 @@ func getAppDirPath() string {
return appDirPath
}
func envOr(name, fallback string) string {
if v := os.Getenv(name); v != "" {
return v
}
return fallback
}
// RUNTIME_DIRECTORY/STATE_DIRECTORY are used by the systemd service in
// conjunction with the RuntimeDirectory/StateDirectory directives.
var (
envAppDirPath = getAppDirPath()
envRuntimeDirPath = envOr(
"RUNTIME_DIRECTORY",
filepath.Join(xdg.RuntimeDir, "isle"),
)
envStateDirPath = envOr(
"STATE_DIRECTORY",
filepath.Join(xdg.StateHome, "isle"),
)
envBinDirPath = filepath.Join(envAppDirPath, "bin")
envRuntimeDirPath = filepath.Join(xdg.RuntimeDir, "isle")
envDataDirPath = filepath.Join(xdg.DataHome, "isle")
)
func binPath(name string) string {
return filepath.Join(envBinDirPath, name)
return filepath.Join(envAppDirPath, "bin", name)
}
func main() {
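The removed envOr helper above is what let the same binary honor systemd's RuntimeDirectory=/StateDirectory= directives (which systemd exposes as the RUNTIME_DIRECTORY and STATE_DIRECTORY environment variables) while falling back to XDG-style paths when run outside systemd. A small self-contained sketch of that fallback pattern, using plain os.Getenv and hard-coded XDG-style defaults instead of the xdg package used in the diff; the default paths here are illustrative:

// Sketch only: mirrors the removed envOr fallback logic, not the project's exact paths.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func envOr(name, fallback string) string {
	if v := os.Getenv(name); v != "" {
		return v
	}
	return fallback
}

func main() {
	home, _ := os.UserHomeDir()
	// Under a system service with RuntimeDirectory=isle, systemd sets
	// RUNTIME_DIRECTORY=/run/isle; outside systemd we fall back to XDG-style dirs.
	runtimeDir := envOr("RUNTIME_DIRECTORY", filepath.Join("/run/user", fmt.Sprint(os.Getuid()), "isle"))
	stateDir := envOr("STATE_DIRECTORY", filepath.Join(home, ".local", "state", "isle"))
	fmt.Println(runtimeDir, stateDir)
}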
@@ -1,9 +1,10 @@
package daemon
package main
import (
"context"
"fmt"
"isle/bootstrap"
"isle/daemon"
"isle/yamlutil"
"net"
"path/filepath"
@@ -16,16 +17,14 @@ import (
// this by attempting to create a UDP connection which has the nebula IP set as
// its source. If this succeeds we can assume that at the very least the nebula
// interface has been initialized.
func waitForNebula(
ctx context.Context, hostBootstrap bootstrap.Bootstrap,
) error {
func waitForNebula(ctx context.Context, hostBootstrap bootstrap.Bootstrap) error {
ip := hostBootstrap.ThisHost().IP()
lUdpAddr := &net.UDPAddr{IP: ip, Port: 0}
rUdpAddr := &net.UDPAddr{IP: ip, Port: 45535}
return until(ctx, func(context.Context) error {
return doOnce(ctx, func(context.Context) error {
conn, err := net.DialUDP("udp", lUdpAddr, rUdpAddr)
if err != nil {
return err
@@ -36,9 +35,8 @@ func waitForNebula(
}
func nebulaPmuxProcConfig(
runtimeDirPath, binDirPath string,
hostBootstrap bootstrap.Bootstrap,
daemonConfig Config,
daemonConfig daemon.Config,
) (
pmuxlib.ProcessConfig, error,
) {
@@ -124,7 +122,7 @@ func nebulaPmuxProcConfig(
}
}
nebulaYmlPath := filepath.Join(runtimeDirPath, "nebula.yml")
nebulaYmlPath := filepath.Join(envRuntimeDirPath, "nebula.yml")
if err := yamlutil.WriteYamlFile(config, nebulaYmlPath, 0440); err != nil {
return pmuxlib.ProcessConfig{}, fmt.Errorf("writing nebula.yml to %q: %w", nebulaYmlPath, err)
@@ -132,7 +130,7 @@ func nebulaPmuxProcConfig(
return pmuxlib.ProcessConfig{
Name: "nebula",
Cmd: filepath.Join(binDirPath, "nebula"),
Cmd: binPath("nebula"),
Args: []string{"-config", nebulaYmlPath},
}, nil
}
@@ -1,206 +0,0 @@
package daemon
import (
"context"
"fmt"
"isle/bootstrap"
"isle/garage"
"isle/garage/garagesrv"
"net"
"path/filepath"
"strconv"
"code.betamike.com/micropelago/pmux/pmuxlib"
"github.com/mediocregopher/mediocre-go-lib/v2/mctx"
"github.com/mediocregopher/mediocre-go-lib/v2/mlog"
)
func garageAdminClientLogger(logger *mlog.Logger) *mlog.Logger {
return logger.WithNamespace("garageAdminClient")
}
// NewGarageAdminClient will return an AdminClient for a local garage instance,
// or it will _panic_ if there is no local instance configured.
func NewGarageAdminClient(
logger *mlog.Logger,
hostBootstrap bootstrap.Bootstrap,
config Config,
) *garage.AdminClient {
thisHost := hostBootstrap.ThisHost()
return garage.NewAdminClient(
garageAdminClientLogger(logger),
net.JoinHostPort(
thisHost.IP().String(),
strconv.Itoa(config.Storage.Allocations[0].AdminPort),
),
hostBootstrap.Garage.AdminToken,
)
}
func (d *daemon) waitForGarage(ctx context.Context) error {
allocs := d.config.Storage.Allocations
// if this host doesn't have any allocations specified then fall back to
// waiting for nebula
if len(allocs) == 0 {
return nil
}
adminClientLogger := garageAdminClientLogger(d.logger)
for _, alloc := range allocs {
adminAddr := net.JoinHostPort(
d.hostBootstrap.ThisHost().IP().String(),
strconv.Itoa(alloc.AdminPort),
)
adminClient := garage.NewAdminClient(
adminClientLogger,
adminAddr,
d.hostBootstrap.Garage.AdminToken,
)
ctx := mctx.Annotate(ctx, "garageAdminAddr", adminAddr)
d.logger.Debug(ctx, "waiting for garage instance to start")
if err := adminClient.Wait(ctx); err != nil {
return fmt.Errorf("waiting for garage instance %q to start up: %w", adminAddr, err)
}
}
return nil
}
// bootstrapGarageHostForAlloc returns the bootstrap.GarageHostInstance which
// corresponds with the given alloc from the daemon config. This will panic if
// no associated instance can be found.
//
// This assumes that coalesceDaemonConfigAndBootstrap has already been called.
func bootstrapGarageHostForAlloc(
host bootstrap.Host,
alloc ConfigStorageAllocation,
) bootstrap.GarageHostInstance {
for _, inst := range host.Garage.Instances {
if inst.RPCPort == alloc.RPCPort {
return inst
}
}
panic(fmt.Sprintf("could not find alloc %+v in the bootstrap data", alloc))
}
func garageWriteChildConfig(
runtimeDirPath string,
hostBootstrap bootstrap.Bootstrap,
alloc ConfigStorageAllocation,
) (
string, error,
) {
thisHost := hostBootstrap.ThisHost()
id := bootstrapGarageHostForAlloc(thisHost, alloc).ID
peer := garage.LocalPeer{
RemotePeer: garage.RemotePeer{
ID: id,
IP: thisHost.IP().String(),
RPCPort: alloc.RPCPort,
S3APIPort: alloc.S3APIPort,
},
AdminPort: alloc.AdminPort,
}
garageTomlPath := filepath.Join(
runtimeDirPath, fmt.Sprintf("garage-%d.toml", alloc.RPCPort),
)
err := garagesrv.WriteGarageTomlFile(garageTomlPath, garagesrv.GarageTomlData{
MetaPath: alloc.MetaPath,
DataPath: alloc.DataPath,
RPCSecret: hostBootstrap.Garage.RPCSecret,
AdminToken: hostBootstrap.Garage.AdminToken,
LocalPeer: peer,
BootstrapPeers: hostBootstrap.GaragePeers(),
})
if err != nil {
return "", fmt.Errorf("creating garage.toml file at %q: %w", garageTomlPath, err)
}
return garageTomlPath, nil
}
func garagePmuxProcConfigs(
runtimeDirPath, binDirPath string,
hostBootstrap bootstrap.Bootstrap,
daemonConfig Config,
) (
[]pmuxlib.ProcessConfig, error,
) {
var pmuxProcConfigs []pmuxlib.ProcessConfig
for _, alloc := range daemonConfig.Storage.Allocations {
childConfigPath, err := garageWriteChildConfig(
runtimeDirPath, hostBootstrap, alloc,
)
if err != nil {
return nil, fmt.Errorf("writing child config file for alloc %+v: %w", alloc, err)
}
pmuxProcConfigs = append(pmuxProcConfigs, pmuxlib.ProcessConfig{
Name: fmt.Sprintf("garage-%d", alloc.RPCPort),
Cmd: filepath.Join(binDirPath, "garage"),
Args: []string{"-c", childConfigPath, "server"},
StartAfterFunc: func(ctx context.Context) error {
return waitForNebula(ctx, hostBootstrap)
},
})
}
return pmuxProcConfigs, nil
}
func garageApplyLayout(
ctx context.Context,
logger *mlog.Logger,
hostBootstrap bootstrap.Bootstrap,
config Config,
) error {
var (
adminClient = NewGarageAdminClient(logger, hostBootstrap, config)
thisHost = hostBootstrap.ThisHost()
hostName = thisHost.Name
allocs = config.Storage.Allocations
peers = make([]garage.PeerLayout, len(allocs))
)
for i, alloc := range allocs {
id := bootstrapGarageHostForAlloc(thisHost, alloc).ID
zone := hostName
if alloc.Zone != "" {
zone = alloc.Zone
}
peers[i] = garage.PeerLayout{
ID: id,
Capacity: alloc.Capacity * 1_000_000_000,
Zone: zone,
Tags: []string{},
}
}
return adminClient.ApplyLayout(ctx, peers)
}
@@ -1,20 +1,6 @@
package daemon
import (
"fmt"
"io"
"isle/yamlutil"
"os"
"path/filepath"
"strconv"
"github.com/imdario/mergo"
"gopkg.in/yaml.v3"
)
func defaultConfigPath(appDirPath string) string {
return filepath.Join(appDirPath, "etc", "daemon.yml")
}
import "strconv"
type ConfigTun struct {
Device string `yaml:"device"`
@@ -114,70 +100,3 @@ func (c *Config) fillDefaults() {
firewallGarageInbound...,
)
}
// CopyDefaultConfig copies the daemon config file embedded in the AppDir into
// the given io.Writer.
func CopyDefaultConfig(into io.Writer, appDirPath string) error {
defaultConfigPath := defaultConfigPath(appDirPath)
f, err := os.Open(defaultConfigPath)
if err != nil {
return fmt.Errorf("opening daemon config at %q: %w", defaultConfigPath, err)
}
defer f.Close()
if _, err := io.Copy(into, f); err != nil {
return fmt.Errorf("copying daemon config from %q: %w", defaultConfigPath, err)
}
return nil
}
// LoadConfig loads the daemon config from userConfigPath, merges it with
// the default found in the appDirPath, and returns the result.
//
// If userConfigPath is not given then the default is loaded and returned.
func LoadConfig(
appDirPath, userConfigPath string,
) (
Config, error,
) {
defaultConfigPath := defaultConfigPath(appDirPath)
var fullDaemon map[string]interface{}
if err := yamlutil.LoadYamlFile(&fullDaemon, defaultConfigPath); err != nil {
return Config{}, fmt.Errorf("parsing default daemon config file: %w", err)
}
if userConfigPath != "" {
var daemonConfig map[string]interface{}
if err := yamlutil.LoadYamlFile(&daemonConfig, userConfigPath); err != nil {
return Config{}, fmt.Errorf("parsing %q: %w", userConfigPath, err)
}
err := mergo.Merge(&fullDaemon, daemonConfig, mergo.WithOverride)
if err != nil {
return Config{}, fmt.Errorf("merging contents of file %q: %w", userConfigPath, err)
}
}
fullDaemonB, err := yaml.Marshal(fullDaemon)
if err != nil {
return Config{}, fmt.Errorf("yaml marshaling: %w", err)
}
var config Config
if err := yaml.Unmarshal(fullDaemonB, &config); err != nil {
return Config{}, fmt.Errorf("yaml unmarshaling back into Config struct: %w", err)
}
config.fillDefaults()
return config, nil
}
@@ -1,200 +1,85 @@
// Package daemon implements the isle daemon, which is a long-running service
// managing all isle background tasks and sub-processes for a single cluster.
// Package daemon contains types and functions related specifically to the
// isle daemon.
package daemon
import (
"context"
"fmt"
"io"
"isle/bootstrap"
"isle/yamlutil"
"os"
"time"
"path/filepath"
"code.betamike.com/micropelago/pmux/pmuxlib"
"github.com/mediocregopher/mediocre-go-lib/v2/mlog"
"github.com/imdario/mergo"
"gopkg.in/yaml.v3"
)
type daemon struct {
logger *mlog.Logger
config Config
hostBootstrap bootstrap.Bootstrap
opts Opts
pmuxCancelFn context.CancelFunc
pmuxStoppedCh chan struct{}
func defaultConfigPath(appDirPath string) string {
return filepath.Join(appDirPath, "etc", "daemon.yml")
}
// Daemon presents all functionality required for client frontends to interact
// with isle, typically via the unix socket.
type Daemon interface {
// CopyDefaultConfig copies the daemon config file embedded in the AppDir into
// the given io.Writer.
func CopyDefaultConfig(into io.Writer, appDirPath string) error {
// Shutdown blocks until all resources held or created by the daemon,
// including child processes it has started, have been cleaned up, or until
// the context is canceled.
//
// If this returns an error then it's possible that child processes are
// still running and are no longer managed.
Shutdown(context.Context) error
}
defaultConfigPath := defaultConfigPath(appDirPath)
// Opts are optional parameters which can be passed in when initializing a new
// Daemon instance. A nil Opts is equivalent to a zero value.
type Opts struct {
// SkipHostBootstrapPush, if set, will cause the Daemon to not push the
// bootstrap to garage upon a successful initialization.
SkipHostBootstrapPush bool
// Stdout and Stderr are what the associated outputs from child processes
// will be directed to.
Stdout, Stderr io.Writer
}
func (o *Opts) withDefaults() *Opts {
if o == nil {
o = new(Opts)
}
if o.Stdout == nil {
o.Stdout = os.Stdout
}
if o.Stderr == nil {
o.Stderr = os.Stderr
}
return o
}
// New initialized and returns a Daemon. If initialization fails an error is
// returned.
func New(
ctx context.Context,
logger *mlog.Logger,
config Config,
hostBootstrap bootstrap.Bootstrap,
runtimeDirPath, binDirPath string,
opts *Opts,
) (
Daemon, error,
) {
opts = opts.withDefaults()
nebulaPmuxProcConfig, err := nebulaPmuxProcConfig(
runtimeDirPath, binDirPath, hostBootstrap, config,
)
f, err := os.Open(defaultConfigPath)
if err != nil {
return nil, fmt.Errorf("generating nebula config: %w", err)
return fmt.Errorf("opening daemon config at %q: %w", defaultConfigPath, err)
}
dnsmasqPmuxProcConfig, err := dnsmasqPmuxProcConfig(
runtimeDirPath, binDirPath, hostBootstrap, config,
)
if err != nil {
return nil, fmt.Errorf("generating dnsmasq config: %w", err)
}
defer f.Close()
garagePmuxProcConfigs, err := garagePmuxProcConfigs(
runtimeDirPath, binDirPath, hostBootstrap, config,
)
if err != nil {
return nil, fmt.Errorf("generating garage children configs: %w", err)
}
pmuxConfig := pmuxlib.Config{
Processes: append(
[]pmuxlib.ProcessConfig{
nebulaPmuxProcConfig,
dnsmasqPmuxProcConfig,
},
garagePmuxProcConfigs...,
),
}
pmuxCtx, pmuxCancelFn := context.WithCancel(context.Background())
d := &daemon{
logger: logger,
config: config,
hostBootstrap: hostBootstrap,
opts: *opts,
pmuxCancelFn: pmuxCancelFn,
pmuxStoppedCh: make(chan struct{}),
}
go func() {
defer close(d.pmuxStoppedCh)
pmuxlib.Run(pmuxCtx, d.opts.Stdout, d.opts.Stderr, pmuxConfig)
}()
if initErr := d.postPmuxInit(ctx); initErr != nil {
logger.Warn(ctx, "failed to initialize daemon, shutting down child processes", err)
shutdownCtx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
if err := d.Shutdown(shutdownCtx); err != nil {
panic(fmt.Sprintf(
"failed to shut down child processes after initialization"+
" error, there may be zombie children leftover."+
" Original error: %v",
initErr,
))
}
return nil, initErr
}
return d, nil
}
func (d *daemon) postPmuxInit(ctx context.Context) error {
d.logger.Info(ctx, "waiting for nebula VPN to come online")
if err := waitForNebula(ctx, d.hostBootstrap); err != nil {
return fmt.Errorf("waiting for nebula to start: %w", err)
}
d.logger.Info(ctx, "waiting for garage instances to come online")
if err := d.waitForGarage(ctx); err != nil {
return fmt.Errorf("waiting for garage to start: %w", err)
}
if len(d.config.Storage.Allocations) > 0 {
err := until(ctx, func(ctx context.Context) error {
err := garageApplyLayout(ctx, d.logger, d.hostBootstrap, d.config)
if err != nil {
d.logger.Error(ctx, "applying garage layout", err)
return err
}
return nil
})
if err != nil {
return fmt.Errorf("applying garage layout: %w", err)
}
}
if !d.opts.SkipHostBootstrapPush {
if err := until(ctx, func(ctx context.Context) error {
if err := d.hostBootstrap.PutGarageBoostrapHost(ctx); err != nil {
d.logger.Error(ctx, "updating host info in garage", err)
return err
}
return nil
}); err != nil {
return fmt.Errorf("updating host info in garage: %w", err)
}
if _, err := io.Copy(into, f); err != nil {
return fmt.Errorf("copying daemon config from %q: %w", defaultConfigPath, err)
}
return nil
}
func (d *daemon) Shutdown(ctx context.Context) error {
d.pmuxCancelFn()
select {
case <-ctx.Done():
return ctx.Err()
case <-d.pmuxStoppedCh:
return nil
// LoadConfig loads the daemon config from userConfigPath, merges it with
// the default found in the appDirPath, and returns the result.
//
// If userConfigPath is not given then the default is loaded and returned.
func LoadConfig(
appDirPath, userConfigPath string,
) (
Config, error,
) {
defaultConfigPath := defaultConfigPath(appDirPath)
var fullDaemon map[string]interface{}
if err := yamlutil.LoadYamlFile(&fullDaemon, defaultConfigPath); err != nil {
return Config{}, fmt.Errorf("parsing default daemon config file: %w", err)
}
if userConfigPath != "" {
var daemonConfig map[string]interface{}
if err := yamlutil.LoadYamlFile(&daemonConfig, userConfigPath); err != nil {
return Config{}, fmt.Errorf("parsing %q: %w", userConfigPath, err)
}
err := mergo.Merge(&fullDaemon, daemonConfig, mergo.WithOverride)
if err != nil {
return Config{}, fmt.Errorf("merging contents of file %q: %w", userConfigPath, err)
}
}
fullDaemonB, err := yaml.Marshal(fullDaemon)
if err != nil {
return Config{}, fmt.Errorf("yaml marshaling: %w", err)
}
var config Config
if err := yaml.Unmarshal(fullDaemonB, &config); err != nil {
return Config{}, fmt.Errorf("yaml unmarshaling back into Config struct: %w", err)
}
config.fillDefaults()
return config, nil
}
@@ -1,18 +0,0 @@
package daemon
import (
"context"
"time"
)
func until(ctx context.Context, fn func(context.Context) error) error {
for {
if err := fn(ctx); err == nil {
return nil
} else if ctxErr := ctx.Err(); ctxErr != nil {
return ctxErr
}
time.Sleep(1 * time.Second)
}
}
release.nix (12 lines changed)
@@ -16,21 +16,15 @@
inherit buildSystem hostSystem releaseName revision;
}).appImage;
archPkg = ((import ./dist/linux/arch) {
inherit pkgs buildSystem releaseName appImage;
});
in pkgs.stdenv.mkDerivation {
name = "isle-release-${hostSystem}";
inherit releaseName hostSystem;
inherit appImage archPkg;
inherit releaseName appImage hostSystem;
builder = builtins.toFile "build.sh" ''
source $stdenv/setup
mkdir -p "$out"/
cp "$appImage" "$out"/isle-$releaseName-$hostSystem.AppImage
cp "$archPkg"/*.tar.zst "$out"/isle-$releaseName-$hostSystem.pkg.tar.zst
cp "$appImage"/bin/isle "$out"/isle-$releaseName-$hostSystem
'';
};
@@ -49,7 +43,7 @@ in
mkdir -p "$out"
for p in $releases; do
cp "$p"/* "$out"/
cp "$p"/isle-* "$out"/
done
(cd "$out" && sha256sum * > sha256.txt)
@@ -9,7 +9,7 @@ source "$UTILS"/with-1-data-1-empty-node-cluster.sh
[ "$(jq -r <admin.json '.CreationParams.Name')" = "testing" ]
[ "$(jq -r <admin.json '.CreationParams.Domain')" = "shared.test" ]
bootstrap_file="$XDG_STATE_HOME/isle/bootstrap.json"
bootstrap_file="$XDG_DATA_HOME/isle/bootstrap.json"
[ "$(jq -rc <"$bootstrap_file" '.AdminCreationParams')" = "$(jq -rc <admin.json '.CreationParams')" ]
[ "$(jq -rc <"$bootstrap_file" '.CAPublicCredentials')" = "$(jq -rc <admin.json '.Nebula.CACredentials.Public')" ]
@@ -1,7 +1,7 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-cluster.sh
source "$UTILS"/with-1-data-1-empty-node-cluster.sh
adminBS="$XDG_STATE_HOME"/isle/bootstrap.json
adminBS="$XDG_DATA_HOME"/isle/bootstrap.json
bs="$secondus_bootstrap" # set in with-1-data-1-empty-node-cluster.sh
[ "$(jq -r <"$bs" '.AdminCreationParams')" = "$(jq -r <admin.json '.CreationParams')" ]
@@ -48,7 +48,7 @@ echo "tmp dir is $ROOT_TMPDIR"
# Blackhole these directories so that tests don't accidentally use the host's
# real ones.
export XDG_RUNTIME_DIR=/dev/null
export XDG_STATE_HOME=/dev/null
export XDG_DATA_HOME=/dev/null
test_files=$(
find ./cases -type f -name '*.sh' \
@@ -4,13 +4,13 @@ base="$1"
TMPDIR="$ROOT_TMPDIR/$base"
XDG_RUNTIME_DIR="$TMPDIR/.run"
XDG_STATE_HOME="$TMPDIR/.state"
XDG_DATA_HOME="$TMPDIR/.data"
mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_STATE_HOME"
mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_DATA_HOME"
cat <<EOF
export TMPDIR="$TMPDIR"
export XDG_RUNTIME_DIR="$XDG_RUNTIME_DIR"
export XDG_STATE_HOME="$XDG_STATE_HOME"
export XDG_DATA_HOME="$XDG_DATA_HOME"
cd "$TMPDIR"
EOF
@@ -2,8 +2,8 @@ set -e
TMPDIR="$TMPDIR/$TEST_CASE_FILE.tmp"
XDG_RUNTIME_DIR="$TMPDIR/.run"
XDG_STATE_HOME="$TMPDIR/.state"
XDG_DATA_HOME="$TMPDIR/.data"
mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_STATE_HOME"
mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_DATA_HOME"
cd "$TMPDIR"