Compare commits

...

3 Commits

25 changed files with 207 additions and 507 deletions

View File

@@ -118,19 +118,6 @@ in rec {
'';
};
tests = pkgs.writeScript "isle-tests" ''
export PATH=${pkgs.lib.makeBinPath [
build.appImage
pkgs.busybox
pkgs.yq-go
pkgs.jq
pkgs.dig
pkgs.nebula
]}
export SHELL=${pkgs.bash}/bin/bash
exec ${pkgs.bash}/bin/bash ${./tests}/entrypoint.sh "$@"
'';
devShell = pkgs.mkShell {
buildInputs = [
pkgs.go

View File

@@ -11,5 +11,6 @@ to better understand how to navigate and work on the codebase.
These pages can be helpful in specific situations.
* [Building Isle](./building.md)
* [Testing Isle](./testing.md)
* [Rebuilding Documentation](./rebuilding-documentation.md)
* [Releases](./releases.md)

docs/dev/testing.md Normal file
View File

@@ -0,0 +1,39 @@
# Testing Isle
All tests are currently written as Go tests, and as such can be run from the
`go` directory using the standard Go testing tool.
```
cd go
go test -run Foo ./daemon
go test ./... # Test everything
```
## Integration Tests
Integration tests are those which require processes or state external to the
test itself. Integration tests are marked using the
`toolkit.MarkIntegrationTest` function, which causes them to be skipped unless
they are run in the integration test environment.
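For illustration, here is a minimal sketch of how a test opts in. The test name
and body are hypothetical, the `isle/toolkit` import path is inferred from the
other `isle/...` imports in this changeset, and `MarkIntegrationTest` is
assumed to take the `*testing.T` and skip it internally:
```
package daemon

import (
	"testing"

	"isle/toolkit"
)

func TestSomethingExternal(t *testing.T) {
	// Assumed behavior: skipped unless run within the integration test
	// environment.
	toolkit.MarkIntegrationTest(t)

	// ... test logic requiring external processes or state ...
}
```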
Besides a normal nix installation (which all Isle development requires),
integration tests also require `sudo` and [capsh][capsh] to be installed on the
system.
[capsh]: https://www.man7.org/linux/man-pages/man1/capsh.1.html
Running tests via the `go/integration_test.sh` script automatically runs them
in the integration test environment. All arguments are passed directly to the
Go testing tool.
```
cd go
./integration_test.sh -run Foo ./daemon
```
`integration_test.sh` wraps a call to `go test` in a bash shell which has all
required binaries available, and which has acquired the necessary
[capabilities][capabilities] to use those binaries. Capabilities are acquired
by elevating the user to root using `sudo`, and then dropping back down to a
shell of the original user with the capabilities set.
[capabilities]: https://wiki.archlinux.org/title/Capabilities
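For illustration, a minimal sketch of that pattern, reusing the capsh flags
from the old test wrapper removed later in this changeset; the exact invocation
inside `integration_test.sh` may differ:
```
#!/usr/bin/env bash
set -e

this_user="$(whoami)"

# Elevate to root via sudo, then immediately drop back down to the original
# user with the needed capabilities set as ambient, so that the child bash
# shell (and the go test processes it spawns) inherit them.
sudo -E capsh \
    --caps="cap_net_admin,cap_net_bind_service+eip cap_setpcap,cap_setuid,cap_setgid+ep" \
    --keep=1 \
    --user="$this_user" \
    --addamb=cap_net_admin \
    --addamb=cap_net_bind_service \
    -- -c 'go test ./...'
```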

View File

@@ -6,24 +6,10 @@ import (
// GaragePeers returns a Peer for each known garage instance in the network.
func (b Bootstrap) GaragePeers() []garage.RemotePeer {
var peers []garage.RemotePeer
for _, host := range b.Hosts {
for _, instance := range host.Garage.Instances {
peer := garage.RemotePeer{
ID: instance.ID,
IP: host.IP().String(),
RPCPort: instance.RPCPort,
S3APIPort: instance.S3APIPort,
}
peers = append(peers, peer)
}
peers = append(peers, host.GaragePeers()...)
}
return peers
}
@@ -31,18 +17,9 @@ func (b Bootstrap) GaragePeers() []garage.RemotePeer {
// will prefer a garage instance on this particular host, if there is one, but
// will otherwise return a random endpoint.
func (b Bootstrap) ChooseGaragePeer() garage.RemotePeer {
thisHost := b.ThisHost()
if len(thisHost.Garage.Instances) > 0 {
inst := thisHost.Garage.Instances[0]
return garage.RemotePeer{
ID: inst.ID,
IP: thisHost.IP().String(),
RPCPort: inst.RPCPort,
S3APIPort: inst.S3APIPort,
}
return thisHost.GaragePeers()[0]
}
for _, peer := range b.GaragePeers() {

View File

@@ -2,6 +2,7 @@ package bootstrap
import (
"fmt"
"isle/garage"
"isle/nebula"
"net/netip"
)
@@ -91,3 +92,18 @@ func (h Host) IP() netip.Addr {
return addr
}
// GaragePeers returns a RemotePeer for each garage instance advertised by this
// Host.
func (h Host) GaragePeers() []garage.RemotePeer {
var peers []garage.RemotePeer
for _, instance := range h.Garage.Instances {
peers = append(peers, garage.RemotePeer{
ID: instance.ID,
IP: h.IP().String(),
RPCPort: instance.RPCPort,
S3APIPort: instance.S3APIPort,
})
}
return peers
}

View File

@@ -12,6 +12,7 @@ import (
_ "embed"
"dev.mediocregopher.com/mediocre-go-lib.git/mctx"
"gopkg.in/yaml.v3"
)
@@ -60,6 +61,16 @@ type ConfigStorageAllocation struct {
Zone string `yaml:"zone"`
}
// Annotate implements the mctx.Annotator interface.
func (csa ConfigStorageAllocation) Annotate(aa mctx.Annotations) {
aa["allocDataPath"] = csa.DataPath
aa["allocMetaPath"] = csa.MetaPath
aa["allocCapacity"] = csa.Capacity
aa["allocS3APIPort"] = csa.S3APIPort
aa["allocRPCPort"] = csa.RPCPort
aa["allocAdminPort"] = csa.AdminPort
}
// NetworkConfig describes the configuration of a single network.
type NetworkConfig struct {
DNS struct {

View File

@@ -14,6 +14,7 @@ import (
"net"
"path/filepath"
"strconv"
"time"
"dev.mediocregopher.com/mediocre-go-lib.git/mctx"
"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
@@ -298,3 +299,45 @@ func removeGarageBootstrapHost(
ctx, garage.GlobalBucket, filePath, minio.RemoveObjectOptions{},
)
}
// We can wait for a garage instance to appear healthy, but there are cases
// where it still hasn't fully synced the list of buckets and bucket
// credentials. For those cases it's necessary to perform this additional
// check.
func garageWaitForAlloc(
ctx context.Context,
logger *mlog.Logger,
alloc daecommon.ConfigStorageAllocation,
adminToken string,
host bootstrap.Host,
) error {
var (
hostIP = host.IP().String()
adminClient = garage.NewAdminClient(
garageAdminClientLogger(logger),
net.JoinHostPort(hostIP, strconv.Itoa(alloc.AdminPort)),
adminToken,
)
)
defer adminClient.Close()
ctx = mctx.WithAnnotator(ctx, alloc)
for {
logger.Info(ctx, "Checking if node has synced bucket list")
buckets, err := adminClient.ListBuckets(ctx)
if err != nil {
return fmt.Errorf("listing buckets: %w", err)
} else if len(buckets) == 0 {
logger.WarnString(ctx, "No buckets found, will wait a bit and try again")
select {
case <-time.After(1 * time.Second):
continue
case <-ctx.Done():
return ctx.Err()
}
}
return nil
}
}

View File

@@ -23,6 +23,7 @@ import (
"sync"
"time"
"dev.mediocregopher.com/mediocre-go-lib.git/mctx"
"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
"golang.org/x/exp/maps"
)
@@ -34,6 +35,8 @@ type GarageClientParams struct {
GlobalBucketS3APICredentials garage.S3APICredentials
// RPCSecret may be empty, if the secret is not available on the host.
//
// TODO this shouldn't really be here I don't think, remove it?
RPCSecret string
}
@@ -181,13 +184,15 @@ type network struct {
networkConfig daecommon.NetworkConfig
currBootstrap bootstrap.Bootstrap
shutdownCh chan struct{}
wg sync.WaitGroup
workerCtx context.Context
workerCancel context.CancelFunc
wg sync.WaitGroup
}
// instatiateNetwork returns an instantiated *network instance which has not yet
// been initialized.
func instatiateNetwork(
ctx context.Context,
logger *mlog.Logger,
networkConfig daecommon.NetworkConfig,
envBinDirPath string,
@@ -195,6 +200,8 @@
runtimeDir toolkit.Dir,
opts *Opts,
) *network {
ctx = context.WithoutCancel(ctx)
ctx, cancel := context.WithCancel(ctx)
return &network{
logger: logger,
networkConfig: networkConfig,
@@ -202,7 +209,8 @@
stateDir: stateDir,
runtimeDir: runtimeDir,
opts: opts.withDefaults(),
shutdownCh: make(chan struct{}),
workerCtx: ctx,
workerCancel: cancel,
}
}
@@ -244,6 +252,7 @@ func Load(
Network, error,
) {
n := instatiateNetwork(
ctx,
logger,
networkConfig,
envBinDirPath,
@@ -265,7 +274,7 @@
return nil, fmt.Errorf(
"loading bootstrap from %q: %w", bootstrapFilePath, err,
)
} else if err := n.initialize(ctx, currBootstrap); err != nil {
} else if err := n.initialize(ctx, currBootstrap, false); err != nil {
return nil, fmt.Errorf("initializing with bootstrap: %w", err)
}
@@ -289,6 +298,7 @@ func Join(
Network, error,
) {
n := instatiateNetwork(
ctx,
logger,
networkConfig,
envBinDirPath,
@@ -307,7 +317,7 @@
return nil, fmt.Errorf("importing secrets: %w", err)
}
if err := n.initialize(ctx, joiningBootstrap.Bootstrap); err != nil {
if err := n.initialize(ctx, joiningBootstrap.Bootstrap, false); err != nil {
return nil, fmt.Errorf("initializing with bootstrap: %w", err)
}
@@ -355,6 +365,7 @@ func Create(
garageRPCSecret := toolkit.RandStr(32)
n := instatiateNetwork(
ctx,
logger,
networkConfig,
envBinDirPath,
@@ -390,7 +401,7 @@
return nil, fmt.Errorf("initializing bootstrap data: %w", err)
}
if err := n.initialize(ctx, hostBootstrap); err != nil {
if err := n.initialize(ctx, hostBootstrap, true); err != nil {
return nil, fmt.Errorf("initializing with bootstrap: %w", err)
}
@@ -407,8 +418,42 @@ func (n *network) initializeDirs(mayExist bool) error {
return nil
}
func (n *network) periodically(
logger *mlog.Logger,
fn func(context.Context) error,
period time.Duration,
) {
n.wg.Add(1)
go func() {
defer n.wg.Done()
ctx := mctx.Annotate(n.workerCtx, "period", period)
ticker := time.NewTicker(period)
defer ticker.Stop()
logger.Info(ctx, "Starting background job runner")
defer logger.Info(ctx, "Stopping background job runner")
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
logger.Info(ctx, "Background job running")
if err := fn(ctx); err != nil {
logger.Error(ctx, "Background job failed", err)
}
}
}
}()
}
func (n *network) initialize(
ctx context.Context, prevBootstrap bootstrap.Bootstrap,
ctx context.Context,
prevBootstrap bootstrap.Bootstrap,
isCreate bool,
) error {
prevThisHost := prevBootstrap.ThisHost()
@@ -449,41 +494,37 @@ func (n *network) initialize(
n.logger.Info(ctx, "Child processes created")
if err := n.postInit(ctx, prevThisHost); err != nil {
createGarageGlobalBucket := isCreate
err = n.postChildrenInit(ctx, prevThisHost, createGarageGlobalBucket)
if err != nil {
n.logger.Error(ctx, "Post-initialization failed, stopping child processes", err)
n.children.Shutdown()
return fmt.Errorf("performing post-initialization: %w", err)
}
// Do this now so that everything is stable before returning. This also
// serves a dual-purpose, as it makes sure that the PUT from the postInit
// serves a dual purpose, as it makes sure that the PUT from the postChildrenInit
// above has propagated from the local garage instance, if there is one.
n.logger.Info(ctx, "Reloading hosts from network storage")
if err = n.reloadHosts(ctx); err != nil {
return fmt.Errorf("Reloading network bootstrap: %w", err)
}
ctx = context.WithoutCancel(ctx)
ctx, cancel := context.WithCancel(ctx)
n.wg.Add(1)
go func() {
defer n.wg.Done()
<-n.shutdownCh
cancel()
}()
n.wg.Add(1)
go func() {
defer n.wg.Done()
n.reloadLoop(ctx)
n.logger.Debug(ctx, "Daemon reload loop stopped")
}()
n.periodically(
n.logger.WithNamespace("reloadHosts"),
n.reloadHosts,
3*time.Minute,
)
return nil
}
func (n *network) postInit(
ctx context.Context, prevThisHost bootstrap.Host,
// postChildrenInit performs steps which are required after children have been
// initialized.
func (n *network) postChildrenInit(
ctx context.Context,
prevThisHost bootstrap.Host,
createGarageGlobalBucket bool,
) error {
n.l.RLock()
defer n.l.RUnlock()
@@ -503,15 +544,7 @@ func (n *network) postInit(
}
}
// This is only necessary during network creation, otherwise the bootstrap
// should already have these credentials built in.
//
// TODO this is pretty hacky, but there doesn't seem to be a better way to
// manage it at the moment.
_, err := daecommon.GetGarageS3APIGlobalBucketCredentials(
ctx, n.secretsStore,
)
if errors.Is(err, secrets.ErrNotFound) {
if createGarageGlobalBucket {
n.logger.Info(ctx, "Initializing garage shared global bucket")
garageGlobalBucketCreds, err := garageInitializeGlobalBucket(
ctx,
@@ -532,8 +565,18 @@ func (n *network) postInit(
}
}
for _, alloc := range n.networkConfig.Storage.Allocations {
if err := garageWaitForAlloc(
ctx, n.logger, alloc, n.opts.GarageAdminToken, thisHost,
); err != nil {
return fmt.Errorf(
"waiting for alloc %+v to initialize: %w", alloc, err,
)
}
}
n.logger.Info(ctx, "Updating host info in garage")
err = putGarageBoostrapHost(ctx, n.secretsStore, n.currBootstrap)
err := putGarageBoostrapHost(ctx, n.secretsStore, n.currBootstrap)
if err != nil {
return fmt.Errorf("updating host info in garage: %w", err)
}
@@ -575,25 +618,6 @@ func (n *network) reloadHosts(ctx context.Context) error {
return nil
}
func (n *network) reloadLoop(ctx context.Context) {
const period = 3 * time.Minute
ticker := time.NewTicker(period)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
if err := n.reloadHosts(ctx); err != nil {
n.logger.Error(ctx, "Attempting to reload", err)
continue
}
}
}
}
// returns the bootstrap prior to the reload being applied.
func (n *network) reload(
ctx context.Context,
@@ -944,7 +968,7 @@ func (n *network) SetConfig(
return fmt.Errorf("reloading config: %w", err)
}
if err := n.postInit(ctx, prevBootstrap.ThisHost()); err != nil {
if err := n.postChildrenInit(ctx, prevBootstrap.ThisHost(), false); err != nil {
return fmt.Errorf("performing post-initialization: %w", err)
}
@@ -966,10 +990,11 @@ func (n *network) GetNetworkCreationParams(
}
func (n *network) Shutdown() error {
close(n.shutdownCh)
n.workerCancel()
n.wg.Wait()
if n.children != nil {
n.logger.Info(context.Background(), "Shutting down children")
n.children.Shutdown()
}

View File

@@ -29,6 +29,12 @@ const (
BucketPermissionOwner BucketPermission = "owner"
)
// Bucket defines a bucket which has been created in a cluster.
type Bucket struct {
ID BucketID `json:"id"`
GlobalAliases []string `json:"globalAliases"`
}
// AdminClientError gets returned from AdminClient Do methods for non-200
// errors.
type AdminClientError struct {
@@ -249,6 +255,13 @@ func (c *AdminClient) CreateBucket(
return BucketID(res.ID), err
}
// ListBuckets returns all buckets known to this garage node.
func (c *AdminClient) ListBuckets(ctx context.Context) ([]Bucket, error) {
var res []Bucket
err := c.do(ctx, &res, "GET", "/v1/bucket?list", nil)
return res, err
}
// GrantBucketPermissions grants the S3APICredentials with the given ID
// permission(s) to interact with the bucket of the given ID.
func (c *AdminClient) GrantBucketPermissions(

View File

@@ -1,16 +0,0 @@
#!/usr/bin/env bash
set -e
entrypoint="$(nix-build --no-out-link -A tests)"
this_user="$(whoami)"
echo "Requesting sudo in order to set thread capabilities, will drop back down to user '$this_user' immediately"
sudo -E capsh \
--caps="cap_net_admin,cap_net_bind_service+eip cap_setpcap,cap_setuid,cap_setgid+ep" \
--keep=1 \
--user="$this_user" \
--addamb=cap_net_admin \
--addamb=cap_net_bind_service \
-- "$entrypoint" "$@"

View File

@@ -1,10 +0,0 @@
Ctrl+A X -> exits
qemu-system-aarch64 -nographic -cdrom tests/alpine-virt-3.18.4-aarch64.iso
Ctrl+Alt+G -> Escape mouse capture
qemu-system-x86_64 \
-cdrom tests/virt/Win11_23H2_English_x64.iso \
-m 8G \
-boot order=d \
-drive file=./tests/virt/winblows.qcow2

View File

@@ -1,3 +0,0 @@
isle version | grep -q 'Release:'
isle version | grep -q 'Platform:'
isle version | grep -q 'Build Platform:'

View File

@@ -1,17 +0,0 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh
function assert_a {
want_ip="$1"
hostname="$2"
r="$(dig @"$current_ip" +noall +answer "$hostname")"
echo "$r" | grep -q "$want_ip"
}
as_primus
assert_a "$primus_ip" primus.hosts.shared.test
assert_a "$secondus_ip" secondus.hosts.shared.test
as_secondus
assert_a "$primus_ip" primus.hosts.shared.test
assert_a "$secondus_ip" secondus.hosts.shared.test

View File

@@ -1,21 +0,0 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh
function do_tests {
status="$(isle garage cli status | tail -n+3)"
[ "$(echo "$status" | wc -l)" = "3" ]
echo "$status" | grep -q '10.6.9.1:3900'
echo "$status" | grep -q '10.6.9.1:3910'
echo "$status" | grep -q '10.6.9.1:3920'
buckets="$(isle garage cli bucket list | tail -n+2)"
[ "$(echo "$buckets" | wc -l)" = 1 ]
echo "$buckets" | grep -q 'global-shared'
}
as_primus
do_tests
as_secondus
do_tests

View File

@@ -1,16 +0,0 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh
function do_tests {
files="$(isle garage mc -- tree --json garage)"
[ "$(echo "$files" | jq -s '.|length')" -ge "1" ]
file="$(echo "$files" | jq -sr '.[0].key')"
[ "$(isle garage mc -- cat "garage/$file" | wc -c)" -gt "0" ]
}
as_primus
do_tests
as_secondus
do_tests

View File

@@ -1,20 +0,0 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh
function do_tests {
hosts="$(isle hosts list)"
[ "$(echo "$hosts" | jq -r '.[0].Name')" = "primus" ]
[ "$(echo "$hosts" | jq -r '.[0].VPN.IP')" = "10.6.9.1" ]
[ "$(echo "$hosts" | jq -r '.[0].Storage.Instances|length')" = "3" ]
[ "$(echo "$hosts" | jq -r '.[1].Name')" = "secondus" ]
[ "$(echo "$hosts" | jq -r '.[1].VPN.IP')" = "$secondus_ip" ]
[ "$(echo "$hosts" | jq -r '.[1].Storage.Instances|length')" = "0" ]
}
as_primus
do_tests
as_secondus
do_tests

View File

@@ -1,16 +0,0 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh
bs="$secondus_bootstrap" # set in with-1-data-1-empty-node-network.sh
[ "$(jq -r <"$bs" '.Bootstrap.NetworkCreationParams.Domain')" = "shared.test" ]
[ "$(jq -r <"$bs" '.Bootstrap.NetworkCreationParams.Name')" = "testing" ]
[ "$(jq -r <"$bs" '.Bootstrap.SignedHostAssigned.Body.Name')" = "secondus" ]
[ "$(jq -r <"$bs" '.Bootstrap.Hosts.primus.PublicCredentials')" \
= "$(jq -r <"$BOOTSTRAP_FILE" '.SignedHostAssigned.Body.PublicCredentials')" ]
[ "$(jq <"$bs" '.Bootstrap.Hosts.primus.Garage.Instances|length')" = "3" ]
[ "$(jq <"$bs" '.Secrets["garage-rpc-secret"]')" != "null" ]

View File

@@ -1,12 +0,0 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh
info="$(isle nebula show)"
[ "$(echo "$info" | jq -r '.CACert')" \
= "$(jq -r <"$BOOTSTRAP_FILE" '.CAPublicCredentials.Cert')" ]
[ "$(echo "$info" | jq -r '.SubnetCIDR')" = "10.6.9.0/24" ]
[ "$(echo "$info" | jq -r '.Lighthouses|length')" = "1" ]
[ "$(echo "$info" | jq -r '.Lighthouses[0].PublicAddr')" = "127.0.0.1:60000" ]
[ "$(echo "$info" | jq -r '.Lighthouses[0].IP')" = "10.6.9.1" ]

View File

@@ -1,17 +0,0 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh
nebula-cert keygen -out-key /dev/null -out-pub pubkey
cat pubkey
(
isle nebula create-cert \
--hostname non-esiste \
--public-key-path pubkey \
2>&1 || true \
) | grep '\[1002\] Host not found'
isle nebula create-cert \
--hostname primus \
--public-key-path pubkey \
| grep -- '-----BEGIN NEBULA CERTIFICATE-----'

View File

@@ -1,12 +0,0 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh
[ "$(cat a/meta/isle/rpc_port)" = "3900" ]
[ "$(cat b/meta/isle/rpc_port)" = "3910" ]
[ "$(cat c/meta/isle/rpc_port)" = "3920" ]
[ "$(jq -r <"$BOOTSTRAP_FILE" '.NetworkCreationParams.ID')" != "" ]
[ "$(jq -r <"$BOOTSTRAP_FILE" '.NetworkCreationParams.Name')" = "testing" ]
[ "$(jq -r <"$BOOTSTRAP_FILE" '.NetworkCreationParams.Domain')" = "shared.test" ]
[ "$(jq -r <"$BOOTSTRAP_FILE" '.SignedHostAssigned.Body.Name')" = "primus" ]

View File

@@ -1,115 +0,0 @@
set -e
# cd into script's directory
cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null
root=$(pwd)
export UTILS="$root"/utils
REGEXS=()
while [[ $# -gt 0 ]]; do
case $1 in
-h|--help)
cat <<EOF
USAGE: [flags] [test regexs...]
FLAGS
--keep-tmp
--verbose (-v)
--help (-h)
EOF
exit 1
;;
-v|--verbose)
VERBOSE=1
shift
;;
--keep-tmp)
KEEP_TMP=1
shift
;;
*)
REGEXS+=("$1")
shift
;;
esac
done
[ -n "$VERBOSE" ] && set -x
ROOT_TMPDIR="$(mktemp --tmpdir -d isle-tests.XXXXXX)"
if [ -z "$KEEP_TMP" ]; then trap 'rm -rf $ROOT_TMPDIR' EXIT; fi
TMPDIR="$ROOT_TMPDIR"
export ROOT_TMPDIR TMPDIR
echo "tmp dir is $ROOT_TMPDIR"
# Blackhole these directories so that tests don't accidentally use the host's
# real ones.
export XDG_RUNTIME_DIR=/dev/null
export XDG_STATE_HOME=/dev/null
test_files=$(
find ./cases -type f -name '*.sh' \
| sed "s|^\./cases/||" \
| grep -v entrypoint.sh \
| sort
)
for r in "${REGEXS[@]}"; do
test_files="$(echo "$test_files" | grep "$r")"
done
echo -e "number of tests: $(echo "$test_files" | wc -l)\n"
for file in $test_files; do
echo "Running test case: $file"
if [ -z "$VERBOSE" ]; then
output="$TMPDIR/$file.log"
mkdir -p "$(dirname "$output")"
exec 3>"$output"
else
exec 3>&1
fi
(
export TEST_CASE_FILE="$file"
if ! $SHELL -e -x "$root/cases/$file" >&3 2>&1; then
echo "$file FAILED"
if [ -z "$VERBOSE" ]; then
echo "output of test is as follows"
echo "------------------------------"
cat "$output"
echo "------------------------------"
fi
exit 1
fi
) || TESTS_FAILED=1
if [ -n "$TESTS_FAILED" ]; then break; fi
done
# Clean up any shared running networks. Each cleanup script is responsible for
# figuring out if its shared network was actually instantiated during any tests.
if [ -e "$ROOT_TMPDIR/cleanup-pids" ]; then
echo "Cleaning up running pids"
tac "$ROOT_TMPDIR/cleanup-pids" | while read -r line; do
pid="$(echo "$line" | cut -d' ' -f1)"
descr="$(echo "$line" | cut -d' ' -f2-)"
echo "Killing $descr ($pid)"
kill "$pid"
done
# This is easier than checking if the pids are still running, and for some
# reason the unmounting doesn't occur until after the pids have died anyway
echo "Waiting for appimage mounts to unmount"
while [ "$(find "$ROOT_TMPDIR" -type d -name '*.mount_isle*' | wc -l)" -ge "1" ]; do
sleep 1
done
fi
if [ -z "$TESTS_FAILED" ]; then echo -e '\nall tests succeeded!'; fi

View File

@@ -1,3 +0,0 @@
set -e
echo "$1" "$2" >> "$ROOT_TMPDIR/cleanup-pids"

View File

@@ -1,18 +0,0 @@
set -e
base="$1"
TMPDIR="$ROOT_TMPDIR/$base"
XDG_RUNTIME_DIR="$TMPDIR/.run"
XDG_STATE_HOME="$TMPDIR/.state"
mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_STATE_HOME"
cat <<EOF
export TMPDIR="$TMPDIR"
export XDG_RUNTIME_DIR="$XDG_RUNTIME_DIR"
export XDG_STATE_HOME="$XDG_STATE_HOME"
export ISLE_DAEMON_HTTP_SOCKET_PATH="$ROOT_TMPDIR/$base-daemon.sock"
BOOTSTRAP_FILE="$XDG_STATE_HOME/isle/networks/$NETWORK_ID/bootstrap.json"
cd "$TMPDIR"
EOF

View File

@@ -1,107 +0,0 @@
set -e
base="shared/1-data-1-empty"
ipNet="10.6.9.0/24"
primus_base="$base/primus"
primus_ip="10.6.9.1"
secondus_base="$base/secondus"
function as_primus {
current_ip="$primus_ip"
eval "$($SHELL "$UTILS/shared-daemon-env.sh" "$primus_base")"
}
function as_secondus {
current_ip="$secondus_ip"
eval "$($SHELL "$UTILS/shared-daemon-env.sh" "$secondus_base")"
}
# Even if it's already initialized, we want to put the caller in primus'
# environment
as_primus
secondus_bootstrap="$(pwd)/secondus-bootstrap.json"
if [ ! -d "$XDG_RUNTIME_DIR/isle" ]; then
echo "Initializing shared single node network"
mkdir a
mkdir b
mkdir c
cat >daemon.yml <<EOF
networks:
testing:
vpn:
public_addr: 127.0.0.1:60000
tun:
device: isle-primus
storage:
allocations:
- data_path: a/data
meta_path: a/meta
capacity: 1
- data_path: b/data
meta_path: b/meta
capacity: 1
- data_path: c/data
meta_path: c/meta
capacity: 1
EOF
isle daemon -l debug --config-path daemon.yml >daemon.log 2>&1 &
pid="$!"
$SHELL "$UTILS/register-cleanup.sh" "$pid" "1-data-1-empty-node-network/primus"
echo "Waiting for primus daemon (process $pid) to start"
while ! [ -e "$ISLE_DAEMON_HTTP_SOCKET_PATH" ]; do sleep 1; done
echo "Creating 1-data-1-empty network"
isle network create \
--domain shared.test \
--hostname primus \
--ip-net "$ipNet" \
--name "testing"
echo "Creating secondus bootstrap"
isle hosts create \
--hostname secondus \
> "$secondus_bootstrap"
(
as_secondus
cat >daemon.yml <<EOF
vpn:
tun:
device: isle-secondus
EOF
isle daemon -l debug -c daemon.yml >daemon.log 2>&1 &
pid="$!"
$SHELL "$UTILS/register-cleanup.sh" "$pid" "1-data-1-empty-node-network/secondus"
echo "Waiting for secondus daemon (process $!) to start"
while ! [ -e "$ISLE_DAEMON_HTTP_SOCKET_PATH" ]; do sleep 1; done
echo "Joining secondus to the network"
isle network join -b "$secondus_bootstrap"
)
fi
secondus_ip="$(
nebula-cert print -json \
-path <(jq -r '.Bootstrap.Hosts["secondus"].PublicCredentials.Cert' "$secondus_bootstrap") \
| jq -r '.details.ips[0]' \
| cut -d/ -f1
)"
NETWORK_ID="$(jq '.Bootstrap.NetworkCreationParams.ID' "$secondus_bootstrap")"
export NETWORK_ID
# shared-daemon-env.sh depends on NETWORK_ID, so we re-call as_primus in order
# to fully populate the envvars we need.
as_primus

View File

@@ -1,9 +0,0 @@
set -e
TMPDIR="$TMPDIR/$TEST_CASE_FILE.tmp"
XDG_RUNTIME_DIR="$TMPDIR/.run"
XDG_STATE_HOME="$TMPDIR/.state"
mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_STATE_HOME"
cd "$TMPDIR"