Compare commits
No commits in common. "734406d4bb3cb115428a9ff48c239326a443cded" and "efdab29ae66ed12087425593dfe7d757dcd63bc0" have entirely different histories.
734406d4bb...efdab29ae6

default.nix (13 lines changed)
@@ -118,6 +118,19 @@ in rec {
  '';
  };

  tests = pkgs.writeScript "isle-tests" ''
    export PATH=${pkgs.lib.makeBinPath [
      build.appImage
      pkgs.busybox
      pkgs.yq-go
      pkgs.jq
      pkgs.dig
      pkgs.nebula
    ]}
    export SHELL=${pkgs.bash}/bin/bash
    exec ${pkgs.bash}/bin/bash ${./tests}/entrypoint.sh "$@"
  '';

  devShell = pkgs.mkShell {
    buildInputs = [
      pkgs.go
@@ -11,6 +11,5 @@ to better understand how to navigate and work on the codebase.
These pages can be helpful in specific situations.

* [Building Isle](./building.md)
* [Testing Isle](./testing.md)
* [Rebuilding Documentation](./rebuilding-documentation.md)
* [Releases](./releases.md)
@@ -1,39 +0,0 @@
# Testing Isle

All tests are currently written as Go tests, and as such can be run from the
`go` directory using the normal Go testing tool.

```
cd go
go test -run Foo ./daemon
go test ./... # Test everything
```

## Integration Tests

Integration tests are those which require processes or state external to the
test itself. Integration tests are marked using the
`toolkit.MarkIntegrationTest` function, which will cause them to be skipped
unless they are run in the integration test environment.

Besides a normal nix installation (which all Isle development needs),
integration tests also require `sudo` and [capsh][capsh] to be installed on
the system.

[capsh]: https://www.man7.org/linux/man-pages/man1/capsh.1.html

Running tests through the `go/integration_test.sh` script automatically runs
them in the integration test environment. All arguments are passed directly
to the Go testing tool.

```
cd go
./integration_test.sh -run Foo ./daemon
```

`integration_test.sh` wraps a call to `go test` in a bash shell which has all
required binaries available to it, and which has acquired the necessary
[capabilities][capabilities] to use those binaries. Capabilities are acquired
by elevating the user to root using `sudo`, and then dropping back down to a
shell of the original user with the capabilities set.

[capabilities]: https://wiki.archlinux.org/title/Capabilities
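As a reference for the marking mechanism described above, here is a minimal sketch of what an opted-in test might look like. The exact signature of `toolkit.MarkIntegrationTest` and the `isle/toolkit` import path are assumptions here, not confirmed by this diff:

```go
package daemon

import (
	"testing"

	"isle/toolkit"
)

// Hypothetical test: MarkIntegrationTest is assumed to call t.Skip itself
// when the test is not running inside the integration test environment
// set up by integration_test.sh.
func TestSomethingIntegration(t *testing.T) {
	toolkit.MarkIntegrationTest(t)

	// ... test logic which relies on external processes or state ...
}
```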
@@ -6,10 +6,24 @@ import (

// GaragePeers returns a Peer for each known garage instance in the network.
func (b Bootstrap) GaragePeers() []garage.RemotePeer {

    var peers []garage.RemotePeer

    for _, host := range b.Hosts {
        peers = append(peers, host.GaragePeers()...)

        for _, instance := range host.Garage.Instances {

            peer := garage.RemotePeer{
                ID:        instance.ID,
                IP:        host.IP().String(),
                RPCPort:   instance.RPCPort,
                S3APIPort: instance.S3APIPort,
            }

            peers = append(peers, peer)
        }
    }

    return peers
}
@@ -17,9 +31,18 @@ func (b Bootstrap) GaragePeers() []garage.RemotePeer {
// will prefer a garage instance on this particular host, if there is one, but
// will otherwise return a random endpoint.
func (b Bootstrap) ChooseGaragePeer() garage.RemotePeer {

    thisHost := b.ThisHost()

    if len(thisHost.Garage.Instances) > 0 {
        return thisHost.GaragePeers()[0]

        inst := thisHost.Garage.Instances[0]
        return garage.RemotePeer{
            ID:        inst.ID,
            IP:        thisHost.IP().String(),
            RPCPort:   inst.RPCPort,
            S3APIPort: inst.S3APIPort,
        }
    }

    for _, peer := range b.GaragePeers() {
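As a usage sketch (a hypothetical helper, not part of the change above): the fields shown on `garage.RemotePeer` are enough for a caller to turn whichever peer `ChooseGaragePeer` selects into a dialable S3 API address:

```go
package bootstrap

import (
	"net"
	"strconv"
)

// s3APIAddr is a hypothetical example of using ChooseGaragePeer: the
// RemotePeer's IP and S3APIPort fields from the hunk above combine into
// a host:port string for the S3 API.
func s3APIAddr(b Bootstrap) string {
	peer := b.ChooseGaragePeer()
	return net.JoinHostPort(peer.IP, strconv.Itoa(peer.S3APIPort))
}
```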
@@ -2,7 +2,6 @@ package bootstrap

import (
    "fmt"
    "isle/garage"
    "isle/nebula"
    "net/netip"
)
@@ -92,18 +91,3 @@ func (h Host) IP() netip.Addr {

    return addr
}

// GaragePeers returns a RemotePeer for each garage instance advertised by this
// Host.
func (h Host) GaragePeers() []garage.RemotePeer {
    var peers []garage.RemotePeer
    for _, instance := range h.Garage.Instances {
        peers = append(peers, garage.RemotePeer{
            ID:        instance.ID,
            IP:        h.IP().String(),
            RPCPort:   instance.RPCPort,
            S3APIPort: instance.S3APIPort,
        })
    }
    return peers
}
@@ -12,7 +12,6 @@ import (

    _ "embed"

    "dev.mediocregopher.com/mediocre-go-lib.git/mctx"
    "gopkg.in/yaml.v3"
)
@@ -61,16 +60,6 @@ type ConfigStorageAllocation struct {
    Zone string `yaml:"zone"`
}

// Annotate implements the mctx.Annotator interface.
func (csa ConfigStorageAllocation) Annotate(aa mctx.Annotations) {
    aa["allocDataPath"] = csa.DataPath
    aa["allocMetaPath"] = csa.MetaPath
    aa["allocCapacity"] = csa.Capacity
    aa["allocS3APIPort"] = csa.S3APIPort
    aa["allocRPCPort"] = csa.RPCPort
    aa["allocAdminPort"] = csa.AdminPort
}

// NetworkConfig describes the configuration of a single network.
type NetworkConfig struct {
    DNS struct {
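The `Annotate` method above plugs into the `mctx` annotation mechanism: a later hunk in this compare attaches an allocation to a context via `mctx.WithAnnotator(ctx, alloc)`, so log lines made with that context carry the `alloc*` fields. A minimal sketch of that flow, assuming only what the diff itself shows:

```go
package daecommon

import (
	"context"

	"dev.mediocregopher.com/mediocre-go-lib.git/mctx"
)

// Hypothetical sketch: once the allocation is attached as an annotator,
// any logger call which receives the returned context can include the
// alloc* annotations produced by Annotate above.
func withAllocAnnotations(
	ctx context.Context, alloc ConfigStorageAllocation,
) context.Context {
	return mctx.WithAnnotator(ctx, alloc)
}
```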
@@ -14,7 +14,6 @@ import (
    "net"
    "path/filepath"
    "strconv"
    "time"

    "dev.mediocregopher.com/mediocre-go-lib.git/mctx"
    "dev.mediocregopher.com/mediocre-go-lib.git/mlog"
@@ -299,45 +298,3 @@ func removeGarageBootstrapHost(
        ctx, garage.GlobalBucket, filePath, minio.RemoveObjectOptions{},
    )
}

// We can wait for the garage instance to appear healthy, but there are cases
// where they still haven't fully synced the list of buckets and bucket
// credentials. For those cases it's necessary to do this as an additional
// check.
func garageWaitForAlloc(
    ctx context.Context,
    logger *mlog.Logger,
    alloc daecommon.ConfigStorageAllocation,
    adminToken string,
    host bootstrap.Host,
) error {
    var (
        hostIP      = host.IP().String()
        adminClient = garage.NewAdminClient(
            garageAdminClientLogger(logger),
            net.JoinHostPort(hostIP, strconv.Itoa(alloc.AdminPort)),
            adminToken,
        )
    )

    defer adminClient.Close()
    ctx = mctx.WithAnnotator(ctx, alloc)

    for {
        logger.Info(ctx, "Checking if node has synced bucket list")
        buckets, err := adminClient.ListBuckets(ctx)
        if err != nil {
            return fmt.Errorf("listing buckets: %w", err)
        } else if len(buckets) == 0 {
            logger.WarnString(ctx, "No buckets found, will wait a bit and try again")
            select {
            case <-time.After(1 * time.Second):
                continue
            case <-ctx.Done():
                return ctx.Err()
            }
        }

        return nil
    }
}
@@ -23,7 +23,6 @@ import (
    "sync"
    "time"

    "dev.mediocregopher.com/mediocre-go-lib.git/mctx"
    "dev.mediocregopher.com/mediocre-go-lib.git/mlog"
    "golang.org/x/exp/maps"
)
@@ -35,8 +34,6 @@ type GarageClientParams struct {
    GlobalBucketS3APICredentials garage.S3APICredentials

    // RPCSecret may be empty, if the secret is not available on the host.
    //
    // TODO this shouldn't really be here I don't think, remove it?
    RPCSecret string
}
@@ -184,15 +181,13 @@ type network struct {
    networkConfig daecommon.NetworkConfig
    currBootstrap bootstrap.Bootstrap

    workerCtx    context.Context
    workerCancel context.CancelFunc
    wg           sync.WaitGroup
    shutdownCh   chan struct{}
    wg           sync.WaitGroup
}

// instatiateNetwork returns an instantiated *network instance which has not yet
// been initialized.
func instatiateNetwork(
    ctx context.Context,
    logger *mlog.Logger,
    networkConfig daecommon.NetworkConfig,
    envBinDirPath string,
@@ -200,8 +195,6 @@ func instatiateNetwork(
    runtimeDir toolkit.Dir,
    opts *Opts,
) *network {
    ctx = context.WithoutCancel(ctx)
    ctx, cancel := context.WithCancel(ctx)
    return &network{
        logger:        logger,
        networkConfig: networkConfig,
@@ -209,8 +202,7 @@
        stateDir:      stateDir,
        runtimeDir:    runtimeDir,
        opts:          opts.withDefaults(),
        workerCtx:     ctx,
        workerCancel:  cancel,
        shutdownCh:    make(chan struct{}),
    }
}
@@ -252,7 +244,6 @@ func Load(
    Network, error,
) {
    n := instatiateNetwork(
        ctx,
        logger,
        networkConfig,
        envBinDirPath,
@@ -274,7 +265,7 @@
        return nil, fmt.Errorf(
            "loading bootstrap from %q: %w", bootstrapFilePath, err,
        )
    } else if err := n.initialize(ctx, currBootstrap, false); err != nil {
    } else if err := n.initialize(ctx, currBootstrap); err != nil {
        return nil, fmt.Errorf("initializing with bootstrap: %w", err)
    }
@@ -298,7 +289,6 @@ func Join(
    Network, error,
) {
    n := instatiateNetwork(
        ctx,
        logger,
        networkConfig,
        envBinDirPath,
@@ -317,7 +307,7 @@
        return nil, fmt.Errorf("importing secrets: %w", err)
    }

    if err := n.initialize(ctx, joiningBootstrap.Bootstrap, false); err != nil {
    if err := n.initialize(ctx, joiningBootstrap.Bootstrap); err != nil {
        return nil, fmt.Errorf("initializing with bootstrap: %w", err)
    }
@@ -365,7 +355,6 @@ func Create(
    garageRPCSecret := toolkit.RandStr(32)

    n := instatiateNetwork(
        ctx,
        logger,
        networkConfig,
        envBinDirPath,
@@ -401,7 +390,7 @@
        return nil, fmt.Errorf("initializing bootstrap data: %w", err)
    }

    if err := n.initialize(ctx, hostBootstrap, true); err != nil {
    if err := n.initialize(ctx, hostBootstrap); err != nil {
        return nil, fmt.Errorf("initializing with bootstrap: %w", err)
    }
@@ -418,42 +407,8 @@ func (n *network) initializeDirs(mayExist bool) error {
    return nil
}

func (n *network) periodically(
    logger *mlog.Logger,
    fn func(context.Context) error,
    period time.Duration,
) {
    n.wg.Add(1)
    go func() {
        defer n.wg.Done()

        ctx := mctx.Annotate(n.workerCtx, "period", period)

        ticker := time.NewTicker(period)
        defer ticker.Stop()

        logger.Info(ctx, "Starting background job runner")
        defer logger.Info(ctx, "Stopping background job runner")

        for {
            select {
            case <-ctx.Done():
                return

            case <-ticker.C:
                logger.Info(ctx, "Background job running")
                if err := fn(ctx); err != nil {
                    logger.Error(ctx, "Background job failed", err)
                }
            }
        }
    }()
}

func (n *network) initialize(
    ctx context.Context,
    prevBootstrap bootstrap.Bootstrap,
    isCreate bool,
    ctx context.Context, prevBootstrap bootstrap.Bootstrap,
) error {
    prevThisHost := prevBootstrap.ThisHost()
@@ -494,37 +449,41 @@ func (n *network) initialize(

    n.logger.Info(ctx, "Child processes created")

    createGarageGlobalBucket := isCreate
    err = n.postChildrenInit(ctx, prevThisHost, createGarageGlobalBucket)
    if err != nil {
    if err := n.postInit(ctx, prevThisHost); err != nil {
        n.logger.Error(ctx, "Post-initialization failed, stopping child processes", err)
        n.children.Shutdown()
        return fmt.Errorf("performing post-initialization: %w", err)
    }

    // Do this now so that everything is stable before returning. This also
    // serves a dual-purpose, as it makes sure that the PUT from the postChildrenInit
    // serves a dual-purpose, as it makes sure that the PUT from the postInit
    // above has propagated from the local garage instance, if there is one.
    n.logger.Info(ctx, "Reloading hosts from network storage")
    if err = n.reloadHosts(ctx); err != nil {
        return fmt.Errorf("Reloading network bootstrap: %w", err)
    }

    n.periodically(
        n.logger.WithNamespace("reloadHosts"),
        n.reloadHosts,
        3*time.Minute,
    )
    ctx = context.WithoutCancel(ctx)
    ctx, cancel := context.WithCancel(ctx)
    n.wg.Add(1)
    go func() {
        defer n.wg.Done()
        <-n.shutdownCh
        cancel()
    }()

    n.wg.Add(1)
    go func() {
        defer n.wg.Done()
        n.reloadLoop(ctx)
        n.logger.Debug(ctx, "Daemon reload loop stopped")
    }()

    return nil
}

// postChildrenInit performs steps which are required after children have been
// initialized.
func (n *network) postChildrenInit(
    ctx context.Context,
    prevThisHost bootstrap.Host,
    createGarageGlobalBucket bool,
func (n *network) postInit(
    ctx context.Context, prevThisHost bootstrap.Host,
) error {
    n.l.RLock()
    defer n.l.RUnlock()
@@ -544,7 +503,15 @@ func (n *network) postChildrenInit(
        }
    }

    if createGarageGlobalBucket {
    // This is only necessary during network creation, otherwise the bootstrap
    // should already have these credentials built in.
    //
    // TODO this is pretty hacky, but there doesn't seem to be a better way to
    // manage it at the moment.
    _, err := daecommon.GetGarageS3APIGlobalBucketCredentials(
        ctx, n.secretsStore,
    )
    if errors.Is(err, secrets.ErrNotFound) {
        n.logger.Info(ctx, "Initializing garage shared global bucket")
        garageGlobalBucketCreds, err := garageInitializeGlobalBucket(
            ctx,
@@ -565,18 +532,8 @@ func (n *network) postChildrenInit(
        }
    }

    for _, alloc := range n.networkConfig.Storage.Allocations {
        if err := garageWaitForAlloc(
            ctx, n.logger, alloc, n.opts.GarageAdminToken, thisHost,
        ); err != nil {
            return fmt.Errorf(
                "waiting for alloc %+v to initialize: %w", alloc, err,
            )
        }
    }

    n.logger.Info(ctx, "Updating host info in garage")
    err := putGarageBoostrapHost(ctx, n.secretsStore, n.currBootstrap)
    err = putGarageBoostrapHost(ctx, n.secretsStore, n.currBootstrap)
    if err != nil {
        return fmt.Errorf("updating host info in garage: %w", err)
    }
@@ -618,6 +575,25 @@ func (n *network) reloadHosts(ctx context.Context) error {
    return nil
}

func (n *network) reloadLoop(ctx context.Context) {
    const period = 3 * time.Minute
    ticker := time.NewTicker(period)
    defer ticker.Stop()

    for {
        select {
        case <-ctx.Done():
            return

        case <-ticker.C:
            if err := n.reloadHosts(ctx); err != nil {
                n.logger.Error(ctx, "Attempting to reload", err)
                continue
            }
        }
    }
}

// returns the bootstrap prior to the reload being applied.
func (n *network) reload(
    ctx context.Context,
@@ -968,7 +944,7 @@ func (n *network) SetConfig(
        return fmt.Errorf("reloading config: %w", err)
    }

    if err := n.postChildrenInit(ctx, prevBootstrap.ThisHost(), false); err != nil {
    if err := n.postInit(ctx, prevBootstrap.ThisHost()); err != nil {
        return fmt.Errorf("performing post-initialization: %w", err)
    }
@@ -990,11 +966,10 @@ func (n *network) GetNetworkCreationParams(
}

func (n *network) Shutdown() error {
    n.workerCancel()
    close(n.shutdownCh)
    n.wg.Wait()

    if n.children != nil {
        n.logger.Info(context.Background(), "Shutting down children")
        n.children.Shutdown()
    }
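The change above replaces the stored `workerCtx`/`workerCancel` pair with a `shutdownCh` which `Shutdown` closes, while a goroutine set up during initialization bridges the channel close into a context cancellation. A self-contained sketch of that pattern, with illustrative names not taken from the diff:

```go
package network

import (
	"context"
	"sync"
)

// Illustrative shutdown-channel pattern: closing shutdownCh cancels the
// workers' context, and wg.Wait blocks until they have all returned.
type runner struct {
	shutdownCh chan struct{}
	wg         sync.WaitGroup
}

func (r *runner) start(ctx context.Context) {
	// Detach from the caller's cancellation, then make shutdown the only
	// way to cancel, mirroring the WithoutCancel/WithCancel pair above.
	ctx, cancel := context.WithCancel(context.WithoutCancel(ctx))

	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		<-r.shutdownCh // closed by shutdown()
		cancel()
	}()

	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		<-ctx.Done() // stand-in for a background loop such as reloadLoop
	}()
}

func (r *runner) shutdown() {
	close(r.shutdownCh)
	r.wg.Wait()
}
```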
@@ -29,12 +29,6 @@ const (
    BucketPermissionOwner BucketPermission = "owner"
)

// Bucket defines a bucket which has been created in a cluster
type Bucket struct {
    ID            BucketID `json:"id"`
    GlobalAliases []string `json:"globalAliases"`
}

// AdminClientError gets returned from AdminClient Do methods for non-200
// errors.
type AdminClientError struct {
@@ -255,13 +249,6 @@ func (c *AdminClient) CreateBucket(
    return BucketID(res.ID), err
}

// ListBuckets returns all buckets known to this garage node.
func (c *AdminClient) ListBuckets(ctx context.Context) ([]Bucket, error) {
    var res []Bucket
    err := c.do(ctx, &res, "GET", "/v1/bucket?list", nil)
    return res, err
}

// GrantBucketPermissions grants the S3APICredentials with the given ID
// permission(s) to interact with the bucket of the given ID.
func (c *AdminClient) GrantBucketPermissions(
tests.sh (new executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/usr/bin/env bash

set -e

entrypoint="$(nix-build --no-out-link -A tests)"
this_user="$(whoami)"

echo "Requesting sudo in order to set thread capabilities, will drop back down to user '$this_user' immediately"

sudo -E capsh \
    --caps="cap_net_admin,cap_net_bind_service+eip cap_setpcap,cap_setuid,cap_setgid+ep" \
    --keep=1 \
    --user="$this_user" \
    --addamb=cap_net_admin \
    --addamb=cap_net_bind_service \
    -- "$entrypoint" "$@"
tests/NOTES.txt (new file, 10 lines)
@@ -0,0 +1,10 @@
Ctrl+A X -> exits

qemu-system-aarch64 -nographic -cdrom tests/alpine-virt-3.18.4-aarch64.iso

Ctrl+Alt+G -> Escape mouse capture
qemu-system-x86_64 \
    -cdrom tests/virt/Win11_23H2_English_x64.iso \
    -m 8G \
    -boot order=d \
    -drive file=./tests/virt/winblows.qcow2
tests/cases/00-version.sh (new file, 3 lines)
@@ -0,0 +1,3 @@
isle version | grep -q 'Release:'
isle version | grep -q 'Platform:'
isle version | grep -q 'Build Platform:'
tests/cases/dnsmasq/00-hosts.sh (new file, 17 lines)
@@ -0,0 +1,17 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

function assert_a {
    want_ip="$1"
    hostname="$2"
    r="$(dig @"$current_ip" +noall +answer "$hostname")"
    echo "$r" | grep -q "$want_ip"
}

as_primus
assert_a "$primus_ip" primus.hosts.shared.test
assert_a "$secondus_ip" secondus.hosts.shared.test

as_secondus
assert_a "$primus_ip" primus.hosts.shared.test
assert_a "$secondus_ip" secondus.hosts.shared.test
tests/cases/garage/00-cli.sh (new file, 21 lines)
@@ -0,0 +1,21 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

function do_tests {
    status="$(isle garage cli status | tail -n+3)"

    [ "$(echo "$status" | wc -l)" = "3" ]
    echo "$status" | grep -q '10.6.9.1:3900'
    echo "$status" | grep -q '10.6.9.1:3910'
    echo "$status" | grep -q '10.6.9.1:3920'

    buckets="$(isle garage cli bucket list | tail -n+2)"
    [ "$(echo "$buckets" | wc -l)" = 1 ]
    echo "$buckets" | grep -q 'global-shared'
}

as_primus
do_tests

as_secondus
do_tests
tests/cases/garage/01-mc.sh (new file, 16 lines)
@@ -0,0 +1,16 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

function do_tests {
    files="$(isle garage mc -- tree --json garage)"
    [ "$(echo "$files" | jq -s '.|length')" -ge "1" ]

    file="$(echo "$files" | jq -sr '.[0].key')"
    [ "$(isle garage mc -- cat "garage/$file" | wc -c)" -gt "0" ]
}

as_primus
do_tests

as_secondus
do_tests
tests/cases/hosts/00-list.sh (new file, 20 lines)
@@ -0,0 +1,20 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

function do_tests {
    hosts="$(isle hosts list)"

    [ "$(echo "$hosts" | jq -r '.[0].Name')" = "primus" ]
    [ "$(echo "$hosts" | jq -r '.[0].VPN.IP')" = "10.6.9.1" ]
    [ "$(echo "$hosts" | jq -r '.[0].Storage.Instances|length')" = "3" ]

    [ "$(echo "$hosts" | jq -r '.[1].Name')" = "secondus" ]
    [ "$(echo "$hosts" | jq -r '.[1].VPN.IP')" = "$secondus_ip" ]
    [ "$(echo "$hosts" | jq -r '.[1].Storage.Instances|length')" = "0" ]
}

as_primus
do_tests

as_secondus
do_tests
tests/cases/hosts/01-create.sh (new file, 16 lines)
@@ -0,0 +1,16 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

bs="$secondus_bootstrap" # set in with-1-data-1-empty-node-network.sh

[ "$(jq -r <"$bs" '.Bootstrap.NetworkCreationParams.Domain')" = "shared.test" ]
[ "$(jq -r <"$bs" '.Bootstrap.NetworkCreationParams.Name')" = "testing" ]
[ "$(jq -r <"$bs" '.Bootstrap.SignedHostAssigned.Body.Name')" = "secondus" ]

[ "$(jq -r <"$bs" '.Bootstrap.Hosts.primus.PublicCredentials')" \
    = "$(jq -r <"$BOOTSTRAP_FILE" '.SignedHostAssigned.Body.PublicCredentials')" ]

[ "$(jq <"$bs" '.Bootstrap.Hosts.primus.Garage.Instances|length')" = "3" ]

[ "$(jq <"$bs" '.Secrets["garage-rpc-secret"]')" != "null" ]
tests/cases/nebula/00-show.sh (new file, 12 lines)
@@ -0,0 +1,12 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

info="$(isle nebula show)"

[ "$(echo "$info" | jq -r '.CACert')" \
    = "$(jq -r <"$BOOTSTRAP_FILE" '.CAPublicCredentials.Cert')" ]

[ "$(echo "$info" | jq -r '.SubnetCIDR')" = "10.6.9.0/24" ]
[ "$(echo "$info" | jq -r '.Lighthouses|length')" = "1" ]
[ "$(echo "$info" | jq -r '.Lighthouses[0].PublicAddr')" = "127.0.0.1:60000" ]
[ "$(echo "$info" | jq -r '.Lighthouses[0].IP')" = "10.6.9.1" ]
tests/cases/nebula/01-create-cert.sh (new file, 17 lines)
@@ -0,0 +1,17 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

nebula-cert keygen -out-key /dev/null -out-pub pubkey
cat pubkey

(
    isle nebula create-cert \
        --hostname non-esiste \
        --public-key-path pubkey \
        2>&1 || true \
) | grep '\[1002\] Host not found'

isle nebula create-cert \
    --hostname primus \
    --public-key-path pubkey \
    | grep -- '-----BEGIN NEBULA CERTIFICATE-----'
tests/cases/network/00-create.sh (new file, 12 lines)
@@ -0,0 +1,12 @@
# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

[ "$(cat a/meta/isle/rpc_port)" = "3900" ]
[ "$(cat b/meta/isle/rpc_port)" = "3910" ]
[ "$(cat c/meta/isle/rpc_port)" = "3920" ]

[ "$(jq -r <"$BOOTSTRAP_FILE" '.NetworkCreationParams.ID')" != "" ]
[ "$(jq -r <"$BOOTSTRAP_FILE" '.NetworkCreationParams.Name')" = "testing" ]
[ "$(jq -r <"$BOOTSTRAP_FILE" '.NetworkCreationParams.Domain')" = "shared.test" ]

[ "$(jq -r <"$BOOTSTRAP_FILE" '.SignedHostAssigned.Body.Name')" = "primus" ]
tests/entrypoint.sh (new file, 115 lines)
@@ -0,0 +1,115 @@
set -e

# cd into script's directory
cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null
root=$(pwd)

export UTILS="$root"/utils

REGEXS=()

while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            cat <<EOF
USAGE: [flags] [test regexs...]
FLAGS
--keep-tmp
--verbose (-v)
--help (-h)
EOF
            exit 1
            ;;
        -v|--verbose)
            VERBOSE=1
            shift
            ;;
        --keep-tmp)
            KEEP_TMP=1
            shift
            ;;
        *)
            REGEXS+=("$1")
            shift
            ;;
    esac
done

[ -n "$VERBOSE" ] && set -x

ROOT_TMPDIR="$(mktemp --tmpdir -d isle-tests.XXXXXX)"
if [ -z "$KEEP_TMP" ]; then trap 'rm -rf $ROOT_TMPDIR' EXIT; fi

TMPDIR="$ROOT_TMPDIR"

export ROOT_TMPDIR TMPDIR
echo "tmp dir is $ROOT_TMPDIR"

# Blackhole these directories so that tests don't accidentally use the host's
# real ones.
export XDG_RUNTIME_DIR=/dev/null
export XDG_STATE_HOME=/dev/null

test_files=$(
    find ./cases -type f -name '*.sh' \
        | sed "s|^\./cases/||" \
        | grep -v entrypoint.sh \
        | sort
)

for r in "${REGEXS[@]}"; do
    test_files="$(echo "$test_files" | grep "$r")"
done

echo -e "number of tests: $(echo "$test_files" | wc -l)\n"
for file in $test_files; do
    echo "Running test case: $file"

    if [ -z "$VERBOSE" ]; then
        output="$TMPDIR/$file.log"
        mkdir -p "$(dirname "$output")"
        exec 3>"$output"
    else
        exec 3>&1
    fi

    (
        export TEST_CASE_FILE="$file"

        if ! $SHELL -e -x "$root/cases/$file" >&3 2>&1; then
            echo "$file FAILED"
            if [ -z "$VERBOSE" ]; then
                echo "output of test is as follows"
                echo "------------------------------"
                cat "$output"
                echo "------------------------------"
            fi
            exit 1
        fi
    ) || TESTS_FAILED=1

    if [ -n "$TESTS_FAILED" ]; then break; fi
done

# Clean up any shared running networks. Each cleanup script is responsible for
# figuring out if its shared network was actually instantiated during any tests.
if [ -e "$ROOT_TMPDIR/cleanup-pids" ]; then
    echo "Cleaning up running pids"
    tac "$ROOT_TMPDIR/cleanup-pids" | while read -r line; do
        pid="$(echo "$line" | cut -d' ' -f1)"
        descr="$(echo "$line" | cut -d' ' -f2-)"
        echo "Killing $descr ($pid)"
        kill "$pid"
    done

    # This is easier than checking if the pids are still running, and for some
    # reason it doesn't occur until after the pids have died anyway
    echo "Waiting for appimage mounts to unmount"
    while [ "$(find "$ROOT_TMPDIR" -type d -name '*.mount_isle*' | wc -l)" -ge "1" ]; do
        sleep 1
    done
fi

if [ -z "$TESTS_FAILED" ]; then echo -e '\nall tests succeeded!'; fi
tests/utils/register-cleanup.sh (new file, 3 lines)
@@ -0,0 +1,3 @@
set -e

echo "$1" "$2" >> "$ROOT_TMPDIR/cleanup-pids"
tests/utils/shared-daemon-env.sh (new file, 18 lines)
@@ -0,0 +1,18 @@
set -e

base="$1"

TMPDIR="$ROOT_TMPDIR/$base"
XDG_RUNTIME_DIR="$TMPDIR/.run"
XDG_STATE_HOME="$TMPDIR/.state"

mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_STATE_HOME"

cat <<EOF
export TMPDIR="$TMPDIR"
export XDG_RUNTIME_DIR="$XDG_RUNTIME_DIR"
export XDG_STATE_HOME="$XDG_STATE_HOME"
export ISLE_DAEMON_HTTP_SOCKET_PATH="$ROOT_TMPDIR/$base-daemon.sock"
BOOTSTRAP_FILE="$XDG_STATE_HOME/isle/networks/$NETWORK_ID/bootstrap.json"
cd "$TMPDIR"
EOF
tests/utils/with-1-data-1-empty-node-network.sh (new file, 107 lines)
@@ -0,0 +1,107 @@
set -e

base="shared/1-data-1-empty"

ipNet="10.6.9.0/24"

primus_base="$base/primus"
primus_ip="10.6.9.1"

secondus_base="$base/secondus"

function as_primus {
    current_ip="$primus_ip"
    eval "$($SHELL "$UTILS/shared-daemon-env.sh" "$primus_base")"
}

function as_secondus {
    current_ip="$secondus_ip"
    eval "$($SHELL "$UTILS/shared-daemon-env.sh" "$secondus_base")"
}

# Even if it's already initialized, we want to put the caller in primus'
# environment
as_primus

secondus_bootstrap="$(pwd)/secondus-bootstrap.json"

if [ ! -d "$XDG_RUNTIME_DIR/isle" ]; then
    echo "Initializing shared single node network"

    mkdir a
    mkdir b
    mkdir c

    cat >daemon.yml <<EOF
networks:
  testing:
    vpn:
      public_addr: 127.0.0.1:60000
      tun:
        device: isle-primus
    storage:
      allocations:
      - data_path: a/data
        meta_path: a/meta
        capacity: 1
      - data_path: b/data
        meta_path: b/meta
        capacity: 1
      - data_path: c/data
        meta_path: c/meta
        capacity: 1
EOF

    isle daemon -l debug --config-path daemon.yml >daemon.log 2>&1 &
    pid="$!"
    $SHELL "$UTILS/register-cleanup.sh" "$pid" "1-data-1-empty-node-network/primus"

    echo "Waiting for primus daemon (process $pid) to start"
    while ! [ -e "$ISLE_DAEMON_HTTP_SOCKET_PATH" ]; do sleep 1; done

    echo "Creating 1-data-1-empty network"
    isle network create \
        --domain shared.test \
        --hostname primus \
        --ip-net "$ipNet" \
        --name "testing"

    echo "Creating secondus bootstrap"
    isle hosts create \
        --hostname secondus \
        > "$secondus_bootstrap"

    (
        as_secondus

        cat >daemon.yml <<EOF
vpn:
  tun:
    device: isle-secondus
EOF

        isle daemon -l debug -c daemon.yml >daemon.log 2>&1 &
        pid="$!"
        $SHELL "$UTILS/register-cleanup.sh" "$pid" "1-data-1-empty-node-network/secondus"

        echo "Waiting for secondus daemon (process $pid) to start"
        while ! [ -e "$ISLE_DAEMON_HTTP_SOCKET_PATH" ]; do sleep 1; done

        echo "Joining secondus to the network"
        isle network join -b "$secondus_bootstrap"
    )
fi

secondus_ip="$(
    nebula-cert print -json \
        -path <(jq -r '.Bootstrap.Hosts["secondus"].PublicCredentials.Cert' "$secondus_bootstrap") \
        | jq -r '.details.ips[0]' \
        | cut -d/ -f1
)"

NETWORK_ID="$(jq '.Bootstrap.NetworkCreationParams.ID' "$secondus_bootstrap")"
export NETWORK_ID

# shared-daemon-env.sh depends on NETWORK_ID, so we re-call as_primus in order
# to fully populate the envvars we need.
as_primus
tests/utils/with-tmp-for-case.sh (new file, 9 lines)
@@ -0,0 +1,9 @@
set -e

TMPDIR="$TMPDIR/$TEST_CASE_FILE.tmp"
XDG_RUNTIME_DIR="$TMPDIR/.run"
XDG_STATE_HOME="$TMPDIR/.state"

mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_STATE_HOME"

cd "$TMPDIR"