Compare commits

1 commit: 6032c6d8dc ... 2bf36a8ead
default.nix (13 lines changed)

@@ -118,6 +118,19 @@ in rec {
     '';
   };
 
+  tests = pkgs.writeScript "isle-tests" ''
+    export PATH=${pkgs.lib.makeBinPath [
+      build.appImage
+      pkgs.busybox
+      pkgs.yq-go
+      pkgs.jq
+      pkgs.dig
+      pkgs.nebula
+    ]}
+    export SHELL=${pkgs.bash}/bin/bash
+    exec ${pkgs.bash}/bin/bash ${./tests}/entrypoint.sh "$@"
+  '';
+
   devShell = pkgs.mkShell {
     buildInputs = [
       pkgs.go
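The `tests` attribute added above packages the shell test suite introduced later in this diff. Judging from the new tests.sh wrapper below, it is presumably exercised roughly like this (a sketch only; the help output comes from the new tests/entrypoint.sh):

```bash
# Build the test entrypoint derivation and run it directly. This mirrors the
# first step of the new tests.sh wrapper, before it re-execs under capsh.
entrypoint="$(nix-build --no-out-link -A tests)"
"$entrypoint" --help   # prints the USAGE text from tests/entrypoint.sh and exits
```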
Documentation index (1 line removed)

@@ -11,6 +11,5 @@ to better understand how to navigate and work on the codebase.
 These pages can be helpful in specific situations.
 
 * [Building Isle](./building.md)
-* [Testing Isle](./testing.md)
 * [Rebuilding Documentation](./rebuilding-documentation.md)
 * [Releases](./releases.md)
testing.md (file deleted; 39 lines removed)

# Testing Isle

All tests are currently written as go tests, and as such can be run from the
`go` directory using the normal go testing tool.

```
cd go
go test -run Foo ./daemon
go test ./... # Test everything
```

## Integration Tests

Integration tests are those which require processes or state external to the
test itself. Integration tests are marked using the
`toolkit.MarkIntegrationTest` function, which will cause them to be skipped
unless being run in the integration test environment.

Besides a normal nix installation (like all Isle development needs), integration
tests also require `sudo` and [capsh][capsh] to be installed on the system.

[capsh]: https://www.man7.org/linux/man-pages/man1/capsh.1.html

By running tests using the `go/integration_test.sh` script the tests will be
automatically run in the integration test environment. All arguments will be
passed directly to the go testing tool.

```
cd go
./integration_test.sh -run Foo ./daemon
```

`integration_test.sh` wraps a call to `go test` in a bash shell which has all
required binaries available to it, and which has acquired necessary
[capabilities][capabilities] to use the binaries as needed. Acquiring
capabilities is done by elevating the user to root using `sudo`, and then
dropping them back down to a shell of the original user with capabilities set.

[capabilities]: https://wiki.archlinux.org/title/Capabilities
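The sudo/capsh approach described in the deleted page above is the same pattern used by the new tests.sh added later in this diff. As a rough, illustrative sketch of the idea (the flag values and entrypoint name here are placeholders, not the repository's exact invocation):

```bash
# Elevate with sudo just long enough for capsh to grant ambient capabilities,
# then drop back to the original user before running the wrapped command.
this_user="$(whoami)"
sudo -E capsh \
    --caps="cap_net_admin+eip cap_setpcap,cap_setuid,cap_setgid+ep" \
    --keep=1 \
    --user="$this_user" \
    --addamb=cap_net_admin \
    -- ./run-tests.sh "$@"   # placeholder for the real entrypoint
```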
@@ -4,26 +4,26 @@ import (
 	"isle/garage"
 )
 
-// GarageNodes returns a Node for each known garage instance in the network.
-func (b Bootstrap) GarageNodes() []garage.RemoteNode {
-	var nodes []garage.RemoteNode
+// GaragePeers returns a Peer for each known garage instance in the network.
+func (b Bootstrap) GaragePeers() []garage.RemotePeer {
+	var peers []garage.RemotePeer
 	for _, host := range b.Hosts {
-		nodes = append(nodes, host.GarageNodes()...)
+		peers = append(peers, host.GaragePeers()...)
 	}
-	return nodes
+	return peers
 }
 
-// ChooseGarageNode returns a RemoteNode for a garage instance from the network.
-// It will prefer a garage instance on this particular host, if there is one,
-// but will otherwise return a random endpoint.
-func (b Bootstrap) ChooseGarageNode() garage.RemoteNode {
+// ChooseGaragePeer returns a Peer for a garage instance from the network. It
+// will prefer a garage instance on this particular host, if there is one, but
+// will otherwise return a random endpoint.
+func (b Bootstrap) ChooseGaragePeer() garage.RemotePeer {
 	thisHost := b.ThisHost()
 	if len(thisHost.Garage.Instances) > 0 {
-		return thisHost.GarageNodes()[0]
+		return thisHost.GaragePeers()[0]
 	}
 
-	for _, node := range b.GarageNodes() {
-		return node
+	for _, peer := range b.GaragePeers() {
+		return peer
 	}
 
 	panic("no garage instances configured")

@@ -93,17 +93,17 @@ func (h Host) IP() netip.Addr {
 	return addr
 }
 
-// GarageNodes returns a RemoteNode for each garage instance advertised by this
+// GaragePeers returns a RemotePeer for each garage instance advertised by this
 // Host.
-func (h Host) GarageNodes() []garage.RemoteNode {
-	var nodes []garage.RemoteNode
+func (h Host) GaragePeers() []garage.RemotePeer {
+	var peers []garage.RemotePeer
 	for _, instance := range h.Garage.Instances {
-		nodes = append(nodes, garage.RemoteNode{
+		peers = append(peers, garage.RemotePeer{
 			ID: instance.ID,
 			IP: h.IP().String(),
 			RPCPort: instance.RPCPort,
 			S3APIPort: instance.S3APIPort,
 		})
 	}
-	return nodes
+	return peers
 }
@@ -56,7 +56,7 @@ var subCmdGarageMC = subCmd{
 			return fmt.Errorf("calling GetGarageClientParams: %w", err)
 		}
 
-		s3APIAddr := clientParams.Node.S3APIAddr()
+		s3APIAddr := clientParams.Peer.S3APIAddr()
 
 		if *keyID == "" {
 			*keyID = clientParams.GlobalBucketS3APICredentials.ID

@@ -135,7 +135,7 @@ var subCmdGarageCLI = subCmd{
 		args = append([]string{"garage"}, ctx.args...)
 		cliEnv = append(
 			os.Environ(),
-			"GARAGE_RPC_HOST="+clientParams.Node.RPCNodeAddr(),
+			"GARAGE_RPC_HOST="+clientParams.Peer.RPCPeerAddr(),
 			"GARAGE_RPC_SECRET="+clientParams.RPCSecret,
 		)
 	)
@@ -76,8 +76,8 @@ func garageWriteChildConfig(
 		thisHost = hostBootstrap.ThisHost()
 		id = daecommon.BootstrapGarageHostForAlloc(thisHost, alloc).ID
 
-		node = garage.LocalNode{
-			RemoteNode: garage.RemoteNode{
+		peer = garage.LocalPeer{
+			RemotePeer: garage.RemotePeer{
 				ID: id,
 				IP: thisHost.IP().String(),
 				RPCPort: alloc.RPCPort,

@@ -102,8 +102,8 @@ func garageWriteChildConfig(
 			RPCSecret: rpcSecret,
 			AdminToken: adminToken,
 
-			LocalNode: node,
-			BootstrapPeers: hostBootstrap.GarageNodes(),
+			LocalPeer: peer,
+			BootstrapPeers: hostBootstrap.GaragePeers(),
 		},
 	)
 
@@ -49,7 +49,7 @@ func getGarageClientParams(
 	}
 
 	return GarageClientParams{
-		Node: currBootstrap.ChooseGarageNode(),
+		Peer: currBootstrap.ChooseGaragePeer(),
 		GlobalBucketS3APICredentials: creds,
 		RPCSecret: rpcSecret,
 	}, nil
@@ -91,8 +91,8 @@ func garageApplyLayout(
 		)
 		hostName = currHost.Name
 		allocs = networkConfig.Storage.Allocations
-		roles = make([]garage.Role, len(allocs))
-		roleIDs = map[string]struct{}{}
+		peers = make([]garage.PeerLayout, len(allocs))
+		peerIDs = map[string]struct{}{}
 
 		idsToRemove = make([]string, 0, len(prevHost.Garage.Instances))
 	)

@@ -101,14 +101,14 @@ func garageApplyLayout(
 
 	for i, alloc := range allocs {
 		id := daecommon.BootstrapGarageHostForAlloc(currHost, alloc).ID
-		roleIDs[id] = struct{}{}
+		peerIDs[id] = struct{}{}
 
 		zone := string(hostName)
 		if alloc.Zone != "" {
 			zone = alloc.Zone
 		}
 
-		roles[i] = garage.Role{
+		peers[i] = garage.PeerLayout{
 			ID: id,
 			Capacity: alloc.Capacity * 1_000_000_000,
 			Zone: zone,

@@ -117,12 +117,14 @@ func garageApplyLayout(
 	}
 
 	for _, prevInst := range prevHost.Garage.Instances {
-		if _, ok := roleIDs[prevInst.ID]; !ok {
+		if _, ok := peerIDs[prevInst.ID]; !ok {
 			idsToRemove = append(idsToRemove, prevInst.ID)
 		}
 	}
 
-	return adminClient.ApplyLayout(ctx, roles, idsToRemove)
+	_, _ = adminClient.Status(ctx) // TODO remove this
+
+	return adminClient.ApplyLayout(ctx, peers, idsToRemove)
 }
 
 func garageInitializeGlobalBucket(

@@ -361,37 +363,37 @@ func garageWaitForAlloc(
 func garageNodeBuddyPeers(
 	status garage.ClusterStatus, host bootstrap.Host,
 ) (
-	netip.Addr, []garage.Role,
+	netip.Addr, []garage.PeerLayout,
 ) {
 	var (
 		thisIP = host.IP()
-		rolesByID = make(
-			map[string]garage.Role, len(status.Layout.Roles),
+		peersByID = make(
+			map[string]garage.PeerLayout, len(status.Layout.Peers),
 		)
-		nodeRolesByIP = map[netip.Addr][]garage.Role{}
+		nodePeersByIP = map[netip.Addr][]garage.PeerLayout{}
 	)
 
-	for _, role := range status.Layout.Roles {
-		rolesByID[role.ID] = role
+	for _, peer := range status.Layout.Peers {
+		peersByID[peer.ID] = peer
 	}
 
 	for _, node := range status.Nodes {
-		role, ok := rolesByID[node.ID]
+		peer, ok := peersByID[node.ID]
 		if !ok {
 			continue
 		}
 
 		ip := node.Addr.Addr()
-		nodeRolesByIP[ip] = append(nodeRolesByIP[ip], role)
+		nodePeersByIP[ip] = append(nodePeersByIP[ip], peer)
 	}
 
 	// If there is only a single host in the cluster (or, somehow, none) then
 	// that host has no buddy.
-	if len(nodeRolesByIP) < 2 {
+	if len(nodePeersByIP) < 2 {
 		return netip.Addr{}, nil
 	}
 
-	nodeIPs := maps.Keys(nodeRolesByIP)
+	nodeIPs := maps.Keys(nodePeersByIP)
 	slices.SortFunc(nodeIPs, netip.Addr.Compare)
 
 	for i, nodeIP := range nodeIPs {

@@ -403,7 +405,7 @@ func garageNodeBuddyPeers(
 		} else {
 			continue
 		}
-		return buddyIP, nodeRolesByIP[buddyIP]
+		return buddyIP, nodePeersByIP[buddyIP]
 	}
 
 	panic("Unreachable")
@@ -31,7 +31,7 @@ import (
 // GarageClientParams contains all the data needed to instantiate garage
 // clients.
 type GarageClientParams struct {
-	Node garage.RemoteNode
+	Peer garage.RemotePeer
 	GlobalBucketS3APICredentials garage.S3APICredentials
 
 	// RPCSecret may be empty, if the secret is not available on the host.

@@ -44,7 +44,7 @@ type GarageClientParams struct {
 // the global bucket.
 func (p GarageClientParams) GlobalBucketS3APIClient() *garage.S3APIClient {
 	var (
-		addr = p.Node.S3APIAddr()
+		addr = p.Peer.S3APIAddr()
 		creds = p.GlobalBucketS3APICredentials
 	)
 	return garage.NewS3APIClient(addr, creds)
@@ -531,7 +531,7 @@ func (n *network) postChildrenInit(
 
 	thisHost := n.currBootstrap.ThisHost()
 
-	if len(thisHost.Garage.Instances) > 0 {
+	if len(prevThisHost.Garage.Instances)+len(thisHost.Garage.Instances) > 0 {
 		n.logger.Info(ctx, "Applying garage layout")
 		if err := garageApplyLayout(
 			ctx,
@@ -107,19 +107,19 @@ func TestNetwork_GetConfig(t *testing.T) {
 }
 
 func TestNetwork_SetConfig(t *testing.T) {
-	allocsToRoles := func(
+	allocsToPeerLayouts := func(
 		hostName nebula.HostName, allocs []bootstrap.GarageHostInstance,
-	) []garage.Role {
-		roles := make([]garage.Role, len(allocs))
+	) []garage.PeerLayout {
+		peers := make([]garage.PeerLayout, len(allocs))
 		for i := range allocs {
-			roles[i] = garage.Role{
+			peers[i] = garage.PeerLayout{
 				ID: allocs[i].ID,
 				Capacity: 1_000_000_000,
 				Zone: string(hostName),
 				Tags: []string{},
 			}
 		}
-		return roles
+		return peers
 	}
 
 	t.Run("add storage alloc", func(t *testing.T) {

@@ -167,10 +167,10 @@ func TestNetwork_SetConfig(t *testing.T) {
 		assert.Equal(t, newHostsByName, storedBootstrap.Hosts)
 
 		t.Log("Checking that garage layout contains the new allocation")
-		expRoles := allocsToRoles(network.hostName, allocs)
+		expPeers := allocsToPeerLayouts(network.hostName, allocs)
 		layout, err := network.garageAdminClient(t).GetLayout(h.ctx)
 		assert.NoError(t, err)
-		assert.ElementsMatch(t, expRoles, layout.Roles)
+		assert.ElementsMatch(t, expPeers, layout.Peers)
 	})
 
 	t.Run("remove storage alloc", func(t *testing.T) {

@@ -208,10 +208,10 @@ func TestNetwork_SetConfig(t *testing.T) {
 		assert.Equal(t, newHostsByName, storedBootstrap.Hosts)
 
 		t.Log("Checking that garage layout contains the new allocation")
-		expRoles := allocsToRoles(network.hostName, allocs)
+		expPeers := allocsToPeerLayouts(network.hostName, allocs)
 		layout, err := network.garageAdminClient(t).GetLayout(h.ctx)
 		assert.NoError(t, err)
-		assert.ElementsMatch(t, expRoles, layout.Roles)
+		assert.ElementsMatch(t, expPeers, layout.Peers)
 	})
 
 	t.Run("remove all storage allocs", func(t *testing.T) {

@@ -227,7 +227,7 @@ func TestNetwork_SetConfig(t *testing.T) {
 
 			prevHost = secondus.getHostsByName(t)[secondus.hostName]
 			//removedAlloc = networkConfig.Storage.Allocations[0]
-			removedRole = allocsToRoles(
+			removedPeer = allocsToPeerLayouts(
 				secondus.hostName, prevHost.Garage.Instances,
 			)[0]
 			//removedGarageInst = daecommon.BootstrapGarageHostForAlloc(

@@ -241,6 +241,6 @@ func TestNetwork_SetConfig(t *testing.T) {
 		t.Log("Checking that garage layout still contains the old allocation")
 		layout, err := secondus.garageAdminClient(t).GetLayout(h.ctx)
 		assert.NoError(t, err)
-		assert.Contains(t, layout.Roles, removedRole)
+		assert.Contains(t, layout.Peers, removedPeer)
 	})
 }
@@ -169,9 +169,11 @@ type KnownNode struct {
 	HostName string `json:"hostname"`
 }
 
-// Role descibes a node's role in the garage cluster, i.e. what storage it is
-// providing.
-type Role struct {
+// PeerLayout describes the properties of a garage peer in the context of the
+// layout of the cluster.
+//
+// TODO This should be called Role.
+type PeerLayout struct {
 	ID string `json:"id"`
 	Capacity int `json:"capacity"` // Gb (SI units)
 	Zone string `json:"zone"`

@@ -180,7 +182,7 @@ type Role struct {
 
 // ClusterLayout describes the layout of the cluster as a whole.
 type ClusterLayout struct {
-	Roles []Role `json:"roles"`
+	Peers []PeerLayout `json:"roles"`
 }
 
 // ClusterStatus is returned from the Status endpoint, describing the currently
@@ -334,34 +336,42 @@ func (c *AdminClient) GetLayout(ctx context.Context) (ClusterLayout, error) {
 }
 
 // ApplyLayout modifies the layout of the garage cluster. Only layout of the
-// given roles will be modified/created/removed, other roles are not affected.
+// given peers will be modified/created/removed, other peers are not affected.
 func (c *AdminClient) ApplyLayout(
-	ctx context.Context, addModifyRoles []Role, removeRoleIDs []string,
+	ctx context.Context, addModifyPeers []PeerLayout, removePeerIDs []string,
 ) error {
-	type removeRole struct {
+	type removePeer struct {
 		ID string `json:"id"`
 		Remove bool `json:"remove"`
 	}
 
-	roles := make([]any, 0, len(addModifyRoles)+len(removeRoleIDs))
-	for _, p := range addModifyRoles {
-		roles = append(roles, p)
+	peers := make([]any, 0, len(addModifyPeers)+len(removePeerIDs))
+	for _, p := range addModifyPeers {
+		peers = append(peers, p)
 	}
-	for _, id := range removeRoleIDs {
-		roles = append(roles, removeRole{ID: id, Remove: true})
+	for _, id := range removePeerIDs {
+		peers = append(peers, removePeer{ID: id, Remove: true})
 	}
 
+	{
+		// https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html#tag/Layout/operation/ApplyLayout
+		err := c.do(ctx, nil, "POST", "/v1/layout", peers)
+		if err != nil {
+			return fmt.Errorf("staging layout changes: %w", err)
+		}
+	}
+
 	// https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html#tag/Layout/operation/GetLayout
 	var clusterLayout struct {
 		Version int `json:"version"`
-		StagedRoleChanges []Role `json:"stagedRoleChanges"`
+		StagedRoleChanges []PeerLayout `json:"stagedRoleChanges"`
 	}
 
-	// https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html#tag/Layout/operation/ApplyLayout
-	err := c.do(ctx, &clusterLayout, "POST", "/v1/layout", roles)
-	if err != nil {
-		return fmt.Errorf("staging layout changes: %w", err)
-	} else if len(clusterLayout.StagedRoleChanges) == 0 {
+	if err := c.do(ctx, &clusterLayout, "GET", "/v1/layout", nil); err != nil {
+		return fmt.Errorf("retrieving staged layout change: %w", err)
+	}
+
+	if len(clusterLayout.StagedRoleChanges) == 0 {
 		return nil
 	}
 

@@ -372,7 +382,7 @@ func (c *AdminClient) ApplyLayout(
 		Version: clusterLayout.Version + 1,
 	}
 
-	err = c.do(ctx, nil, "POST", "/v1/layout/apply", applyClusterLayout)
+	err := c.do(ctx, nil, "POST", "/v1/layout/apply", applyClusterLayout)
 	if err != nil {
 		return fmt.Errorf("applying new layout (new version:%d): %w", applyClusterLayout.Version, err)
 	}
@@ -23,8 +23,8 @@ type GarageTomlData struct {
 	RPCSecret string
 	AdminToken string
 
-	garage.LocalNode
-	BootstrapPeers []garage.RemoteNode
+	garage.LocalPeer
+	BootstrapPeers []garage.RemotePeer
 }
 
 var garageTomlTpl = template.Must(template.New("").Parse(`

@@ -38,7 +38,7 @@ rpc_bind_addr = "{{ .RPCAddr }}"
 rpc_public_addr = "{{ .RPCAddr }}"
 
 bootstrap_peers = [{{- range .BootstrapPeers }}
-	"{{ .RPCNodeAddr }}",
+	"{{ .RPCPeerAddr }}",
 {{ end -}}]
 
 [s3_api]

@@ -66,7 +66,7 @@ func WriteGarageTomlFile(
 ) (
 	bool, error,
 ) {
-	slices.SortFunc(data.BootstrapPeers, func(i, j garage.RemoteNode) int {
+	slices.SortFunc(data.BootstrapPeers, func(i, j garage.RemotePeer) int {
 		return cmp.Or(
 			cmp.Compare(i.IP, j.IP),
 			cmp.Compare(i.RPCPort, j.RPCPort),
@@ -6,39 +6,39 @@ import (
 	"strconv"
 )
 
-// RemoteNode describes all information necessary to connect to a given garage
+// RemotePeer describes all information necessary to connect to a given garage
 // node.
-type RemoteNode struct {
+type RemotePeer struct {
 	ID string
 	IP string
 	RPCPort int
 	S3APIPort int
 }
 
-// LocalNode describes the configuration of a local garage instance.
-type LocalNode struct {
-	RemoteNode
+// LocalPeer describes the configuration of a local garage instance.
+type LocalPeer struct {
+	RemotePeer
 
 	AdminPort int
 }
 
-// RPCAddr returns the address of the node's RPC port.
-func (p RemoteNode) RPCAddr() string {
+// RPCAddr returns the address of the peer's RPC port.
+func (p RemotePeer) RPCAddr() string {
 	return net.JoinHostPort(p.IP, strconv.Itoa(p.RPCPort))
 }
 
-// RPCNodeAddr returns the full node address (e.g. "id@ip:port") of the garage
+// RPCPeerAddr returns the full peer address (e.g. "id@ip:port") of the garage
 // node for use in communicating over RPC.
-func (p RemoteNode) RPCNodeAddr() string {
+func (p RemotePeer) RPCPeerAddr() string {
 	return fmt.Sprintf("%s@%s", p.ID, p.RPCAddr())
 }
 
-// S3APIAddr returns the address of the node's S3 API port.
-func (p RemoteNode) S3APIAddr() string {
+// S3APIAddr returns the address of the peer's S3 API port.
+func (p RemotePeer) S3APIAddr() string {
 	return net.JoinHostPort(p.IP, strconv.Itoa(p.S3APIPort))
 }
 
-// AdminAddr returns the address of the node's S3 API port.
-func (p LocalNode) AdminAddr() string {
+// AdminAddr returns the address of the peer's S3 API port.
+func (p LocalPeer) AdminAddr() string {
 	return net.JoinHostPort(p.IP, strconv.Itoa(p.AdminPort))
 }
tests.sh (new executable file, 16 lines)

#!/usr/bin/env bash

set -e

entrypoint="$(nix-build --no-out-link -A tests)"
this_user="$(whoami)"

echo "Requesting sudo in order to set thread capabilities, will drop back down to user '$this_user' immediately"

sudo -E capsh \
    --caps="cap_net_admin,cap_net_bind_service+eip cap_setpcap,cap_setuid,cap_setgid+ep" \
    --keep=1 \
    --user="$this_user" \
    --addamb=cap_net_admin \
    --addamb=cap_net_bind_service \
    -- "$entrypoint" "$@"
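Going by the USAGE text in tests/entrypoint.sh below, the wrapper above is presumably invoked from the repository root with optional flags and test-name regexes, for example:

```bash
./tests.sh                     # run every case under tests/cases
./tests.sh -v garage           # verbose output, only cases matching "garage"
./tests.sh --keep-tmp nebula   # keep the temp dir around for inspection
```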
tests/NOTES.txt (new file, 10 lines)

Ctrl+A X -> exits

qemu-system-aarch64 -nographic -cdrom tests/alpine-virt-3.18.4-aarch64.iso

Ctrl+Alt+G -> Escape mouse capture
qemu-system-x86_64 \
    -cdrom tests/virt/Win11_23H2_English_x64.iso \
    -m 8G \
    -boot order=d \
    -drive file=./tests/virt/winblows.qcow2
tests/cases/00-version.sh (new file, 3 lines)

isle version | grep -q 'Release:'
isle version | grep -q 'Platform:'
isle version | grep -q 'Build Platform:'
tests/cases/dnsmasq/00-hosts.sh (new file, 17 lines)

# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

function assert_a {
    want_ip="$1"
    hostname="$2"
    r="$(dig @"$current_ip" +noall +answer "$hostname")"
    echo "$r" | grep -q "$want_ip"
}

as_primus
assert_a "$primus_ip" primus.hosts.shared.test
assert_a "$secondus_ip" secondus.hosts.shared.test

as_secondus
assert_a "$primus_ip" primus.hosts.shared.test
assert_a "$secondus_ip" secondus.hosts.shared.test
tests/cases/garage/00-cli.sh (new file, 21 lines)

# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

function do_tests {
    status="$(isle garage cli status | tail -n+3)"

    [ "$(echo "$status" | wc -l)" = "3" ]
    echo "$status" | grep -q '10.6.9.1:3900'
    echo "$status" | grep -q '10.6.9.1:3910'
    echo "$status" | grep -q '10.6.9.1:3920'

    buckets="$(isle garage cli bucket list | tail -n+2)"
    [ "$(echo "$buckets" | wc -l)" = 1 ]
    echo "$buckets" | grep -q 'global-shared'
}

as_primus
do_tests

as_secondus
do_tests
tests/cases/garage/01-mc.sh (new file, 16 lines)

# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

function do_tests {
    files="$(isle garage mc -- tree --json garage)"
    [ "$(echo "$files" | jq -s '.|length')" -ge "1" ]

    file="$(echo "$files" | jq -sr '.[0].key')"
    [ "$(isle garage mc -- cat "garage/$file" | wc -c)" -gt "0" ]
}

as_primus
do_tests

as_secondus
do_tests
tests/cases/hosts/00-list.sh (new file, 20 lines)

# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

function do_tests {
    hosts="$(isle hosts list)"

    [ "$(echo "$hosts" | jq -r '.[0].Name')" = "primus" ]
    [ "$(echo "$hosts" | jq -r '.[0].VPN.IP')" = "10.6.9.1" ]
    [ "$(echo "$hosts" | jq -r '.[0].Storage.Instances|length')" = "3" ]

    [ "$(echo "$hosts" | jq -r '.[1].Name')" = "secondus" ]
    [ "$(echo "$hosts" | jq -r '.[1].VPN.IP')" = "$secondus_ip" ]
    [ "$(echo "$hosts" | jq -r '.[1].Storage.Instances|length')" = "0" ]
}

as_primus
do_tests

as_secondus
do_tests
tests/cases/hosts/01-create.sh (new file, 16 lines)

# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

bs="$secondus_bootstrap" # set in with-1-data-1-empty-node-network.sh

[ "$(jq -r <"$bs" '.Bootstrap.NetworkCreationParams.Domain')" = "shared.test" ]
[ "$(jq -r <"$bs" '.Bootstrap.NetworkCreationParams.Name')" = "testing" ]
[ "$(jq -r <"$bs" '.Bootstrap.SignedHostAssigned.Body.Name')" = "secondus" ]

[ "$(jq -r <"$bs" '.Bootstrap.Hosts.primus.PublicCredentials')" \
    = "$(jq -r <"$BOOTSTRAP_FILE" '.SignedHostAssigned.Body.PublicCredentials')" ]

[ "$(jq <"$bs" '.Bootstrap.Hosts.primus.Garage.Instances|length')" = "3" ]

[ "$(jq <"$bs" '.Secrets["garage-rpc-secret"]')" != "null" ]
tests/cases/nebula/00-show.sh (new file, 12 lines)

# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

info="$(isle nebula show)"

[ "$(echo "$info" | jq -r '.CACert')" \
    = "$(jq -r <"$BOOTSTRAP_FILE" '.CAPublicCredentials.Cert')" ]

[ "$(echo "$info" | jq -r '.SubnetCIDR')" = "10.6.9.0/24" ]
[ "$(echo "$info" | jq -r '.Lighthouses|length')" = "1" ]
[ "$(echo "$info" | jq -r '.Lighthouses[0].PublicAddr')" = "127.0.0.1:60000" ]
[ "$(echo "$info" | jq -r '.Lighthouses[0].IP')" = "10.6.9.1" ]
tests/cases/nebula/01-create-cert.sh (new file, 17 lines)

# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

nebula-cert keygen -out-key /dev/null -out-pub pubkey
cat pubkey

(
    isle nebula create-cert \
        --hostname non-esiste \
        --public-key-path pubkey \
        2>&1 || true \
) | grep '\[1002\] Host not found'

isle nebula create-cert \
    --hostname primus \
    --public-key-path pubkey \
    | grep -- '-----BEGIN NEBULA CERTIFICATE-----'
tests/cases/network/00-create.sh (new file, 12 lines)

# shellcheck source=../../utils/with-1-data-1-empty-node-network.sh
source "$UTILS"/with-1-data-1-empty-node-network.sh

[ "$(cat a/meta/isle/rpc_port)" = "3900" ]
[ "$(cat b/meta/isle/rpc_port)" = "3910" ]
[ "$(cat c/meta/isle/rpc_port)" = "3920" ]

[ "$(jq -r <"$BOOTSTRAP_FILE" '.NetworkCreationParams.ID')" != "" ]
[ "$(jq -r <"$BOOTSTRAP_FILE" '.NetworkCreationParams.Name')" = "testing" ]
[ "$(jq -r <"$BOOTSTRAP_FILE" '.NetworkCreationParams.Domain')" = "shared.test" ]

[ "$(jq -r <"$BOOTSTRAP_FILE" '.SignedHostAssigned.Body.Name')" = "primus" ]
tests/entrypoint.sh (new file, 115 lines)

set -e

# cd into script's directory
cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null
root=$(pwd)

export UTILS="$root"/utils

REGEXS=()

while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            cat <<EOF
USAGE: [flags] [test regexs...]
FLAGS
  --keep-tmp
  --verbose (-v)
  --help (-h)
EOF
            exit 1
            ;;
        -v|--verbose)
            VERBOSE=1
            shift
            ;;
        --keep-tmp)
            KEEP_TMP=1
            shift
            ;;
        *)
            REGEXS+=("$1")
            shift
            ;;
    esac
done

[ -n "$VERBOSE" ] && set -x

ROOT_TMPDIR="$(mktemp --tmpdir -d isle-tests.XXXXXX)"
if [ -z "$KEEP_TMP" ]; then trap 'rm -rf $ROOT_TMPDIR' EXIT; fi

TMPDIR="$ROOT_TMPDIR"

export ROOT_TMPDIR TMPDIR
echo "tmp dir is $ROOT_TMPDIR"

# Blackhole these directories so that tests don't accidentally use the host's
# real ones.
export XDG_RUNTIME_DIR=/dev/null
export XDG_STATE_HOME=/dev/null

test_files=$(
    find ./cases -type f -name '*.sh' \
        | sed "s|^\./cases/||" \
        | grep -v entrypoint.sh \
        | sort
)

for r in "${REGEXS[@]}"; do
    test_files="$(echo "$test_files" | grep "$r")"
done

echo -e "number of tests: $(echo "$test_files" | wc -l)\n"
for file in $test_files; do
    echo "Running test case: $file"

    if [ -z "$VERBOSE" ]; then
        output="$TMPDIR/$file.log"
        mkdir -p "$(dirname "$output")"
        exec 3>"$output"
    else
        exec 3>&1
    fi

    (
        export TEST_CASE_FILE="$file"

        if ! $SHELL -e -x "$root/cases/$file" >&3 2>&1; then
            echo "$file FAILED"
            if [ -z "$VERBOSE" ]; then
                echo "output of test is as follows"
                echo "------------------------------"
                cat "$output"
                echo "------------------------------"
            fi
            exit 1
        fi
    ) || TESTS_FAILED=1

    if [ -n "$TESTS_FAILED" ]; then break; fi
done


# Clean up any shared running networks. Each cleanup script is responsible for
# figuring out if its shared network was actually instantiated during any tests.

if [ -e "$ROOT_TMPDIR/cleanup-pids" ]; then
    echo "Cleaning up running pids"
    tac "$ROOT_TMPDIR/cleanup-pids" | while read -r line; do
        pid="$(echo "$line" | cut -d' ' -f1)"
        descr="$(echo "$line" | cut -d' ' -f2-)"
        echo "Killing $descr ($pid)"
        kill "$pid"
    done

    # This is easier than checking if the pids are still running, and for some
    # reason it doesn't occur until after the pids have died anyway
    echo "Waiting for appimage mounts to unmount"
    while [ "$(find "$ROOT_TMPDIR" -type d -name '*.mount_isle*' | wc -l)" -ge "1" ]; do
        sleep 1
    done
fi

if [ -z "$TESTS_FAILED" ]; then echo -e '\nall tests succeeded!'; fi
tests/utils/register-cleanup.sh (new file, 3 lines)

set -e

echo "$1" "$2" >> "$ROOT_TMPDIR/cleanup-pids"
tests/utils/shared-daemon-env.sh (new file, 18 lines)

set -e

base="$1"

TMPDIR="$ROOT_TMPDIR/$base"
XDG_RUNTIME_DIR="$TMPDIR/.run"
XDG_STATE_HOME="$TMPDIR/.state"

mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_STATE_HOME"

cat <<EOF
export TMPDIR="$TMPDIR"
export XDG_RUNTIME_DIR="$XDG_RUNTIME_DIR"
export XDG_STATE_HOME="$XDG_STATE_HOME"
export ISLE_DAEMON_HTTP_SOCKET_PATH="$ROOT_TMPDIR/$base-daemon.sock"
BOOTSTRAP_FILE="$XDG_STATE_HOME/isle/networks/$NETWORK_ID/bootstrap.json"
cd "$TMPDIR"
EOF
tests/utils/with-1-data-1-empty-node-network.sh (new file, 107 lines)

set -e

base="shared/1-data-1-empty"

ipNet="10.6.9.0/24"

primus_base="$base/primus"
primus_ip="10.6.9.1"

secondus_base="$base/secondus"

function as_primus {
    current_ip="$primus_ip"
    eval "$($SHELL "$UTILS/shared-daemon-env.sh" "$primus_base")"
}

function as_secondus {
    current_ip="$secondus_ip"
    eval "$($SHELL "$UTILS/shared-daemon-env.sh" "$secondus_base")"
}

# Even if it's already intialized, we want to put the caller in primus'
# environment
as_primus

secondus_bootstrap="$(pwd)/secondus-bootstrap.json"

if [ ! -d "$XDG_RUNTIME_DIR/isle" ]; then
    echo "Initializing shared single node network"

    mkdir a
    mkdir b
    mkdir c

    cat >daemon.yml <<EOF
networks:
  testing:
    vpn:
      public_addr: 127.0.0.1:60000
      tun:
        device: isle-primus
    storage:
      allocations:
        - data_path: a/data
          meta_path: a/meta
          capacity: 1
        - data_path: b/data
          meta_path: b/meta
          capacity: 1
        - data_path: c/data
          meta_path: c/meta
          capacity: 1
EOF

    isle daemon -l debug --config-path daemon.yml >daemon.log 2>&1 &
    pid="$!"
    $SHELL "$UTILS/register-cleanup.sh" "$pid" "1-data-1-empty-node-network/primus"

    echo "Waiting for primus daemon (process $pid) to start"
    while ! [ -e "$ISLE_DAEMON_HTTP_SOCKET_PATH" ]; do sleep 1; done

    echo "Creating 1-data-1-empty network"
    isle network create \
        --domain shared.test \
        --hostname primus \
        --ip-net "$ipNet" \
        --name "testing"

    echo "Creating secondus bootstrap"
    isle hosts create \
        --hostname secondus \
        > "$secondus_bootstrap"

    (
        as_secondus

        cat >daemon.yml <<EOF
vpn:
  tun:
    device: isle-secondus
EOF

        isle daemon -l debug -c daemon.yml >daemon.log 2>&1 &
        pid="$!"
        $SHELL "$UTILS/register-cleanup.sh" "$pid" "1-data-1-empty-node-network/secondus"

        echo "Waiting for secondus daemon (process $!) to start"
        while ! [ -e "$ISLE_DAEMON_HTTP_SOCKET_PATH" ]; do sleep 1; done

        echo "Joining secondus to the network"
        isle network join -b "$secondus_bootstrap"
    )
fi

secondus_ip="$(
    nebula-cert print -json \
        -path <(jq -r '.Bootstrap.Hosts["secondus"].PublicCredentials.Cert' "$secondus_bootstrap") \
        | jq -r '.details.ips[0]' \
        | cut -d/ -f1
)"

NETWORK_ID="$(jq '.Bootstrap.NetworkCreationParams.ID' "$secondus_bootstrap")"
export NETWORK_ID

# shared-daemon-env.sh depends on NETWORK_ID, so we re-call as_primus in order
# to fully populate the envvars we need.
as_primus
tests/utils/with-tmp-for-case.sh
Normal file
9
tests/utils/with-tmp-for-case.sh
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
set -e
|
||||||
|
|
||||||
|
TMPDIR="$TMPDIR/$TEST_CASE_FILE.tmp"
|
||||||
|
XDG_RUNTIME_DIR="$TMPDIR/.run"
|
||||||
|
XDG_STATE_HOME="$TMPDIR/.state"
|
||||||
|
|
||||||
|
mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_STATE_HOME"
|
||||||
|
|
||||||
|
cd "$TMPDIR"
|