package network

import (
	"isle/bootstrap"
	"isle/daemon/daecommon"
	"isle/garage"
	"isle/jsonutil"
	"isle/nebula"
	"testing"

	"github.com/stretchr/testify/assert"
)

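// TestCreate asserts that the creation params stored in a new network's state
// directory match those recorded in its bootstrap.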
func TestCreate(t *testing.T) {
	var (
		h       = newIntegrationHarness(t)
		network = h.createNetwork(t, "primus", nil)
	)

	gotCreationParams, err := LoadCreationParams(network.stateDir)
	assert.NoError(t, err)
	assert.Equal(
		t, gotCreationParams, network.getBootstrap(t).NetworkCreationParams,
	)
}

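// TestLoad asserts that a network which has been shut down can be loaded
// again from its existing state directory.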
func TestLoad(t *testing.T) {
	var (
		h       = newIntegrationHarness(t)
		network = h.createNetwork(t, "primus", &createNetworkOpts{
			manualShutdown: true,
		})
	)

	t.Log("Shutting down network")
	assert.NoError(t, network.Shutdown())

	t.Log("Calling Load")
	loadedNetwork, err := Load(
		h.ctx,
		h.logger.WithNamespace("loadedNetwork"),
		network.getConfig(t),
		getEnvBinDirPath(),
		network.stateDir,
		h.mkDir(t, "runtime"),
		network.opts,
	)
	assert.NoError(t, err)

	t.Cleanup(func() {
		t.Log("Shutting down loadedNetwork")
		assert.NoError(t, loadedNetwork.Shutdown())
	})
}

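// TestJoin asserts that a second host can join an existing network, with and
// without storage allocations, and that both hosts then see the same set of
// hosts.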
func TestJoin(t *testing.T) {
	t.Run("simple", func(t *testing.T) {
		var (
			h        = newIntegrationHarness(t)
			primus   = h.createNetwork(t, "primus", nil)
			secondus = h.joinNetwork(t, primus, "secondus", nil)
		)

		primusHosts, err := primus.GetHosts(h.ctx)
		assert.NoError(t, err)

		secondusHosts, err := secondus.GetHosts(h.ctx)
		assert.NoError(t, err)

		assert.Equal(t, primusHosts, secondusHosts)
	})

	t.Run("with alloc", func(t *testing.T) {
		var (
			h        = newIntegrationHarness(t)
			primus   = h.createNetwork(t, "primus", nil)
			secondus = h.joinNetwork(t, primus, "secondus", &joinNetworkOpts{
				networkConfigOpts: &networkConfigOpts{
					numStorageAllocs: 1,
				},
			})
		)

		t.Log("reloading primus' hosts")
		assert.NoError(t, primus.Network.(*network).reloadHosts(h.ctx))

		primusHosts, err := primus.GetHosts(h.ctx)
		assert.NoError(t, err)

		secondusHosts, err := secondus.GetHosts(h.ctx)
		assert.NoError(t, err)

		assert.Equal(t, primusHosts, secondusHosts)
	})
}

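// TestNetwork_GetConfig asserts that GetConfig returns the same config the
// test harness created the network with.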
func TestNetwork_GetConfig(t *testing.T) {
	var (
		h       = newIntegrationHarness(t)
		network = h.createNetwork(t, "primus", nil)
	)

	config, err := network.GetConfig(h.ctx)
	assert.NoError(t, err)

	assert.Equal(t, config, network.getConfig(t))
}

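// TestNetwork_SetConfig asserts that SetConfig adds and removes storage
// allocations as expected, updating the host's bootstrap info and the garage
// cluster layout to match.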
func TestNetwork_SetConfig(t *testing.T) {
	// allocsToRoles maps a host's garage allocations to the roles which are
	// expected to appear in the garage cluster layout for that host.
	allocsToRoles := func(
		hostName nebula.HostName, allocs []bootstrap.GarageHostInstance,
	) []garage.Role {
		roles := make([]garage.Role, len(allocs))
		for i := range allocs {
			roles[i] = garage.Role{
				ID:       allocs[i].ID,
				Capacity: 1_000_000_000,
				Zone:     string(hostName),
				Tags:     []string{},
			}
		}
		return roles
	}

t.Run("add storage alloc", func(t *testing.T) {
|
|
var (
|
|
h = newIntegrationHarness(t)
|
|
network = h.createNetwork(t, "primus", nil)
|
|
networkConfig = network.getConfig(t)
|
|
)
|
|
|
|
networkConfig.Storage.Allocations = append(
|
|
networkConfig.Storage.Allocations,
|
|
daecommon.ConfigStorageAllocation{
|
|
DataPath: h.mkDir(t, "data").Path,
|
|
MetaPath: h.mkDir(t, "meta").Path,
|
|
Capacity: 1,
|
|
S3APIPort: 4900,
|
|
RPCPort: 4901,
|
|
AdminPort: 4902,
|
|
},
|
|
)
|
|
|
|
assert.NoError(t, network.SetConfig(h.ctx, networkConfig))
|
|
|
|
t.Log("Checking that the Host information was updated")
|
|
newHostsByName := network.getHostsByName(t)
|
|
newHost, ok := newHostsByName[network.hostName]
|
|
assert.True(t, ok)
|
|
|
|
allocs := newHost.HostConfigured.Garage.Instances
|
|
assert.Len(t, allocs, 4)
|
|
|
|
newAlloc := allocs[3]
|
|
assert.NotEmpty(t, newAlloc.ID)
|
|
newAlloc.ID = ""
|
|
assert.Equal(t, bootstrap.GarageHostInstance{
|
|
S3APIPort: 4900,
|
|
RPCPort: 4901,
|
|
}, newAlloc)
|
|
|
|
t.Log("Checking that the bootstrap file was written with the new host config")
|
|
var storedBootstrap bootstrap.Bootstrap
|
|
assert.NoError(t, jsonutil.LoadFile(
|
|
&storedBootstrap, bootstrap.StateDirPath(network.stateDir.Path),
|
|
))
|
|
assert.Equal(t, newHostsByName, storedBootstrap.Hosts)
|
|
|
|
t.Log("Checking that garage layout contains the new allocation")
|
|
expRoles := allocsToRoles(network.hostName, allocs)
|
|
layout, err := network.garageAdminClient(t).GetLayout(h.ctx)
|
|
assert.NoError(t, err)
|
|
assert.ElementsMatch(t, expRoles, layout.Roles)
|
|
})
|
|
|
|
t.Run("remove storage alloc", func(t *testing.T) {
|
|
var (
|
|
h = newIntegrationHarness(t)
|
|
network = h.createNetwork(t, "primus", &createNetworkOpts{
|
|
numStorageAllocs: 4,
|
|
})
|
|
networkConfig = network.getConfig(t)
|
|
|
|
prevHost = network.getHostsByName(t)[network.hostName]
|
|
removedAlloc = networkConfig.Storage.Allocations[3]
|
|
removedGarageInst = daecommon.BootstrapGarageHostForAlloc(
|
|
prevHost, removedAlloc,
|
|
)
|
|
)
|
|
|
|
networkConfig.Storage.Allocations = networkConfig.Storage.Allocations[:3]
|
|
assert.NoError(t, network.SetConfig(h.ctx, networkConfig))
|
|
|
|
t.Log("Checking that the Host information was updated")
|
|
newHostsByName := network.getHostsByName(t)
|
|
newHost, ok := newHostsByName[network.hostName]
|
|
assert.True(t, ok)
|
|
|
|
allocs := newHost.HostConfigured.Garage.Instances
|
|
assert.Len(t, allocs, 3)
|
|
assert.NotContains(t, allocs, removedGarageInst)
|
|
|
|
t.Log("Checking that the bootstrap file was written with the new host config")
|
|
var storedBootstrap bootstrap.Bootstrap
|
|
assert.NoError(t, jsonutil.LoadFile(
|
|
&storedBootstrap, bootstrap.StateDirPath(network.stateDir.Path),
|
|
))
|
|
assert.Equal(t, newHostsByName, storedBootstrap.Hosts)
|
|
|
|
t.Log("Checking that garage layout contains the new allocation")
		expRoles := allocsToRoles(network.hostName, allocs)
		layout, err := network.garageAdminClient(t).GetLayout(h.ctx)
		assert.NoError(t, err)
		assert.ElementsMatch(t, expRoles, layout.Roles)
	})

t.Run("remove all storage allocs", func(t *testing.T) {
|
|
var (
|
|
h = newIntegrationHarness(t)
|
|
primus = h.createNetwork(t, "primus", nil)
|
|
secondus = h.joinNetwork(t, primus, "secondus", &joinNetworkOpts{
|
|
networkConfigOpts: &networkConfigOpts{
|
|
numStorageAllocs: 1,
|
|
},
|
|
})
|
|
networkConfig = secondus.getConfig(t)
|
|
|
|
prevHost = secondus.getHostsByName(t)[secondus.hostName]
|
|
removedAlloc = networkConfig.Storage.Allocations[0]
|
|
removedRole = allocsToRoles(
|
|
secondus.hostName, prevHost.Garage.Instances,
|
|
)[0]
|
|
removedGarageInst = daecommon.BootstrapGarageHostForAlloc(
|
|
prevHost, removedAlloc,
|
|
)
|
|
|
|
primusGarageAdminClient = primus.garageAdminClient(t)
|
|
)
|
|
|
|
networkConfig.Storage.Allocations = nil
|
|
assert.NoError(t, secondus.SetConfig(h.ctx, networkConfig))
|
|
|
|
t.Log("Checking that the Host information was updated")
|
|
newHostsByName := primus.getHostsByName(t)
|
|
newHost, ok := newHostsByName[secondus.hostName]
|
|
assert.True(t, ok)
|
|
|
|
allocs := newHost.HostConfigured.Garage.Instances
|
|
		assert.Empty(t, allocs)
		assert.NotContains(t, allocs, removedGarageInst)

		t.Log("Checking that garage layout still contains the old allocation")
		layout, err := primusGarageAdminClient.GetLayout(h.ctx)
		assert.NoError(t, err)
		assert.Contains(t, layout.Roles, removedRole)

		t.Log("Removing orphan garage nodes with primus")
		assert.NoError(
			t, primus.Network.(*network).removeOrphanGarageNodes(h.ctx),
		)

		t.Log("Checking that garage layout no longer contains the old allocation")
		layout, err = primusGarageAdminClient.GetLayout(h.ctx)
		assert.NoError(t, err)
		assert.NotContains(t, layout.Roles, removedRole)
	})

	// TODO make sure that if two nodes each have 3 allocs, and one removes all
	// 3, the other is able to retain availability of all data. This means that
	// the garage nodes on the node which removed all allocs need to stay online
	// for some time, in order to replicate data to the leftover nodes.
}