936ca8d48f
The new code runs the equivalent functionality within the daemon's Go code. This required making Env immutable in order to prevent race conditions (which really should have been done from the beginning anyway).
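For context, the immutability change amounts to passing Env around by value and deriving modified copies rather than mutating shared state. A minimal sketch of the pattern, with hypothetical names (only RuntimeDirPath appears in the real Env below; the WithRuntimeDir helper is illustrative, not the actual crypticnet.Env API):

// Sketch only: field and method names are illustrative assumptions,
// except RuntimeDirPath, which the real Env exposes.
type Env struct {
	RuntimeDirPath string
	// ... bootstrap data, daemon config, etc ...
}

// The value receiver gives the method its own copy of e, so "modifying"
// the copy and returning it never touches an Env held by other goroutines.
func (e Env) WithRuntimeDir(path string) Env {
	e.RuntimeDirPath = path
	return e
}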
274 lines
6.6 KiB
Go
package entrypoint

import (
	"context"
	crypticnet "cryptic-net"
	"cryptic-net/garage"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"github.com/cryptic-io/pmux/pmuxlib"
)
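// waitForGarage blocks until every garage instance allocated to this host is
// up and responding on its admin address, or until the context is cancelled.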
func waitForGarage(ctx context.Context, env crypticnet.Env) error {

	for _, alloc := range env.ThisDaemon().Storage.Allocations {

		adminAddr := net.JoinHostPort(
			env.Bootstrap.ThisHost().Nebula.IP,
			strconv.Itoa(alloc.AdminPort),
		)

		adminClient := garage.NewAdminClient(
			adminAddr,
			env.Bootstrap.GarageAdminToken,
		)

		if err := adminClient.Wait(ctx); err != nil {
			return fmt.Errorf("waiting for instance %q to start up: %w", adminAddr, err)
		}
	}

	return nil
}
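// waitForGarageArgs prefixes the given command with one "wait-for" directive
// per local garage RPC address, so the command only runs once all local garage
// instances are reachable. If this host has no storage allocations it falls
// back to waiting on nebula only.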
func waitForGarageArgs(env crypticnet.Env, args ...string) []string {

	thisHost := env.Bootstrap.ThisHost()
	allocs := env.ThisDaemon().Storage.Allocations

	if len(allocs) == 0 {
		return waitForNebulaArgs(env, args...)
	}

	var preArgs []string

	for _, alloc := range allocs {
		preArgs = append(
			preArgs,
			"wait-for",
			net.JoinHostPort(thisHost.Nebula.IP, strconv.Itoa(alloc.RPCPort)),
			"--",
		)
	}

	return append(preArgs, args...)
}
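// garageWriteChildConf writes the garage.toml for a single storage allocation
// into the runtime directory, and the allocation's RPC keypair into its meta
// directory, returning the path to the written garage.toml.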
func garageWriteChildConf(
	env crypticnet.Env,
	alloc crypticnet.DaemonYmlStorageAllocation,
) (
	string, error,
) {

	if err := os.MkdirAll(alloc.MetaPath, 0750); err != nil {
		return "", fmt.Errorf("making directory %q: %w", alloc.MetaPath, err)
	}

	thisHost := env.Bootstrap.ThisHost()

	peer := garage.Peer{
		IP:        thisHost.Nebula.IP,
		RPCPort:   alloc.RPCPort,
		S3APIPort: alloc.S3APIPort,
	}

	// the RPC keypair is derived from the peer information, and gets written
	// into the allocation's meta directory.
	pubKey, privKey := peer.RPCPeerKey()

	nodeKeyPath := filepath.Join(alloc.MetaPath, "node_key")
	nodeKeyPubPath := filepath.Join(alloc.MetaPath, "node_keypub")

	if err := os.WriteFile(nodeKeyPath, privKey, 0400); err != nil {
		return "", fmt.Errorf("writing private key to %q: %w", nodeKeyPath, err)

	} else if err := os.WriteFile(nodeKeyPubPath, pubKey, 0440); err != nil {
		return "", fmt.Errorf("writing public key to %q: %w", nodeKeyPubPath, err)
	}

	garageTomlPath := filepath.Join(
		env.RuntimeDirPath, fmt.Sprintf("garage-%d.toml", alloc.RPCPort),
	)

	err := garage.WriteGarageTomlFile(garageTomlPath, garage.GarageTomlData{
		MetaPath: alloc.MetaPath,
		DataPath: alloc.DataPath,

		RPCSecret:  env.Bootstrap.GarageRPCSecret,
		AdminToken: env.Bootstrap.GarageAdminToken,

		RPCAddr:   net.JoinHostPort(thisHost.Nebula.IP, strconv.Itoa(alloc.RPCPort)),
		APIAddr:   net.JoinHostPort(thisHost.Nebula.IP, strconv.Itoa(alloc.S3APIPort)),
		AdminAddr: net.JoinHostPort(thisHost.Nebula.IP, strconv.Itoa(alloc.AdminPort)),

		BootstrapPeers: env.Bootstrap.GarageRPCPeerAddrs(),
	})

	if err != nil {
		return "", fmt.Errorf("creating garage.toml file at %q: %w", garageTomlPath, err)
	}

	return garageTomlPath, nil
}
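// garageChildrenPmuxProcConfigs writes a child config file for each storage
// allocation, and returns a pmux process config which runs `garage server`
// against each one.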
func garageChildrenPmuxProcConfigs(env crypticnet.Env) ([]pmuxlib.ProcessConfig, error) {

	var pmuxProcConfigs []pmuxlib.ProcessConfig

	for _, alloc := range env.ThisDaemon().Storage.Allocations {

		childConfPath, err := garageWriteChildConf(env, alloc)

		if err != nil {
			return nil, fmt.Errorf("writing child config file for alloc %+v: %w", alloc, err)
		}

		pmuxProcConfigs = append(pmuxProcConfigs, pmuxlib.ProcessConfig{
			Name:        fmt.Sprintf("garage-%d", alloc.RPCPort),
			Cmd:         "garage",
			Args:        []string{"-c", childConfPath, "server"},
			SigKillWait: 1 * time.Minute,
		})
	}

	return pmuxProcConfigs, nil
}
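// garageApplyLayoutDiffPmuxProcConfig returns a pmux process config which runs
// the garage-apply-layout-diff script once garage is up, and which is not
// restarted after it exits successfully.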
func garageApplyLayoutDiffPmuxProcConfig(env crypticnet.Env) pmuxlib.ProcessConfig {
	return pmuxlib.ProcessConfig{
		Name:        "garage-apply-layout-diff",
		Cmd:         "bash",
		Args:        waitForGarageArgs(env, "bash", "garage-apply-layout-diff"),
		NoRestartOn: []int{0},
	}
}
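// garageInitializeGlobalBucket imports the shared global bucket key, creates
// the global bucket, and grants the key read/write permission on it, all via
// the garage admin API.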
func garageInitializeGlobalBucket(ctx context.Context, env crypticnet.Env) error {

	var (
		adminClient       = env.GarageAdminClient()
		globalBucketCreds = env.Bootstrap.GarageGlobalBucketS3APICredentials
	)

	// first attempt to import the key
	err := adminClient.Do(ctx, nil, "POST", "/v0/key/import", map[string]string{
		"accessKeyId":     globalBucketCreds.ID,
		"secretAccessKey": globalBucketCreds.Secret,
		"name":            "shared-global-bucket-key",
	})

	if err != nil {
		return fmt.Errorf("importing global bucket key into garage: %w", err)
	}

	// create global bucket
	err = adminClient.Do(ctx, nil, "POST", "/v0/bucket", map[string]string{
		"globalAlias": garage.GlobalBucket,
	})

	if err != nil {
		return fmt.Errorf("creating global bucket: %w", err)
	}

	// retrieve newly created bucket's id
	var getBucketRes struct {
		ID string `json:"id"`
	}

	err = adminClient.Do(
		ctx, &getBucketRes,
		"GET", "/v0/bucket?globalAlias="+garage.GlobalBucket, nil,
	)

	if err != nil {
		return fmt.Errorf("fetching global bucket id: %w", err)
	}

	// allow shared global bucket key to perform all operations
	err = adminClient.Do(ctx, nil, "POST", "/v0/bucket/allow", map[string]interface{}{
		"bucketId":    getBucketRes.ID,
		"accessKeyId": globalBucketCreds.ID,
		"permissions": map[string]bool{
			"read":  true,
			"write": true,
		},
	})

	if err != nil {
		return fmt.Errorf("granting permissions to shared global bucket key: %w", err)
	}

	return nil
}
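// garageApplyLayout stages a layout entry for each local storage allocation,
// then, if any role changes are actually staged, applies them as the next
// layout version.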
func garageApplyLayout(ctx context.Context, env crypticnet.Env) error {

	var (
		adminClient = env.GarageAdminClient()
		thisHost    = env.Bootstrap.ThisHost()
		hostName    = thisHost.Name
		ip          = thisHost.Nebula.IP
		allocs      = env.ThisDaemon().Storage.Allocations
	)

	type peerLayout struct {
		Capacity int      `json:"capacity"`
		Zone     string   `json:"zone"`
		Tags     []string `json:"tags"`
	}

	{
		clusterLayout := map[string]peerLayout{}

		for _, alloc := range allocs {

			peer := garage.Peer{
				IP:        ip,
				RPCPort:   alloc.RPCPort,
				S3APIPort: alloc.S3APIPort,
			}

			clusterLayout[peer.RPCPeerID()] = peerLayout{
				Capacity: alloc.Capacity / 100,
				Zone:     hostName,
			}
		}

		err := adminClient.Do(ctx, nil, "POST", "/v0/layout", clusterLayout)
		if err != nil {
			return fmt.Errorf("staging layout changes: %w", err)
		}
	}

	var clusterLayout struct {
		Version           int                   `json:"version"`
		StagedRoleChanges map[string]peerLayout `json:"stagedRoleChanges"`
	}

	if err := adminClient.Do(ctx, &clusterLayout, "GET", "/v0/layout", nil); err != nil {
		return fmt.Errorf("retrieving staged layout change: %w", err)
	}

	// if the staged layout is a no-op then there's nothing to apply
	if len(clusterLayout.StagedRoleChanges) == 0 {
		return nil
	}

	// applying the staged changes requires bumping the layout version number
	applyClusterLayout := struct {
		Version int `json:"version"`
	}{
		Version: clusterLayout.Version + 1,
	}

	err := adminClient.Do(ctx, nil, "POST", "/v0/layout/apply", applyClusterLayout)
	if err != nil {
		return fmt.Errorf("applying new layout (new version:%d): %w", applyClusterLayout.Version, err)
	}

	return nil
}