isle/go/cmd/entrypoint/garage_util.go
Brian Picciano · 68f417b5ba · 2024-06-11 16:57:31 +02:00

Upgrade garage to v1.0.0

This required switching all garage admin API calls to the new v1
versions, and redoing how the global bucket key is created so it is
created via the "create key" API call.

package main

import (
	"context"
	"fmt"
	"isle/bootstrap"
	"isle/daemon"
	"isle/garage"
	"net"
	"path/filepath"
	"strconv"

	"code.betamike.com/micropelago/pmux/pmuxlib"
	"github.com/mediocregopher/mediocre-go-lib/v2/mctx"
	"github.com/mediocregopher/mediocre-go-lib/v2/mlog"
)
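
// garageAdminClientLogger returns a copy of the given logger namespaced for
// garage admin client logs.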
func garageAdminClientLogger(logger *mlog.Logger) *mlog.Logger {
	return logger.WithNamespace("garageAdminClient")
}

// newGarageAdminClient will return an AdminClient for a local garage instance,
// or it will _panic_ if there is no local instance configured.
func newGarageAdminClient(
	logger *mlog.Logger,
	hostBootstrap bootstrap.Bootstrap,
	daemonConfig daemon.Config,
) *garage.AdminClient {
	thisHost := hostBootstrap.ThisHost()

	return garage.NewAdminClient(
		net.JoinHostPort(
			thisHost.IP().String(),
			strconv.Itoa(daemonConfig.Storage.Allocations[0].AdminPort),
		),
		hostBootstrap.Garage.AdminToken,
		garageAdminClientLogger(logger),
	)
}
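
// waitForGarageAndNebula blocks until nebula is ready and every local garage
// instance configured in daemonConfig is responding on its admin API.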
func waitForGarageAndNebula(
	ctx context.Context,
	logger *mlog.Logger,
	hostBootstrap bootstrap.Bootstrap,
	daemonConfig daemon.Config,
) error {
	if err := waitForNebula(ctx, hostBootstrap); err != nil {
		return fmt.Errorf("waiting for nebula to start: %w", err)
	}

	allocs := daemonConfig.Storage.Allocations

	// If this host doesn't have any allocations specified then there are no
	// garage instances to wait for; nebula was all that was needed.
	if len(allocs) == 0 {
		return nil
	}

	adminClientLogger := garageAdminClientLogger(logger)

	for _, alloc := range allocs {
		adminAddr := net.JoinHostPort(
			hostBootstrap.ThisHost().IP().String(),
			strconv.Itoa(alloc.AdminPort),
		)

		adminClient := garage.NewAdminClient(
			adminAddr,
			hostBootstrap.Garage.AdminToken,
			adminClientLogger,
		)

		ctx := mctx.Annotate(ctx, "garageAdminAddr", adminAddr)
		logger.Debug(ctx, "waiting for garage instance to start")

		if err := adminClient.Wait(ctx); err != nil {
			return fmt.Errorf("waiting for garage instance %q to start up: %w", adminAddr, err)
		}
	}

	return nil
}

// bootstrapGarageHostForAlloc returns the bootstrap.GarageHostInstance which
// corresponds with the given alloc from the daemon config. This will panic if
// no associated instance can be found.
//
// This assumes that coalesceDaemonConfigAndBootstrap has already been called.
func bootstrapGarageHostForAlloc(
	host bootstrap.Host,
	alloc daemon.ConfigStorageAllocation,
) bootstrap.GarageHostInstance {
	for _, inst := range host.Garage.Instances {
		if inst.RPCPort == alloc.RPCPort {
			return inst
		}
	}

	panic(fmt.Sprintf("could not find alloc %+v in the bootstrap data", alloc))
}
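
// garageWriteChildConfig writes a garage.toml config file for the given
// storage allocation into the runtime directory, and returns the path to the
// written file.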
func garageWriteChildConfig(
	hostBootstrap bootstrap.Bootstrap,
	alloc daemon.ConfigStorageAllocation,
) (
	string, error,
) {
	thisHost := hostBootstrap.ThisHost()
	id := bootstrapGarageHostForAlloc(thisHost, alloc).ID

	peer := garage.LocalPeer{
		RemotePeer: garage.RemotePeer{
			ID:        id,
			IP:        thisHost.IP().String(),
			RPCPort:   alloc.RPCPort,
			S3APIPort: alloc.S3APIPort,
		},
		AdminPort: alloc.AdminPort,
	}

	garageTomlPath := filepath.Join(
		envRuntimeDirPath, fmt.Sprintf("garage-%d.toml", alloc.RPCPort),
	)

	err := garage.WriteGarageTomlFile(garageTomlPath, garage.GarageTomlData{
		MetaPath: alloc.MetaPath,
		DataPath: alloc.DataPath,

		RPCSecret:  hostBootstrap.Garage.RPCSecret,
		AdminToken: hostBootstrap.Garage.AdminToken,

		RPCAddr:   peer.RPCAddr(),
		S3APIAddr: peer.S3APIAddr(),
		AdminAddr: peer.AdminAddr(),

		BootstrapPeers: hostBootstrap.GarageRPCPeerAddrs(),
	})
	if err != nil {
		return "", fmt.Errorf("creating garage.toml file at %q: %w", garageTomlPath, err)
	}

	return garageTomlPath, nil
}
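
// garagePmuxProcConfigs returns a pmux process config for each garage
// instance which should run on this host, writing out each instance's config
// file as a side effect.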
func garagePmuxProcConfigs(
	hostBootstrap bootstrap.Bootstrap,
	daemonConfig daemon.Config,
) (
	[]pmuxlib.ProcessConfig, error,
) {
	var pmuxProcConfigs []pmuxlib.ProcessConfig

	for _, alloc := range daemonConfig.Storage.Allocations {
		childConfigPath, err := garageWriteChildConfig(hostBootstrap, alloc)
		if err != nil {
			return nil, fmt.Errorf("writing child config file for alloc %+v: %w", alloc, err)
		}

		pmuxProcConfigs = append(pmuxProcConfigs, pmuxlib.ProcessConfig{
			Name: fmt.Sprintf("garage-%d", alloc.RPCPort),
			Cmd:  binPath("garage"),
			Args: []string{"-c", childConfigPath, "server"},
			StartAfterFunc: func(ctx context.Context) error {
				return waitForNebula(ctx, hostBootstrap)
			},
		})
	}

	return pmuxProcConfigs, nil
}
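
// garageInitializeGlobalBucket creates the global bucket and a key with full
// permissions on it, via a local garage instance's admin API, and returns the
// credentials for that key.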
func garageInitializeGlobalBucket(
	ctx context.Context,
	logger *mlog.Logger,
	hostBootstrap bootstrap.Bootstrap,
	daemonConfig daemon.Config,
) (
	garage.S3APICredentials, error,
) {
	adminClient := newGarageAdminClient(logger, hostBootstrap, daemonConfig)

	// create the key which will be shared for access to the global bucket
	var createKeyRes struct {
		ID     string `json:"accessKeyId"`
		Secret string `json:"secretAccessKey"`
	}
	err := adminClient.Do(
		ctx, &createKeyRes, "POST", "/v1/key", map[string]string{
			"name": "shared-global-bucket-key",
		},
	)
	if err != nil {
		return garage.S3APICredentials{}, fmt.Errorf(
			"creating global bucket key: %w", err,
		)
	}

	// create global bucket
	var createBucketRes struct {
		ID string `json:"id"`
	}
	err = adminClient.Do(
		ctx, &createBucketRes, "POST", "/v1/bucket", map[string]string{
			"globalAlias": garage.GlobalBucket,
		},
	)
	if err != nil {
		return garage.S3APICredentials{}, fmt.Errorf(
			"creating global bucket: %w", err,
		)
	}

	// allow the shared global bucket key to perform all operations on the
	// bucket
	err = adminClient.Do(ctx, nil, "POST", "/v1/bucket/allow", map[string]interface{}{
		"bucketId":    createBucketRes.ID,
		"accessKeyId": createKeyRes.ID,
		"permissions": map[string]bool{
			"read":  true,
			"write": true,
		},
	})
	if err != nil {
		return garage.S3APICredentials{}, fmt.Errorf(
			"granting permissions to shared global bucket key: %w", err,
		)
	}

	return garage.S3APICredentials{
		ID:     createKeyRes.ID,
		Secret: createKeyRes.Secret,
	}, nil
}
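
// garageApplyLayout stages a cluster layout update for this host's
// allocations with a local garage instance, and applies it if any changes
// ended up being staged.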
func garageApplyLayout(
	ctx context.Context,
	logger *mlog.Logger,
	hostBootstrap bootstrap.Bootstrap,
	daemonConfig daemon.Config,
) error {
	var (
		adminClient = newGarageAdminClient(logger, hostBootstrap, daemonConfig)
		thisHost    = hostBootstrap.ThisHost()
		hostName    = thisHost.Name
		allocs      = daemonConfig.Storage.Allocations
	)

	type peerLayout struct {
		ID       string   `json:"id"`
		Capacity int      `json:"capacity"`
		Zone     string   `json:"zone"`
		Tags     []string `json:"tags"`
	}
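
	// Stage a layout entry for each of this host's allocations. This
	// clusterLayout is scoped to the block so that the name can be reused for
	// the staged layout retrieved afterwards.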
	{
		clusterLayout := make([]peerLayout, len(allocs))

		for i, alloc := range allocs {
			id := bootstrapGarageHostForAlloc(thisHost, alloc).ID

			zone := hostName
			if alloc.Zone != "" {
				zone = alloc.Zone
			}

			clusterLayout[i] = peerLayout{
				ID:       id,
				Capacity: alloc.Capacity * 1_000_000_000,
				Zone:     zone,
				Tags:     []string{},
			}
		}

		err := adminClient.Do(ctx, nil, "POST", "/v1/layout", clusterLayout)
		if err != nil {
			return fmt.Errorf("staging layout changes: %w", err)
		}
	}
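
	// Retrieve the staged layout; if the submitted layout matched what was
	// already applied then there will be no staged changes, and nothing more
	// to do.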
	var clusterLayout struct {
		Version           int          `json:"version"`
		StagedRoleChanges []peerLayout `json:"stagedRoleChanges"`
	}

	if err := adminClient.Do(ctx, &clusterLayout, "GET", "/v1/layout", nil); err != nil {
		return fmt.Errorf("retrieving staged layout change: %w", err)
	}

	if len(clusterLayout.StagedRoleChanges) == 0 {
		return nil
	}
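
	// Applying staged changes requires submitting the next layout version
	// number, hence the +1.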
	applyClusterLayout := struct {
		Version int `json:"version"`
	}{
		Version: clusterLayout.Version + 1,
	}

	err := adminClient.Do(ctx, nil, "POST", "/v1/layout/apply", applyClusterLayout)
	if err != nil {
		return fmt.Errorf("applying new layout (new version:%d): %w", applyClusterLayout.Version, err)
	}

	return nil
}