Write rpc_port file to garage meta dir
On subsequent startups this file is used to determine the rpc port, which prevents the user from accidentally changing the port.
This commit is contained in:
parent ca003eaf85
commit 1dc22701cd
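For context, the behavior this commit adds to garage.InitAlloc boils down to a persist-or-load step for the rpc port: the first startup records the configured port under the alloc's meta directory, and every later startup reads that file back instead of trusting the config. The sketch below illustrates that pattern in isolation; the helper name loadOrInitRPCPort and its standalone error handling are illustrative only, since the real InitAlloc also initializes the node key pair in the same pass.

package example

import (
    "fmt"
    "os"
    "path/filepath"
    "strconv"
)

// loadOrInitRPCPort returns the rpc port recorded under the garage meta
// directory, writing initRPCPort there first if no record exists yet. Later
// startups therefore keep using whatever port was chosen on the first run,
// even if the configured port changes.
func loadOrInitRPCPort(metaDirPath string, initRPCPort int) (int, error) {

    rpcPortPath := filepath.Join(metaDirPath, "cryptic-net", "rpc_port")

    if b, err := os.ReadFile(rpcPortPath); err == nil {
        // The file already exists: the previously persisted port wins.
        return strconv.Atoi(string(b))

    } else if !os.IsNotExist(err) {
        return 0, fmt.Errorf("reading rpc port from %q: %w", rpcPortPath, err)
    }

    // First startup for this alloc: record the configured port.
    if err := os.MkdirAll(filepath.Dir(rpcPortPath), 0750); err != nil {
        return 0, fmt.Errorf("creating directory for %q: %w", rpcPortPath, err)
    }

    if err := os.WriteFile(rpcPortPath, []byte(strconv.Itoa(initRPCPort)), 0440); err != nil {
        return 0, fmt.Errorf("writing rpc port to %q: %w", rpcPortPath, err)
    }

    return initRPCPort, nil
}

The port that comes back from this step is also written back into the daemon config (allocs[i].RPCPort = rpcPort in the diff below), so the rest of the daemon always sees the persisted value rather than whatever was most recently configured.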
@@ -183,7 +183,7 @@ var subCmdAdminCreateNetwork = subCmd{
     hostBootstrap.Garage.AdminToken = randStr(32)
     hostBootstrap.Garage.GlobalBucketS3APICredentials = garage.NewS3APICredentials()
 
-    if hostBootstrap, err = mergeDaemonConfigIntoBootstrap(hostBootstrap, daemonConfig); err != nil {
+    if hostBootstrap, daemonConfig, err = coalesceDaemonConfigAndBootstrap(hostBootstrap, daemonConfig); err != nil {
         return fmt.Errorf("merging daemon config into bootstrap data: %w", err)
     }
 
@@ -305,7 +305,7 @@ var subCmdDaemon = subCmd{
         // up-to-date possible bootstrap. This updated bootstrap will later get
         // updated in garage using bootstrap.PutGarageBoostrapHost, so other
         // hosts will see it as well.
-        if hostBootstrap, err = mergeDaemonConfigIntoBootstrap(hostBootstrap, daemonConfig); err != nil {
+        if hostBootstrap, daemonConfig, err = coalesceDaemonConfigAndBootstrap(hostBootstrap, daemonConfig); err != nil {
             return fmt.Errorf("merging daemon config into bootstrap data: %w", err)
         }
 
@@ -9,11 +9,11 @@ import (
     "time"
 )
 
-func mergeDaemonConfigIntoBootstrap(
+func coalesceDaemonConfigAndBootstrap(
     hostBootstrap bootstrap.Bootstrap,
     daemonConfig daemon.Config,
 ) (
-    bootstrap.Bootstrap, error,
+    bootstrap.Bootstrap, daemon.Config, error,
 ) {
 
     host := bootstrap.Host{
@@ -26,28 +26,30 @@ func mergeDaemonConfigIntoBootstrap(
 
     if allocs := daemonConfig.Storage.Allocations; len(allocs) > 0 {
 
-        for _, alloc := range allocs {
+        for i, alloc := range allocs {
 
-            id, err := garage.InitAlloc(alloc.MetaPath)
+            id, rpcPort, err := garage.InitAlloc(alloc.MetaPath, alloc.RPCPort)
             if err != nil {
-                return bootstrap.Bootstrap{}, fmt.Errorf("initializing alloc at %q: %w", alloc.MetaPath, err)
+                return bootstrap.Bootstrap{}, daemon.Config{}, fmt.Errorf("initializing alloc at %q: %w", alloc.MetaPath, err)
             }
 
             host.Garage.Instances = append(host.Garage.Instances, bootstrap.GarageHostInstance{
                 ID:        id,
-                RPCPort:   alloc.RPCPort,
+                RPCPort:   rpcPort,
                 S3APIPort: alloc.S3APIPort,
             })
+
+            allocs[i].RPCPort = rpcPort
         }
     }
 
     hostBootstrap.Hosts[host.Name] = host
 
     if err := writeBootstrapToDataDir(hostBootstrap); err != nil {
-        return bootstrap.Bootstrap{}, fmt.Errorf("writing bootstrap file: %w", err)
+        return bootstrap.Bootstrap{}, daemon.Config{}, fmt.Errorf("writing bootstrap file: %w", err)
     }
 
-    return hostBootstrap, nil
+    return hostBootstrap, daemonConfig, nil
 }
 
 func doOnce(ctx context.Context, fn func(context.Context) error) error {
@@ -89,7 +89,7 @@ func waitForGarageAndNebula(
 // corresponds with the given alloc from the daemon config. This will panic if
 // no associated instance can be found.
 //
-// This assumes that mergeDaemonConfigIntoBootstrap has already been called.
+// This assumes that coalesceDaemonConfigAndBootstrap has already been called.
 func bootstrapGarageHostForAlloc(
     host bootstrap.Host,
     alloc daemon.ConfigStorageAllocation,
@@ -9,6 +9,7 @@ import (
     "io/fs"
     "os"
     "path/filepath"
+    "strconv"
 )
 
 const (
@@ -33,9 +34,13 @@ func nodeKeyPubPath(metaDirPath string) string {
     return filepath.Join(metaDirPath, "node_key.pub")
 }
 
-// LoadAllocID returns the peer ID (ie the public key) of the node at the given
+func nodeRPCPortPath(metaDirPath string) string {
+    return filepath.Join(metaDirPath, "cryptic-net", "rpc_port")
+}
+
+// loadAllocID returns the peer ID (ie the public key) of the node at the given
 // meta directory.
-func LoadAllocID(metaDirPath string) (string, error) {
+func loadAllocID(metaDirPath string) (string, error) {
     nodeKeyPubPath := nodeKeyPubPath(metaDirPath)
 
     pubKey, err := os.ReadFile(nodeKeyPubPath)
@@ -48,8 +53,13 @@ func LoadAllocID(metaDirPath string) (string, error) {
 
 // InitAlloc initializes the meta directory and keys for a particular
 // allocation, if it hasn't been done so already. It returns the peer ID (ie the
-// public key) in any case.
-func InitAlloc(metaDirPath string) (string, error) {
+// public key) and the rpc port in any case.
+func InitAlloc(metaDirPath string, initRPCPort int) (string, int, error) {
+
+    initDirFor := func(path string) error {
+        dir := filepath.Dir(path)
+        return os.MkdirAll(dir, 0750)
+    }
 
     var err error
 
@@ -72,34 +82,72 @@ func InitAlloc(metaDirPath string) (string, error) {
     nodeKeyPath := nodeKeyPath(metaDirPath)
     nodeKeyPubPath := nodeKeyPubPath(metaDirPath)
+    nodeRPCPortPath := nodeRPCPortPath(metaDirPath)
 
     nodeKeyPathExists := exists(nodeKeyPath)
     nodeKeyPubPathExists := exists(nodeKeyPubPath)
+    nodeRPCPortPathExists := exists(nodeRPCPortPath)
 
     if err != nil {
-        return "", err
+        return "", 0, err
 
     } else if nodeKeyPubPathExists != nodeKeyPathExists {
-        return "", fmt.Errorf("%q or %q exist without the other existing", nodeKeyPath, nodeKeyPubPath)
+        return "", 0, fmt.Errorf("%q or %q exist without the other existing", nodeKeyPath, nodeKeyPubPath)
 
-    } else if nodeKeyPathExists {
-        return LoadAllocID(metaDirPath)
     }
 
-    // node key hasn't been written, write it
+    var (
+        pubKeyStr string
+        rpcPort   int
+    )
 
-    if err := os.MkdirAll(metaDirPath, 0750); err != nil {
-        return "", fmt.Errorf("making directory %q: %w", metaDirPath, err)
+    if nodeKeyPathExists {
+
+        if pubKeyStr, err = loadAllocID(metaDirPath); err != nil {
+            return "", 0, fmt.Errorf("reading node public key file: %w", err)
         }
 
+    } else {
+
+        if err := initDirFor(nodeKeyPath); err != nil {
+            return "", 0, fmt.Errorf("creating directory for %q: %w", nodeKeyPath, err)
+        }
 
         pubKey, privKey := GeneratePeerKey()
 
         if err := os.WriteFile(nodeKeyPath, privKey, 0400); err != nil {
-            return "", fmt.Errorf("writing private key to %q: %w", nodeKeyPath, err)
+            return "", 0, fmt.Errorf("writing private key to %q: %w", nodeKeyPath, err)
 
         } else if err := os.WriteFile(nodeKeyPubPath, pubKey, 0440); err != nil {
-            return "", fmt.Errorf("writing public key to %q: %w", nodeKeyPubPath, err)
+            return "", 0, fmt.Errorf("writing public key to %q: %w", nodeKeyPubPath, err)
         }
 
-    return hex.EncodeToString(pubKey), nil
+        pubKeyStr = hex.EncodeToString(pubKey)
+    }
+
+    if nodeRPCPortPathExists {
+
+        if rpcPortStr, err := os.ReadFile(nodeRPCPortPath); err != nil {
+            return "", 0, fmt.Errorf("reading rpc port from %q: %w", nodeRPCPortPath, err)
+
+        } else if rpcPort, err = strconv.Atoi(string(rpcPortStr)); err != nil {
+            return "", 0, fmt.Errorf("parsing rpc port %q from %q: %w", rpcPortStr, nodeRPCPortPath, err)
+        }
+
+    } else {
+
+        if err := initDirFor(nodeRPCPortPath); err != nil {
+            return "", 0, fmt.Errorf("creating directory for %q: %w", nodeRPCPortPath, err)
+        }
+
+        rpcPortStr := strconv.Itoa(initRPCPort)
+
+        if err := os.WriteFile(nodeRPCPortPath, []byte(rpcPortStr), 0440); err != nil {
+            return "", 0, fmt.Errorf("writing rpc port %q to %q: %w", rpcPortStr, nodeRPCPortPath, err)
+        }
+
+        rpcPort = initRPCPort
+    }
+
+    return pubKeyStr, rpcPort, nil
 }
 