Use the term 'role' or 'node' rather than 'peer' in the context of garage
parent 734406d4bb
commit 53a06af9ba
@@ -4,26 +4,26 @@ import (
     "isle/garage"
 )

-// GaragePeers returns a Peer for each known garage instance in the network.
-func (b Bootstrap) GaragePeers() []garage.RemotePeer {
-    var peers []garage.RemotePeer
+// GarageNodes returns a Node for each known garage instance in the network.
+func (b Bootstrap) GarageNodes() []garage.RemoteNode {
+    var nodes []garage.RemoteNode
     for _, host := range b.Hosts {
-        peers = append(peers, host.GaragePeers()...)
+        nodes = append(nodes, host.GarageNodes()...)
     }
-    return peers
+    return nodes
 }

-// ChooseGaragePeer returns a Peer for a garage instance from the network. It
-// will prefer a garage instance on this particular host, if there is one, but
-// will otherwise return a random endpoint.
-func (b Bootstrap) ChooseGaragePeer() garage.RemotePeer {
+// ChooseGarageNode returns a RemoteNode for a garage instance from the network.
+// It will prefer a garage instance on this particular host, if there is one,
+// but will otherwise return a random endpoint.
+func (b Bootstrap) ChooseGarageNode() garage.RemoteNode {
     thisHost := b.ThisHost()
     if len(thisHost.Garage.Instances) > 0 {
-        return thisHost.GaragePeers()[0]
+        return thisHost.GarageNodes()[0]
     }

-    for _, peer := range b.GaragePeers() {
-        return peer
+    for _, node := range b.GarageNodes() {
+        return node
     }

     panic("no garage instances configured")
@@ -93,17 +93,17 @@ func (h Host) IP() netip.Addr {
     return addr
 }

-// GaragePeers returns a RemotePeer for each garage instance advertised by this
+// GarageNodes returns a RemoteNode for each garage instance advertised by this
 // Host.
-func (h Host) GaragePeers() []garage.RemotePeer {
-    var peers []garage.RemotePeer
+func (h Host) GarageNodes() []garage.RemoteNode {
+    var nodes []garage.RemoteNode
     for _, instance := range h.Garage.Instances {
-        peers = append(peers, garage.RemotePeer{
+        nodes = append(nodes, garage.RemoteNode{
             ID: instance.ID,
             IP: h.IP().String(),
             RPCPort: instance.RPCPort,
             S3APIPort: instance.S3APIPort,
         })
     }
-    return peers
+    return nodes
 }
@@ -56,7 +56,7 @@ var subCmdGarageMC = subCmd{
         return fmt.Errorf("calling GetGarageClientParams: %w", err)
     }

-    s3APIAddr := clientParams.Peer.S3APIAddr()
+    s3APIAddr := clientParams.Node.S3APIAddr()

     if *keyID == "" {
         *keyID = clientParams.GlobalBucketS3APICredentials.ID
@@ -135,7 +135,7 @@ var subCmdGarageCLI = subCmd{
         args = append([]string{"garage"}, ctx.args...)
         cliEnv = append(
             os.Environ(),
-            "GARAGE_RPC_HOST="+clientParams.Peer.RPCPeerAddr(),
+            "GARAGE_RPC_HOST="+clientParams.Node.RPCNodeAddr(),
             "GARAGE_RPC_SECRET="+clientParams.RPCSecret,
         )
     )
@@ -76,8 +76,8 @@ func garageWriteChildConfig(
         thisHost = hostBootstrap.ThisHost()
         id = daecommon.BootstrapGarageHostForAlloc(thisHost, alloc).ID

-        peer = garage.LocalPeer{
-            RemotePeer: garage.RemotePeer{
+        node = garage.LocalNode{
+            RemoteNode: garage.RemoteNode{
                 ID: id,
                 IP: thisHost.IP().String(),
                 RPCPort: alloc.RPCPort,
@@ -102,8 +102,8 @@ func garageWriteChildConfig(
             RPCSecret: rpcSecret,
             AdminToken: adminToken,

-            LocalPeer: peer,
-            BootstrapPeers: hostBootstrap.GaragePeers(),
+            LocalNode: node,
+            BootstrapPeers: hostBootstrap.GarageNodes(),
         },
     )

@@ -46,7 +46,7 @@ func getGarageClientParams(
     }

     return GarageClientParams{
-        Peer: currBootstrap.ChooseGaragePeer(),
+        Node: currBootstrap.ChooseGarageNode(),
         GlobalBucketS3APICredentials: creds,
         RPCSecret: rpcSecret,
     }, nil
@@ -88,8 +88,8 @@ func garageApplyLayout(
         )
         hostName = currHost.Name
         allocs = networkConfig.Storage.Allocations
-        peers = make([]garage.PeerLayout, len(allocs))
-        peerIDs = map[string]struct{}{}
+        roles = make([]garage.Role, len(allocs))
+        roleIDs = map[string]struct{}{}

         idsToRemove = make([]string, 0, len(prevHost.Garage.Instances))
     )
@@ -98,14 +98,14 @@ func garageApplyLayout(

     for i, alloc := range allocs {
         id := daecommon.BootstrapGarageHostForAlloc(currHost, alloc).ID
-        peerIDs[id] = struct{}{}
+        roleIDs[id] = struct{}{}

         zone := string(hostName)
         if alloc.Zone != "" {
             zone = alloc.Zone
         }

-        peers[i] = garage.PeerLayout{
+        roles[i] = garage.Role{
             ID: id,
             Capacity: alloc.Capacity * 1_000_000_000,
             Zone: zone,
@@ -114,12 +114,12 @@ func garageApplyLayout(
     }

     for _, prevInst := range prevHost.Garage.Instances {
-        if _, ok := peerIDs[prevInst.ID]; !ok {
+        if _, ok := roleIDs[prevInst.ID]; !ok {
             idsToRemove = append(idsToRemove, prevInst.ID)
         }
     }

-    return adminClient.ApplyLayout(ctx, peers, idsToRemove)
+    return adminClient.ApplyLayout(ctx, roles, idsToRemove)
 }

 func garageInitializeGlobalBucket(
@@ -31,7 +31,7 @@ import (
 // GarageClientParams contains all the data needed to instantiate garage
 // clients.
 type GarageClientParams struct {
-    Peer garage.RemotePeer
+    Node garage.RemoteNode
     GlobalBucketS3APICredentials garage.S3APICredentials

     // RPCSecret may be empty, if the secret is not available on the host.
@@ -44,7 +44,7 @@ type GarageClientParams struct {
 // the global bucket.
 func (p GarageClientParams) GlobalBucketS3APIClient() *garage.S3APIClient {
     var (
-        addr = p.Peer.S3APIAddr()
+        addr = p.Node.S3APIAddr()
         creds = p.GlobalBucketS3APICredentials
     )
     return garage.NewS3APIClient(addr, creds)
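(Not part of the diff: a minimal sketch of how a caller sees the renamed field, assuming a populated GarageClientParams value named params.)

    // Sketch only; "params" is hypothetical.
    s3Client := params.GlobalBucketS3APIClient() // S3 client pointed at params.Node.S3APIAddr()
    rpcHost := params.Node.RPCNodeAddr()         // "id@ip:port", e.g. the value given to GARAGE_RPC_HOST
    _, _ = s3Client, rpcHost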
@@ -82,19 +82,19 @@ func TestNetwork_GetConfig(t *testing.T) {
 }

 func TestNetwork_SetConfig(t *testing.T) {
-    allocsToPeerLayouts := func(
+    allocsToRoles := func(
         hostName nebula.HostName, allocs []bootstrap.GarageHostInstance,
-    ) []garage.PeerLayout {
-        peers := make([]garage.PeerLayout, len(allocs))
+    ) []garage.Role {
+        roles := make([]garage.Role, len(allocs))
         for i := range allocs {
-            peers[i] = garage.PeerLayout{
+            roles[i] = garage.Role{
                 ID: allocs[i].ID,
                 Capacity: 1_000_000_000,
                 Zone: string(hostName),
                 Tags: []string{},
             }
         }
-        return peers
+        return roles
     }

     t.Run("add storage alloc", func(t *testing.T) {
@@ -142,10 +142,10 @@ func TestNetwork_SetConfig(t *testing.T) {
         assert.Equal(t, newHostsByName, storedBootstrap.Hosts)

         t.Log("Checking that garage layout contains the new allocation")
-        expPeers := allocsToPeerLayouts(network.hostName, allocs)
+        expRoles := allocsToRoles(network.hostName, allocs)
         layout, err := network.garageAdminClient(t).GetLayout(h.ctx)
         assert.NoError(t, err)
-        assert.ElementsMatch(t, expPeers, layout.Peers)
+        assert.ElementsMatch(t, expRoles, layout.Roles)
     })

     t.Run("remove storage alloc", func(t *testing.T) {
@@ -183,10 +183,10 @@ func TestNetwork_SetConfig(t *testing.T) {
         assert.Equal(t, newHostsByName, storedBootstrap.Hosts)

         t.Log("Checking that garage layout contains the new allocation")
-        expPeers := allocsToPeerLayouts(network.hostName, allocs)
+        expRoles := allocsToRoles(network.hostName, allocs)
         layout, err := network.garageAdminClient(t).GetLayout(h.ctx)
         assert.NoError(t, err)
-        assert.ElementsMatch(t, expPeers, layout.Peers)
+        assert.ElementsMatch(t, expRoles, layout.Roles)
     })

     // TODO a host having allocs but removing all of them
@@ -283,9 +283,9 @@ func (c *AdminClient) GrantBucketPermissions(
     })
 }

-// PeerLayout describes the properties of a garage peer in the context of the
-// layout of the cluster.
-type PeerLayout struct {
+// Role describes a node's role in the garage cluster, i.e. what storage it is
+// providing.
+type Role struct {
     ID string `json:"id"`
     Capacity int `json:"capacity"` // Gb (SI units)
     Zone string `json:"zone"`
@@ -294,7 +294,7 @@ type PeerLayout struct {

 // ClusterLayout describes the layout of the cluster as a whole.
 type ClusterLayout struct {
-    Peers []PeerLayout `json:"roles"`
+    Roles []Role `json:"roles"`
 }

 // GetLayout returns the currently applied ClusterLayout.
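(Not part of the diff: a sketch of building the renamed layout types, with made-up values; the Tags field is taken from the test code further up.)

    // Sketch only; the ID and zone are invented.
    role := garage.Role{
        ID:       "0123456789abcdef",
        Capacity: 100 * 1_000_000_000, // matches garageApplyLayout's alloc.Capacity * 1_000_000_000
        Zone:     "my-host",
        Tags:     []string{},
    }
    layout := garage.ClusterLayout{Roles: []garage.Role{role}}
    _ = layout // marshals under the "roles" JSON key shown above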
@@ -306,42 +306,34 @@ func (c *AdminClient) GetLayout(ctx context.Context) (ClusterLayout, error) {
 }

 // ApplyLayout modifies the layout of the garage cluster. Only layout of the
-// given peers will be modified/created/removed, other peers are not affected.
+// given roles will be modified/created/removed, other roles are not affected.
 func (c *AdminClient) ApplyLayout(
-    ctx context.Context, addModifyPeers []PeerLayout, removePeerIDs []string,
+    ctx context.Context, addModifyRoles []Role, removeRoleIDs []string,
 ) error {
-    type removePeer struct {
+    type removeRole struct {
         ID string `json:"id"`
         Remove bool `json:"remove"`
     }

-    peers := make([]any, 0, len(addModifyPeers)+len(removePeerIDs))
-    for _, p := range addModifyPeers {
-        peers = append(peers, p)
-    }
-    for _, id := range removePeerIDs {
-        peers = append(peers, removePeer{ID: id, Remove: true})
-    }
-
-    {
-        // https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html#tag/Layout/operation/ApplyLayout
-        err := c.do(ctx, nil, "POST", "/v1/layout", peers)
-        if err != nil {
-            return fmt.Errorf("staging layout changes: %w", err)
-        }
+    roles := make([]any, 0, len(addModifyRoles)+len(removeRoleIDs))
+    for _, p := range addModifyRoles {
+        roles = append(roles, p)
     }
+    for _, id := range removeRoleIDs {
+        roles = append(roles, removeRole{ID: id, Remove: true})
+    }

     // https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html#tag/Layout/operation/GetLayout
     var clusterLayout struct {
         Version int `json:"version"`
-        StagedRoleChanges []PeerLayout `json:"stagedRoleChanges"`
+        StagedRoleChanges []Role `json:"stagedRoleChanges"`
     }

-    if err := c.do(ctx, &clusterLayout, "GET", "/v1/layout", nil); err != nil {
-        return fmt.Errorf("retrieving staged layout change: %w", err)
-    }
-
-    if len(clusterLayout.StagedRoleChanges) == 0 {
+    // https://garagehq.deuxfleurs.fr/api/garage-admin-v1.html#tag/Layout/operation/ApplyLayout
+    err := c.do(ctx, &clusterLayout, "POST", "/v1/layout", roles)
+    if err != nil {
+        return fmt.Errorf("staging layout changes: %w", err)
+    } else if len(clusterLayout.StagedRoleChanges) == 0 {
         return nil
     }

@@ -352,7 +344,7 @@ func (c *AdminClient) ApplyLayout(
         Version: clusterLayout.Version + 1,
     }

-    err := c.do(ctx, nil, "POST", "/v1/layout/apply", applyClusterLayout)
+    err = c.do(ctx, nil, "POST", "/v1/layout/apply", applyClusterLayout)
     if err != nil {
         return fmt.Errorf("applying new layout (new version:%d): %w", applyClusterLayout.Version, err)
     }
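(Not part of the diff: a minimal caller sketch for the renamed ApplyLayout signature; adminClient, ctx, and the IDs are hypothetical.)

    // Sketch only.
    roles := []garage.Role{{ID: "0123456789abcdef", Capacity: 100 * 1_000_000_000, Zone: "my-host", Tags: []string{}}}
    removeIDs := []string{"fedcba9876543210"}
    if err := adminClient.ApplyLayout(ctx, roles, removeIDs); err != nil {
        return err // wraps either "staging layout changes" or "applying new layout (new version:N)"
    }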
@@ -23,8 +23,8 @@ type GarageTomlData struct {
     RPCSecret string
     AdminToken string

-    garage.LocalPeer
-    BootstrapPeers []garage.RemotePeer
+    garage.LocalNode
+    BootstrapPeers []garage.RemoteNode
 }

 var garageTomlTpl = template.Must(template.New("").Parse(`
@@ -38,7 +38,7 @@ rpc_bind_addr = "{{ .RPCAddr }}"
 rpc_public_addr = "{{ .RPCAddr }}"

 bootstrap_peers = [{{- range .BootstrapPeers }}
-    "{{ .RPCPeerAddr }}",
+    "{{ .RPCNodeAddr }}",
 {{ end -}}]

 [s3_api]
@@ -66,7 +66,7 @@ func WriteGarageTomlFile(
 ) (
     bool, error,
 ) {
-    slices.SortFunc(data.BootstrapPeers, func(i, j garage.RemotePeer) int {
+    slices.SortFunc(data.BootstrapPeers, func(i, j garage.RemoteNode) int {
         return cmp.Or(
             cmp.Compare(i.IP, j.IP),
             cmp.Compare(i.RPCPort, j.RPCPort),
@@ -6,39 +6,39 @@ import (
     "strconv"
 )

-// RemotePeer describes all information necessary to connect to a given garage
+// RemoteNode describes all information necessary to connect to a given garage
 // node.
-type RemotePeer struct {
+type RemoteNode struct {
     ID string
     IP string
     RPCPort int
     S3APIPort int
 }

-// LocalPeer describes the configuration of a local garage instance.
-type LocalPeer struct {
-    RemotePeer
+// LocalNode describes the configuration of a local garage instance.
+type LocalNode struct {
+    RemoteNode

     AdminPort int
 }

-// RPCAddr returns the address of the peer's RPC port.
-func (p RemotePeer) RPCAddr() string {
+// RPCAddr returns the address of the node's RPC port.
+func (p RemoteNode) RPCAddr() string {
     return net.JoinHostPort(p.IP, strconv.Itoa(p.RPCPort))
 }

-// RPCPeerAddr returns the full peer address (e.g. "id@ip:port") of the garage
+// RPCNodeAddr returns the full node address (e.g. "id@ip:port") of the garage
 // node for use in communicating over RPC.
-func (p RemotePeer) RPCPeerAddr() string {
+func (p RemoteNode) RPCNodeAddr() string {
     return fmt.Sprintf("%s@%s", p.ID, p.RPCAddr())
 }

-// S3APIAddr returns the address of the peer's S3 API port.
-func (p RemotePeer) S3APIAddr() string {
+// S3APIAddr returns the address of the node's S3 API port.
+func (p RemoteNode) S3APIAddr() string {
     return net.JoinHostPort(p.IP, strconv.Itoa(p.S3APIPort))
 }

-// AdminAddr returns the address of the peer's S3 API port.
-func (p LocalPeer) AdminAddr() string {
+// AdminAddr returns the address of the node's admin API port.
+func (p LocalNode) AdminAddr() string {
     return net.JoinHostPort(p.IP, strconv.Itoa(p.AdminPort))
 }
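(Not part of the diff: a small sketch of how the renamed address helpers compose; the ID, IP, and ports are made up.)

    n := garage.RemoteNode{ID: "0123456789abcdef", IP: "10.85.0.2", RPCPort: 3900, S3APIPort: 3901}
    _ = n.RPCAddr()     // "10.85.0.2:3900"
    _ = n.RPCNodeAddr() // "0123456789abcdef@10.85.0.2:3900"
    _ = n.S3APIAddr()   // "10.85.0.2:3901"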