Compare commits


3 Commits

15 changed files with 653 additions and 115 deletions

View File

@@ -6,7 +6,7 @@ import (
 )
 
 func (ctx subCmdCtx) getHosts() ([]bootstrap.Host, error) {
-	res, err := newDaemonRPCClient().GetHosts(ctx)
+	res, err := ctx.getDaemonRPC().GetHosts(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("calling GetHosts: %w", err)
 	}

View File

@@ -38,9 +38,6 @@ var subCmdDaemon = subCmd{
 			return daecommon.CopyDefaultConfig(os.Stdout)
 		}
 
-		logger := ctx.logger()
-		defer logger.Close()
-
 		// TODO check that daemon is either running as root, or that the
 		// required linux capabilities are set.
 		// TODO check that the tun module is loaded (for nebula).
@@ -52,7 +49,7 @@ var subCmdDaemon = subCmd{
 		networkLoader, err := network.NewLoader(
 			ctx,
-			logger.WithNamespace("loader"),
+			ctx.logger.WithNamespace("loader"),
 			envBinDirPath,
 			nil,
 		)
@@ -60,20 +57,22 @@ var subCmdDaemon = subCmd{
 			return fmt.Errorf("instantiating network loader: %w", err)
 		}
 
-		daemonInst, err := daemon.New(ctx, logger, networkLoader, daemonConfig)
+		daemonInst, err := daemon.New(
+			ctx, ctx.logger, networkLoader, daemonConfig,
+		)
 		if err != nil {
 			return fmt.Errorf("starting daemon: %w", err)
 		}
 		defer func() {
-			logger.Info(ctx, "Stopping child processes")
+			ctx.logger.Info(ctx, "Stopping child processes")
 			if err := daemonInst.Shutdown(); err != nil {
-				logger.Error(ctx, "Shutting down daemon cleanly failed, there may be orphaned child processes", err)
+				ctx.logger.Error(ctx, "Shutting down daemon cleanly failed, there may be orphaned child processes", err)
 			}
-			logger.Info(ctx, "Child processes successfully stopped")
+			ctx.logger.Info(ctx, "Child processes successfully stopped")
 		}()
 
 		{
-			logger := logger.WithNamespace("http")
+			logger := ctx.logger.WithNamespace("http")
 			httpSrv, err := newHTTPServer(
 				ctx, logger, daemonInst,
 			)

View File

@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"io/fs"
 	"isle/daemon"
-	"isle/daemon/jsonrpc2"
 	"net"
 	"net/http"
 	"os"
@@ -17,14 +16,6 @@ import (
 const daemonHTTPRPCPath = "/rpc/v0.json"
 
-func newDaemonRPCClient() daemon.RPC {
-	return daemon.RPCFromClient(
-		jsonrpc2.NewUnixHTTPClient(
-			daemon.HTTPSocketPath(), daemonHTTPRPCPath,
-		),
-	)
-}
-
 func newHTTPServer(
 	ctx context.Context, logger *mlog.Logger, daemonInst *daemon.Daemon,
 ) (

View File

@@ -51,7 +51,7 @@ var subCmdGarageMC = subCmd{
 			return fmt.Errorf("parsing flags: %w", err)
 		}
 
-		clientParams, err := newDaemonRPCClient().GetGarageClientParams(ctx)
+		clientParams, err := ctx.getDaemonRPC().GetGarageClientParams(ctx)
 		if err != nil {
 			return fmt.Errorf("calling GetGarageClientParams: %w", err)
 		}
@@ -115,13 +115,14 @@ var subCmdGarageMC = subCmd{
 var subCmdGarageCLI = subCmd{
 	name: "cli",
 	descr: "Runs the garage binary, automatically configured to point to the garage sub-process of a running isle daemon",
+	passthroughArgs: true,
 	do: func(ctx subCmdCtx) error {
 		ctx, err := ctx.withParsedFlags()
 		if err != nil {
 			return fmt.Errorf("parsing flags: %w", err)
 		}
 
-		clientParams, err := newDaemonRPCClient().GetGarageClientParams(ctx)
+		clientParams, err := ctx.getDaemonRPC().GetGarageClientParams(ctx)
 		if err != nil {
 			return fmt.Errorf("calling GetGarageClientParams: %w", err)
 		}
@@ -132,7 +133,7 @@ var subCmdGarageCLI = subCmd{
 		var (
 			binPath = binPath("garage")
-			args    = append([]string{"garage"}, ctx.args...)
+			args    = append([]string{"garage"}, ctx.opts.args...)
 			cliEnv  = append(
 				os.Environ(),
 				"GARAGE_RPC_HOST="+clientParams.Node.RPCNodeAddr(),

View File

@@ -42,7 +42,7 @@ var subCmdHostCreate = subCmd{
 			return errors.New("--hostname is required")
 		}
 
-		res, err := newDaemonRPCClient().CreateHost(
+		res, err := ctx.getDaemonRPC().CreateHost(
 			ctx, hostName.V, network.CreateHostOpts{
 				IP:             ip.V,
 				CanCreateHosts: *canCreateHosts,
@@ -120,7 +120,7 @@ var subCmdHostRemove = subCmd{
 			return errors.New("--hostname is required")
 		}
 
-		if err := newDaemonRPCClient().RemoveHost(ctx, hostName.V); err != nil {
+		if err := ctx.getDaemonRPC().RemoveHost(ctx, hostName.V); err != nil {
 			return fmt.Errorf("calling RemoveHost: %w", err)
 		}

View File

@@ -2,6 +2,7 @@ package main
 import (
 	"context"
+	"fmt"
 	"os"
 	"os/signal"
 	"path/filepath"
@@ -28,6 +29,32 @@ func binPath(name string) string {
 	return filepath.Join(envBinDirPath, name)
 }
 
+var rootCmd = subCmd{
+	name:      "isle",
+	descr:     "All Isle sub-commands",
+	noNetwork: true,
+	do: func(ctx subCmdCtx) error {
+		return ctx.doSubCmd(
+			subCmdDaemon,
+			subCmdGarage,
+			subCmdHost,
+			subCmdNebula,
+			subCmdNetwork,
+			subCmdStorage,
+			subCmdVersion,
+		)
+	},
+}
+
+func doRootCmd(
+	ctx context.Context,
+	logger *mlog.Logger,
+	opts *subCmdCtxOpts,
+) error {
+	subCmdCtx := newSubCmdCtx(ctx, logger, rootCmd, opts)
+	return subCmdCtx.subCmd.do(subCmdCtx)
+}
+
 func main() {
 	logger := mlog.NewLogger(nil)
 	defer logger.Close()
@@ -50,19 +77,7 @@ func main() {
 		logger.FatalString(ctx, "second signal received, force quitting, there may be zombie children left behind, good luck!")
 	}()
 
-	err := subCmdCtx{
-		Context: ctx,
-		args:    os.Args[1:],
-	}.doSubCmd(
-		subCmdDaemon,
-		subCmdGarage,
-		subCmdHost,
-		subCmdNebula,
-		subCmdNetwork,
-		subCmdVersion,
-	)
-
-	if err != nil {
-		logger.Fatal(ctx, "error running command", err)
+	if err := doRootCmd(ctx, logger, nil); err != nil {
+		fmt.Fprintln(os.Stderr, err)
 	}
 }
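
The point of the main.go change is the new injection seam: doRootCmd takes a *subCmdCtxOpts, so a caller can supply its own arguments, daemon.RPC implementation, and output writer instead of os.Args[1:], the real unix-socket RPC client, and os.Stdout (which is what passing nil resolves to via withDefaults). A minimal sketch of exercising that seam directly, as a hypothetical test that is not part of this diff (it assumes a package main test file with the obvious imports; only names introduced elsewhere in this diff are used):

func TestDoRootCmdSeam_sketch(t *testing.T) {
	var (
		logger = toolkit.NewTestLogger(t)
		rpc    = daemon.NewMockRPC(t)
		stdout = new(bytes.Buffer)
	)

	// The "storage list-allocations" path only needs GetConfig from the daemon.
	rpc.On("GetConfig", toolkit.MockArg[context.Context]()).
		Return(daecommon.NetworkConfig{}, nil).
		Once()

	err := doRootCmd(context.Background(), logger, &subCmdCtxOpts{
		args:      []string{"storage", "list-allocations"},
		daemonRPC: rpc,    // injected in place of the unix-socket RPC client
		stdout:    stdout, // command output lands here instead of os.Stdout
	})
	assert.NoError(t, err)
}

The runHarness added in the new test file below packages this same pattern for reuse.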

View File

@@ -0,0 +1,58 @@
package main

import (
	"bytes"
	"context"
	"isle/daemon"
	"isle/toolkit"
	"reflect"
	"testing"

	"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
	"github.com/stretchr/testify/assert"
	"gopkg.in/yaml.v3"
)

type runHarness struct {
	ctx       context.Context
	logger    *mlog.Logger
	daemonRPC *daemon.MockRPC
	stdout    *bytes.Buffer
}

func newRunHarness(t *testing.T) *runHarness {
	t.Parallel()

	var (
		ctx       = context.Background()
		logger    = toolkit.NewTestLogger(t)
		daemonRPC = daemon.NewMockRPC(t)
		stdout    = new(bytes.Buffer)
	)

	return &runHarness{ctx, logger, daemonRPC, stdout}
}

func (h *runHarness) run(_ *testing.T, args ...string) error {
	return doRootCmd(h.ctx, h.logger, &subCmdCtxOpts{
		args:      args,
		daemonRPC: h.daemonRPC,
		stdout:    h.stdout,
	})
}

func (h *runHarness) runAssertStdout(
	t *testing.T,
	want any,
	args ...string,
) {
	var (
		gotType = reflect.ValueOf(want)
		got     = reflect.New(gotType.Type())
	)

	h.stdout.Reset()
	assert.NoError(t, h.run(t, args...))
	assert.NoError(t, yaml.Unmarshal(h.stdout.Bytes(), got.Interface()))
	assert.Equal(t, want, got.Elem().Interface())
}

View File

@@ -43,7 +43,7 @@ var subCmdNebulaCreateCert = subCmd{
 			return fmt.Errorf("unmarshaling public key as PEM: %w", err)
 		}
 
-		res, err := newDaemonRPCClient().CreateNebulaCertificate(
+		res, err := ctx.getDaemonRPC().CreateNebulaCertificate(
 			ctx, hostName.V, hostPub,
 		)
 		if err != nil {
@@ -77,7 +77,7 @@ var subCmdNebulaShow = subCmd{
 			return nil, fmt.Errorf("getting hosts: %w", err)
 		}
 
-		caPublicCreds, err := newDaemonRPCClient().GetNebulaCAPublicCredentials(ctx)
+		caPublicCreds, err := ctx.getDaemonRPC().GetNebulaCAPublicCredentials(ctx)
 		if err != nil {
 			return nil, fmt.Errorf("calling GetNebulaCAPublicCredentials: %w", err)
 		}

View File

@@ -52,7 +52,7 @@ var subCmdNetworkCreate = subCmd{
 			return errors.New("--name, --domain, --ip-net, and --hostname are required")
 		}
 
-		err = newDaemonRPCClient().CreateNetwork(
+		err = ctx.getDaemonRPC().CreateNetwork(
 			ctx, *name, *domain, ipNet.V, hostName.V,
 		)
 		if err != nil {
@@ -88,7 +88,7 @@ var subCmdNetworkJoin = subCmd{
 			)
 		}
 
-		return newDaemonRPCClient().JoinNetwork(ctx, newBootstrap)
+		return ctx.getDaemonRPC().JoinNetwork(ctx, newBootstrap)
 	},
 }
@@ -102,7 +102,7 @@ var subCmdNetworkList = subCmd{
 			return nil, fmt.Errorf("parsing flags: %w", err)
 		}
 
-		return newDaemonRPCClient().GetNetworks(ctx)
+		return ctx.getDaemonRPC().GetNetworks(ctx)
 	}),
 }
@@ -115,7 +115,7 @@ var subCmdNetworkGetConfig = subCmd{
 			return nil, fmt.Errorf("parsing flags: %w", err)
 		}
 
-		return newDaemonRPCClient().GetConfig(ctx)
+		return ctx.getDaemonRPC().GetConfig(ctx)
 	}),
 }

View File

@@ -0,0 +1,54 @@
package main

import (
	"cmp"
	"fmt"
	"isle/daemon/daecommon"
	"slices"
)

var subCmdStorageAllocationList = subCmd{
	name:   "list-allocation",
	plural: "s",
	descr:  "Lists all storage which is currently allocated on this host",
	do: doWithOutput(func(ctx subCmdCtx) (any, error) {
		ctx, err := ctx.withParsedFlags()
		if err != nil {
			return nil, fmt.Errorf("parsing flags: %w", err)
		}

		config, err := ctx.getDaemonRPC().GetConfig(ctx)
		if err != nil {
			return nil, fmt.Errorf("getting network config: %w", err)
		}

		type alloc struct {
			Index                             int `yaml:"index"`
			daecommon.ConfigStorageAllocation `yaml:",inline"`
		}

		slices.SortFunc(
			config.Storage.Allocations,
			func(i, j daecommon.ConfigStorageAllocation) int {
				return cmp.Compare(i.RPCPort, j.RPCPort)
			},
		)

		allocs := make([]alloc, len(config.Storage.Allocations))
		for i := range config.Storage.Allocations {
			allocs[i] = alloc{i, config.Storage.Allocations[i]}
		}

		return allocs, nil
	}),
}

var subCmdStorage = subCmd{
	name:  "storage",
	descr: "Sub-commands having to do with configuration of storage on this host",
	do: func(ctx subCmdCtx) error {
		return ctx.doSubCmd(
			subCmdStorageAllocationList,
		)
	},
}

View File

@@ -0,0 +1,85 @@
package main

import (
	"context"
	"isle/daemon/daecommon"
	"isle/toolkit"
	"testing"
)

func TestStorageAllocationList(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name   string
		allocs []daecommon.ConfigStorageAllocation
		want   any
	}{
		{
			name:   "empty",
			allocs: nil,
			want:   []any{},
		},
		{
			// results should get sorted according to RPCPort, with index
			// reflecting that order.
			name: "success",
			allocs: []daecommon.ConfigStorageAllocation{
				{
					DataPath:  "b",
					MetaPath:  "B",
					Capacity:  2,
					S3APIPort: 2000,
					RPCPort:   2001,
					AdminPort: 2002,
				},
				{
					DataPath:  "a",
					MetaPath:  "A",
					Capacity:  1,
					S3APIPort: 1000,
					RPCPort:   1001,
					AdminPort: 1002,
				},
			},
			want: []map[string]any{
				{
					"index":       0,
					"data_path":   "a",
					"meta_path":   "A",
					"capacity":    1,
					"s3_api_port": 1000,
					"rpc_port":    1001,
					"admin_port":  1002,
				},
				{
					"index":       1,
					"data_path":   "b",
					"meta_path":   "B",
					"capacity":    2,
					"s3_api_port": 2000,
					"rpc_port":    2001,
					"admin_port":  2002,
				},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var (
				h      = newRunHarness(t)
				config daecommon.NetworkConfig
			)

			config.Storage.Allocations = test.allocs

			h.daemonRPC.
				On("GetConfig", toolkit.MockArg[context.Context]()).
				Return(config, nil).
				Once()

			h.runAssertStdout(t, test.want, "storage", "list-allocations")
		})
	}
}
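
A natural extension of this table test, sketched here as a hypothetical case that is not part of this diff, is the RPC error path: when the mocked GetConfig call fails, the wrapped error should come back out of doRootCmd through the harness instead of anything being written to stdout. It reuses the harness above and additionally assumes the errors and testify assert packages are imported:

func TestStorageAllocationList_rpcError_sketch(t *testing.T) {
	h := newRunHarness(t)

	h.daemonRPC.
		On("GetConfig", toolkit.MockArg[context.Context]()).
		Return(daecommon.NetworkConfig{}, errors.New("daemon unavailable")).
		Once()

	// The list-allocation command wraps this failure as
	// "getting network config: ...", and that error is expected to surface
	// through doRootCmd rather than being swallowed.
	err := h.run(t, "storage", "list-allocations")
	assert.Error(t, err)
	assert.Empty(t, h.stdout.String())
}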

View File

@@ -4,7 +4,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"io"
 	"isle/daemon"
+	"isle/daemon/jsonrpc2"
 	"isle/jsonutil"
 	"os"
 	"strings"
@@ -14,13 +16,6 @@ import (
 	"gopkg.in/yaml.v3"
 )
 
-type flagSet struct {
-	*pflag.FlagSet
-
-	network  string
-	logLevel logLevelFlag
-}
-
 type subCmd struct {
 	name  string
 	descr string
@@ -39,64 +34,53 @@ type subCmd struct {
 	passthroughArgs bool
 }
 
+type subCmdCtxOpts struct {
+	args        []string // command-line arguments, excluding the subCmd itself.
+	subCmdNames []string // names of subCmds so far, including this one
+	daemonRPC   daemon.RPC
+	stdout      io.Writer
+}
+
+func (o *subCmdCtxOpts) withDefaults() *subCmdCtxOpts {
+	if o == nil {
+		o = new(subCmdCtxOpts)
+	}
+
+	if o.args == nil {
+		o.args = os.Args[1:]
+	}
+
+	if o.stdout == nil {
+		o.stdout = os.Stdout
+	}
+
+	return o
+}
+
 // subCmdCtx contains all information available to a subCmd's do method.
 type subCmdCtx struct {
 	context.Context
 
+	logger *mlog.Logger
 	subCmd subCmd // the subCmd itself
-	args   []string // command-line arguments, excluding the subCmd itself.
-	subCmdNames []string // names of subCmds so far, including this one
+	opts   *subCmdCtxOpts
 
-	flags *flagSet
+	flags *pflag.FlagSet
 }
 
 func newSubCmdCtx(
 	ctx context.Context,
+	logger *mlog.Logger,
 	subCmd subCmd,
-	args []string,
-	subCmdNames []string,
+	opts *subCmdCtxOpts,
 ) subCmdCtx {
-	flags := pflag.NewFlagSet(subCmd.name, pflag.ExitOnError)
-	flags.Usage = func() {
-		var passthroughStr string
-		if subCmd.passthroughArgs {
-			passthroughStr = " [--] [args...]"
-		}
-
-		fmt.Fprintf(
-			os.Stderr, "%s[-h|--help] [%s flags...]%s\n\n",
-			usagePrefix(subCmdNames), subCmd.name, passthroughStr,
-		)
-		fmt.Fprintf(os.Stderr, "%s FLAGS:\n\n", strings.ToUpper(subCmd.name))
-		fmt.Fprintln(os.Stderr, flags.FlagUsages())
-
-		os.Stderr.Sync()
-		os.Exit(2)
-	}
-
-	fs := &flagSet{
-		FlagSet:  flags,
-		logLevel: logLevelFlag{mlog.LevelInfo},
-	}
-
-	if !subCmd.noNetwork {
-		fs.FlagSet.StringVar(
-			&fs.network, "network", "", "Which network to perform the command against, if more than one is joined. Can be an ID, name, or domain.",
-		)
-	}
-
-	fs.FlagSet.VarP(
-		&fs.logLevel,
-		"log-level", "l",
-		"Maximum log level to output. Can be DEBUG, CHILD, INFO, WARN, ERROR, or FATAL.",
-	)
+	opts = opts.withDefaults()
 
 	return subCmdCtx{
 		Context: ctx,
+		logger:  logger,
 		subCmd:  subCmd,
-		args:    args,
-		subCmdNames: subCmdNames,
-		flags:   fs,
+		opts:    opts,
+		flags:   pflag.NewFlagSet(subCmd.name, pflag.ExitOnError),
 	}
 }
@@ -109,13 +93,34 @@ func usagePrefix(subCmdNames []string) string {
 	return fmt.Sprintf("\nUSAGE: %s %s", os.Args[0], subCmdNamesStr)
 }
 
-func (ctx subCmdCtx) logger() *mlog.Logger {
-	return mlog.NewLogger(&mlog.LoggerOpts{
-		MaxLevel: ctx.flags.logLevel.Int(),
-	})
+func (ctx subCmdCtx) getDaemonRPC() daemon.RPC {
+	if ctx.opts.daemonRPC == nil {
+		ctx.opts.daemonRPC = daemon.RPCFromClient(
+			jsonrpc2.NewUnixHTTPClient(
+				daemon.HTTPSocketPath(), daemonHTTPRPCPath,
+			),
+		)
+	}
+	return ctx.opts.daemonRPC
 }
 
 func (ctx subCmdCtx) withParsedFlags() (subCmdCtx, error) {
+	logLevel := logLevelFlag{mlog.LevelInfo}
+	ctx.flags.VarP(
+		&logLevel,
+		"log-level", "l",
+		"Maximum log level to output. Can be DEBUG, CHILD, INFO, WARN, ERROR, or FATAL.",
+	)
+
+	var network string
+	if !ctx.subCmd.noNetwork {
+		ctx.flags.StringVar(
+			&network,
+			"network", "",
+			"Which network to perform the command against, if more than one is joined. Can be an ID, name, or domain.",
+		)
+	}
+
 	ctx.flags.VisitAll(func(f *pflag.Flag) {
 		if f.Shorthand == "h" {
 			panic(fmt.Sprintf("flag %+v has reserved shorthand `-h`", f))
@@ -125,11 +130,32 @@ func (ctx subCmdCtx) withParsedFlags() (subCmdCtx, error) {
 		}
 	})
 
-	if err := ctx.flags.Parse(ctx.args); err != nil {
+	ctx.flags.Usage = func() {
+		var passthroughStr string
+		if ctx.subCmd.passthroughArgs {
+			passthroughStr = " [--] [args...]"
+		}
+
+		fmt.Fprintf(
+			os.Stderr, "%s[-h|--help] [%s flags...]%s\n\n",
+			usagePrefix(ctx.opts.subCmdNames), ctx.subCmd.name, passthroughStr,
+		)
+		fmt.Fprintf(
+			os.Stderr, "%s FLAGS:\n\n", strings.ToUpper(ctx.subCmd.name),
+		)
+		fmt.Fprintln(os.Stderr, ctx.flags.FlagUsages())
+
+		os.Stderr.Sync()
+		os.Exit(2)
+	}
+
+	if err := ctx.flags.Parse(ctx.opts.args); err != nil {
 		return ctx, err
 	}
 
-	ctx.Context = daemon.WithNetwork(ctx.Context, ctx.flags.network)
+	ctx.Context = daemon.WithNetwork(ctx.Context, network)
+	ctx.logger = ctx.logger.WithMaxLevel(logLevel.Int())
+
 	return ctx, nil
 }
@@ -142,7 +168,7 @@ func (ctx subCmdCtx) doSubCmd(subCmds ...subCmd) error {
 		fmt.Fprintf(
 			os.Stderr,
 			"%s<subCmd> [-h|--help] [sub-command flags...]\n",
-			usagePrefix(ctx.subCmdNames),
+			usagePrefix(ctx.opts.subCmdNames),
 		)
 		fmt.Fprintf(os.Stderr, "\nSUB-COMMANDS:\n\n")
@@ -160,7 +186,7 @@ func (ctx subCmdCtx) doSubCmd(subCmds ...subCmd) error {
 		os.Exit(2)
 	}
 
-	args := ctx.args
+	args := ctx.opts.args
 
 	if len(args) == 0 {
 		printUsageExit("")
@@ -181,11 +207,12 @@ func (ctx subCmdCtx) doSubCmd(subCmds ...subCmd) error {
 		printUsageExit(subCmdName)
 	}
 
+	nextSubCmdCtxOpts := *ctx.opts
+	nextSubCmdCtxOpts.args = args
+	nextSubCmdCtxOpts.subCmdNames = append(ctx.opts.subCmdNames, subCmdName)
+
 	nextSubCmdCtx := newSubCmdCtx(
-		ctx.Context,
-		subCmd,
-		args,
-		append(ctx.subCmdNames, subCmdName),
+		ctx.Context, ctx.logger, subCmd, &nextSubCmdCtxOpts,
 	)
 
 	if err := subCmd.do(nextSubCmdCtx); err != nil {
@@ -229,9 +256,9 @@ func doWithOutput(fn func(subCmdCtx) (any, error)) func(subCmdCtx) error {
 		switch outputFormat.V {
 		case "json":
-			return jsonutil.WriteIndented(os.Stdout, res)
+			return jsonutil.WriteIndented(ctx.opts.stdout, res)
 		case "yaml":
-			return yaml.NewEncoder(os.Stdout).Encode(res)
+			return yaml.NewEncoder(ctx.opts.stdout).Encode(res)
 		default:
 			panic(fmt.Sprintf("unexpected outputFormat %q", outputFormat))
 		}

View File

@@ -105,8 +105,7 @@ type RPC interface {
 	// existing host, given the public key for that host. This is currently
 	// mostly useful for creating certs for mobile devices.
 	//
-	// TODO replace this with CreateHostBootstrap, and the
-	// CreateNebulaCertificate RPC method can just pull cert out of that.
+	// TODO Specific error for if required secret isn't present.
 	//
 	// Errors:
 	// - ErrHostNotFound

View File

@@ -1,3 +1,5 @@
+//go:generate mockery --name RPC --inpackage --filename rpc_mock.go
+
 package daemon
 
 import (

go/daemon/rpc_mock.go (new file, 307 lines)
View File

@@ -0,0 +1,307 @@
// Code generated by mockery v2.43.1. DO NOT EDIT.

package daemon

import (
	context "context"

	bootstrap "isle/bootstrap"

	daecommon "isle/daemon/daecommon"

	mock "github.com/stretchr/testify/mock"

	nebula "isle/nebula"

	network "isle/daemon/network"
)

// MockRPC is an autogenerated mock type for the RPC type
type MockRPC struct {
	mock.Mock
}

// CreateHost provides a mock function with given fields: _a0, _a1, _a2
func (_m *MockRPC) CreateHost(_a0 context.Context, _a1 nebula.HostName, _a2 network.CreateHostOpts) (network.JoiningBootstrap, error) {
	ret := _m.Called(_a0, _a1, _a2)

	if len(ret) == 0 {
		panic("no return value specified for CreateHost")
	}

	var r0 network.JoiningBootstrap
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, nebula.HostName, network.CreateHostOpts) (network.JoiningBootstrap, error)); ok {
		return rf(_a0, _a1, _a2)
	}
	if rf, ok := ret.Get(0).(func(context.Context, nebula.HostName, network.CreateHostOpts) network.JoiningBootstrap); ok {
		r0 = rf(_a0, _a1, _a2)
	} else {
		r0 = ret.Get(0).(network.JoiningBootstrap)
	}

	if rf, ok := ret.Get(1).(func(context.Context, nebula.HostName, network.CreateHostOpts) error); ok {
		r1 = rf(_a0, _a1, _a2)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// CreateNebulaCertificate provides a mock function with given fields: _a0, _a1, _a2
func (_m *MockRPC) CreateNebulaCertificate(_a0 context.Context, _a1 nebula.HostName, _a2 nebula.EncryptingPublicKey) (nebula.Certificate, error) {
	ret := _m.Called(_a0, _a1, _a2)

	if len(ret) == 0 {
		panic("no return value specified for CreateNebulaCertificate")
	}

	var r0 nebula.Certificate
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context, nebula.HostName, nebula.EncryptingPublicKey) (nebula.Certificate, error)); ok {
		return rf(_a0, _a1, _a2)
	}
	if rf, ok := ret.Get(0).(func(context.Context, nebula.HostName, nebula.EncryptingPublicKey) nebula.Certificate); ok {
		r0 = rf(_a0, _a1, _a2)
	} else {
		r0 = ret.Get(0).(nebula.Certificate)
	}

	if rf, ok := ret.Get(1).(func(context.Context, nebula.HostName, nebula.EncryptingPublicKey) error); ok {
		r1 = rf(_a0, _a1, _a2)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CreateNetwork provides a mock function with given fields: ctx, name, domain, ipNet, hostName
func (_m *MockRPC) CreateNetwork(ctx context.Context, name string, domain string, ipNet nebula.IPNet, hostName nebula.HostName) error {
	ret := _m.Called(ctx, name, domain, ipNet, hostName)

	if len(ret) == 0 {
		panic("no return value specified for CreateNetwork")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, string, string, nebula.IPNet, nebula.HostName) error); ok {
		r0 = rf(ctx, name, domain, ipNet, hostName)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// GetConfig provides a mock function with given fields: _a0
func (_m *MockRPC) GetConfig(_a0 context.Context) (daecommon.NetworkConfig, error) {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for GetConfig")
	}

	var r0 daecommon.NetworkConfig
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context) (daecommon.NetworkConfig, error)); ok {
		return rf(_a0)
	}
	if rf, ok := ret.Get(0).(func(context.Context) daecommon.NetworkConfig); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(daecommon.NetworkConfig)
	}

	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// GetGarageClientParams provides a mock function with given fields: _a0
func (_m *MockRPC) GetGarageClientParams(_a0 context.Context) (network.GarageClientParams, error) {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for GetGarageClientParams")
	}

	var r0 network.GarageClientParams
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context) (network.GarageClientParams, error)); ok {
		return rf(_a0)
	}
	if rf, ok := ret.Get(0).(func(context.Context) network.GarageClientParams); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(network.GarageClientParams)
	}

	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetHosts provides a mock function with given fields: _a0
func (_m *MockRPC) GetHosts(_a0 context.Context) ([]bootstrap.Host, error) {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for GetHosts")
	}

	var r0 []bootstrap.Host
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context) ([]bootstrap.Host, error)); ok {
		return rf(_a0)
	}
	if rf, ok := ret.Get(0).(func(context.Context) []bootstrap.Host); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]bootstrap.Host)
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// GetNebulaCAPublicCredentials provides a mock function with given fields: _a0
func (_m *MockRPC) GetNebulaCAPublicCredentials(_a0 context.Context) (nebula.CAPublicCredentials, error) {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for GetNebulaCAPublicCredentials")
	}

	var r0 nebula.CAPublicCredentials
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context) (nebula.CAPublicCredentials, error)); ok {
		return rf(_a0)
	}
	if rf, ok := ret.Get(0).(func(context.Context) nebula.CAPublicCredentials); ok {
		r0 = rf(_a0)
	} else {
		r0 = ret.Get(0).(nebula.CAPublicCredentials)
	}

	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// GetNetworks provides a mock function with given fields: _a0
func (_m *MockRPC) GetNetworks(_a0 context.Context) ([]bootstrap.CreationParams, error) {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for GetNetworks")
	}

	var r0 []bootstrap.CreationParams
	var r1 error
	if rf, ok := ret.Get(0).(func(context.Context) ([]bootstrap.CreationParams, error)); ok {
		return rf(_a0)
	}
	if rf, ok := ret.Get(0).(func(context.Context) []bootstrap.CreationParams); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]bootstrap.CreationParams)
		}
	}

	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// JoinNetwork provides a mock function with given fields: _a0, _a1
func (_m *MockRPC) JoinNetwork(_a0 context.Context, _a1 network.JoiningBootstrap) error {
	ret := _m.Called(_a0, _a1)

	if len(ret) == 0 {
		panic("no return value specified for JoinNetwork")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, network.JoiningBootstrap) error); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// RemoveHost provides a mock function with given fields: ctx, hostName
func (_m *MockRPC) RemoveHost(ctx context.Context, hostName nebula.HostName) error {
	ret := _m.Called(ctx, hostName)

	if len(ret) == 0 {
		panic("no return value specified for RemoveHost")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, nebula.HostName) error); ok {
		r0 = rf(ctx, hostName)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// SetConfig provides a mock function with given fields: _a0, _a1
func (_m *MockRPC) SetConfig(_a0 context.Context, _a1 daecommon.NetworkConfig) error {
	ret := _m.Called(_a0, _a1)

	if len(ret) == 0 {
		panic("no return value specified for SetConfig")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, daecommon.NetworkConfig) error); ok {
		r0 = rf(_a0, _a1)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}
// NewMockRPC creates a new instance of MockRPC. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockRPC(t interface {
	mock.TestingT
	Cleanup(func())
}) *MockRPC {
	mock := &MockRPC{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
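
As the rf type assertions above suggest, the generated mock accepts either static values or a single function value passed to Return, in which case the return values are computed from the call's arguments at call time. A hypothetical in-package test illustrating both forms (not part of this diff; it assumes the context, testing, and testify mock imports plus the named imports used in the mock file):

func TestMockRPCReturnForms_sketch(t *testing.T) {
	m := NewMockRPC(t)
	ctx := context.Background()

	// Static return values.
	m.On("GetHosts", mock.Anything).Return([]bootstrap.Host{}, nil)

	// Function-valued return, resolved by the rf type assertions above.
	m.On("GetConfig", mock.Anything).Return(
		func(context.Context) (daecommon.NetworkConfig, error) {
			return daecommon.NetworkConfig{}, nil
		},
	)

	// Both expectations are exercised so the AssertExpectations cleanup
	// registered by NewMockRPC passes.
	_, _ = m.GetHosts(ctx)
	_, _ = m.GetConfig(ctx)
}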