Compare commits


No commits in common. "8eb3b1d98fef19024b3aadc3386a4e3771e8e8be" and "070524f686133b63081bb6b24c05fd7e952f66b0" have entirely different histories.

15 changed files with 115 additions and 653 deletions

View File

@@ -6,7 +6,7 @@ import (
)
func (ctx subCmdCtx) getHosts() ([]bootstrap.Host, error) {
res, err := ctx.getDaemonRPC().GetHosts(ctx)
res, err := newDaemonRPCClient().GetHosts(ctx)
if err != nil {
return nil, fmt.Errorf("calling GetHosts: %w", err)
}

View File

@@ -38,6 +38,9 @@ var subCmdDaemon = subCmd{
return daecommon.CopyDefaultConfig(os.Stdout)
}
logger := ctx.logger()
defer logger.Close()
// TODO check that daemon is either running as root, or that the
// required linux capabilities are set.
// TODO check that the tun module is loaded (for nebula).
@@ -49,7 +52,7 @@ var subCmdDaemon = subCmd{
networkLoader, err := network.NewLoader(
ctx,
ctx.logger.WithNamespace("loader"),
logger.WithNamespace("loader"),
envBinDirPath,
nil,
)
@@ -57,22 +60,20 @@ var subCmdDaemon = subCmd{
return fmt.Errorf("instantiating network loader: %w", err)
}
daemonInst, err := daemon.New(
ctx, ctx.logger, networkLoader, daemonConfig,
)
daemonInst, err := daemon.New(ctx, logger, networkLoader, daemonConfig)
if err != nil {
return fmt.Errorf("starting daemon: %w", err)
}
defer func() {
ctx.logger.Info(ctx, "Stopping child processes")
logger.Info(ctx, "Stopping child processes")
if err := daemonInst.Shutdown(); err != nil {
ctx.logger.Error(ctx, "Shutting down daemon cleanly failed, there may be orphaned child processes", err)
logger.Error(ctx, "Shutting down daemon cleanly failed, there may be orphaned child processes", err)
}
ctx.logger.Info(ctx, "Child processes successfully stopped")
logger.Info(ctx, "Child processes successfully stopped")
}()
{
logger := ctx.logger.WithNamespace("http")
logger := logger.WithNamespace("http")
httpSrv, err := newHTTPServer(
ctx, logger, daemonInst,
)

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"io/fs"
"isle/daemon"
"isle/daemon/jsonrpc2"
"net"
"net/http"
"os"
@@ -16,6 +17,14 @@ import (
const daemonHTTPRPCPath = "/rpc/v0.json"
func newDaemonRPCClient() daemon.RPC {
return daemon.RPCFromClient(
jsonrpc2.NewUnixHTTPClient(
daemon.HTTPSocketPath(), daemonHTTPRPCPath,
),
)
}
func newHTTPServer(
ctx context.Context, logger *mlog.Logger, daemonInst *daemon.Daemon,
) (
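
The pattern introduced by this helper is used throughout the rest of the diff: each sub-command constructs its own client with newDaemonRPCClient and calls RPC methods on it directly, instead of going through the removed ctx.getDaemonRPC. Below is a minimal sketch of a caller, assuming it sits in the same package as the helper above; exampleListHosts itself is hypothetical and not part of the change.

package main

import (
	"context"
	"fmt"
)

// exampleListHosts illustrates the calling convention: build the client, pass
// the command's context through, and wrap any error with the RPC method name.
func exampleListHosts(ctx context.Context) error {
	hosts, err := newDaemonRPCClient().GetHosts(ctx)
	if err != nil {
		return fmt.Errorf("calling GetHosts: %w", err)
	}
	for _, h := range hosts {
		fmt.Printf("%+v\n", h)
	}
	return nil
}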

View File

@@ -51,7 +51,7 @@ var subCmdGarageMC = subCmd{
return fmt.Errorf("parsing flags: %w", err)
}
clientParams, err := ctx.getDaemonRPC().GetGarageClientParams(ctx)
clientParams, err := newDaemonRPCClient().GetGarageClientParams(ctx)
if err != nil {
return fmt.Errorf("calling GetGarageClientParams: %w", err)
}
@@ -113,16 +113,15 @@ var subCmdGarageMC = subCmd{
}
var subCmdGarageCLI = subCmd{
name: "cli",
descr: "Runs the garage binary, automatically configured to point to the garage sub-process of a running isle daemon",
passthroughArgs: true,
name: "cli",
descr: "Runs the garage binary, automatically configured to point to the garage sub-process of a running isle daemon",
do: func(ctx subCmdCtx) error {
ctx, err := ctx.withParsedFlags()
if err != nil {
return fmt.Errorf("parsing flags: %w", err)
}
clientParams, err := ctx.getDaemonRPC().GetGarageClientParams(ctx)
clientParams, err := newDaemonRPCClient().GetGarageClientParams(ctx)
if err != nil {
return fmt.Errorf("calling GetGarageClientParams: %w", err)
}
@@ -133,7 +132,7 @@ var subCmdGarageCLI = subCmd{
var (
binPath = binPath("garage")
args = append([]string{"garage"}, ctx.opts.args...)
args = append([]string{"garage"}, ctx.args...)
cliEnv = append(
os.Environ(),
"GARAGE_RPC_HOST="+clientParams.Node.RPCNodeAddr(),

View File

@@ -42,7 +42,7 @@ var subCmdHostCreate = subCmd{
return errors.New("--hostname is required")
}
res, err := ctx.getDaemonRPC().CreateHost(
res, err := newDaemonRPCClient().CreateHost(
ctx, hostName.V, network.CreateHostOpts{
IP: ip.V,
CanCreateHosts: *canCreateHosts,
@@ -120,7 +120,7 @@ var subCmdHostRemove = subCmd{
return errors.New("--hostname is required")
}
if err := ctx.getDaemonRPC().RemoveHost(ctx, hostName.V); err != nil {
if err := newDaemonRPCClient().RemoveHost(ctx, hostName.V); err != nil {
return fmt.Errorf("calling RemoveHost: %w", err)
}

View File

@@ -2,7 +2,6 @@ package main
import (
"context"
"fmt"
"os"
"os/signal"
"path/filepath"
@@ -29,32 +28,6 @@ func binPath(name string) string {
return filepath.Join(envBinDirPath, name)
}
var rootCmd = subCmd{
name: "isle",
descr: "All Isle sub-commands",
noNetwork: true,
do: func(ctx subCmdCtx) error {
return ctx.doSubCmd(
subCmdDaemon,
subCmdGarage,
subCmdHost,
subCmdNebula,
subCmdNetwork,
subCmdStorage,
subCmdVersion,
)
},
}
func doRootCmd(
ctx context.Context,
logger *mlog.Logger,
opts *subCmdCtxOpts,
) error {
subCmdCtx := newSubCmdCtx(ctx, logger, rootCmd, opts)
return subCmdCtx.subCmd.do(subCmdCtx)
}
func main() {
logger := mlog.NewLogger(nil)
defer logger.Close()
@@ -77,7 +50,19 @@ func main() {
logger.FatalString(ctx, "second signal received, force quitting, there may be zombie children left behind, good luck!")
}()
if err := doRootCmd(ctx, logger, nil); err != nil {
fmt.Fprintln(os.Stderr, err)
err := subCmdCtx{
Context: ctx,
args: os.Args[1:],
}.doSubCmd(
subCmdDaemon,
subCmdGarage,
subCmdHost,
subCmdNebula,
subCmdNetwork,
subCmdVersion,
)
if err != nil {
logger.Fatal(ctx, "error running command", err)
}
}
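
With rootCmd and doRootCmd gone, dispatch is now direct: main builds a subCmdCtx from os.Args[1:] and hands it to doSubCmd with the list of top-level sub-commands, each of which parses its own flags via withParsedFlags. As a sketch of the shape a command takes under this scheme, here is a purely hypothetical extra entry; to be reachable it would also need to be added to the doSubCmd list above.

package main

import "fmt"

// subCmdExample is illustrative only: a sub-command is a value with a name, a
// description, and a do func which first calls withParsedFlags to pick up the
// shared --network and --log-level flags.
var subCmdExample = subCmd{
	name:  "example",
	descr: "Prints a fixed message (illustrative only)",
	do: func(ctx subCmdCtx) error {
		if _, err := ctx.withParsedFlags(); err != nil {
			return fmt.Errorf("parsing flags: %w", err)
		}
		fmt.Println("example ran")
		return nil
	},
}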

View File

@@ -1,58 +0,0 @@
package main
import (
"bytes"
"context"
"isle/daemon"
"isle/toolkit"
"reflect"
"testing"
"dev.mediocregopher.com/mediocre-go-lib.git/mlog"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
)
type runHarness struct {
ctx context.Context
logger *mlog.Logger
daemonRPC *daemon.MockRPC
stdout *bytes.Buffer
}
func newRunHarness(t *testing.T) *runHarness {
t.Parallel()
var (
ctx = context.Background()
logger = toolkit.NewTestLogger(t)
daemonRPC = daemon.NewMockRPC(t)
stdout = new(bytes.Buffer)
)
return &runHarness{ctx, logger, daemonRPC, stdout}
}
func (h *runHarness) run(_ *testing.T, args ...string) error {
return doRootCmd(h.ctx, h.logger, &subCmdCtxOpts{
args: args,
daemonRPC: h.daemonRPC,
stdout: h.stdout,
})
}
func (h *runHarness) runAssertStdout(
t *testing.T,
want any,
args ...string,
) {
var (
gotType = reflect.ValueOf(want)
got = reflect.New(gotType.Type())
)
h.stdout.Reset()
assert.NoError(t, h.run(t, args...))
assert.NoError(t, yaml.Unmarshal(h.stdout.Bytes(), got.Interface()))
assert.Equal(t, want, got.Elem().Interface())
}

View File

@@ -43,7 +43,7 @@ var subCmdNebulaCreateCert = subCmd{
return fmt.Errorf("unmarshaling public key as PEM: %w", err)
}
res, err := ctx.getDaemonRPC().CreateNebulaCertificate(
res, err := newDaemonRPCClient().CreateNebulaCertificate(
ctx, hostName.V, hostPub,
)
if err != nil {
@@ -77,7 +77,7 @@ var subCmdNebulaShow = subCmd{
return nil, fmt.Errorf("getting hosts: %w", err)
}
caPublicCreds, err := ctx.getDaemonRPC().GetNebulaCAPublicCredentials(ctx)
caPublicCreds, err := newDaemonRPCClient().GetNebulaCAPublicCredentials(ctx)
if err != nil {
return nil, fmt.Errorf("calling GetNebulaCAPublicCredentials: %w", err)
}

View File

@@ -52,7 +52,7 @@ var subCmdNetworkCreate = subCmd{
return errors.New("--name, --domain, --ip-net, and --hostname are required")
}
err = ctx.getDaemonRPC().CreateNetwork(
err = newDaemonRPCClient().CreateNetwork(
ctx, *name, *domain, ipNet.V, hostName.V,
)
if err != nil {
@@ -88,7 +88,7 @@ var subCmdNetworkJoin = subCmd{
)
}
return ctx.getDaemonRPC().JoinNetwork(ctx, newBootstrap)
return newDaemonRPCClient().JoinNetwork(ctx, newBootstrap)
},
}
@@ -102,7 +102,7 @@ var subCmdNetworkList = subCmd{
return nil, fmt.Errorf("parsing flags: %w", err)
}
return ctx.getDaemonRPC().GetNetworks(ctx)
return newDaemonRPCClient().GetNetworks(ctx)
}),
}
@@ -115,7 +115,7 @@ var subCmdNetworkGetConfig = subCmd{
return nil, fmt.Errorf("parsing flags: %w", err)
}
return ctx.getDaemonRPC().GetConfig(ctx)
return newDaemonRPCClient().GetConfig(ctx)
}),
}

View File

@@ -1,54 +0,0 @@
package main
import (
"cmp"
"fmt"
"isle/daemon/daecommon"
"slices"
)
var subCmdStorageAllocationList = subCmd{
name: "list-allocation",
plural: "s",
descr: "Lists all storage which is currently allocated on this host",
do: doWithOutput(func(ctx subCmdCtx) (any, error) {
ctx, err := ctx.withParsedFlags()
if err != nil {
return nil, fmt.Errorf("parsing flags: %w", err)
}
config, err := ctx.getDaemonRPC().GetConfig(ctx)
if err != nil {
return nil, fmt.Errorf("getting network config: %w", err)
}
type alloc struct {
Index int `yaml:"index"`
daecommon.ConfigStorageAllocation `yaml:",inline"`
}
slices.SortFunc(
config.Storage.Allocations,
func(i, j daecommon.ConfigStorageAllocation) int {
return cmp.Compare(i.RPCPort, j.RPCPort)
},
)
allocs := make([]alloc, len(config.Storage.Allocations))
for i := range config.Storage.Allocations {
allocs[i] = alloc{i, config.Storage.Allocations[i]}
}
return allocs, nil
}),
}
var subCmdStorage = subCmd{
name: "storage",
descr: "Sub-commands having to do with configuration of storage on this host",
do: func(ctx subCmdCtx) error {
return ctx.doSubCmd(
subCmdStorageAllocationList,
)
},
}

View File

@@ -1,85 +0,0 @@
package main
import (
"context"
"isle/daemon/daecommon"
"isle/toolkit"
"testing"
)
func TestStorageAllocationList(t *testing.T) {
t.Parallel()
tests := []struct {
name string
allocs []daecommon.ConfigStorageAllocation
want any
}{
{
name: "empty",
allocs: nil,
want: []any{},
},
{
// results should get sorted according to RPCPort, with index
// reflecting that order.
name: "success",
allocs: []daecommon.ConfigStorageAllocation{
{
DataPath: "b",
MetaPath: "B",
Capacity: 2,
S3APIPort: 2000,
RPCPort: 2001,
AdminPort: 2002,
},
{
DataPath: "a",
MetaPath: "A",
Capacity: 1,
S3APIPort: 1000,
RPCPort: 1001,
AdminPort: 1002,
},
},
want: []map[string]any{
{
"index": 0,
"data_path": "a",
"meta_path": "A",
"capacity": 1,
"s3_api_port": 1000,
"rpc_port": 1001,
"admin_port": 1002,
},
{
"index": 1,
"data_path": "b",
"meta_path": "B",
"capacity": 2,
"s3_api_port": 2000,
"rpc_port": 2001,
"admin_port": 2002,
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
h = newRunHarness(t)
config daecommon.NetworkConfig
)
config.Storage.Allocations = test.allocs
h.daemonRPC.
On("GetConfig", toolkit.MockArg[context.Context]()).
Return(config, nil).
Once()
h.runAssertStdout(t, test.want, "storage", "list-allocations")
})
}
}

View File

@@ -4,9 +4,7 @@ import (
"context"
"errors"
"fmt"
"io"
"isle/daemon"
"isle/daemon/jsonrpc2"
"isle/jsonutil"
"os"
"strings"
@@ -16,6 +14,13 @@ import (
"gopkg.in/yaml.v3"
)
type flagSet struct {
*pflag.FlagSet
network string
logLevel logLevelFlag
}
type subCmd struct {
name string
descr string
@@ -34,53 +39,64 @@ type subCmd struct {
passthroughArgs bool
}
type subCmdCtxOpts struct {
args []string // command-line arguments, excluding the subCmd itself.
subCmdNames []string // names of subCmds so far, including this one
daemonRPC daemon.RPC
stdout io.Writer
}
func (o *subCmdCtxOpts) withDefaults() *subCmdCtxOpts {
if o == nil {
o = new(subCmdCtxOpts)
}
if o.args == nil {
o.args = os.Args[1:]
}
if o.stdout == nil {
o.stdout = os.Stdout
}
return o
}
// subCmdCtx contains all information available to a subCmd's do method.
type subCmdCtx struct {
context.Context
logger *mlog.Logger
subCmd subCmd // the subCmd itself
opts *subCmdCtxOpts
flags *pflag.FlagSet
subCmd subCmd // the subCmd itself
args []string // command-line arguments, excluding the subCmd itself.
subCmdNames []string // names of subCmds so far, including this one
flags *flagSet
}
func newSubCmdCtx(
ctx context.Context,
logger *mlog.Logger,
subCmd subCmd,
opts *subCmdCtxOpts,
args []string,
subCmdNames []string,
) subCmdCtx {
opts = opts.withDefaults()
flags := pflag.NewFlagSet(subCmd.name, pflag.ExitOnError)
flags.Usage = func() {
var passthroughStr string
if subCmd.passthroughArgs {
passthroughStr = " [--] [args...]"
}
fmt.Fprintf(
os.Stderr, "%s[-h|--help] [%s flags...]%s\n\n",
usagePrefix(subCmdNames), subCmd.name, passthroughStr,
)
fmt.Fprintf(os.Stderr, "%s FLAGS:\n\n", strings.ToUpper(subCmd.name))
fmt.Fprintln(os.Stderr, flags.FlagUsages())
os.Stderr.Sync()
os.Exit(2)
}
fs := &flagSet{
FlagSet: flags,
logLevel: logLevelFlag{mlog.LevelInfo},
}
if !subCmd.noNetwork {
fs.FlagSet.StringVar(
&fs.network, "network", "", "Which network to perform the command against, if more than one is joined. Can be an ID, name, or domain.",
)
}
fs.FlagSet.VarP(
&fs.logLevel,
"log-level", "l",
"Maximum log level to output. Can be DEBUG, CHILD, INFO, WARN, ERROR, or FATAL.",
)
return subCmdCtx{
Context: ctx,
logger: logger,
subCmd: subCmd,
opts: opts,
flags: pflag.NewFlagSet(subCmd.name, pflag.ExitOnError),
Context: ctx,
subCmd: subCmd,
args: args,
subCmdNames: subCmdNames,
flags: fs,
}
}
@@ -93,34 +109,13 @@ func usagePrefix(subCmdNames []string) string {
return fmt.Sprintf("\nUSAGE: %s %s", os.Args[0], subCmdNamesStr)
}
func (ctx subCmdCtx) getDaemonRPC() daemon.RPC {
if ctx.opts.daemonRPC == nil {
ctx.opts.daemonRPC = daemon.RPCFromClient(
jsonrpc2.NewUnixHTTPClient(
daemon.HTTPSocketPath(), daemonHTTPRPCPath,
),
)
}
return ctx.opts.daemonRPC
func (ctx subCmdCtx) logger() *mlog.Logger {
return mlog.NewLogger(&mlog.LoggerOpts{
MaxLevel: ctx.flags.logLevel.Int(),
})
}
func (ctx subCmdCtx) withParsedFlags() (subCmdCtx, error) {
logLevel := logLevelFlag{mlog.LevelInfo}
ctx.flags.VarP(
&logLevel,
"log-level", "l",
"Maximum log level to output. Can be DEBUG, CHILD, INFO, WARN, ERROR, or FATAL.",
)
var network string
if !ctx.subCmd.noNetwork {
ctx.flags.StringVar(
&network,
"network", "",
"Which network to perform the command against, if more than one is joined. Can be an ID, name, or domain.",
)
}
ctx.flags.VisitAll(func(f *pflag.Flag) {
if f.Shorthand == "h" {
panic(fmt.Sprintf("flag %+v has reserved shorthand `-h`", f))
@@ -130,32 +125,11 @@ func (ctx subCmdCtx) withParsedFlags() (subCmdCtx, error) {
}
})
ctx.flags.Usage = func() {
var passthroughStr string
if ctx.subCmd.passthroughArgs {
passthroughStr = " [--] [args...]"
}
fmt.Fprintf(
os.Stderr, "%s[-h|--help] [%s flags...]%s\n\n",
usagePrefix(ctx.opts.subCmdNames), ctx.subCmd.name, passthroughStr,
)
fmt.Fprintf(
os.Stderr, "%s FLAGS:\n\n", strings.ToUpper(ctx.subCmd.name),
)
fmt.Fprintln(os.Stderr, ctx.flags.FlagUsages())
os.Stderr.Sync()
os.Exit(2)
}
if err := ctx.flags.Parse(ctx.opts.args); err != nil {
if err := ctx.flags.Parse(ctx.args); err != nil {
return ctx, err
}
ctx.Context = daemon.WithNetwork(ctx.Context, network)
ctx.logger = ctx.logger.WithMaxLevel(logLevel.Int())
ctx.Context = daemon.WithNetwork(ctx.Context, ctx.flags.network)
return ctx, nil
}
@@ -168,7 +142,7 @@ func (ctx subCmdCtx) doSubCmd(subCmds ...subCmd) error {
fmt.Fprintf(
os.Stderr,
"%s<subCmd> [-h|--help] [sub-command flags...]\n",
usagePrefix(ctx.opts.subCmdNames),
usagePrefix(ctx.subCmdNames),
)
fmt.Fprintf(os.Stderr, "\nSUB-COMMANDS:\n\n")
@@ -186,7 +160,7 @@ func (ctx subCmdCtx) doSubCmd(subCmds ...subCmd) error {
os.Exit(2)
}
args := ctx.opts.args
args := ctx.args
if len(args) == 0 {
printUsageExit("")
@@ -207,12 +181,11 @@ func (ctx subCmdCtx) doSubCmd(subCmds ...subCmd) error {
printUsageExit(subCmdName)
}
nextSubCmdCtxOpts := *ctx.opts
nextSubCmdCtxOpts.args = args
nextSubCmdCtxOpts.subCmdNames = append(ctx.opts.subCmdNames, subCmdName)
nextSubCmdCtx := newSubCmdCtx(
ctx.Context, ctx.logger, subCmd, &nextSubCmdCtxOpts,
ctx.Context,
subCmd,
args,
append(ctx.subCmdNames, subCmdName),
)
if err := subCmd.do(nextSubCmdCtx); err != nil {
@@ -256,9 +229,9 @@ func doWithOutput(fn func(subCmdCtx) (any, error)) func(subCmdCtx) error {
switch outputFormat.V {
case "json":
return jsonutil.WriteIndented(ctx.opts.stdout, res)
return jsonutil.WriteIndented(os.Stdout, res)
case "yaml":
return yaml.NewEncoder(ctx.opts.stdout).Encode(res)
return yaml.NewEncoder(os.Stdout).Encode(res)
default:
panic(fmt.Sprintf("unexpected outputFormat %q", outputFormat))
}
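
To summarize the new plumbing in this file: the shared flags now live on the flagSet type and are registered once in newSubCmdCtx (--network unless noNetwork is set, plus --log-level/-l), withParsedFlags only parses ctx.args and applies them, and doWithOutput writes the returned value to os.Stdout as JSON or YAML. A hypothetical output-producing sub-command under this plumbing is sketched below; the command itself is not part of the change, while the RPC call and config field are the same ones used elsewhere in this diff.

package main

import "fmt"

// subCmdExampleSummary is illustrative only. withParsedFlags parses the shared
// flags registered in newSubCmdCtx and attaches the selected network to the
// context; doWithOutput then renders whatever the inner func returns.
var subCmdExampleSummary = subCmd{
	name:  "summary",
	descr: "Illustrative only; not part of this change",
	do: doWithOutput(func(ctx subCmdCtx) (any, error) {
		ctx, err := ctx.withParsedFlags()
		if err != nil {
			return nil, fmt.Errorf("parsing flags: %w", err)
		}
		config, err := newDaemonRPCClient().GetConfig(ctx)
		if err != nil {
			return nil, fmt.Errorf("getting network config: %w", err)
		}
		return map[string]int{
			"storage_allocations": len(config.Storage.Allocations),
		}, nil
	}),
}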

View File

@@ -105,7 +105,8 @@ type RPC interface {
// existing host, given the public key for that host. This is currently
// mostly useful for creating certs for mobile devices.
//
// TODO Specific error for if required secret isn't present.
// TODO replace this with CreateHostBootstrap, and the
// CreateNebulaCertificate RPC method can just pull cert out of that.
//
// Errors:
// - ErrHostNotFound

View File

@@ -1,5 +1,3 @@
//go:generate mockery --name RPC --inpackage --filename rpc_mock.go
package daemon
import (

View File

@@ -1,307 +0,0 @@
// Code generated by mockery v2.43.1. DO NOT EDIT.
package daemon
import (
context "context"
bootstrap "isle/bootstrap"
daecommon "isle/daemon/daecommon"
mock "github.com/stretchr/testify/mock"
nebula "isle/nebula"
network "isle/daemon/network"
)
// MockRPC is an autogenerated mock type for the RPC type
type MockRPC struct {
mock.Mock
}
// CreateHost provides a mock function with given fields: _a0, _a1, _a2
func (_m *MockRPC) CreateHost(_a0 context.Context, _a1 nebula.HostName, _a2 network.CreateHostOpts) (network.JoiningBootstrap, error) {
ret := _m.Called(_a0, _a1, _a2)
if len(ret) == 0 {
panic("no return value specified for CreateHost")
}
var r0 network.JoiningBootstrap
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, nebula.HostName, network.CreateHostOpts) (network.JoiningBootstrap, error)); ok {
return rf(_a0, _a1, _a2)
}
if rf, ok := ret.Get(0).(func(context.Context, nebula.HostName, network.CreateHostOpts) network.JoiningBootstrap); ok {
r0 = rf(_a0, _a1, _a2)
} else {
r0 = ret.Get(0).(network.JoiningBootstrap)
}
if rf, ok := ret.Get(1).(func(context.Context, nebula.HostName, network.CreateHostOpts) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateNebulaCertificate provides a mock function with given fields: _a0, _a1, _a2
func (_m *MockRPC) CreateNebulaCertificate(_a0 context.Context, _a1 nebula.HostName, _a2 nebula.EncryptingPublicKey) (nebula.Certificate, error) {
ret := _m.Called(_a0, _a1, _a2)
if len(ret) == 0 {
panic("no return value specified for CreateNebulaCertificate")
}
var r0 nebula.Certificate
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, nebula.HostName, nebula.EncryptingPublicKey) (nebula.Certificate, error)); ok {
return rf(_a0, _a1, _a2)
}
if rf, ok := ret.Get(0).(func(context.Context, nebula.HostName, nebula.EncryptingPublicKey) nebula.Certificate); ok {
r0 = rf(_a0, _a1, _a2)
} else {
r0 = ret.Get(0).(nebula.Certificate)
}
if rf, ok := ret.Get(1).(func(context.Context, nebula.HostName, nebula.EncryptingPublicKey) error); ok {
r1 = rf(_a0, _a1, _a2)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// CreateNetwork provides a mock function with given fields: ctx, name, domain, ipNet, hostName
func (_m *MockRPC) CreateNetwork(ctx context.Context, name string, domain string, ipNet nebula.IPNet, hostName nebula.HostName) error {
ret := _m.Called(ctx, name, domain, ipNet, hostName)
if len(ret) == 0 {
panic("no return value specified for CreateNetwork")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, nebula.IPNet, nebula.HostName) error); ok {
r0 = rf(ctx, name, domain, ipNet, hostName)
} else {
r0 = ret.Error(0)
}
return r0
}
// GetConfig provides a mock function with given fields: _a0
func (_m *MockRPC) GetConfig(_a0 context.Context) (daecommon.NetworkConfig, error) {
ret := _m.Called(_a0)
if len(ret) == 0 {
panic("no return value specified for GetConfig")
}
var r0 daecommon.NetworkConfig
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (daecommon.NetworkConfig, error)); ok {
return rf(_a0)
}
if rf, ok := ret.Get(0).(func(context.Context) daecommon.NetworkConfig); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(daecommon.NetworkConfig)
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetGarageClientParams provides a mock function with given fields: _a0
func (_m *MockRPC) GetGarageClientParams(_a0 context.Context) (network.GarageClientParams, error) {
ret := _m.Called(_a0)
if len(ret) == 0 {
panic("no return value specified for GetGarageClientParams")
}
var r0 network.GarageClientParams
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (network.GarageClientParams, error)); ok {
return rf(_a0)
}
if rf, ok := ret.Get(0).(func(context.Context) network.GarageClientParams); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(network.GarageClientParams)
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetHosts provides a mock function with given fields: _a0
func (_m *MockRPC) GetHosts(_a0 context.Context) ([]bootstrap.Host, error) {
ret := _m.Called(_a0)
if len(ret) == 0 {
panic("no return value specified for GetHosts")
}
var r0 []bootstrap.Host
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) ([]bootstrap.Host, error)); ok {
return rf(_a0)
}
if rf, ok := ret.Get(0).(func(context.Context) []bootstrap.Host); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]bootstrap.Host)
}
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetNebulaCAPublicCredentials provides a mock function with given fields: _a0
func (_m *MockRPC) GetNebulaCAPublicCredentials(_a0 context.Context) (nebula.CAPublicCredentials, error) {
ret := _m.Called(_a0)
if len(ret) == 0 {
panic("no return value specified for GetNebulaCAPublicCredentials")
}
var r0 nebula.CAPublicCredentials
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (nebula.CAPublicCredentials, error)); ok {
return rf(_a0)
}
if rf, ok := ret.Get(0).(func(context.Context) nebula.CAPublicCredentials); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(nebula.CAPublicCredentials)
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// GetNetworks provides a mock function with given fields: _a0
func (_m *MockRPC) GetNetworks(_a0 context.Context) ([]bootstrap.CreationParams, error) {
ret := _m.Called(_a0)
if len(ret) == 0 {
panic("no return value specified for GetNetworks")
}
var r0 []bootstrap.CreationParams
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) ([]bootstrap.CreationParams, error)); ok {
return rf(_a0)
}
if rf, ok := ret.Get(0).(func(context.Context) []bootstrap.CreationParams); ok {
r0 = rf(_a0)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]bootstrap.CreationParams)
}
}
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(_a0)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// JoinNetwork provides a mock function with given fields: _a0, _a1
func (_m *MockRPC) JoinNetwork(_a0 context.Context, _a1 network.JoiningBootstrap) error {
ret := _m.Called(_a0, _a1)
if len(ret) == 0 {
panic("no return value specified for JoinNetwork")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, network.JoiningBootstrap) error); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Error(0)
}
return r0
}
// RemoveHost provides a mock function with given fields: ctx, hostName
func (_m *MockRPC) RemoveHost(ctx context.Context, hostName nebula.HostName) error {
ret := _m.Called(ctx, hostName)
if len(ret) == 0 {
panic("no return value specified for RemoveHost")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, nebula.HostName) error); ok {
r0 = rf(ctx, hostName)
} else {
r0 = ret.Error(0)
}
return r0
}
// SetConfig provides a mock function with given fields: _a0, _a1
func (_m *MockRPC) SetConfig(_a0 context.Context, _a1 daecommon.NetworkConfig) error {
ret := _m.Called(_a0, _a1)
if len(ret) == 0 {
panic("no return value specified for SetConfig")
}
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, daecommon.NetworkConfig) error); ok {
r0 = rf(_a0, _a1)
} else {
r0 = ret.Error(0)
}
return r0
}
// NewMockRPC creates a new instance of MockRPC. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewMockRPC(t interface {
mock.TestingT
Cleanup(func())
}) *MockRPC {
mock := &MockRPC{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}
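
For context on the file removed above: MockRPC was consumed by the now-deleted CLI tests through testify's expectation API, with toolkit.MockArg used as a typed argument matcher. A minimal sketch of that usage pattern, mirroring the removed storage test; the test function itself is hypothetical.

package daemon_test

import (
	"context"
	"testing"

	"isle/daemon"
	"isle/daemon/daecommon"
	"isle/toolkit"
)

// TestExampleMockUsage is illustrative only; it shows how the deleted CLI
// tests stubbed the daemon RPC before this change.
func TestExampleMockUsage(t *testing.T) {
	// NewMockRPC registers cleanup and expectation assertions on t.
	rpc := daemon.NewMockRPC(t)

	var config daecommon.NetworkConfig
	rpc.
		On("GetConfig", toolkit.MockArg[context.Context]()).
		Return(config, nil).
		Once()

	if _, err := rpc.GetConfig(context.Background()); err != nil {
		t.Fatalf("GetConfig: %v", err)
	}
}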