Move most TODOs into task files, introduce busiless to explore them

parent 1c1b05db06
commit be7844f658
@@ -18,7 +18,6 @@ Documentation for users:
 * [Joining a Network](./user/join-a-network.md)
 * [Using DNS](./user/using-dns.md) (advanced)
-* Restic example (TODO)
 
 ### Operator Docs
 
@@ -1,5 +1,3 @@
-# TODO this is currently broken, garage is using builtins.currentSystem for some
-# reason.
 {
   description = "isle provides the foundation for an autonomous community cloud infrastructure";
 
@@ -97,10 +97,6 @@ type Bootstrap struct {
 }
 
 // New initializes and returns a new Bootstrap file for a new host.
-//
-// TODO in the resulting bootstrap only include this host and hosts which are
-// necessary for connecting to nebula/garage. Remember to immediately re-poll
-// garage for the full hosts list during network joining.
 func New(
     caCreds nebula.CACredentials,
     adminCreationParams CreationParams,
@@ -10,10 +10,6 @@ import (
     "isle/daemon/network"
 )
 
-// TODO it would be good to have an `isle daemon config-check` kind of command,
-// which could be run prior to a systemd service restart to make sure we don't
-// restart the service into a configuration that will definitely fail.
-
 var subCmdDaemon = subCmd{
     name:  "daemon",
     descr: "Runs the isle daemon (Default if no sub-command given)",
@@ -38,10 +34,6 @@ var subCmdDaemon = subCmd{
             return daecommon.CopyDefaultConfig(os.Stdout)
         }
 
-        // TODO check that daemon is either running as root, or that the
-        // required linux capabilities are set.
-        // TODO check that the tun module is loaded (for nebula).
-
         daemonConfig, err := daecommon.LoadConfig(*daemonConfigPath)
         if err != nil {
             return fmt.Errorf("loading daemon config: %w", err)
@@ -93,8 +93,6 @@ func New(
         return nil, fmt.Errorf("starting dnsmasq: %w", err)
     }
 
-    // TODO wait for dnsmasq to come up
-
     if c.garageProcs, err = garagePmuxProcs(
         ctx,
         c.logger,
@@ -121,8 +119,6 @@ func New(
     return c, nil
 }
 
-// TODO block until process has been confirmed to have come back up
-// successfully.
 func (c *Children) reloadDNSMasq(
     ctx context.Context,
     networkConfig daecommon.NetworkConfig,
@@ -188,8 +184,6 @@ func (c *Children) reloadGarage(
         )
     )
 
-    // TODO it's possible that the config changed, but only the bootstrap
-    // peers, in which case we don't need to restart the node.
     childConfigPath, changed, err := garageWriteChildConfig(
         ctx,
         c.logger,
@@ -49,13 +49,6 @@ func dnsmasqWriteConfig(
     return confPath, changed, nil
 }
 
-// TODO consider a shared dnsmasq across all the daemon's networks.
-// This would have a few benefits:
-// - Less processes, less problems
-// - Less configuration for the user in the case of more than one network.
-// - Can listen on 127.0.0.x:53, rather than on the nebula address. This
-//   allows DNS to come up before nebula, which is helpful when nebula depends
-//   on DNS.
 func dnsmasqPmuxProc(
     ctx context.Context,
     logger *mlog.Logger,
@@ -16,8 +16,6 @@ import (
 func getDefaultHTTPSocketDirPath() string {
     path, err := firstExistingDir(
         "/tmp",
-
-        // TODO it's possible the daemon process can't actually write to these
         "/run",
         "/var/run",
         "/dev/shm",
@@ -369,8 +369,6 @@ func (d *Daemon) SetConfig(
             return struct{}{}, ErrManagedNetworkConfig
         }
 
-        // TODO needs to check that public addresses aren't being shared
-        // across networks, and whatever else happens in Config.Validate.
         return struct{}{}, n.SetConfig(ctx, config)
     },
 )
@@ -34,8 +34,6 @@ type GarageClientParams struct {
     GlobalBucketS3APICredentials garage.S3APICredentials
 
     // RPCSecret may be empty, if the secret is not available on the host.
-    //
-    // TODO this shouldn't really be here I don't think, remove it?
     RPCSecret string
 }
 
@@ -58,8 +56,6 @@ type CreateHostOpts struct {
     // CanCreateHosts indicates that the bootstrap produced by CreateHost should
     // give the new host the ability to create new hosts as well.
     CanCreateHosts bool
-
-    // TODO add nebula cert tags
 }
 
 // JoiningBootstrap wraps a normal Bootstrap to include extra data which a host
@@ -95,8 +91,6 @@ type RPC interface {
     // existing host, given the public key for that host. This is currently
     // mostly useful for creating certs for mobile devices.
     //
-    // TODO Specific error for if required secret isn't present.
-    //
     // Errors:
     // - ErrHostNotFound
     CreateNebulaCertificate(
@@ -238,9 +232,6 @@ func loadCreationParams(
     bootstrap.CreationParams, error,
 ) {
     var (
-        // TODO store/load the creation params separately from the rest of the
-        // bootstrap, since the bootstrap contains potentially the entire host
-        // list of a network, which could be pretty bulky.
         bootstrapFilePath = bootstrap.StateDirPath(stateDir.Path)
         bs                bootstrap.Bootstrap
     )
@@ -770,8 +761,6 @@ func (n *network) GetGarageClientParams(
 }
 
 func (n *network) RemoveHost(ctx context.Context, hostName nebula.HostName) error {
-    // TODO RemoveHost should publish a certificate revocation for the host
-    // being removed.
     _, err := withCurrBootstrap(n, func(
         currBootstrap bootstrap.Bootstrap,
     ) (
nix/busiless.nix (new file, 14 lines)
@@ -0,0 +1,14 @@
+{
+  buildGoModule,
+}: buildGoModule rec {
+  pname = "busiless";
+  version = "da584cffa8ab630a39a3028f8235f63bb95b83e3";
+  src = let
+    src = builtins.fetchGit {
+      url = "https://dev.mediocregopher.com/busiless";
+      rev = version;
+    };
+  in "${src}/src";
+  vendorHash = "sha256-7Pa5QGa2bqIKdykUYt7lZV3jq55eTfeDy/pe7OcyUAg=";
+  subPackages = [ "cmd/busiless" ];
+}
@@ -38,9 +38,6 @@ rec {
   supportedSystems = [
     "x86_64-linux"
     "aarch64-linux"
-
-    # rpi, TODO remember why this is disabled, try to re-enable it
-    #"armv7l-linux-musl"
     "i686-linux"
   ];
 
tasks/README.md (new file, 27 lines)
@@ -0,0 +1,27 @@
+# tasks
+
+This directory contains [busiless][busiless] task files. Each file corresponds
+to a task which must be done, unless it is located in a `drafts` directory in
+which case it is still under consideration.
+
+Tasks are organized according to when they are planned for, except for tasks in
+the `soon` directory which are only planned in the loosest sense of the word.
+
+## Usage
+
+Creating new task files is as simple as creating a new markdown file with the
+appropriate frontmatter, as described in the [busiless][busiless] documentation.
+
+Within this directory you can run `nix-shell` to open a shell which has the
+`busiless` binary available. This binary can be used to visualize and explore
+the task files.
+
+**Example Usage:**
+
+To prioritize all tasks within the `v0.0.3` directory:
+
+```bash
+busiless prioritize --root . --pattern '/v0.0.3/**'
+```
+
+[busiless]: https://dev.mediocregopher.com/busiless/
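To make the "appropriate frontmatter" mentioned in the README above concrete, here is a minimal sketch of what a new task file could look like (illustrative only, not a file added by this commit; the `type: task` and `after:` fields mirror the task files further down, while the heading and body are placeholders):

```markdown
---
type: task
after:
  - /soon/drafts/chest-management.md
---

# Some New Task

A short description of the work, written in normal markdown.
```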
tasks/shell.nix (new file, 10 lines)
@@ -0,0 +1,10 @@
+{
+  pkgsNix ? (import ../nix/pkgs.nix),
+  buildSystem ? builtins.currentSystem,
+}: let
+  pkgs = pkgsNix.default { inherit buildSystem; };
+  busiless = pkgs.callPackage ../nix/busiless.nix {};
+in
+  pkgs.mkShell {
+    buildInputs = [ busiless ];
+  }
tasks/soon/code/daemon-cap-check.md (new file, 7 lines)
@@ -0,0 +1,7 @@
+---
+type: task
+---
+
+When the daemon starts it should first check that it has the required
+linux capabilities, or that it is running as root. It should also check that the
+`tun` module is loaded.
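Purely as an illustration of the checks this task describes (not code from this commit; reading `/proc/modules` and falling back to `/dev/net/tun` are assumptions about how the tun check might be done), a Go sketch could look like:

```go
// Sketch of the daemon startup checks described in the task above. The
// detection strategy for the tun module is an assumption.
package main

import (
	"fmt"
	"os"
	"strings"
)

// checkDaemonPrivileges reports an error if the process is not running as
// root. A fuller implementation would also accept the specific Linux
// capabilities (e.g. CAP_NET_ADMIN) as an alternative to being root.
func checkDaemonPrivileges() error {
	if os.Geteuid() != 0 {
		return fmt.Errorf("daemon must run as root or with the required capabilities")
	}
	return nil
}

// checkTunModuleLoaded reports an error if the tun kernel module does not
// appear to be available.
func checkTunModuleLoaded() error {
	if b, err := os.ReadFile("/proc/modules"); err == nil {
		for _, line := range strings.Split(string(b), "\n") {
			if strings.HasPrefix(line, "tun ") {
				return nil
			}
		}
	}
	// tun may be built into the kernel rather than loaded as a module, in
	// which case /dev/net/tun should still exist.
	if _, err := os.Stat("/dev/net/tun"); err == nil {
		return nil
	}
	return fmt.Errorf("tun module does not appear to be loaded")
}

func main() {
	for _, check := range []func() error{checkDaemonPrivileges, checkTunModuleLoaded} {
		if err := check(); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
}
```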
tasks/soon/code/daemon-check-config.md (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+type: task
+---
+
+# Implement `daemon reload` Sub-Command
+
+The new sub-command would re-read the `daemon.yml` file and use it to reload
+the configuration currently being used by the daemon, without requiring a
+full restart of the process.
+
+This should be integrated into the systemd service file as `ExecReload`.
tasks/soon/code/dnsmasq-startup-block.md (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+type: task
+---
+
+# Block Until dnsmasq Confirmed to Have Started
+
+When starting up dnsmasq, either during initialization of a Children or when
+`reloadDNSMasq` is called on it, the Children should block until dnsmasq has
+been confirmed to have started.
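As an illustration of the blocking behavior this task asks for (not code from this commit; the listen address is an assumption, and TCP is used as a readiness probe only because dnsmasq serves TCP as well as UDP by default), a sketch could be:

```go
// Sketch: block until dnsmasq accepts connections, or give up when the
// context is cancelled. The real Children implementation would supply its own
// address and retry policy.
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func waitForDNSMasq(ctx context.Context, addr string) error {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		conn, err := net.DialTimeout("tcp", addr, time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		select {
		case <-ctx.Done():
			return fmt.Errorf("waiting for dnsmasq at %s: %w", addr, ctx.Err())
		case <-ticker.C:
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := waitForDNSMasq(ctx, "127.0.0.1:53"); err != nil {
		fmt.Println(err)
	}
}
```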
tasks/soon/code/garage-dont-restart-on-peer-change.md (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+type: task
+---
+
+# Don't Restart Garage on Peer Change
+
+When the garage configuration has changed, it is not necessary to issue a
+restart to the garage processes if only the bootstrap peers have changed. The
+bootstrap peers are only needed when garage starts up.
+
+Implementing this should speed up integration tests a bit.
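To illustrate the comparison this task implies (not code from this commit; `garageChildConfig` and its fields are hypothetical stand-ins for the real generated garage config), a sketch could be:

```go
// Sketch: decide whether a garage restart is needed by comparing the old and
// new configs with the bootstrap peer list masked out.
package main

import (
	"fmt"
	"reflect"
)

type garageChildConfig struct {
	RPCBindAddr    string
	BootstrapPeers []string
	// ... other fields which do require a restart when they change
}

func needsGarageRestart(oldConf, newConf garageChildConfig) bool {
	// The configs are passed by value, so zeroing the peer lists here doesn't
	// affect the caller.
	oldConf.BootstrapPeers = nil
	newConf.BootstrapPeers = nil
	return !reflect.DeepEqual(oldConf, newConf)
}

func main() {
	oldConf := garageChildConfig{RPCBindAddr: ":3901", BootstrapPeers: []string{"a"}}
	newConf := garageChildConfig{RPCBindAddr: ":3901", BootstrapPeers: []string{"a", "b"}}
	fmt.Println(needsGarageRestart(oldConf, newConf)) // false: only the peers changed
}
```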
tasks/soon/code/minimize-joining-bootstrap-size.md (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+type: task
+---
+
+# Minimize Size of Joining Bootstrap
+
+When a bootstrap is created, it is only necessary to include in it the hosts
+which are nebula lighthouses or garage peers.
+
+It will be necessary to immediately poll garage for the host list when joining a
+network.
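As an illustration of the filtering described here (not code from this commit; `Host` and its fields are hypothetical stand-ins for the real bootstrap types), a sketch could be:

```go
// Sketch: keep only the hosts a joining host needs in order to connect; the
// full list is re-polled from garage after joining.
package main

import "fmt"

type Host struct {
	IsNebulaLighthouse bool
	HasGarageInstance  bool
}

func minimalJoiningHosts(self string, hosts map[string]Host) map[string]Host {
	out := map[string]Host{}
	for name, h := range hosts {
		if name == self || h.IsNebulaLighthouse || h.HasGarageInstance {
			out[name] = h
		}
	}
	return out
}

func main() {
	hosts := map[string]Host{
		"new-host":   {},
		"lighthouse": {IsNebulaLighthouse: true},
		"laptop":     {},
	}
	fmt.Println(len(minimalJoiningHosts("new-host", hosts))) // 2
}
```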
tasks/soon/code/nebula-config-reloading.md (new file, 12 lines)
@@ -0,0 +1,12 @@
+---
+type: task
+---
+
+# Reload Nebula On Config Changes
+
+When nebula's firewall is modified we should send it a reload signal, rather
+than completely restart it.
+
+This will require some diving into the nebula codebase to figure out which
+config items can be reloaded, and which can't. If more than just firewall is
+reload-able (e.g. set of lighthouses) we should encompass that as well.
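As an illustration of what the reload path could look like (not code from this commit; it assumes nebula re-reads its config on SIGHUP, and confirming exactly which settings that covers is part of the task), a sketch could be:

```go
// Sketch: signal a running nebula child process to re-read its config instead
// of restarting it. The daemon is assumed here to track the child as an
// *os.Process.
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func reloadNebula(proc *os.Process) error {
	if err := proc.Signal(syscall.SIGHUP); err != nil {
		return fmt.Errorf("sending SIGHUP to nebula: %w", err)
	}
	return nil
}

func main() {
	// Demonstration only: ignore SIGHUP and send it to ourselves. A real
	// caller would pass the nebula child's *os.Process instead.
	signal.Ignore(syscall.SIGHUP)
	self, _ := os.FindProcess(os.Getpid())
	fmt.Println(reloadNebula(self))
}
```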
tasks/soon/code/raspberry-pi-build.md (new file, 7 lines)
@@ -0,0 +1,7 @@
+---
+type: task
+---
+
+# Raspberry Pi Build
+
+Get the Isle build system to include the rpi target (armv7l-linux-musl).
tasks/soon/docs/restic-example.md (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+type: task
+after:
+  - /soon/drafts/chest-management.md
+---
+
+# Restic Example
+
+An example of how to use restic needs to be added to the documentation.
tasks/soon/drafts/certificate-revocation.md (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+type: task
+---
+
+# Certificate Revocation Propagation
+
+When a host is removed from the network the admin host which removed it should
+publish a revocation certificate for its old certificate, so that other hosts
+know to no longer trust it.
tasks/soon/drafts/chest-management.md (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+type: task
+---
+
+# Storage Chest Management
+
+The following command-line sub-commands need to be implemented:
+
+- `storage chest(s) list`
+- `storage chest create`
+- `storage chest discard`
tasks/soon/drafts/separate-bootstrap-hosts.md (new file, 19 lines)
@@ -0,0 +1,19 @@
+---
+type: task
+---
+
+# Store Bootstrap Hosts Separately
+
+When CLI commands are performed and multiple networks are joined, the daemon
+will search all bootstrap files for one containing a `bootstrap.CreationParams`
+matching a given search string.
+
+This will get problematic in the future, since doing this requires decoding and
+loading into memory _all_ the host metadata which is included in each bootstrap.
+For a large network this could be non-trivial.
+
+The bootstrap hosts should be separated out of the bootstrap file and stored
+separately.
+
+It's also possible we want to revisit how network metadata is stored entirely.
+Would sqlite be too crazy?
tasks/soon/drafts/shared-dns.md (new file, 29 lines)
@@ -0,0 +1,29 @@
+---
+type: task
+---
+
+# Shared DNS Server
+
+Consider a shared dnsmasq (or maybe embedded CoreDNS) instance across all the
+daemon's networks.
+
+This would have a few benefits:
+- Less processes, less problems
+- Less configuration for the user in the case of more than one network.
+- Can listen on 127.0.0.x:53, rather than on the nebula address. This
+  allows DNS to come up before nebula, which is helpful when nebula depends
+  on DNS.
+
+This would break an existing use-case where a host is using the DNS server of a
+remote host, as the DNS server would no longer be available on the nebula
+address. The primary need for this at the moment is mobile, where there is not a
+real app yet. Once there is a real app this won't be necessary.
+
+In the meantime this could be worked-around by allowing the daemon to configure
+which IP/ports the server listens on (which it would want to do anyway, I
+imagine). The user can configure the DNS server to listen on the nebula address
+manually.
+
+This solution has the downside of potentially allowing cross-network DNS
+queries, which might be a big enough security issue to be worth working around
+even in the initial implementation of this.
tasks/v0.0.3/code/alloc-subcmd-names.md (new file, 8 lines)
@@ -0,0 +1,8 @@
+---
+type: task
+---
+
+# Rename Storage Allocation-Related Sub-Commands
+
+Rather than having sub-commands like `storage list-allocations` there should
+instead be `storage allocation(s) list`.
tasks/v0.0.3/code/audit-error-docs.md (new file, 12 lines)
@@ -0,0 +1,12 @@
+---
+type: task
+---
+
+# Audit Error Code Documentation
+
+Audit all code for RPC methods in both `daemon` and `daemon/network`, ensuring
+that all error codes which can be returned are properly documented, and that all
+errors which should have an error code have one.
+
+`CreateNebulaCertificate` should return a specific error for when the CA root key
+secret can't be found.
tasks/v0.0.3/code/audit-http-socket-dirs.md (new file, 17 lines)
@@ -0,0 +1,17 @@
+---
+type: task
+---
+
+# Audit HTTP Socket Directory Search Options
+
+We are currently searching these directories for the first writeable one, within
+which to create the socket file:
+
+- /tmp
+- /run
+- /var/run
+- /dev/shm
+
+It would be ideal to not default to `/tmp`, but it seems that some of the others
+aren't always available, or aren't readable/writeable by the `isle`
+user.
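As an illustration of a writability-aware version of this search (not code from this commit), a sketch could be:

```go
// Sketch: instead of only checking that a directory exists, try to actually
// create a file in it, and use the first directory where that succeeds.
package main

import (
	"fmt"
	"os"
)

func firstWriteableDir(dirs ...string) (string, bool) {
	for _, dir := range dirs {
		f, err := os.CreateTemp(dir, ".isle-write-check-*")
		if err != nil {
			continue
		}
		f.Close()
		os.Remove(f.Name())
		return dir, true
	}
	return "", false
}

func main() {
	dir, ok := firstWriteableDir("/run", "/var/run", "/dev/shm", "/tmp")
	fmt.Println(dir, ok)
}
```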
tasks/v0.0.3/code/fix-nix-flake.nix (new file, 8 lines)
@@ -0,0 +1,8 @@
+---
+type: task
+---
+
+# Fix `flake.nix`
+
+The `flake.nix` file is currently broken; garage is using
+`builtins.currentSystem` for some reason.
tasks/v0.0.3/code/nebula-cert-groups.md (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+type: task
+---
+
+# Nebula Cert Groups
+
+Allow specifying one or more group names when creating a host (or signing a
+host's existing public key). These groups will get added to the host's cert,
+and can be used in firewall rules.
tasks/v0.0.3/code/setconfig-validate.md (new file, 8 lines)
@@ -0,0 +1,8 @@
+---
+type: task
+---
+
+# Validate daemon Config in SetConfig
+
+When the `Daemon.SetConfig` method is called, it should call Validate on the
+full new `daemon.Config` (not just the NetworkConfig).
tasks/v0.0.3/code/todo.md (new file, 8 lines)
@@ -0,0 +1,8 @@
+---
+type: tasks
+---
+
+# Address All TODOs
+
+All TODOs which remain in the codebase must be addressed or moved into a task
+file.
tasks/v0.0.3/update-documentation.md (new file, 13 lines)
@@ -0,0 +1,13 @@
+---
+type: task
+after:
+  - code/**
+---
+
+# Update Documentation
+
+All documentation related to management of network configuration needs to be
+updated.
+
+Check through all development documentation, especially that surrounding
+testing.