Compare commits

...

3 Commits

Author          SHA1        Message                                                      Date
Brian Picciano  3c3bd8649a  Fix minio-client creating config directory in user's home   2023-09-04 21:38:28 +02:00
Brian Picciano  98e5f4c98c  Add create-bootstrap test case                               2023-09-04 20:56:48 +02:00
Brian Picciano  0a482607d5  Move temp directory creation into test/utils                 2023-09-04 19:56:15 +02:00
10 changed files with 176 additions and 13 deletions

View File

@@ -169,8 +169,9 @@ in rec {
       appImage
       pkgs.busybox
       pkgs.yq-go
+      pkgs.jq
     ]}
     export SHELL=${pkgs.bash}/bin/bash
-    exec ${./tests}/entrypoint.sh "$@"
+    exec ${pkgs.bash}/bin/bash ${./tests}/entrypoint.sh "$@"
   '';
 }

View File

@@ -7,6 +7,28 @@ import (
 	"syscall"
 )
 
+// minio-client keeps a configuration directory which contains various pieces of
+// information which may or may not be useful. Unfortunately when it initializes
+// this directory it likes to print some annoying logs, so we pre-initialize in
+// order to prevent it from doing so.
+func initMCConfigDir() (string, error) {
+	var (
+		path           = filepath.Join(envDataDirPath, "mc")
+		sharePath      = filepath.Join(path, "share")
+		configJSONPath = filepath.Join(path, "config.json")
+	)
+
+	if err := os.MkdirAll(sharePath, 0700); err != nil {
+		return "", fmt.Errorf("creating %q: %w", sharePath, err)
+	}
+
+	if err := os.WriteFile(configJSONPath, []byte(`{}`), 0600); err != nil {
+		return "", fmt.Errorf("writing %q: %w", configJSONPath, err)
+	}
+
+	return path, nil
+}
+
 var subCmdGarageMC = subCmd{
 	name:  "mc",
 	descr: "Runs the mc (minio-client) binary. The isle garage can be accessed under the `garage` alias",
@@ -50,7 +72,15 @@ var subCmdGarageMC = subCmd{
 			args = args[i:]
 		}
 
-		args = append([]string{binPath("mc")}, args...)
+		configDir, err := initMCConfigDir()
+		if err != nil {
+			return fmt.Errorf("initializing minio-client config directory: %w", err)
+		}
+
+		args = append([]string{
+			binPath("mc"),
+			"--config-dir", configDir,
+		}, args...)
 
 		var (
 			mcHostVar = fmt.Sprintf(

View File

@@ -1,3 +1,6 @@
+# shellcheck source=../utils/with-tmp-for-case.sh
+source "$UTILS"/with-tmp-for-case.sh
+
 mkdir a
 mkdir b
 mkdir c

View File

@@ -0,0 +1,20 @@
+# shellcheck source=../utils/with-single-node-cluster.sh
+source "$UTILS"/with-single-node-cluster.sh
+
+adminBS="$XDG_DATA_HOME"/isle/bootstrap.yml
+bs=create-bootstrap-out.yml
+
+isle admin create-bootstrap \
+    --admin-path admin.yml \
+    --hostname secondus \
+    --ip 10.6.9.2 \
+    > "$bs"
+
+[ "$(yq <"$bs" '.admin_creation_params')" = "$(yq <admin.yml '.creation_params')" ]
+
+[ "$(yq <"$bs" '.hostname')" = "secondus" ]
+
+[ "$(yq <"$bs" '.hosts.primus.nebula.signed_public_credentials')" \
+    = "$(yq <"$adminBS" '.nebula.signed_public_credentials')" ]
+
+[ "$(yq <"$bs" '.hosts.primus.garage.instances|length')" = "3" ]

View File

@@ -0,0 +1,13 @@
+# shellcheck source=../../utils/with-single-node-cluster.sh
+source "$UTILS"/with-single-node-cluster.sh
+
+status="$(isle garage cli status | tail -n+3)"
+
+[ "$(echo "$status" | wc -l)" = "3" ]
+echo "$status" | grep -q '10.6.9.1:3900'
+echo "$status" | grep -q '10.6.9.1:3910'
+echo "$status" | grep -q '10.6.9.1:3920'
+
+buckets="$(isle garage cli bucket list | tail -n+2)"
+[ "$(echo "$buckets" | wc -l)" = 1 ]
+echo "$buckets" | grep -q 'global-shared'

View File

@@ -0,0 +1,8 @@
+# shellcheck source=../../utils/with-single-node-cluster.sh
+source "$UTILS"/with-single-node-cluster.sh
+
+files="$(isle garage mc -- tree --json garage)"
+[ "$(echo "$files" | jq -s '.|length')" -ge "1" ]
+
+file="$(echo "$files" | jq -sr '.[0].key')"
+[ "$(isle garage mc -- cat "garage/$file" | wc -c)" -gt "0" ]

tests/entrypoint.sh Executable file → Normal file
View File

@@ -4,6 +4,8 @@ set -e
 cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null
 root=$(pwd)
 
+export UTILS="$root"/utils
+
 REGEXS=()
 
 while [[ $# -gt 0 ]]; do
@@ -39,11 +41,16 @@ if [ -z "$KEEP_TMP" ]; then trap 'rm -rf $TMPDIR' EXIT; fi
 export TMPDIR
 echo "tmp dir is $TMPDIR"
 
+# Blackhole these directories so that tests don't accidentally use the host's
+# real ones.
+export XDG_RUNTIME_DIR=/dev/null
+export XDG_DATA_HOME=/dev/null
+
 test_files=$(
     find ./cases -type f -name '*.sh' \
         | sed "s|^\./cases/||" \
         | grep -v entrypoint.sh \
-        | sort -n
+        | sort
 )
 
 for r in "${REGEXS[@]}"; do
@@ -52,17 +59,17 @@ done
 echo -e "number of tests: $(echo "$test_files" | wc -l)\n"
 
 for file in $test_files; do
 
-    echo "$file"
+    echo "Running test case: $file"
 
-    [ -z "$VERBOSE" ] && output="$TMPDIR/$file.log" || output=/dev/stdout
+    if [ -z "$VERBOSE" ]; then
+        output="$TMPDIR/$file.log"
+        mkdir -p "$(dirname "$output")"
+    else
+        output=/dev/stdout
+    fi
 
     (
-        export TMPDIR="$TMPDIR/$file.tmp"
-        export XDG_RUNTIME_DIR="$TMPDIR/.run"
-        export XDG_DATA_HOME="$TMPDIR/.data"
-        mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_DATA_HOME"
-        cd "$TMPDIR"
+        export TEST_CASE_FILE="$file"
 
         if ! $SHELL -e -x "$root/cases/$file" >"$output" 2>&1; then
             echo "$file FAILED"
@@ -74,7 +81,17 @@ for file in $test_files; do
             fi
             exit 1
         fi
-    )
+    ) || TESTS_FAILED=1
+
+    if [ -n "$TESTS_FAILED" ]; then break; fi
 done
 
-echo -e '\nall tests succeeded!'
+# Clean up any shared running clusters. Each cleanup script is responsible for
+# figuring out if its shared cluster was actually instantiated during any tests.
+echo "Running any cleanup tasks"
+
+# shellcheck source=./utils/cleanup-single-node-cluster.sh
+source "$UTILS"/cleanup-single-node-cluster.sh
+
+if [ -z "$TESTS_FAILED" ]; then echo -e '\nall tests succeeded!'; fi

View File

@@ -0,0 +1,14 @@
+(
+    set -e
+
+    TMPDIR="$TMPDIR/shared/single-node.tmp"
+    if [ ! -d "$TMPDIR" ]; then exit 0; fi
+
+    lock_file="$TMPDIR/.run/isle/lock"
+    if [ ! -e "$lock_file" ]; then exit 0; fi
+
+    pid="$(cat "$lock_file")"
+    echo "killing shared single node cluster (process: $pid)"
+    kill "$pid"
+    while [ -e "$TMPDIR/.run/isle" ]; do sleep 1; done
+)

View File

@@ -0,0 +1,48 @@
+set -e
+
+TMPDIR="$TMPDIR/shared/single-node.tmp"
+XDG_RUNTIME_DIR="$TMPDIR/.run"
+XDG_DATA_HOME="$TMPDIR/.data"
+
+mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_DATA_HOME"
+cd "$TMPDIR"
+
+if [ ! -d "$XDG_RUNTIME_DIR/isle" ]; then
+    echo "Initializing shared single node cluster"
+
+    mkdir a
+    mkdir b
+    mkdir c
+
+    cat >daemon.yml <<EOF
+vpn:
+  public_addr: 127.0.0.1:60000
+
+  tun:
+    device: isle-test-shared
+
+storage:
+  allocations:
+  - data_path: a/data
+    meta_path: a/meta
+    capacity: 100
+  - data_path: b/data
+    meta_path: b/meta
+    capacity: 100
+  - data_path: c/data
+    meta_path: c/meta
+    capacity: 100
+EOF
+
+    isle admin create-network \
+        --config-path daemon.yml \
+        --domain shared.test \
+        --hostname primus \
+        --ip-net "10.6.9.1/24" \
+        --name "testing" \
+        > admin.yml
+
+    isle daemon --config-path daemon.yml >daemon.log 2>&1 &
+    echo "Waiting for daemon (process $!) to initialize"
+    while ! isle hosts list >/dev/null; do sleep 1; done
+fi

View File

@@ -0,0 +1,9 @@
+set -e
+
+TMPDIR="$TMPDIR/$TEST_CASE_FILE.tmp"
+XDG_RUNTIME_DIR="$TMPDIR/.run"
+XDG_DATA_HOME="$TMPDIR/.data"
+
+mkdir -p "$TMPDIR" "$XDG_RUNTIME_DIR" "$XDG_DATA_HOME"
+cd "$TMPDIR"
+