nixos/tests/rke2: add tests for single-node and multi-node
For multi-node test, create a 3-node cluster (2 server nodes and 1 agent node) to test the connection between nodes. Set up `passthru.tests`.
This commit is contained in:
parent
a642efcdab
commit
b8e5799a63
@ -795,6 +795,7 @@ in {
|
|||||||
restic-rest-server = handleTest ./restic-rest-server.nix {};
|
restic-rest-server = handleTest ./restic-rest-server.nix {};
|
||||||
restic = handleTest ./restic.nix {};
|
restic = handleTest ./restic.nix {};
|
||||||
retroarch = handleTest ./retroarch.nix {};
|
retroarch = handleTest ./retroarch.nix {};
|
||||||
|
rke2 = handleTestOn ["aarch64-linux" "x86_64-linux"] ./rke2 {};
|
||||||
rkvm = handleTest ./rkvm {};
|
rkvm = handleTest ./rkvm {};
|
||||||
robustirc-bridge = handleTest ./robustirc-bridge.nix {};
|
robustirc-bridge = handleTest ./robustirc-bridge.nix {};
|
||||||
roundcube = handleTest ./roundcube.nix {};
|
roundcube = handleTest ./roundcube.nix {};
|
||||||
|
13
nixos/tests/rke2/default.nix
Normal file
13
nixos/tests/rke2/default.nix
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
{ system ? builtins.currentSystem
, pkgs ? import ../../.. { inherit system; }
, lib ? pkgs.lib
}:
let
  # All attributes in pkgs whose name starts with "rke2".
  rke2Packages = lib.filterAttrs (name: _: lib.strings.hasPrefix "rke2" name) pkgs;

  # Instantiate the given test file once per rke2 package variant.
  testsFor = path: lib.mapAttrs (_: rke2: import path { inherit system pkgs rke2; }) rke2Packages;
in
{
  # Run a single node rke2 cluster and verify a pod can run
  singleNode = testsFor ./single-node.nix;
  # Run a multi-node rke2 cluster and verify pod networking works across nodes
  multiNode = testsFor ./multi-node.nix;
}
|
176
nixos/tests/rke2/multi-node.nix
Normal file
176
nixos/tests/rke2/multi-node.nix
Normal file
@ -0,0 +1,176 @@
|
|||||||
|
import ../make-test-python.nix ({ pkgs, lib, rke2, ... }:
  let
    # Minimal local container image for the test pods; socat is included so
    # the daemonset pods can serve and probe TCP connections between nodes.
    pauseImage = pkgs.dockerTools.streamLayeredImage {
      name = "test.local/pause";
      tag = "local";
      contents = pkgs.buildEnv {
        name = "rke2-pause-image-env";
        paths = with pkgs; [ tini bashInteractive coreutils socat ];
      };
      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
    };

    # A daemonset that responds 'server' on port 8000
    networkTestDaemonset = pkgs.writeText "test.yml" ''
      apiVersion: apps/v1
      kind: DaemonSet
      metadata:
        name: test
        labels:
          name: test
      spec:
        selector:
          matchLabels:
            name: test
        template:
          metadata:
            labels:
              name: test
          spec:
            containers:
            - name: test
              image: test.local/pause:local
              imagePullPolicy: Never
              resources:
                limits:
                  memory: 20Mi
              command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo server"]
    '';

    # Cluster join secrets. Both carry the same value on purpose: servers
    # join with tokenFile, the agent joins with agentTokenFile.
    tokenFile = pkgs.writeText "token" "p@s$w0rd";
    agentTokenFile = pkgs.writeText "agent-token" "p@s$w0rd";
  in
  {
    name = "${rke2.name}-multi-node";
    meta.maintainers = rke2.meta.maintainers;

    nodes = {
      # First server: bootstraps the cluster (hence --cluster-reset).
      server1 = { pkgs, ... }: {
        networking.firewall.enable = false;
        networking.useDHCP = false;
        networking.defaultGateway = "192.168.1.1";
        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
          { address = "192.168.1.1"; prefixLength = 24; }
        ];

        virtualisation.memorySize = 1536;
        virtualisation.diskSize = 4096;

        services.rke2 = {
          enable = true;
          role = "server";
          inherit tokenFile;
          inherit agentTokenFile;
          nodeName = "${rke2.name}-server1";
          package = rke2;
          nodeIP = "192.168.1.1";
          # These addons are not needed for the test and only cost memory and
          # startup time.
          disable = [
            "rke2-coredns"
            "rke2-metrics-server"
            "rke2-ingress-nginx"
          ];
          extraFlags = [
            "--cluster-reset"
          ];
        };
      };

      # Second server: joins the cluster through server1.
      server2 = { pkgs, ... }: {
        networking.firewall.enable = false;
        networking.useDHCP = false;
        networking.defaultGateway = "192.168.1.2";
        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
          { address = "192.168.1.2"; prefixLength = 24; }
        ];

        virtualisation.memorySize = 1536;
        virtualisation.diskSize = 4096;

        services.rke2 = {
          enable = true;
          role = "server";
          serverAddr = "https://192.168.1.1:6443";
          inherit tokenFile;
          inherit agentTokenFile;
          nodeName = "${rke2.name}-server2";
          package = rke2;
          nodeIP = "192.168.1.2";
          disable = [
            "rke2-coredns"
            "rke2-metrics-server"
            "rke2-ingress-nginx"
          ];
        };
      };

      # Agent node: joins through server2 using the agent token, exercising
      # the server2 -> server1 -> agent1 chain of connections.
      agent1 = { pkgs, ... }: {
        networking.firewall.enable = false;
        networking.useDHCP = false;
        networking.defaultGateway = "192.168.1.3";
        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
          { address = "192.168.1.3"; prefixLength = 24; }
        ];

        virtualisation.memorySize = 1536;
        virtualisation.diskSize = 4096;

        services.rke2 = {
          enable = true;
          role = "agent";
          tokenFile = agentTokenFile;
          serverAddr = "https://192.168.1.2:6443";
          nodeName = "${rke2.name}-agent1";
          package = rke2;
          nodeIP = "192.168.1.3";
        };
      };
    };

    testScript = let
      kubectl = "${pkgs.kubectl}/bin/kubectl --kubeconfig=/etc/rancher/rke2/rke2.yaml";
      ctr = "${pkgs.containerd}/bin/ctr -a /run/k3s/containerd/containerd.sock";
      jq = "${pkgs.jq}/bin/jq";
      ping = "${pkgs.iputils}/bin/ping";
    in ''
      machines = [server1, server2, agent1]

      for machine in machines:
          machine.start()
          machine.wait_for_unit("rke2")

      # wait for the agent to show up
      server1.succeed("${kubectl} get node ${rke2.name}-agent1")

      # Import the pause image into containerd's k8s.io namespace on every
      # node so the kubelet can find it despite imagePullPolicy: Never
      # (matches the single-node test; raw ctr defaults to the "default"
      # namespace otherwise).
      for machine in machines:
          machine.succeed("${pauseImage} | ${ctr} -n k8s.io image import -")

      server1.succeed("${kubectl} cluster-info")
      server1.wait_until_succeeds("${kubectl} get serviceaccount default")

      # Now create a pod on each node via a daemonset and verify they can talk to each other.
      server1.succeed("${kubectl} apply -f ${networkTestDaemonset}")
      server1.wait_until_succeeds(
          f'[ "$(${kubectl} get ds test -o json | ${jq} .status.numberReady)" -eq {len(machines)} ]'
      )

      # Get pod IPs
      pods = server1.succeed("${kubectl} get po -o json | ${jq} '.items[].metadata.name' -r").splitlines()
      pod_ips = [
          server1.succeed(f"${kubectl} get po {n} -o json | ${jq} '.status.podIP' -cr").strip() for n in pods
      ]

      # Verify every node can ping every pod ip
      for pod_ip in pod_ips:
          server1.succeed(f"${ping} -c 1 {pod_ip}")
          server2.succeed(f"${ping} -c 1 {pod_ip}")
          agent1.succeed(f"${ping} -c 1 {pod_ip}")

      # Verify the pods can talk to each other
      resp = server1.wait_until_succeeds(f"${kubectl} exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -")
      assert resp.strip() == "server"
      resp = server1.wait_until_succeeds(f"${kubectl} exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -")
      assert resp.strip() == "server"

      # Cleanup
      server1.succeed("${kubectl} delete -f ${networkTestDaemonset}")
      for machine in machines:
          machine.shutdown()
    '';
  })
|
75
nixos/tests/rke2/single-node.nix
Normal file
75
nixos/tests/rke2/single-node.nix
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
import ../make-test-python.nix ({ pkgs, lib, rke2, ... }:
  let
    # Minimal local container image for the test pod; tini acts as PID 1 and
    # sleep keeps the container alive.
    pauseImage = pkgs.dockerTools.streamLayeredImage {
      name = "test.local/pause";
      tag = "local";
      contents = pkgs.buildEnv {
        name = "rke2-pause-image-env";
        paths = with pkgs; [ tini (hiPrio coreutils) busybox ];
      };
      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
    };

    # Manifest for one long-running pod based on the image above.
    testPodYaml = pkgs.writeText "test.yaml" ''
      apiVersion: v1
      kind: Pod
      metadata:
        name: test
      spec:
        containers:
        - name: test
          image: test.local/pause:local
          imagePullPolicy: Never
          command: ["sh", "-c", "sleep inf"]
    '';
  in
  {
    name = "${rke2.name}-single-node";
    meta.maintainers = rke2.meta.maintainers;

    nodes.machine = { pkgs, ... }: {
      networking = {
        firewall.enable = false;
        useDHCP = false;
        defaultGateway = "192.168.1.1";
        interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
          { address = "192.168.1.1"; prefixLength = 24; }
        ];
      };

      virtualisation = {
        memorySize = 1536;
        diskSize = 4096;
      };

      services.rke2 = {
        enable = true;
        role = "server";
        package = rke2;
        nodeIP = "192.168.1.1";
        # Addons that the test does not need.
        disable = [ "rke2-coredns" "rke2-metrics-server" "rke2-ingress-nginx" ];
        extraFlags = [ "--cluster-reset" ];
      };
    };

    testScript = let
      kubectl = "${pkgs.kubectl}/bin/kubectl --kubeconfig=/etc/rancher/rke2/rke2.yaml";
      ctr = "${pkgs.containerd}/bin/ctr -a /run/k3s/containerd/containerd.sock";
    in ''
      start_all()

      machine.wait_for_unit("rke2")
      machine.succeed("${kubectl} cluster-info")
      machine.wait_until_succeeds(
          "${pauseImage} | ${ctr} -n k8s.io image import -"
      )

      machine.wait_until_succeeds("${kubectl} get serviceaccount default")
      machine.succeed("${kubectl} apply -f ${testPodYaml}")
      machine.succeed("${kubectl} wait --for 'condition=Ready' pod/test")
      machine.succeed("${kubectl} delete -f ${testPodYaml}")

      machine.shutdown()
    '';
  })
|
@ -58,6 +58,15 @@ buildGoModule rec {
|
|||||||
|
|
||||||
passthru.updateScript = nix-update-script { };
|
passthru.updateScript = nix-update-script { };
|
||||||
|
|
||||||
|
passthru.tests =
  let
    # Check that `rke2 --version` reports the packaged version.
    versionTest = testers.testVersion {
      package = rke2;
      version = "v${version}";
    };
  in
  { version = versionTest; }
  # The NixOS VM tests only run on Linux.
  // lib.optionalAttrs stdenv.isLinux { inherit (nixosTests) rke2; };
|
||||||
|
|
||||||
meta = with lib; {
|
meta = with lib; {
|
||||||
homepage = "https://github.com/rancher/rke2";
|
homepage = "https://github.com/rancher/rke2";
|
||||||
description = "RKE2, also known as RKE Government, is Rancher's next-generation Kubernetes distribution.";
|
description = "RKE2, also known as RKE Government, is Rancher's next-generation Kubernetes distribution.";
|
||||||
|
Loading…
Reference in New Issue
Block a user