lol

nixos/k3s: refactor k3s multi node test (#355230)

Authored by Marcus Ramberg and committed by GitHub (commits 629bf81b, abf02020).

+45 -61
+45 -61
nixos/tests/k3s/multi-node.nix
··· 16 16 socat 17 17 ]; 18 18 }; 19 - pauseImage = pkgs.dockerTools.streamLayeredImage { 19 + pauseImage = pkgs.dockerTools.buildImage { 20 20 name = "test.local/pause"; 21 21 tag = "local"; 22 - contents = imageEnv; 22 + copyToRoot = imageEnv; 23 23 config.Entrypoint = [ 24 24 "/bin/tini" 25 25 "--" ··· 75 75 enable = true; 76 76 role = "server"; 77 77 package = k3s; 78 + images = [ pauseImage ]; 78 79 clusterInit = true; 79 80 extraFlags = [ 80 81 "--disable coredns" ··· 117 118 inherit tokenFile; 118 119 enable = true; 119 120 package = k3s; 121 + images = [ pauseImage ]; 120 122 serverAddr = "https://192.168.1.1:6443"; 121 123 clusterInit = false; 122 - extraFlags = builtins.toString [ 123 - "--disable" 124 - "coredns" 125 - "--disable" 126 - "local-storage" 127 - "--disable" 128 - "metrics-server" 129 - "--disable" 130 - "servicelb" 131 - "--disable" 132 - "traefik" 133 - "--node-ip" 134 - "192.168.1.3" 135 - "--pause-image" 136 - "test.local/pause:local" 124 + extraFlags = [ 125 + "--disable coredns" 126 + "--disable local-storage" 127 + "--disable metrics-server" 128 + "--disable servicelb" 129 + "--disable traefik" 130 + "--node-ip 192.168.1.3" 131 + "--pause-image test.local/pause:local" 137 132 ]; 138 133 }; 139 134 networking.firewall.allowedTCPPorts = [ ··· 163 158 enable = true; 164 159 role = "agent"; 165 160 package = k3s; 161 + images = [ pauseImage ]; 166 162 serverAddr = "https://192.168.1.3:6443"; 167 - extraFlags = lib.concatStringsSep " " [ 168 - "--pause-image" 169 - "test.local/pause:local" 170 - "--node-ip" 171 - "192.168.1.2" 163 + extraFlags = [ 164 + "--pause-image test.local/pause:local" 165 + "--node-ip 192.168.1.2" 172 166 ]; 173 167 }; 174 168 networking.firewall.allowedTCPPorts = [ 6443 ]; ··· 185 179 }; 186 180 }; 187 181 188 - testScript = '' 189 - machines = [server, server2, agent] 190 - for m in machines: 191 - m.start() 192 - m.wait_for_unit("k3s") 193 - 194 - is_aarch64 = "${toString pkgs.stdenv.hostPlatform.isAarch64}" == 
"1" 195 - 196 - # wait for the agent to show up 197 - server.wait_until_succeeds("k3s kubectl get node agent") 198 - 199 - for m in machines: 200 - m.succeed("k3s check-config") 201 - m.succeed( 202 - "${pauseImage} | k3s ctr image import -" 203 - ) 182 + testScript = # python 183 + '' 184 + start_all() 204 185 205 - server.succeed("k3s kubectl cluster-info") 206 - # Also wait for our service account to show up; it takes a sec 207 - server.wait_until_succeeds("k3s kubectl get serviceaccount default") 186 + machines = [server, server2, agent] 187 + for m in machines: 188 + m.wait_for_unit("k3s") 208 189 209 - # Now create a pod on each node via a daemonset and verify they can talk to each other. 210 - server.succeed("k3s kubectl apply -f ${networkTestDaemonset}") 211 - server.wait_until_succeeds(f'[ "$(k3s kubectl get ds test -o json | jq .status.numberReady)" -eq {len(machines)} ]') 190 + # wait for the agent to show up 191 + server.wait_until_succeeds("k3s kubectl get node agent") 212 192 213 - # Get pod IPs 214 - pods = server.succeed("k3s kubectl get po -o json | jq '.items[].metadata.name' -r").splitlines() 215 - pod_ips = [server.succeed(f"k3s kubectl get po {name} -o json | jq '.status.podIP' -cr").strip() for name in pods] 193 + for m in machines: 194 + m.succeed("k3s check-config") 216 195 217 - # Verify each server can ping each pod ip 218 - for pod_ip in pod_ips: 219 - server.succeed(f"ping -c 1 {pod_ip}") 220 - agent.succeed(f"ping -c 1 {pod_ip}") 196 + server.succeed("k3s kubectl cluster-info") 197 + # Also wait for our service account to show up; it takes a sec 198 + server.wait_until_succeeds("k3s kubectl get serviceaccount default") 221 199 222 - # Verify the pods can talk to each other 223 - resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -") 224 - assert resp.strip() == "server" 225 - resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -") 226 - assert 
resp.strip() == "server" 200 + # Now create a pod on each node via a daemonset and verify they can talk to each other. 201 + server.succeed("k3s kubectl apply -f ${networkTestDaemonset}") 202 + server.wait_until_succeeds(f'[ "$(k3s kubectl get ds test -o json | jq .status.numberReady)" -eq {len(machines)} ]') 227 203 228 - # Cleanup 229 - server.succeed("k3s kubectl delete -f ${networkTestDaemonset}") 204 + # Get pod IPs 205 + pods = server.succeed("k3s kubectl get po -o json | jq '.items[].metadata.name' -r").splitlines() 206 + pod_ips = [server.succeed(f"k3s kubectl get po {name} -o json | jq '.status.podIP' -cr").strip() for name in pods] 230 207 231 - for m in machines: 232 - m.shutdown() 233 - ''; 208 + # Verify each server can ping each pod ip 209 + for pod_ip in pod_ips: 210 + server.succeed(f"ping -c 1 {pod_ip}") 211 + server2.succeed(f"ping -c 1 {pod_ip}") 212 + agent.succeed(f"ping -c 1 {pod_ip}") 213 + # Verify the pods can talk to each other 214 + for pod in pods: 215 + resp = server.succeed(f"k3s kubectl exec {pod} -- socat TCP:{pod_ip}:8000 -") 216 + assert resp.strip() == "server" 217 + ''; 234 218 235 219 meta.maintainers = lib.teams.k3s.members; 236 220 }