···
-import ./make-test-python.nix ({ pkgs, ... }:
+import ../make-test-python.nix ({ pkgs, ... }:
  let
    imageEnv = pkgs.buildEnv {
      name = "k3s-pause-image-env";
···
      contents = imageEnv;
      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
    };
-    # Don't use the default service account because there's a race where it may
-    # not be created yet; make our own instead.
    testPodYaml = pkgs.writeText "test.yml" ''
      apiVersion: v1
-      kind: ServiceAccount
-      metadata:
-        name: test
-      ---
-      apiVersion: v1
      kind: Pod
      metadata:
        name: test
      spec:
-        serviceAccountName: test
        containers:
        - name: test
          image: test.local/pause:local
···
    machine.wait_for_unit("k3s")
    machine.succeed("k3s kubectl cluster-info")
    machine.fail("sudo -u noprivs k3s kubectl cluster-info")
-    # FIXME: this fails with the current nixos kernel config; once it passes, we should uncomment it
-    # machine.succeed("k3s check-config")
+    machine.succeed("k3s check-config")
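
    # pauseImage here is the script built by dockerTools.streamLayeredImage above;
    # it writes the image tarball to stdout, so it can be piped straight into
    # containerd's import without an intermediate file.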
    machine.succeed(
      "${pauseImage} | k3s ctr image import -"
    )

+    # Also wait for the default service account to show up; it takes a moment
+    machine.wait_until_succeeds("k3s kubectl get serviceaccount default")
    machine.succeed("k3s kubectl apply -f ${testPodYaml}")
    machine.succeed("k3s kubectl wait --for 'condition=Ready' pod/test")
    machine.succeed("k3s kubectl delete -f ${testPodYaml}")
+9
nixos/tests/k3s/default.nix
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}:
+{
+  # Run a single node k3s cluster and verify a pod can run
+  single-node = import ./single-node.nix { inherit system pkgs; };
+  # Run a multi-node k3s cluster and verify pod networking works across nodes
+  multi-node = import ./multi-node.nix { inherit system pkgs; };
+}
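Splitting the old k3s.nix test into a k3s/ directory also requires the entry in nixos/tests/all-tests.nix to point at the directory rather than the single file. That change is outside this excerpt; a minimal sketch of the wiring, assuming the usual handleTest helper:

    # nixos/tests/all-tests.nix (sketch, not part of this diff)
    k3s = handleTest ./k3s {};

With that in place, each test should be buildable individually, e.g. nix-build -A nixosTests.k3s.multi-node from the nixpkgs root.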
+137
nixos/tests/k3s/multi-node.nix
+import ../make-test-python.nix ({ pkgs, ... }:
+  let
+    imageEnv = pkgs.buildEnv {
+      name = "k3s-pause-image-env";
+      paths = with pkgs; [ tini bashInteractive coreutils socat ];
+    };
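+    # k3s normally pulls its pause (sandbox) image from a registry; build a local
+    # stand-in so the test needs no network access (see --pause-image below).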
+    pauseImage = pkgs.dockerTools.streamLayeredImage {
+      name = "test.local/pause";
+      tag = "local";
+      contents = imageEnv;
+      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+    };
+    # A DaemonSet that responds with 'server' on port 8000
+    networkTestDaemonset = pkgs.writeText "test.yml" ''
+      apiVersion: apps/v1
+      kind: DaemonSet
+      metadata:
+        name: test
+        labels:
+          name: test
+      spec:
+        selector:
+          matchLabels:
+            name: test
+        template:
+          metadata:
+            labels:
+              name: test
+          spec:
+            containers:
+            - name: test
+              image: test.local/pause:local
+              imagePullPolicy: Never
+              resources:
+                limits:
+                  memory: 20Mi
+              command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo server"]
+    '';
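+    # Shared cluster join token; both nodes reference it via services.k3s.tokenFile.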
+    tokenFile = pkgs.writeText "token" "p@s$w0rd";
+  in
+  {
+    name = "k3s-multi-node";
+
+    nodes = {
+      server = { pkgs, ... }: {
+        environment.systemPackages = with pkgs; [ gzip jq ];
+        # k3s uses enough resources that the default VM fails.
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.k3s = {
+          inherit tokenFile;
+          enable = true;
+          role = "server";
+          package = pkgs.k3s;
+          extraFlags = "--no-deploy coredns,servicelb,traefik,local-storage,metrics-server --pause-image test.local/pause:local --node-ip 192.168.1.1";
+        };
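+        # 6443/tcp is the Kubernetes API server; 8472/udp carries flannel's
+        # VXLAN overlay traffic between the nodes.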
+        networking.firewall.allowedTCPPorts = [ 6443 ];
+        networking.firewall.allowedUDPPorts = [ 8472 ];
+        networking.firewall.trustedInterfaces = [ "flannel.1" ];
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.1";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+      };
+
+      agent = { pkgs, ... }: {
+        virtualisation.memorySize = 1024;
+        virtualisation.diskSize = 2048;
+        services.k3s = {
+          inherit tokenFile;
+          enable = true;
+          role = "agent";
+          serverAddr = "https://192.168.1.1:6443";
+          extraFlags = "--pause-image test.local/pause:local --node-ip 192.168.1.2";
+        };
+        networking.firewall.allowedTCPPorts = [ 6443 ];
+        networking.firewall.allowedUDPPorts = [ 8472 ];
+        networking.firewall.trustedInterfaces = [ "flannel.1" ];
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.2";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.2"; prefixLength = 24; }
+        ];
+      };
+    };
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ euank ];
+    };
+
+    testScript = ''
+      start_all()
+      machines = [server, agent]
+      for m in machines:
+          m.wait_for_unit("k3s")
+
+      # Wait for the agent to show up
+      server.wait_until_succeeds("k3s kubectl get node agent")
+
+      for m in machines:
+          m.succeed("k3s check-config")
+          m.succeed(
+              "${pauseImage} | k3s ctr image import -"
+          )
+
+      server.succeed("k3s kubectl cluster-info")
+      # Also wait for the default service account to show up; it takes a moment
+      server.wait_until_succeeds("k3s kubectl get serviceaccount default")
+
+      # Now create a pod on each node via a daemonset and verify they can talk to each other.
+      server.succeed("k3s kubectl apply -f ${networkTestDaemonset}")
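+      # The daemonset is ready once it reports one ready pod per node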
+      server.wait_until_succeeds(f'[ "$(k3s kubectl get ds test -o json | jq .status.numberReady)" -eq {len(machines)} ]')
+
+      # Get pod IPs
+      pods = server.succeed("k3s kubectl get po -o json | jq '.items[].metadata.name' -r").splitlines()
+      pod_ips = [server.succeed(f"k3s kubectl get po {name} -o json | jq '.status.podIP' -cr").strip() for name in pods]
+
+      # Verify each node can ping each pod IP
+      for pod_ip in pod_ips:
+          server.succeed(f"ping -c 1 {pod_ip}")
+          agent.succeed(f"ping -c 1 {pod_ip}")
+
+      # Verify the pods can talk to each other
+      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -")
+      assert resp.strip() == "server"
+      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -")
+      assert resp.strip() == "server"
+
+      # Cleanup
+      server.succeed("k3s kubectl delete -f ${networkTestDaemonset}")
+
+      for m in machines:
+          m.shutdown()
+    '';
+  })
+2-2
pkgs/applications/editors/greenfoot/default.nix
···

stdenv.mkDerivation rec {
  pname = "greenfoot";
-  version = "3.7.0";
+  version = "3.7.1";
  src = fetchurl {
    # We use the deb here. First instinct might be to go for the "generic" JAR
    # download, but that is actually a graphical installer that is much harder
    # to unpack than the deb.
    url = "https://www.greenfoot.org/download/files/Greenfoot-linux-${builtins.replaceStrings ["."] [""] version}.deb";
-    sha256 = "sha256-K9faU3ZarcR4g8riHpoZYVH0sXtueqfm3Fo+sZAHJA8=";
+    sha256 = "sha256-wGgKDsA/2luw+Nzs9dWb/HRHMx/0S0CFfoI53OCzxug=";
  };

  nativeBuildInputs = [ makeWrapper ];
···

# Version of Pulumi from
# https://www.pulumi.com/docs/get-started/install/versions/
-VERSION="3.35.2"
+VERSION="3.37.2"

# An array of plugin names. The respective repository inside Pulumi's
# GitHub organization is called pulumi-$name by convention.