nixos/tests/k3s/single-node.nix
···
-import ./make-test-python.nix ({ pkgs, ... }:
  let
    imageEnv = pkgs.buildEnv {
      name = "k3s-pause-image-env";
···
      contents = imageEnv;
      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
    };
-   # Don't use the default service account because there's a race where it may
-   # not be created yet; make our own instead.
    testPodYaml = pkgs.writeText "test.yml" ''
      apiVersion: v1
-     kind: ServiceAccount
-     metadata:
-       name: test
-     ---
-     apiVersion: v1
      kind: Pod
      metadata:
        name: test
      spec:
-       serviceAccountName: test
        containers:
        - name: test
          image: test.local/pause:local
···
      machine.wait_for_unit("k3s")
      machine.succeed("k3s kubectl cluster-info")
      machine.fail("sudo -u noprivs k3s kubectl cluster-info")
-     # FIXME: this fails with the current nixos kernel config; once it passes, we should uncomment it
-     # machine.succeed("k3s check-config")

      machine.succeed(
          "${pauseImage} | k3s ctr image import -"
      )

      machine.succeed("k3s kubectl apply -f ${testPodYaml}")
      machine.succeed("k3s kubectl wait --for 'condition=Ready' pod/test")
      machine.succeed("k3s kubectl delete -f ${testPodYaml}")
+import ../make-test-python.nix ({ pkgs, ... }:
  let
    imageEnv = pkgs.buildEnv {
      name = "k3s-pause-image-env";
···
      contents = imageEnv;
      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
    };
    testPodYaml = pkgs.writeText "test.yml" ''
      apiVersion: v1
      kind: Pod
      metadata:
        name: test
      spec:
        containers:
        - name: test
          image: test.local/pause:local
···
      machine.wait_for_unit("k3s")
      machine.succeed("k3s kubectl cluster-info")
      machine.fail("sudo -u noprivs k3s kubectl cluster-info")
+     machine.succeed("k3s check-config")

      machine.succeed(
          "${pauseImage} | k3s ctr image import -"
      )

+     # Also wait for our service account to show up; it takes a sec
+     machine.wait_until_succeeds("k3s kubectl get serviceaccount default")
      machine.succeed("k3s kubectl apply -f ${testPodYaml}")
      machine.succeed("k3s kubectl wait --for 'condition=Ready' pod/test")
      machine.succeed("k3s kubectl delete -f ${testPodYaml}")
+9
nixos/tests/k3s/default.nix
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}:
+{
+  # Run a single node k3s cluster and verify a pod can run
+  single-node = import ./single-node.nix { inherit system pkgs; };
+  # Run a multi-node k3s cluster and verify pod networking works across nodes
+  multi-node = import ./multi-node.nix { inherit system pkgs; };
+}
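
default.nix threads the same system and pkgs into both tests so they evaluate against a single nixpkgs instance. A hypothetical registration in nixos/tests/all-tests.nix (not part of this diff) would follow the tree's usual pattern:

    k3s = handleTest ./k3s {};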
nixos/tests/k3s/multi-node.nix
+import ../make-test-python.nix ({ pkgs, ... }:
+  let
+    imageEnv = pkgs.buildEnv {
+      name = "k3s-pause-image-env";
+      paths = with pkgs; [ tini bashInteractive coreutils socat ];
+    };
+    pauseImage = pkgs.dockerTools.streamLayeredImage {
+      name = "test.local/pause";
+      tag = "local";
+      contents = imageEnv;
+      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+    };
+    # A daemonset that responds 'server' on port 8000
+    networkTestDaemonset = pkgs.writeText "test.yml" ''
+      apiVersion: apps/v1
+      kind: DaemonSet
+      metadata:
+        name: test
+        labels:
+          name: test
+      spec:
+        selector:
+          matchLabels:
+            name: test
+        template:
+          metadata:
+            labels:
+              name: test
+          spec:
+            containers:
+            - name: test
+              image: test.local/pause:local
+              imagePullPolicy: Never
+              resources:
+                limits:
+                  memory: 20Mi
+              command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo server"]
+    '';
+    tokenFile = pkgs.writeText "token" "p@s$w0rd";
+  in
+  {
+    name = "k3s-multi-node";
+
+    nodes = {
+      server = { pkgs, ... }: {
+        environment.systemPackages = with pkgs; [ gzip jq ];
+        # k3s uses enough resources that the default VM fails.
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.k3s = {
+          inherit tokenFile;
+          enable = true;
+          role = "server";
+          package = pkgs.k3s;
+          extraFlags = "--no-deploy coredns,servicelb,traefik,local-storage,metrics-server --pause-image test.local/pause:local --node-ip 192.168.1.1";
+        };
+        networking.firewall.allowedTCPPorts = [ 6443 ];
+        networking.firewall.allowedUDPPorts = [ 8472 ];
+        networking.firewall.trustedInterfaces = [ "flannel.1" ];
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.1";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+      };
+
+      agent = { pkgs, ... }: {
+        virtualisation.memorySize = 1024;
+        virtualisation.diskSize = 2048;
+        services.k3s = {
+          inherit tokenFile;
+          enable = true;
+          role = "agent";
+          serverAddr = "https://192.168.1.1:6443";
+          extraFlags = "--pause-image test.local/pause:local --node-ip 192.168.1.2";
+        };
+        networking.firewall.allowedTCPPorts = [ 6443 ];
+        networking.firewall.allowedUDPPorts = [ 8472 ];
+        networking.firewall.trustedInterfaces = [ "flannel.1" ];
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.2";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.2"; prefixLength = 24; }
+        ];
+      };
+    };
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ euank ];
+    };
+
+    testScript = ''
+      start_all()
+      machines = [server, agent]
+      for m in machines:
+          m.wait_for_unit("k3s")
+
+      # wait for the agent to show up
+      server.wait_until_succeeds("k3s kubectl get node agent")
+
+      for m in machines:
+          m.succeed("k3s check-config")
+          m.succeed(
+              "${pauseImage} | k3s ctr image import -"
+          )
+
+      server.succeed("k3s kubectl cluster-info")
+      # Also wait for our service account to show up; it takes a sec
+      server.wait_until_succeeds("k3s kubectl get serviceaccount default")
+
+      # Now create a pod on each node via a daemonset and verify they can talk to each other.
+      server.succeed("k3s kubectl apply -f ${networkTestDaemonset}")
+      server.wait_until_succeeds(f'[ "$(k3s kubectl get ds test -o json | jq .status.numberReady)" -eq {len(machines)} ]')
+
+      # Get pod IPs
+      pods = server.succeed("k3s kubectl get po -o json | jq '.items[].metadata.name' -r").splitlines()
+      pod_ips = [server.succeed(f"k3s kubectl get po {name} -o json | jq '.status.podIP' -cr").strip() for name in pods]
+
+      # Verify each node can ping each pod IP
+      for pod_ip in pod_ips:
+          server.succeed(f"ping -c 1 {pod_ip}")
+          agent.succeed(f"ping -c 1 {pod_ip}")
+
+      # Verify the pods can talk to each other
+      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -")
+      assert resp.strip() == "server"
+      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -")
+      assert resp.strip() == "server"
+
+      # Cleanup
+      server.succeed("k3s kubectl delete -f ${networkTestDaemonset}")
+
+      for m in machines:
+          m.shutdown()
+    '';
+  })
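
The firewall rules above mirror k3s defaults: 6443/TCP is the Kubernetes API server the agent joins through, and 8472/UDP is flannel's VXLAN backend, which carries the cross-node pod traffic the daemonset check exercises. A rough manual equivalent of that check from the server node (pod name and IP below are placeholders for values obtained the same way the test script gets them):

    $ k3s kubectl get po -o wide                               # note a pod name and its IP
    $ k3s kubectl exec <pod-name> -- socat TCP:<pod-ip>:8000 -
    server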
+2-2
pkgs/applications/editors/greenfoot/default.nix
···
stdenv.mkDerivation rec {
  pname = "greenfoot";
-  version = "3.7.0";
  src = fetchurl {
    # We use the deb here. First instinct might be to go for the "generic" JAR
    # download, but that is actually a graphical installer that is much harder
    # to unpack than the deb.
    url = "https://www.greenfoot.org/download/files/Greenfoot-linux-${builtins.replaceStrings ["."] [""] version}.deb";
-   sha256 = "sha256-K9faU3ZarcR4g8riHpoZYVH0sXtueqfm3Fo+sZAHJA8=";
  };

  nativeBuildInputs = [ makeWrapper ];
···
stdenv.mkDerivation rec {
  pname = "greenfoot";
+  version = "3.7.1";
  src = fetchurl {
    # We use the deb here. First instinct might be to go for the "generic" JAR
    # download, but that is actually a graphical installer that is much harder
    # to unpack than the deb.
    url = "https://www.greenfoot.org/download/files/Greenfoot-linux-${builtins.replaceStrings ["."] [""] version}.deb";
+   sha256 = "sha256-wGgKDsA/2luw+Nzs9dWb/HRHMx/0S0CFfoI53OCzxug=";
  };

  nativeBuildInputs = [ makeWrapper ];
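
For a version bump like this the source hash has to be refetched as well. One common way to do it, sketched here rather than taken from this PR (the URL's final component comes from replaceStrings stripping the dots out of the version, and `nix hash to-sri` needs a reasonably recent Nix):

    $ nix-prefetch-url https://www.greenfoot.org/download/files/Greenfoot-linux-371.deb
    $ nix hash to-sri --type sha256 <base32-hash-from-above>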
···

# Version of Pulumi from
# https://www.pulumi.com/docs/get-started/install/versions/
-VERSION="3.35.2"

# An array of plugin names. The respective repository inside Pulumi's
# Github organization is called pulumi-$name by convention.
···

# Version of Pulumi from
# https://www.pulumi.com/docs/get-started/install/versions/
+VERSION="3.37.2"

# An array of plugin names. The respective repository inside Pulumi's
# Github organization is called pulumi-$name by convention.
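
That comment is the contract the rest of the update script relies on: each plugin name expands to a repository URL under the pulumi GitHub organization. A tiny bash illustration with a hypothetical plugin name (not an entry taken from this script):

    name="aws"   # hypothetical plugin name, for illustration only
    echo "https://github.com/pulumi/pulumi-${name}"
    # prints: https://github.com/pulumi/pulumi-aws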
pkgs/top-level/aliases.nix
···
  tahoelafs = throw "'tahoelafs' has been renamed to/replaced by 'tahoe-lafs'"; # Converted to throw 2022-02-22
  tangogps = foxtrotgps; # Added 2020-01-26
  tdm = throw "tdm has been removed because nobody can figure out how to fix OpenAL integration. Use precompiled binary and `steam-run` instead";
  teleconsole = throw "teleconsole is archived by upstream"; # Added 2022-04-05
  telepathy-qt = throw "telepathy-qt no longer supports Qt 4. Please use libsForQt5.telepathy instead"; # Added 2020-07-02
···
  tahoelafs = throw "'tahoelafs' has been renamed to/replaced by 'tahoe-lafs'"; # Converted to throw 2022-02-22
  tangogps = foxtrotgps; # Added 2020-01-26
+ taplo-cli = taplo; # Added 2022-07-30
+ taplo-lsp = taplo; # Added 2022-07-30
  tdm = throw "tdm has been removed because nobody can figure out how to fix OpenAL integration. Use precompiled binary and `steam-run` instead";
  teleconsole = throw "teleconsole is archived by upstream"; # Added 2022-04-05
  telepathy-qt = throw "telepathy-qt no longer supports Qt 4. Please use libsForQt5.telepathy instead"; # Added 2020-07-02
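
Unlike the neighboring throw entries, which abort evaluation with an explanatory message, plain aliases like these evaluate straight to the replacement package, so existing references to the old attribute names keep working (provided allowAliases is enabled, which is the default). A quick check from a nixpkgs checkout:

    $ nix-instantiate --eval -A taplo-cli.name
    $ nix-instantiate --eval -A taplo-lsp.name   # both print the taplo package's name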