···8081### Usage with nix-shell {#ssec-dart-applications-nix-shell}
820083As `buildDartApplication` provides dependencies instead of `pub get`, Dart needs to be explicitly told where to find them.
8485Run the following commands in the source directory to configure Dart appropriately.
···103 pname = "firmware-updater";
104 version = "unstable-2023-04-30";
105000106 src = fetchFromGitHub {
107 owner = "canonical";
108 repo = "firmware-updater";
···117118### Usage with nix-shell {#ssec-dart-flutter-nix-shell}
119120-See the [Dart documentation](#ssec-dart-applications-nix-shell) for nix-shell instructions.
00000000000
···8081### Usage with nix-shell {#ssec-dart-applications-nix-shell}
8283+#### Using dependencies from the Nix store {#ssec-dart-applications-nix-shell-deps}
84+85As `buildDartApplication` provides dependencies instead of `pub get`, Dart needs to be explicitly told where to find them.
8687Run the following commands in the source directory to configure Dart appropriately.
···105 pname = "firmware-updater";
106 version = "unstable-2023-04-30";
107108+ # To build for the Web, use the targetFlutterPlatform argument.
109+ # targetFlutterPlatform = "web";
110+111 src = fetchFromGitHub {
112 owner = "canonical";
113 repo = "firmware-updater";
···122123### Usage with nix-shell {#ssec-dart-flutter-nix-shell}
124125+Flutter-specific `nix-shell` usage notes are included here. See the [Dart documentation](#ssec-dart-applications-nix-shell) for general `nix-shell` instructions.
126+127+#### Entering the shell {#ssec-dart-flutter-nix-shell-enter}
128+129+By default, dependencies for only the `targetFlutterPlatform` are available in the
130+build environment. This is useful for keeping closures small, but can be problematic
131+during development. It's common, for example, to build Web apps for Linux during
132+development to take advantage of native features such as stateful hot reload.
133+134+To enter a shell with all the usual target platforms available, use the `multiShell` attribute.
135+136+e.g. `nix-shell '<nixpkgs>' -A fluffychat-web.multiShell`.
···37- [Anki Sync Server](https://docs.ankiweb.net/sync-server.html), the official sync server built into recent versions of Anki. Available as [services.anki-sync-server](#opt-services.anki-sync-server.enable).
38The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been marked deprecated and will be dropped after 24.05 due to lack of maintenance of the anki-sync-server software.
390040- [ping_exporter](https://github.com/czerwonk/ping_exporter), a Prometheus exporter for ICMP echo requests. Available as [services.prometheus.exporters.ping](#opt-services.prometheus.exporters.ping.enable).
4142- [Clevis](https://github.com/latchset/clevis), a pluggable framework for automated decryption, used to unlock encrypted devices in initrd. Available as [boot.initrd.clevis.enable](#opt-boot.initrd.clevis.enable).
···37- [Anki Sync Server](https://docs.ankiweb.net/sync-server.html), the official sync server built into recent versions of Anki. Available as [services.anki-sync-server](#opt-services.anki-sync-server.enable).
38The pre-existing [services.ankisyncd](#opt-services.ankisyncd.enable) has been marked deprecated and will be dropped after 24.05 due to lack of maintenance of the anki-sync-server software.
3940+- [Suwayomi Server](https://github.com/Suwayomi/Suwayomi-Server), a free and open source manga reader server that runs extensions built for [Tachiyomi](https://tachiyomi.org). Available as [services.suwayomi-server](#opt-services.suwayomi-server.enable).
41+42- [ping_exporter](https://github.com/czerwonk/ping_exporter), a Prometheus exporter for ICMP echo requests. Available as [services.prometheus.exporters.ping](#opt-services.prometheus.exporters.ping.enable).
4344- [Clevis](https://github.com/latchset/clevis), a pluggable framework for automated decryption, used to unlock encrypted devices in initrd. Available as [boot.initrd.clevis.enable](#opt-boot.initrd.clevis.enable).
···4 name = "ayatana-indicators";
56 meta = {
7- maintainers = with lib.maintainers; [ OPNA2608 ];
8 };
910 nodes.machine = { config, ... }: {
···28 enable = true;
29 packages = with pkgs; [
30 ayatana-indicator-messages
31- ];
0032 };
3334- # Services needed by some indicators
035 services.accounts-daemon.enable = true; # messages
00000036 };
3738 # TODO session indicator starts up in a semi-broken state, but works fine after a restart. maybe being started before graphical session is truly up & ready?
39 testScript = { nodes, ... }: let
40- runCommandPerIndicatorService = command: lib.strings.concatMapStringsSep "\n" command nodes.machine.systemd.user.targets."ayatana-indicators".wants;
00000000041 in ''
42 start_all()
43 machine.wait_for_x()
···50 machine.sleep(10)
5152 # Now check if all indicators were brought up successfully, and kill them for later
53- '' + (runCommandPerIndicatorService (service: let serviceExec = builtins.replaceStrings [ "." ] [ "-" ] service; in ''
54 machine.succeed("pgrep -f ${serviceExec}")
55 machine.succeed("pkill -f ${serviceExec}")
56 '')) + ''
···65 machine.sleep(10)
6667 # Now check if all indicator services were brought up successfully
68- '' + runCommandPerIndicatorService (service: ''
69 machine.wait_for_unit("${service}", "${user}")
70 '');
71})
···4 name = "ayatana-indicators";
56 meta = {
7+ maintainers = lib.teams.lomiri.members;
8 };
910 nodes.machine = { config, ... }: {
···28 enable = true;
29 packages = with pkgs; [
30 ayatana-indicator-messages
31+ ] ++ (with pkgs.lomiri; [
32+ lomiri-indicator-network
33+ ]);
34 };
3536+ # Setup needed by some indicators
37+38 services.accounts-daemon.enable = true; # messages
39+40+ # Lomiri-ish setup for Lomiri indicators
41+ # TODO move into a Lomiri module, once the package set is far enough for the DE to start
42+43+ networking.networkmanager.enable = true; # lomiri-network-indicator
44+ # TODO potentially urfkill for lomiri-network-indicator?
45 };
4647 # TODO session indicator starts up in a semi-broken state, but works fine after a restart. maybe being started before graphical session is truly up & ready?
48 testScript = { nodes, ... }: let
49+ runCommandOverServiceList = list: command:
50+ lib.strings.concatMapStringsSep "\n" command list;
51+52+ runCommandOverAyatanaIndicators = runCommandOverServiceList
53+ (builtins.filter
54+ (service: !(lib.strings.hasPrefix "lomiri" service || lib.strings.hasPrefix "telephony-service" service))
55+ nodes.machine.systemd.user.targets."ayatana-indicators".wants);
56+57+ runCommandOverAllIndicators = runCommandOverServiceList
58+ nodes.machine.systemd.user.targets."ayatana-indicators".wants;
59 in ''
60 start_all()
61 machine.wait_for_x()
···68 machine.sleep(10)
6970 # Now check if all indicators were brought up successfully, and kill them for later
71+ '' + (runCommandOverAyatanaIndicators (service: let serviceExec = builtins.replaceStrings [ "." ] [ "-" ] service; in ''
72 machine.succeed("pgrep -f ${serviceExec}")
73 machine.succeed("pkill -f ${serviceExec}")
74 '')) + ''
···83 machine.sleep(10)
8485 # Now check if all indicator services were brought up successfully
86+ '' + runCommandOverAllIndicators (service: ''
87 machine.wait_for_unit("${service}", "${user}")
88 '');
89})
+90-19
nixos/tests/haproxy.nix
···1-import ./make-test-python.nix ({ pkgs, ...}: {
2 name = "haproxy";
3 nodes = {
4- machine = { ... }: {
5- services.haproxy = {
6 enable = true;
7 config = ''
0008 defaults
09 timeout connect 10s
00000001011 backend http_server
12- mode http
13- server httpd [::1]:8000
1415 frontend http
16- bind *:80
17- mode http
000018 http-request use-service prometheus-exporter if { path /metrics }
19 use_backend http_server
00000020 '';
21 };
22 services.httpd = {
···30 }];
31 };
32 };
0000033 };
34 };
35 testScript = ''
00000000000000000000000036 start_all()
37- machine.wait_for_unit("multi-user.target")
38- machine.wait_for_unit("haproxy.service")
39- machine.wait_for_unit("httpd.service")
40- assert "We are all good!" in machine.succeed("curl -fk http://localhost:80/index.txt")
41- assert "haproxy_process_pool_allocated_bytes" in machine.succeed(
42- "curl -fk http://localhost:80/metrics"
43- )
0000000000000000000000004445 with subtest("reload"):
46- machine.succeed("systemctl reload haproxy")
47 # wait some time to ensure the following request hits the reloaded haproxy
48- machine.sleep(5)
49- assert "We are all good!" in machine.succeed(
50- "curl -fk http://localhost:80/index.txt"
51- )
52 '';
53})
···1+import ./make-test-python.nix ({ lib, pkgs, ...}: {
2 name = "haproxy";
3 nodes = {
4+ server = { ... }: {
5+ services.haproxy = {
6 enable = true;
7 config = ''
8+ global
9+ limited-quic
10+11 defaults
12+ mode http
13 timeout connect 10s
14+ timeout client 10s
15+ timeout server 10s
16+17+ log /dev/log local0 debug err
18+ option logasap
19+ option httplog
20+ option httpslog
2122 backend http_server
23+ server httpd [::1]:8000 alpn http/1.1
02425 frontend http
26+ bind :80
27+ bind :443 ssl strict-sni crt /etc/ssl/fullchain.pem alpn h2,http/1.1
28+ bind quic4@:443 ssl strict-sni crt /etc/ssl/fullchain.pem alpn h3 allow-0rtt
29+30+ http-after-response add-header alt-svc 'h3=":443"; ma=60' if { ssl_fc }
31+32 http-request use-service prometheus-exporter if { path /metrics }
33 use_backend http_server
34+35+ frontend http-cert-auth
36+ bind :8443 ssl strict-sni crt /etc/ssl/fullchain.pem verify required ca-file /etc/ssl/cacert.crt
37+ bind quic4@:8443 ssl strict-sni crt /etc/ssl/fullchain.pem verify required ca-file /etc/ssl/cacert.crt alpn h3
38+39+ use_backend http_server
40 '';
41 };
42 services.httpd = {
···50 }];
51 };
52 };
53+ networking.firewall.allowedTCPPorts = [ 80 443 8443 ];
54+ networking.firewall.allowedUDPPorts = [ 443 8443 ];
55+ };
56+ client = { ... }: {
57+ environment.systemPackages = [ pkgs.curlHTTP3 ];
58 };
59 };
60 testScript = ''
61+ # Helpers
62+ def cmd(command):
63+ print(f"+{command}")
64+ r = os.system(command)
65+ if r != 0:
66+ raise Exception(f"Command {command} failed with exit code {r}")
67+68+ def openssl(command):
69+ cmd(f"${pkgs.openssl}/bin/openssl {command}")
70+71+ # Generate CA.
72+ openssl("req -new -newkey rsa:4096 -nodes -x509 -days 7 -subj '/C=ZZ/ST=Cloud/L=Unspecified/O=NixOS/OU=Tests/CN=CA Certificate' -keyout cacert.key -out cacert.crt")
73+74+ # Generate and sign Server.
75+ openssl("req -newkey rsa:4096 -nodes -subj '/CN=server/OU=Tests/O=NixOS' -keyout server.key -out server.csr")
76+ openssl("x509 -req -in server.csr -out server.crt -CA cacert.crt -CAkey cacert.key -days 7")
77+ cmd("cat server.crt server.key > fullchain.pem")
78+79+ # Generate and sign Client.
80+ openssl("req -newkey rsa:4096 -nodes -subj '/CN=client/OU=Tests/O=NixOS' -keyout client.key -out client.csr")
81+ openssl("x509 -req -in client.csr -out client.crt -CA cacert.crt -CAkey cacert.key -days 7")
82+ cmd("cat client.crt client.key > client.pem")
83+84+ # Start the actual test.
85 start_all()
86+ server.copy_from_host("fullchain.pem", "/etc/ssl/fullchain.pem")
87+ server.copy_from_host("cacert.crt", "/etc/ssl/cacert.crt")
88+ server.succeed("chmod 0644 /etc/ssl/fullchain.pem /etc/ssl/cacert.crt")
89+90+ client.copy_from_host("cacert.crt", "/etc/ssl/cacert.crt")
91+ client.copy_from_host("client.pem", "/root/client.pem")
92+93+ server.wait_for_unit("multi-user.target")
94+ server.wait_for_unit("haproxy.service")
95+ server.wait_for_unit("httpd.service")
96+97+ assert "We are all good!" in client.succeed("curl -f http://server/index.txt")
98+ assert "haproxy_process_pool_allocated_bytes" in client.succeed("curl -f http://server/metrics")
99+100+ with subtest("https"):
101+ assert "We are all good!" in client.succeed("curl -f --cacert /etc/ssl/cacert.crt https://server/index.txt")
102+103+ with subtest("https-cert-auth"):
104+ # Client must succeed in authenticating with the right certificate.
105+ assert "We are all good!" in client.succeed("curl -f --cacert /etc/ssl/cacert.crt --cert-type pem --cert /root/client.pem https://server:8443/index.txt")
106+ # Client must fail without certificate.
107+ client.fail("curl --cacert /etc/ssl/cacert.crt https://server:8443/index.txt")
108+109+ with subtest("h3"):
110+ assert "We are all good!" in client.succeed("curl -f --http3-only --cacert /etc/ssl/cacert.crt https://server/index.txt")
111+112+ with subtest("h3-cert-auth"):
113+ # Client must succeed in authenticating with the right certificate.
114+ assert "We are all good!" in client.succeed("curl -f --http3-only --cacert /etc/ssl/cacert.crt --cert-type pem --cert /root/client.pem https://server:8443/index.txt")
115+ # Client must fail without certificate.
116+ client.fail("curl -f --http3-only --cacert /etc/ssl/cacert.crt https://server:8443/index.txt")
117118 with subtest("reload"):
119+ server.succeed("systemctl reload haproxy")
120 # wait some time to ensure the following request hits the reloaded haproxy
121+ server.sleep(5)
122+ assert "We are all good!" in client.succeed("curl -f http://server/index.txt")
00123 '';
124})
···1011buildGoModule rec {
12 pname = "fastly";
13- version = "10.7.0";
1415 src = fetchFromGitHub {
16 owner = "fastly";
17 repo = "cli";
18 rev = "refs/tags/v${version}";
19- hash = "sha256-KqFBsSoiKzvbSG5XanlFcU8NkveksnEbfqNuPeWEb48=";
20 # The git commit is part of the `fastly version` original output;
21 # leave that output the same in nixpkgs. Use the `.git` directory
22 # to retrieve the commit SHA, and remove the directory afterwards,
···33 "cmd/fastly"
34 ];
3536- vendorHash = "sha256-Mh737emdQkIoNOAkaTafCoMQnLqXIGMKX6X5ClsmMzc=";
3738 nativeBuildInputs = [
39 installShellFiles
···1011buildGoModule rec {
12 pname = "fastly";
13+ version = "10.8.0";
1415 src = fetchFromGitHub {
16 owner = "fastly";
17 repo = "cli";
18 rev = "refs/tags/v${version}";
19+ hash = "sha256-XlfTtA4jYFrs1W8pyulkqbhrRt8vS+oPB/g9/tIW8Ws=";
20 # The git commit is part of the `fastly version` original output;
21 # leave that output the same in nixpkgs. Use the `.git` directory
22 # to retrieve the commit SHA, and remove the directory afterwards,
···33 "cmd/fastly"
34 ];
3536+ vendorHash = "sha256-sN6kJspIG3XKW71sTjINE+hoWHNbd8ZmVEXNcvuvThg=";
3738 nativeBuildInputs = [
39 installShellFiles