declarative containers: additional veths

With these changes, a container can have more than one veth pair. This allows, for example, having LAN and DMZ as bridges on the host and adding dedicated containers for proxies, an IPv4 firewall, and an IPv6 firewall. Or having one bridge for normal WAN, one for administration, and one for customer-internal communication — so that web-server containers can be reached from outside via HTTP, from the management network via SSH, and can talk to their database via the customer network.

The scripts that set up the containers are now rendered once per container instead of once from a single shared template, so each script can contain per-container code that configures its extra veth interfaces. The default template, without extra-veth support, is still rendered for imperative containers.

A test is also added to verify that extra veths can be placed into host bridges or be reached via routing.

+384 -179
+280 -179
nixos/modules/virtualisation/containers.nix
··· 6 6 7 7 # The container's init script, a small wrapper around the regular 8 8 # NixOS stage-2 init script. 9 - containerInit = pkgs.writeScript "container-init" 9 + containerInit = (cfg: 10 + let 11 + renderExtraVeth = (name: cfg: 12 + '' 13 + echo "Bringing ${name} up" 14 + ip link set dev ${name} up 15 + ${optionalString (cfg . "localAddress" or null != null) '' 16 + echo "Setting ip for ${name}" 17 + ip addr add ${cfg . "localAddress"} dev ${name} 18 + ''} 19 + ${optionalString (cfg . "localAddress6" or null != null) '' 20 + echo "Setting ip6 for ${name}" 21 + ip -6 addr add ${cfg . "localAddress6"} dev ${name} 22 + ''} 23 + ${optionalString (cfg . "hostAddress" or null != null) '' 24 + echo "Setting route to host for ${name}" 25 + ip route add ${cfg . "hostAddress"} dev ${name} 26 + ''} 27 + ${optionalString (cfg . "hostAddress6" or null != null) '' 28 + echo "Setting route6 to host for ${name}" 29 + ip -6 route add ${cfg . "hostAddress6"} dev ${name} 30 + ''} 31 + '' 32 + ); 33 + in 34 + pkgs.writeScript "container-init" 35 + '' 36 + #! ${pkgs.stdenv.shell} -e 37 + 38 + # Initialise the container side of the veth pair. 39 + if [ "$PRIVATE_NETWORK" = 1 ]; then 40 + 41 + ip link set host0 name eth0 42 + ip link set dev eth0 up 43 + 44 + if [ -n "$LOCAL_ADDRESS" ]; then 45 + ip addr add $LOCAL_ADDRESS dev eth0 46 + fi 47 + if [ -n "$LOCAL_ADDRESS6" ]; then 48 + ip -6 addr add $LOCAL_ADDRESS6 dev eth0 49 + fi 50 + if [ -n "$HOST_ADDRESS" ]; then 51 + ip route add $HOST_ADDRESS dev eth0 52 + ip route add default via $HOST_ADDRESS 53 + fi 54 + if [ -n "$HOST_ADDRESS6" ]; then 55 + ip -6 route add $HOST_ADDRESS6 dev eth0 56 + ip -6 route add default via $HOST_ADDRESS6 57 + fi 58 + 59 + ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg . "extraVeths" or {})} 60 + ip a 61 + ip r 62 + fi 63 + 64 + # Start the regular stage 1 script. 
65 + exec "$1" 66 + '' 67 + ); 68 + 69 + nspawnExtraVethArgs = (name: cfg: "--network-veth-extra=${name}"); 70 + startScript = (cfg: 10 71 '' 11 - #! ${pkgs.stdenv.shell} -e 72 + mkdir -p -m 0755 "$root/etc" "$root/var/lib" 73 + mkdir -p -m 0700 "$root/var/lib/private" "$root/root" /run/containers 74 + if ! [ -e "$root/etc/os-release" ]; then 75 + touch "$root/etc/os-release" 76 + fi 77 + 78 + if ! [ -e "$root/etc/machine-id" ]; then 79 + touch "$root/etc/machine-id" 80 + fi 81 + 82 + mkdir -p -m 0755 \ 83 + "/nix/var/nix/profiles/per-container/$INSTANCE" \ 84 + "/nix/var/nix/gcroots/per-container/$INSTANCE" 85 + 86 + cp --remove-destination /etc/resolv.conf "$root/etc/resolv.conf" 12 87 13 - # Initialise the container side of the veth pair. 14 88 if [ "$PRIVATE_NETWORK" = 1 ]; then 89 + extraFlags+=" --network-veth" 90 + if [ -n "$HOST_BRIDGE" ]; then 91 + extraFlags+=" --network-bridge=$HOST_BRIDGE" 92 + fi 93 + fi 15 94 16 - ip link set host0 name eth0 17 - ip link set dev eth0 up 95 + ${if cfg . "extraVeths" or null != null then 96 + ''extraFlags+=" ${concatStringsSep " " (mapAttrsToList nspawnExtraVethArgs cfg . "extraVeths" or {})}"'' 97 + else 98 + ''# No extra veth pairs to create'' 99 + } 18 100 19 - if [ -n "$LOCAL_ADDRESS" ]; then 20 - ip addr add $LOCAL_ADDRESS dev eth0 21 - fi 22 - if [ -n "$LOCAL_ADDRESS6" ]; then 23 - ip -6 addr add $LOCAL_ADDRESS6 dev eth0 24 - fi 25 - if [ -n "$HOST_ADDRESS" ]; then 26 - ip route add $HOST_ADDRESS dev eth0 27 - ip route add default via $HOST_ADDRESS 28 - fi 29 - if [ -n "$HOST_ADDRESS6" ]; then 30 - ip -6 route add $HOST_ADDRESS6 dev eth0 31 - ip -6 route add default via $HOST_ADDRESS6 101 + for iface in $INTERFACES; do 102 + extraFlags+=" --network-interface=$iface" 103 + done 104 + 105 + for iface in $MACVLANS; do 106 + extraFlags+=" --network-macvlan=$iface" 107 + done 108 + 109 + # If the host is 64-bit and the container is 32-bit, add a 110 + # --personality flag. 
111 + ${optionalString (config.nixpkgs.system == "x86_64-linux") '' 112 + if [ "$(< ''${SYSTEM_PATH:-/nix/var/nix/profiles/per-container/$INSTANCE/system}/system)" = i686-linux ]; then 113 + extraFlags+=" --personality=x86" 32 114 fi 115 + ''} 116 + 117 + # Run systemd-nspawn without startup notification (we'll 118 + # wait for the container systemd to signal readiness). 119 + EXIT_ON_REBOOT=1 \ 120 + exec ${config.systemd.package}/bin/systemd-nspawn \ 121 + --keep-unit \ 122 + -M "$INSTANCE" -D "$root" $extraFlags \ 123 + $EXTRA_NSPAWN_FLAGS \ 124 + --notify-ready=yes \ 125 + --bind-ro=/nix/store \ 126 + --bind-ro=/nix/var/nix/db \ 127 + --bind-ro=/nix/var/nix/daemon-socket \ 128 + --bind="/nix/var/nix/profiles/per-container/$INSTANCE:/nix/var/nix/profiles" \ 129 + --bind="/nix/var/nix/gcroots/per-container/$INSTANCE:/nix/var/nix/gcroots" \ 130 + --setenv PRIVATE_NETWORK="$PRIVATE_NETWORK" \ 131 + --setenv HOST_BRIDGE="$HOST_BRIDGE" \ 132 + --setenv HOST_ADDRESS="$HOST_ADDRESS" \ 133 + --setenv LOCAL_ADDRESS="$LOCAL_ADDRESS" \ 134 + --setenv HOST_ADDRESS6="$HOST_ADDRESS6" \ 135 + --setenv LOCAL_ADDRESS6="$LOCAL_ADDRESS6" \ 136 + --setenv PATH="$PATH" \ 137 + ${containerInit cfg} "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/init" 138 + '' 139 + ); 140 + 141 + preStartScript = (cfg: 142 + '' 143 + # Clean up existing machined registration and interfaces. 144 + machinectl terminate "$INSTANCE" 2> /dev/null || true 145 + 146 + if [ "$PRIVATE_NETWORK" = 1 ]; then 147 + ip link del dev "ve-$INSTANCE" 2> /dev/null || true 148 + ip link del dev "vb-$INSTANCE" 2> /dev/null || true 33 149 fi 34 150 35 - # Start the regular stage 1 script. 36 - exec "$1" 37 - ''; 151 + ${concatStringsSep "\n" ( 152 + mapAttrsToList (name: cfg: 153 + ''ip link del dev ${name} 2> /dev/null || true '' 154 + ) cfg . "extraVeths" or {} 155 + )} 156 + '' 157 + ); 158 + postStartScript = (cfg: 159 + let 160 + ipcall = (cfg: ipcmd: variable: attribute: 161 + if cfg . 
attribute or null == null then 162 + '' 163 + if [ -n "${variable}" ]; then 164 + ${ipcmd} add ${variable} dev $ifaceHost 165 + fi 166 + '' 167 + else 168 + ''${ipcmd} add ${cfg . attribute} dev $ifaceHost'' 169 + ); 170 + renderExtraVeth = (name: cfg: 171 + if cfg . "hostBridge" or null != null then 172 + '' 173 + # Add ${name} to bridge ${cfg.hostBridge} 174 + ip link set dev ${name} master ${cfg.hostBridge} up 175 + '' 176 + else 177 + '' 178 + # Set IPs and routes for ${name} 179 + ${optionalString (cfg . "hostAddress" or null != null) '' 180 + ip addr add ${cfg . "hostAddress"} dev ${name} 181 + ''} 182 + ${optionalString (cfg . "hostAddress6" or null != null) '' 183 + ip -6 addr add ${cfg . "hostAddress6"} dev ${name} 184 + ''} 185 + ${optionalString (cfg . "localAddress" or null != null) '' 186 + ip route add ${cfg . "localAddress"} dev ${name} 187 + ''} 188 + ${optionalString (cfg . "localAddress6" or null != null) '' 189 + ip -6 route add ${cfg . "localAddress6"} dev ${name} 190 + ''} 191 + '' 192 + ); 193 + in 194 + '' 195 + if [ "$PRIVATE_NETWORK" = 1 ]; then 196 + if [ -z "$HOST_BRIDGE" ]; then 197 + ifaceHost=ve-$INSTANCE 198 + ip link set dev $ifaceHost up 199 + 200 + ${ipcall cfg "ip addr" "$HOST_ADDRESS" "hostAddress"} 201 + ${ipcall cfg "ip -6 addr" "$HOST_ADDRESS6" "hostAddress6"} 202 + ${ipcall cfg "ip route" "$LOCAL_ADDRESS" "localAddress"} 203 + ${ipcall cfg "ip -6 route" "$LOCAL_ADDRESS6" "localAddress6"} 204 + fi 205 + ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg . "extraVeths" or {})} 206 + fi 207 + 208 + # Get the leader PID so that we can signal it in 209 + # preStop. We can't use machinectl there because D-Bus 210 + # might be shutting down. FIXME: in systemd 219 we can 211 + # just signal systemd-nspawn to do a clean shutdown. 
212 + machinectl show "$INSTANCE" | sed 's/Leader=\(.*\)/\1/;t;d' > "/run/containers/$INSTANCE.pid" 213 + '' 214 + ); 38 215 39 216 system = config.nixpkgs.system; 40 217 ··· 73 250 74 251 mkBindFlags = bs: concatMapStrings mkBindFlag (lib.attrValues bs); 75 252 253 + networkOptions = { 254 + hostBridge = mkOption { 255 + type = types.nullOr types.string; 256 + default = null; 257 + example = "br0"; 258 + description = '' 259 + Put the host-side of the veth-pair into the named bridge. 260 + Only one of hostAddress* or hostBridge can be given. 261 + ''; 262 + }; 263 + 264 + hostAddress = mkOption { 265 + type = types.nullOr types.str; 266 + default = null; 267 + example = "10.231.136.1"; 268 + description = '' 269 + The IPv4 address assigned to the host interface. 270 + (Not used when hostBridge is set.) 271 + ''; 272 + }; 273 + 274 + hostAddress6 = mkOption { 275 + type = types.nullOr types.string; 276 + default = null; 277 + example = "fc00::1"; 278 + description = '' 279 + The IPv6 address assigned to the host interface. 280 + (Not used when hostBridge is set.) 281 + ''; 282 + }; 283 + 284 + localAddress = mkOption { 285 + type = types.nullOr types.str; 286 + default = null; 287 + example = "10.231.136.2"; 288 + description = '' 289 + The IPv4 address assigned to the interface in the container. 290 + If a hostBridge is used, this should be given with netmask to access 291 + the whole network. Otherwise the default netmask is /32 and routing is 292 + set up from localAddress to hostAddress and back. 293 + ''; 294 + }; 295 + 296 + localAddress6 = mkOption { 297 + type = types.nullOr types.string; 298 + default = null; 299 + example = "fc00::2"; 300 + description = '' 301 + The IPv6 address assigned to the interface in the container. 302 + If a hostBridge is used, this should be given with netmask to access 303 + the whole network. Otherwise the default netmask is /128 and routing is 304 + set up from localAddress6 to hostAddress6 and back. 
305 + ''; 306 + }; 307 + 308 + }; 309 + 76 310 in 77 311 78 312 { ··· 133 367 ''; 134 368 }; 135 369 136 - hostBridge = mkOption { 137 - type = types.nullOr types.string; 138 - default = null; 139 - example = "br0"; 140 - description = '' 141 - Put the host-side of the veth-pair into the named bridge. 142 - Only one of hostAddress* or hostBridge can be given. 143 - ''; 144 - }; 145 - 146 - hostAddress = mkOption { 147 - type = types.nullOr types.str; 148 - default = null; 149 - example = "10.231.136.1"; 150 - description = '' 151 - The IPv4 address assigned to the host interface. 152 - (Not used when hostBridge is set.) 153 - ''; 154 - }; 155 - 156 - hostAddress6 = mkOption { 157 - type = types.nullOr types.string; 158 - default = null; 159 - example = "fc00::1"; 160 - description = '' 161 - The IPv6 address assigned to the host interface. 162 - (Not used when hostBridge is set.) 163 - ''; 164 - }; 165 - 166 - localAddress = mkOption { 167 - type = types.nullOr types.str; 168 - default = null; 169 - example = "10.231.136.2"; 170 - description = '' 171 - The IPv4 address assigned to <literal>eth0</literal> 172 - in the container. 173 - ''; 174 - }; 175 - 176 - localAddress6 = mkOption { 177 - type = types.nullOr types.string; 178 - default = null; 179 - example = "fc00::2"; 180 - description = '' 181 - The IPv6 address assigned to <literal>eth0</literal> 182 - in the container. 183 - ''; 184 - }; 185 - 186 370 interfaces = mkOption { 187 371 type = types.listOf types.string; 188 372 default = []; 189 373 example = [ "eth1" "eth2" ]; 190 374 description = '' 191 375 The list of interfaces to be moved into the container. 
376 + ''; 377 + }; 378 + 379 + extraVeths = mkOption { 380 + type = types.attrsOf types.optionSet; 381 + default = {}; 382 + options = networkOptions; 383 + description = '' 384 + Extra veth-pairs to be created for the container 192 385 ''; 193 386 }; 194 387 ··· 214 407 ''; 215 408 }; 216 409 217 - }; 410 + } // networkOptions; 218 411 219 412 config = mkMerge 220 413 [ (mkIf options.config.isDefined { ··· 272 465 environment.INSTANCE = "%i"; 273 466 environment.root = "/var/lib/containers/%i"; 274 467 275 - preStart = 276 - '' 277 - # Clean up existing machined registration and interfaces. 278 - machinectl terminate "$INSTANCE" 2> /dev/null || true 468 + preStart = preStartScript {}; 279 469 280 - if [ "$PRIVATE_NETWORK" = 1 ]; then 281 - ip link del dev "ve-$INSTANCE" 2> /dev/null || true 282 - ip link del dev "vb-$INSTANCE" 2> /dev/null || true 283 - fi 284 - ''; 470 + script = startScript {}; 285 471 286 - script = 287 - '' 288 - mkdir -p -m 0755 "$root/etc" "$root/var/lib" 289 - mkdir -p -m 0700 "$root/var/lib/private" "$root/root" /run/containers 290 - if ! [ -e "$root/etc/os-release" ]; then 291 - touch "$root/etc/os-release" 292 - fi 293 - 294 - if ! [ -e "$root/etc/machine-id" ]; then 295 - touch "$root/etc/machine-id" 296 - fi 297 - 298 - mkdir -p -m 0755 \ 299 - "/nix/var/nix/profiles/per-container/$INSTANCE" \ 300 - "/nix/var/nix/gcroots/per-container/$INSTANCE" 301 - 302 - cp --remove-destination /etc/resolv.conf "$root/etc/resolv.conf" 303 - 304 - if [ "$PRIVATE_NETWORK" = 1 ]; then 305 - extraFlags+=" --network-veth" 306 - if [ -n "$HOST_BRIDGE" ]; then 307 - extraFlags+=" --network-bridge=$HOST_BRIDGE" 308 - fi 309 - fi 310 - 311 - for iface in $INTERFACES; do 312 - extraFlags+=" --network-interface=$iface" 313 - done 314 - 315 - for iface in $MACVLANS; do 316 - extraFlags+=" --network-macvlan=$iface" 317 - done 318 - 319 - # If the host is 64-bit and the container is 32-bit, add a 320 - # --personality flag. 
321 - ${optionalString (config.nixpkgs.system == "x86_64-linux") '' 322 - if [ "$(< ''${SYSTEM_PATH:-/nix/var/nix/profiles/per-container/$INSTANCE/system}/system)" = i686-linux ]; then 323 - extraFlags+=" --personality=x86" 324 - fi 325 - ''} 326 - 327 - # Run systemd-nspawn without startup notification (we'll 328 - # wait for the container systemd to signal readiness). 329 - EXIT_ON_REBOOT=1 \ 330 - exec ${config.systemd.package}/bin/systemd-nspawn \ 331 - --keep-unit \ 332 - -M "$INSTANCE" -D "$root" $extraFlags \ 333 - $EXTRA_NSPAWN_FLAGS \ 334 - --notify-ready=yes \ 335 - --bind-ro=/nix/store \ 336 - --bind-ro=/nix/var/nix/db \ 337 - --bind-ro=/nix/var/nix/daemon-socket \ 338 - --bind="/nix/var/nix/profiles/per-container/$INSTANCE:/nix/var/nix/profiles" \ 339 - --bind="/nix/var/nix/gcroots/per-container/$INSTANCE:/nix/var/nix/gcroots" \ 340 - --setenv PRIVATE_NETWORK="$PRIVATE_NETWORK" \ 341 - --setenv HOST_BRIDGE="$HOST_BRIDGE" \ 342 - --setenv HOST_ADDRESS="$HOST_ADDRESS" \ 343 - --setenv LOCAL_ADDRESS="$LOCAL_ADDRESS" \ 344 - --setenv HOST_ADDRESS6="$HOST_ADDRESS6" \ 345 - --setenv LOCAL_ADDRESS6="$LOCAL_ADDRESS6" \ 346 - --setenv PATH="$PATH" \ 347 - ${containerInit} "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/init" 348 - ''; 349 - 350 - postStart = 351 - '' 352 - if [ "$PRIVATE_NETWORK" = 1 ]; then 353 - if [ -z "$HOST_BRIDGE" ]; then 354 - ifaceHost=ve-$INSTANCE 355 - ip link set dev $ifaceHost up 356 - if [ -n "$HOST_ADDRESS" ]; then 357 - ip addr add $HOST_ADDRESS dev $ifaceHost 358 - fi 359 - if [ -n "$HOST_ADDRESS6" ]; then 360 - ip -6 addr add $HOST_ADDRESS6 dev $ifaceHost 361 - fi 362 - if [ -n "$LOCAL_ADDRESS" ]; then 363 - ip route add $LOCAL_ADDRESS dev $ifaceHost 364 - fi 365 - if [ -n "$LOCAL_ADDRESS6" ]; then 366 - ip -6 route add $LOCAL_ADDRESS6 dev $ifaceHost 367 - fi 368 - fi 369 - fi 370 - 371 - # Get the leader PID so that we can signal it in 372 - # preStop. We can't use machinectl there because D-Bus 373 - # might be shutting down. 
FIXME: in systemd 219 we can 374 - # just signal systemd-nspawn to do a clean shutdown. 375 - machinectl show "$INSTANCE" | sed 's/Leader=\(.*\)/\1/;t;d' > "/run/containers/$INSTANCE.pid" 376 - ''; 472 + postStart = postStartScript {}; 377 473 378 474 preStop = 379 475 '' ··· 425 521 [{ name = "container@"; value = unit; }] 426 522 # declarative containers 427 523 ++ (mapAttrsToList (name: cfg: nameValuePair "container@${name}" ( 524 + unit // { 525 + preStart = preStartScript cfg; 526 + script = startScript cfg; 527 + postStart = postStartScript cfg; 528 + } // ( 428 529 if cfg.autoStart then 429 - unit // { 530 + { 430 531 wantedBy = [ "multi-user.target" ]; 431 532 wants = [ "network.target" ]; 432 533 after = [ "network.target" ]; 433 534 restartTriggers = [ cfg.path ]; 434 535 reloadIfChanged = true; 435 536 } 436 - else null 537 + else {}) 437 538 )) config.containers) 438 539 )); 439 540 ··· 462 563 LOCAL_ADDRESS6=${cfg.localAddress6} 463 564 ''} 464 565 ''} 465 - INTERFACES="${toString cfg.interfaces}" 466 - ${optionalString cfg.autoStart '' 467 - AUTO_START=1 468 - ''} 469 - EXTRA_NSPAWN_FLAGS="${mkBindFlags cfg.bindMounts}" 566 + INTERFACES="${toString cfg.interfaces}" 567 + ${optionalString cfg.autoStart '' 568 + AUTO_START=1 569 + ''} 570 + EXTRA_NSPAWN_FLAGS="${mkBindFlags cfg.bindMounts}" 470 571 ''; 471 572 }) config.containers; 472 573
+1
nixos/release.nix
··· 218 218 tests.containers-ipv6 = callTest tests/containers-ipv6.nix {}; 219 219 tests.containers-bridge = callTest tests/containers-bridge.nix {}; 220 220 tests.containers-imperative = callTest tests/containers-imperative.nix {}; 221 + tests.containers-extra_veth = callTest tests/containers-extra_veth.nix {}; 221 222 tests.docker = hydraJob (import tests/docker.nix { system = "x86_64-linux"; }); 222 223 tests.dockerRegistry = hydraJob (import tests/docker-registry.nix { system = "x86_64-linux"; }); 223 224 tests.dnscrypt-proxy = callTest tests/dnscrypt-proxy.nix { system = "x86_64-linux"; };
+103
nixos/tests/containers-extra_veth.nix
··· 1 + # Test for NixOS' container support. 2 + 3 + import ./make-test.nix ({ pkgs, ...} : { 4 + name = "containers-bridge"; 5 + meta = with pkgs.stdenv.lib.maintainers; { 6 + maintainers = [ aristid aszlig eelco chaoflow ]; 7 + }; 8 + 9 + machine = 10 + { config, pkgs, ... }: 11 + { imports = [ ../modules/installer/cd-dvd/channel.nix ]; 12 + virtualisation.writableStore = true; 13 + virtualisation.memorySize = 768; 14 + virtualisation.vlans = []; 15 + 16 + networking.bridges = { 17 + br0 = { 18 + interfaces = []; 19 + }; 20 + br1 = { interfaces = []; }; 21 + }; 22 + networking.interfaces = { 23 + br0 = { 24 + ip4 = [{ address = "192.168.0.1"; prefixLength = 24; }]; 25 + ip6 = [{ address = "fc00::1"; prefixLength = 7; }]; 26 + }; 27 + br1 = { 28 + ip4 = [{ address = "192.168.1.1"; prefixLength = 24; }]; 29 + }; 30 + }; 31 + 32 + containers.webserver = 33 + { 34 + autoStart = true; 35 + privateNetwork = true; 36 + hostBridge = "br0"; 37 + localAddress = "192.168.0.100/24"; 38 + localAddress6 = "fc00::2/7"; 39 + extraVeths = { 40 + veth1 = { hostBridge = "br1"; localAddress = "192.168.1.100/24"; }; 41 + veth2 = { hostAddress = "192.168.2.1"; localAddress = "192.168.2.100"; }; 42 + }; 43 + config = 44 + { 45 + networking.firewall.allowedTCPPorts = [ 80 ]; 46 + networking.firewall.allowPing = true; 47 + }; 48 + }; 49 + 50 + virtualisation.pathsInNixDB = [ pkgs.stdenv ]; 51 + }; 52 + 53 + testScript = 54 + '' 55 + $machine->waitForUnit("default.target"); 56 + $machine->succeed("nixos-container list") =~ /webserver/ or die; 57 + 58 + # Status of the webserver container. 
59 + $machine->succeed("nixos-container status webserver") =~ /up/ or die; 60 + 61 + # Debug 62 + #$machine->succeed("nixos-container run webserver -- ip link >&2"); 63 + 64 + # Ensure that the veths are inside the container 65 + $machine->succeed("nixos-container run webserver -- ip link show veth1") =~ /state UP/ or die; 66 + $machine->succeed("nixos-container run webserver -- ip link show veth2") =~ /state UP/ or die; 67 + 68 + # Debug 69 + #$machine->succeed("ip link >&2"); 70 + 71 + # Ensure the presence of the extra veths 72 + $machine->succeed("ip link show veth1") =~ /state UP/ or die; 73 + $machine->succeed("ip link show veth2") =~ /state UP/ or die; 74 + 75 + # Ensure the veth1 is part of br1 on the host 76 + $machine->succeed("ip link show veth1") =~ /master br1/ or die; 77 + 78 + # Debug 79 + #$machine->succeed("ip -4 a >&2"); 80 + #$machine->succeed("ip -4 r >&2"); 81 + #$machine->succeed("nixos-container run webserver -- ip link >&2"); 82 + #$machine->succeed("nixos-container run webserver -- ip -4 a >&2"); 83 + #$machine->succeed("nixos-container run webserver -- ip -4 r >&2"); 84 + 85 + # Ping on main veth 86 + $machine->succeed("ping -n -c 1 192.168.0.100"); 87 + $machine->succeed("ping6 -n -c 1 fc00::2"); 88 + 89 + # Ping on the first extra veth 90 + $machine->succeed("ping -n -c 1 192.168.1.100 >&2"); 91 + 92 + # Ping on the second extra veth 93 + $machine->succeed("ping -n -c 1 192.168.2.100 >&2"); 94 + 95 + # Stop the container. 96 + $machine->succeed("nixos-container stop webserver"); 97 + $machine->fail("ping -n -c 1 192.168.1.100 >&2"); 98 + $machine->fail("ping -n -c 1 192.168.2.100 >&2"); 99 + 100 + # Destroying a declarative container should fail. 101 + $machine->fail("nixos-container destroy webserver"); 102 + ''; 103 + })