1{ bashInteractive
2, buildPackages
3, cacert
4, callPackage
5, closureInfo
6, coreutils
7, e2fsprogs
8, proot
9, fakeNss
10, fakeroot
11, file
12, go
13, jq
14, jshon
15, lib
16, makeWrapper
17, moreutils
18, nix
19, nixosTests
20, pigz
21, rsync
22, runCommand
23, runtimeShell
24, shadow
25, skopeo
26, storeDir ? builtins.storeDir
27, substituteAll
28, symlinkJoin
29, tarsum
30, util-linux
31, vmTools
32, writeClosure
33, writeScript
34, writeShellScriptBin
35, writeText
36, writeTextDir
37, writePython3
38, zstd
39}:
40
41let
  inherit (lib)
    escapeShellArgs
    optionals
    optionalString
    toList
    ;
51
52 mkDbExtraCommand = contents:
53 let
54 contentsList = if builtins.isList contents then contents else [ contents ];
55 in
56 ''
57 echo "Generating the nix database..."
58 echo "Warning: only the database of the deepest Nix layer is loaded."
59 echo " If you want to use nix commands in the container, it would"
60 echo " be better to only have one layer that contains a nix store."
61
62 export NIX_REMOTE=local?root=$PWD
63 # A user is required by nix
64 # https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
65 export USER=nobody
66 ${buildPackages.nix}/bin/nix-store --load-db < ${closureInfo {rootPaths = contentsList;}}/registration
67 # Reset registration times to make the image reproducible
68 ${buildPackages.sqlite}/bin/sqlite3 nix/var/nix/db/db.sqlite "UPDATE ValidPaths SET registrationTime = ''${SOURCE_DATE_EPOCH}"
69
70 mkdir -p nix/var/nix/gcroots/docker/
71 for i in ${lib.concatStringsSep " " contentsList}; do
72 ln -s $i nix/var/nix/gcroots/docker/$(basename $i)
73 done;
74 '';
75
76 # The OCI Image specification recommends that configurations use values listed
77 # in the Go Language document for GOARCH.
78 # Reference: https://github.com/opencontainers/image-spec/blob/master/config.md#properties
79 # For the mapping from Nixpkgs system parameters to GOARCH, we can reuse the
80 # mapping from the go package.
81 defaultArchitecture = go.GOARCH;
82
83 compressors = {
84 none = {
85 ext = "";
86 nativeInputs = [ ];
87 compress = "cat";
88 decompress = "cat";
89 };
90 gz = {
91 ext = ".gz";
92 nativeInputs = [ pigz ];
93 compress = "pigz -p$NIX_BUILD_CORES -nTR";
94 decompress = "pigz -d -p$NIX_BUILD_CORES";
95 };
96 zstd = {
97 ext = ".zst";
98 nativeInputs = [ zstd ];
99 compress = "zstd -T$NIX_BUILD_CORES";
100 decompress = "zstd -d -T$NIX_BUILD_CORES";
101 };
102 };
103
  compressorForImage = compressor: imageName: compressors.${compressor} or
    (throw "in docker image ${imageName}: compressor must be one of: [${toString (builtins.attrNames compressors)}]");
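
  # A small sketch of how the two helpers above fit together (the image name
  # below is just an illustrative placeholder):
  #
  #   (compressorForImage "zstd" "some-image").compress   # => "zstd -T$NIX_BUILD_CORES"
  #   (compressorForImage "gz"   "some-image").ext        # => ".gz"
  #   (compressorForImage "lz4"  "some-image")            # => throws, listing the supported names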
106
107in
108rec {
109 examples = callPackage ./examples.nix {
110 inherit buildImage buildLayeredImage fakeNss pullImage shadowSetup buildImageWithNixDb streamNixShellImage;
111 };
112
113 tests = {
114 inherit (nixosTests)
115 docker-tools
116 docker-tools-overlay
117 # requires remote builder
118 # docker-tools-cross
119 ;
120 };
121
122 pullImage =
123 let
124 fixName = name: builtins.replaceStrings [ "/" ":" ] [ "-" "-" ] name;
125 in
126 { imageName
127 # To find the digest of an image, you can use skopeo:
128 # see doc/functions.xml
129 , imageDigest
130 , sha256
131 , os ? "linux"
132 , # Image architecture, defaults to the architecture of the `hostPlatform` when unset
133 arch ? defaultArchitecture
      # This is used to set the name of the pulled image.
    , finalImageName ? imageName
      # This is used to set the tag of the pulled image.
137 , finalImageTag ? "latest"
138 # This is used to disable TLS certificate verification, allowing access to http registries on (hopefully) trusted networks
139 , tlsVerify ? true
140
141 , name ? fixName "docker-image-${finalImageName}-${finalImageTag}.tar"
142 }:
143
144 runCommand name
145 {
146 inherit imageDigest;
147 imageName = finalImageName;
148 imageTag = finalImageTag;
149 impureEnvVars = lib.fetchers.proxyImpureEnvVars;
150 outputHashMode = "flat";
151 outputHashAlgo = "sha256";
152 outputHash = sha256;
153
154 nativeBuildInputs = [ skopeo ];
155 SSL_CERT_FILE = "${cacert.out}/etc/ssl/certs/ca-bundle.crt";
156
157 sourceURL = "docker://${imageName}@${imageDigest}";
158 destNameTag = "${finalImageName}:${finalImageTag}";
159 } ''
160 skopeo \
161 --insecure-policy \
162 --tmpdir=$TMPDIR \
163 --override-os ${os} \
164 --override-arch ${arch} \
165 copy \
166 --src-tls-verify=${lib.boolToString tlsVerify} \
167 "$sourceURL" "docker-archive://$out:$destNameTag" \
168 | cat # pipe through cat to force-disable progress bar
169 '';
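
  # A usage sketch for pullImage. The digest and hash below are placeholders,
  # not real values; in practice they can be obtained with `nix-prefetch-docker`
  # or `skopeo inspect`.
  #
  #   pullImage {
  #     imageName = "library/alpine";
  #     imageDigest = "sha256:0000000000000000000000000000000000000000000000000000000000000000";
  #     sha256 = lib.fakeSha256;  # replace with the real output hash
  #     finalImageName = "alpine";
  #     finalImageTag = "latest";
  #   }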
170
171 # We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
172 # And we cannot untar it, because then we cannot preserve permissions etc.
173 inherit tarsum; # pkgs.dockerTools.tarsum
174
175 # buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
176 mergeDrvs =
177 { derivations
178 , onlyDeps ? false
179 }:
180 runCommand "merge-drvs"
181 {
182 inherit derivations onlyDeps;
183 } ''
184 if [[ -n "$onlyDeps" ]]; then
185 echo $derivations > $out
186 exit 0
187 fi
188
189 mkdir $out
190 for derivation in $derivations; do
191 echo "Merging $derivation..."
192 if [[ -d "$derivation" ]]; then
193 # If it's a directory, copy all of its contents into $out.
194 cp -drf --preserve=mode -f $derivation/* $out/
195 else
196 # Otherwise treat the derivation as a tarball and extract it
197 # into $out.
          tar -C $out -xpf $derivation || true
199 fi
200 done
201 '';
202
203 # Helper for setting up the base files for managing users and
204 # groups, only if such files don't exist already. It is suitable for
205 # being used in a runAsRoot script.
206 shadowSetup = ''
207 export PATH=${shadow}/bin:$PATH
208 mkdir -p /etc/pam.d
209 if [[ ! -f /etc/passwd ]]; then
210 echo "root:x:0:0::/root:${runtimeShell}" > /etc/passwd
211 echo "root:!x:::::::" > /etc/shadow
212 fi
213 if [[ ! -f /etc/group ]]; then
214 echo "root:x:0:" > /etc/group
215 echo "root:x::" > /etc/gshadow
216 fi
217 if [[ ! -f /etc/pam.d/other ]]; then
218 cat > /etc/pam.d/other <<EOF
219 account sufficient pam_unix.so
220 auth sufficient pam_rootok.so
221 password requisite pam_unix.so nullok yescrypt
222 session required pam_unix.so
223 EOF
224 fi
225 if [[ ! -f /etc/login.defs ]]; then
226 touch /etc/login.defs
227 fi
228 '';
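
  # For example, a hypothetical image that needs `groupadd`/`useradd` at build
  # time could embed shadowSetup in its runAsRoot script (a sketch, not taken
  # verbatim from ./examples.nix):
  #
  #   buildImage {
  #     name = "shadow-example";
  #     runAsRoot = ''
  #       ${shadowSetup}
  #       groupadd -r someuser
  #       useradd -r -g someuser someuser
  #     '';
  #   }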
229
230 # Run commands in a virtual machine.
231 runWithOverlay =
232 { name
233 , fromImage ? null
234 , fromImageName ? null
235 , fromImageTag ? null
236 , diskSize ? 1024
237 , buildVMMemorySize ? 512
238 , preMount ? ""
239 , postMount ? ""
240 , postUmount ? ""
241 }:
242 vmTools.runInLinuxVM (
243 runCommand name
244 {
245 preVM = vmTools.createEmptyImage {
246 size = diskSize;
247 fullName = "docker-run-disk";
248 destination = "./image";
249 };
250 inherit fromImage fromImageName fromImageTag;
251 memSize = buildVMMemorySize;
252
253 nativeBuildInputs = [ util-linux e2fsprogs jshon rsync jq ];
254 } ''
255 mkdir disk
256 mkfs /dev/${vmTools.hd}
257 mount /dev/${vmTools.hd} disk
258 cd disk
259
260 function dedup() {
261 declare -A seen
262 while read ln; do
263 if [[ -z "''${seen["$ln"]:-}" ]]; then
264 echo "$ln"; seen["$ln"]=1
265 fi
266 done
267 }
268
269 if [[ -n "$fromImage" ]]; then
270 echo "Unpacking base image..."
271 mkdir image
272 tar -C image -xpf "$fromImage"
273
274 if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
275 parentID="$(
276 cat "image/manifest.json" |
                jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | .Config | rtrimstr(".json")' \
278 --arg desiredTag "$fromImageName:$fromImageTag"
279 )"
280 else
281 echo "From-image name or tag wasn't set. Reading the first ID."
282 parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
283 fi
284
285 # In case of repeated layers, unpack only the last occurrence of each
286 cat ./image/manifest.json | jq -r '.[0].Layers | .[]' | tac | dedup | tac > layer-list
287 else
288 touch layer-list
289 fi
290
291 # Unpack all of the parent layers into the image.
292 lowerdir=""
293 extractionID=0
294 for layerTar in $(cat layer-list); do
295 echo "Unpacking layer $layerTar"
296 extractionID=$((extractionID + 1))
297
298 mkdir -p image/$extractionID/layer
299 tar -C image/$extractionID/layer -xpf image/$layerTar
300 rm image/$layerTar
301
302 find image/$extractionID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
303
304 # Get the next lower directory and continue the loop.
305 lowerdir=image/$extractionID/layer''${lowerdir:+:}$lowerdir
306 done
307
308 mkdir work
309 mkdir layer
310 mkdir mnt
311
312 ${lib.optionalString (preMount != "") ''
313 # Execute pre-mount steps
314 echo "Executing pre-mount steps..."
315 ${preMount}
316 ''}
317
318 if [ -n "$lowerdir" ]; then
319 mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
320 else
321 mount --bind layer mnt
322 fi
323
324 ${lib.optionalString (postMount != "") ''
325 # Execute post-mount steps
326 echo "Executing post-mount steps..."
327 ${postMount}
328 ''}
329
330 umount mnt
331
332 (
333 cd layer
334 cmd='name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"'
335 find . -type c -exec bash -c "$cmd" \;
336 )
337
338 ${postUmount}
339 '');
340
341 exportImage = { name ? fromImage.name, fromImage, fromImageName ? null, fromImageTag ? null, diskSize ? 1024 }:
342 runWithOverlay {
343 inherit name fromImage fromImageName fromImageTag diskSize;
344
345 postMount = ''
346 echo "Packing raw image..."
347 tar -C mnt --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf $out/layer.tar .
348 '';
349
350 postUmount = ''
351 mv $out/layer.tar .
352 rm -rf $out
353 mv layer.tar $out
354 '';
355 };
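
  # Usage sketch: flatten an image into a single filesystem tarball. The
  # `someBaseImage` binding is hypothetical, e.g. the result of pullImage or
  # buildImage.
  #
  #   exportImage {
  #     fromImage = someBaseImage;
  #     diskSize = 2048;
  #   }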
356
357 # Create an executable shell script which has the coreutils in its
358 # PATH. Since root scripts are executed in a blank environment, even
359 # things like `ls` or `echo` will be missing.
360 shellScript = name: text:
361 writeScript name ''
362 #!${runtimeShell}
363 set -e
364 export PATH=${coreutils}/bin:/bin
365 ${text}
366 '';
367
368 # Create a "layer" (set of files).
369 mkPureLayer =
370 {
371 # Name of the layer
372 name
373 , # JSON containing configuration and metadata for this layer.
374 baseJson
375 , # Files to add to the layer.
376 copyToRoot ? null
377 , # When copying the contents into the image, preserve symlinks to
378 # directories (see `rsync -K`). Otherwise, transform those symlinks
379 # into directories.
380 keepContentsDirlinks ? false
381 , # Additional commands to run on the layer before it is tar'd up.
382 extraCommands ? ""
383 , uid ? 0
384 , gid ? 0
385 }:
386 runCommand "docker-layer-${name}"
387 {
388 inherit baseJson extraCommands;
389 contents = copyToRoot;
390 nativeBuildInputs = [ jshon rsync tarsum ];
391 }
392 ''
393 mkdir layer
394 if [[ -n "$contents" ]]; then
395 echo "Adding contents..."
396 for item in $contents; do
397 echo "Adding $item"
398 rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
399 done
400 else
401 echo "No contents to add to layer."
402 fi
403
404 chmod ug+w layer
405
406 if [[ -n "$extraCommands" ]]; then
407 (cd layer; eval "$extraCommands")
408 fi
409
410 # Tar up the layer and throw it into 'layer.tar'.
411 echo "Packing layer..."
412 mkdir $out
413 tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf - . | tee -p $out/layer.tar | tarsum)
414
415 # Add a 'checksum' field to the JSON, with the value set to the
416 # checksum of the tarball.
417 cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
418
419 # Indicate to docker that we're using schema version 1.0.
420 echo -n "1.0" > $out/VERSION
421
422 echo "Finished building layer '${name}'"
423 '';
424
425 # Make a "root" layer; required if we need to execute commands as a
426 # privileged user on the image. The commands themselves will be
427 # performed in a virtual machine sandbox.
428 mkRootLayer =
429 {
430 # Name of the image.
431 name
432 , # Script to run as root. Bash.
433 runAsRoot
434 , # Files to add to the layer. If null, an empty layer will be created.
435 # To add packages to /bin, use `buildEnv` or similar.
436 copyToRoot ? null
437 , # When copying the contents into the image, preserve symlinks to
438 # directories (see `rsync -K`). Otherwise, transform those symlinks
439 # into directories.
440 keepContentsDirlinks ? false
441 , # JSON containing configuration and metadata for this layer.
442 baseJson
443 , # Existing image onto which to append the new layer.
444 fromImage ? null
445 , # Name of the image we're appending onto.
446 fromImageName ? null
447 , # Tag of the image we're appending onto.
448 fromImageTag ? null
449 , # How much disk to allocate for the temporary virtual machine.
450 diskSize ? 1024
451 , # How much memory to allocate for the temporary virtual machine.
452 buildVMMemorySize ? 512
453 , # Commands (bash) to run on the layer; these do not require sudo.
454 extraCommands ? ""
455 }:
456 # Generate an executable script from the `runAsRoot` text.
457 let
458 runAsRootScript = shellScript "run-as-root.sh" runAsRoot;
459 extraCommandsScript = shellScript "extra-commands.sh" extraCommands;
460 in
461 runWithOverlay {
462 name = "docker-layer-${name}";
463
464 inherit fromImage fromImageName fromImageTag diskSize buildVMMemorySize;
465
466 preMount = lib.optionalString (copyToRoot != null && copyToRoot != [ ]) ''
467 echo "Adding contents..."
468 for item in ${escapeShellArgs (map (c: "${c}") (toList copyToRoot))}; do
469 echo "Adding $item..."
470 rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
471 done
472
473 chmod ug+w layer
474 '';
475
476 postMount = ''
477 mkdir -p mnt/{dev,proc,sys,tmp} mnt${storeDir}
478
479 # Mount /dev, /sys and the nix store as shared folders.
480 mount --rbind /dev mnt/dev
481 mount --rbind /sys mnt/sys
482 mount --rbind ${storeDir} mnt${storeDir}
483
484 # Execute the run as root script. See 'man unshare' for
485 # details on what's going on here; basically this command
486 # means that the runAsRootScript will be executed in a nearly
487 # completely isolated environment.
488 #
489 # Ideally we would use --mount-proc=mnt/proc or similar, but this
490 # doesn't work. The workaround is to setup proc after unshare.
491 # See: https://github.com/karelzak/util-linux/issues/648
492 unshare -imnpuf --mount-proc sh -c 'mount --rbind /proc mnt/proc && chroot mnt ${runAsRootScript}'
493
494 # Unmount directories and remove them.
495 umount -R mnt/dev mnt/sys mnt${storeDir}
496 rmdir --ignore-fail-on-non-empty \
497 mnt/dev mnt/proc mnt/sys mnt${storeDir} \
498 mnt$(dirname ${storeDir})
499 '';
500
501 postUmount = ''
502 (cd layer; ${extraCommandsScript})
503
504 echo "Packing layer..."
505 mkdir -p $out
506 tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf - . |
507 tee -p $out/layer.tar |
508 ${tarsum}/bin/tarsum)
509
510 cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
511 # Indicate to docker that we're using schema version 1.0.
512 echo -n "1.0" > $out/VERSION
513
514 echo "Finished building layer '${name}'"
515 '';
516 };
517
518 buildLayeredImage = lib.makeOverridable ({ name, compressor ? "gz", ... }@args:
519 let
520 stream = streamLayeredImage (builtins.removeAttrs args ["compressor"]);
521 compress = compressorForImage compressor name;
522 in
523 runCommand "${baseNameOf name}.tar${compress.ext}"
524 {
525 inherit (stream) imageName;
526 passthru = { inherit (stream) imageTag; inherit stream; };
527 nativeBuildInputs = compress.nativeInputs;
528 } "${stream} | ${compress.compress} > $out"
529 );
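
  # Usage sketch for buildLayeredImage (the package choice is illustrative and
  # `pkgs` is assumed to be in scope at the call site):
  #
  #   buildLayeredImage {
  #     name = "hello-layered";
  #     tag = "latest";
  #     contents = [ pkgs.hello ];
  #     config.Cmd = [ "/bin/hello" ];
  #   }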
530
531 # 1. extract the base image
532 # 2. create the layer
533 # 3. add layer deps to the layer itself, diffing with the base image
534 # 4. compute the layer id
535 # 5. put the layer in the image
536 # 6. repack the image
537 buildImage = lib.makeOverridable (
538 args@{
539 # Image name.
540 name
    , # Image tag; when null, the nix output hash will be used.
542 tag ? null
543 , # Parent image, to append to.
544 fromImage ? null
545 , # Name of the parent image; will be read from the image otherwise.
546 fromImageName ? null
547 , # Tag of the parent image; will be read from the image otherwise.
548 fromImageTag ? null
549 , # Files to put on the image (a nix store path or list of paths).
550 copyToRoot ? null
551 , # When copying the contents into the image, preserve symlinks to
552 # directories (see `rsync -K`). Otherwise, transform those symlinks
553 # into directories.
554 keepContentsDirlinks ? false
555 , # Docker config; e.g. what command to run on the container.
556 config ? null
557 , # Image architecture, defaults to the architecture of the `hostPlatform` when unset
558 architecture ? defaultArchitecture
    , # Optional bash script to run on the files before the layer is packed.
560 extraCommands ? ""
561 , uid ? 0
562 , gid ? 0
563 , # Optional bash script to run as root on the image when provisioning.
564 runAsRoot ? null
565 , # Size of the virtual machine disk to provision when building the image.
566 diskSize ? 1024
567 , # Size of the virtual machine memory to provision when building the image.
568 buildVMMemorySize ? 512
569 , # Time of creation of the image.
570 created ? "1970-01-01T00:00:01Z"
571 , # Compressor to use. One of: none, gz, zstd.
572 compressor ? "gz"
573 , # Deprecated.
574 contents ? null
575 ,
576 }:
577
578 let
579 checked =
580 lib.warnIf (contents != null)
581 "in docker image ${name}: The contents parameter is deprecated. Change to copyToRoot if the contents are designed to be copied to the root filesystem, such as when you use `buildEnv` or similar between contents and your packages. Use copyToRoot = buildEnv { ... }; or similar if you intend to add packages to /bin."
582 lib.throwIf (contents != null && copyToRoot != null) "in docker image ${name}: You can not specify both contents and copyToRoot."
583 ;
584
585 rootContents = if copyToRoot == null then contents else copyToRoot;
586
587 baseName = baseNameOf name;
588
589 # Create a JSON blob of the configuration. Set the date to unix zero.
590 baseJson =
591 let
          pure = writeText "${baseName}-config.json" (builtins.toJSON {
            inherit created config architecture;
            os = "linux";
          });
597 impure = runCommand "${baseName}-config.json"
598 {
599 nativeBuildInputs = [ jq ];
600 preferLocalBuild = true;
601 }
602 ''
603 jq ".created = \"$(TZ=utc date --iso-8601="seconds")\"" ${pure} > $out
604 '';
605 in
606 if created == "now" then impure else pure;
607
608 compress = compressorForImage compressor name;
609
610 layer =
611 if runAsRoot == null
612 then
613 mkPureLayer
614 {
615 name = baseName;
616 inherit baseJson keepContentsDirlinks extraCommands uid gid;
617 copyToRoot = rootContents;
618 } else
619 mkRootLayer {
620 name = baseName;
621 inherit baseJson fromImage fromImageName fromImageTag
622 keepContentsDirlinks runAsRoot diskSize buildVMMemorySize
623 extraCommands;
624 copyToRoot = rootContents;
625 };
626 result = runCommand "docker-image-${baseName}.tar${compress.ext}"
627 {
628 nativeBuildInputs = [ jshon jq moreutils ] ++ compress.nativeInputs;
629 # Image name must be lowercase
630 imageName = lib.toLower name;
631 imageTag = lib.optionalString (tag != null) tag;
632 inherit fromImage baseJson;
633 layerClosure = writeClosure [ layer ];
634 passthru.buildArgs = args;
635 passthru.layer = layer;
636 passthru.imageTag =
637 if tag != null
638 then tag
639 else
640 lib.head (lib.strings.splitString "-" (baseNameOf (builtins.unsafeDiscardStringContext result.outPath)));
641 } ''
642 ${lib.optionalString (tag == null) ''
643 outName="$(basename "$out")"
644 outHash=$(echo "$outName" | cut -d - -f 1)
645
646 imageTag=$outHash
647 ''}
648
649 # Print tar contents:
650 # 1: Interpreted as relative to the root directory
651 # 2: With no trailing slashes on directories
652 # This is useful for ensuring that the output matches the
653 # values generated by the "find" command
654 ls_tar() {
655 for f in $(tar -tf $1 | xargs realpath -ms --relative-to=.); do
656 if [[ "$f" != "." ]]; then
657 echo "/$f"
658 fi
659 done
660 }
661
662 mkdir image
663 touch baseFiles
664 baseEnvs='[]'
665 if [[ -n "$fromImage" ]]; then
666 echo "Unpacking base image..."
667 tar -C image -xpf "$fromImage"
668
669 # Store the layers and the environment variables from the base image
670 cat ./image/manifest.json | jq -r '.[0].Layers | .[]' > layer-list
671 configName="$(cat ./image/manifest.json | jq -r '.[0].Config')"
672 baseEnvs="$(cat "./image/$configName" | jq '.config.Env // []')"
673
674 # Extract the parentID from the manifest
675 if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
676 parentID="$(
677 cat "image/manifest.json" |
                jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | .Config | rtrimstr(".json")' \
679 --arg desiredTag "$fromImageName:$fromImageTag"
680 )"
681 else
682 echo "From-image name or tag wasn't set. Reading the first ID."
683 parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
684 fi
685
686 # Otherwise do not import the base image configuration and manifest
687 chmod a+w image image/*.json
688 rm -f image/*.json
689
690 for l in image/*/layer.tar; do
691 ls_tar $l >> baseFiles
692 done
693 else
694 touch layer-list
695 fi
696
697 chmod -R ug+rw image
698
699 mkdir temp
700 cp ${layer}/* temp/
701 chmod ug+w temp/*
702
703 for dep in $(cat $layerClosure); do
704 find $dep >> layerFiles
705 done
706
707 echo "Adding layer..."
708 # Record the contents of the tarball with ls_tar.
709 ls_tar temp/layer.tar >> baseFiles
710
711 # Append nix/store directory to the layer so that when the layer is loaded in the
712 # image /nix/store has read permissions for non-root users.
713 # nix/store is added only if the layer has /nix/store paths in it.
714 if [ $(wc -l < $layerClosure) -gt 1 ] && [ $(grep -c -e "^/nix/store$" baseFiles) -eq 0 ]; then
715 mkdir -p nix/store
716 chmod -R 555 nix
717 echo "./nix" >> layerFiles
718 echo "./nix/store" >> layerFiles
719 fi
720
721 # Get the files in the new layer which were *not* present in
722 # the old layer, and record them as newFiles.
723 comm <(sort -n baseFiles|uniq) \
724 <(sort -n layerFiles|uniq|grep -v ${layer}) -1 -3 > newFiles
725 # Append the new files to the layer.
726 tar -rpf temp/layer.tar --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" \
727 --owner=0 --group=0 --no-recursion --verbatim-files-from --files-from newFiles
728
729 echo "Adding meta..."
730
731 # If we have a parentID, add it to the json metadata.
732 if [[ -n "$parentID" ]]; then
733 cat temp/json | jshon -s "$parentID" -i parent > tmpjson
734 mv tmpjson temp/json
735 fi
736
737 # Take the sha256 sum of the generated json and use it as the layer ID.
738 # Compute the size and add it to the json under the 'Size' field.
739 layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
740 size=$(stat --printf="%s" temp/layer.tar)
741 cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
742 mv tmpjson temp/json
743
744 # Use the temp folder we've been working on to create a new image.
745 mv temp image/$layerID
746
747 # Add the new layer ID to the end of the layer list
748 (
749 cat layer-list
750 # originally this used `sed -i "1i$layerID" layer-list`, but
751 # would fail if layer-list was completely empty.
752 echo "$layerID/layer.tar"
753 ) | sponge layer-list
754
755 # Create image json and image manifest
756 imageJson=$(cat ${baseJson} | jq '.config.Env = $baseenv + .config.Env' --argjson baseenv "$baseEnvs")
757 imageJson=$(echo "$imageJson" | jq ". + {\"rootfs\": {\"diff_ids\": [], \"type\": \"layers\"}}")
758 manifestJson=$(jq -n "[{\"RepoTags\":[\"$imageName:$imageTag\"]}]")
759
760 for layerTar in $(cat ./layer-list); do
761 layerChecksum=$(sha256sum image/$layerTar | cut -d ' ' -f1)
762 imageJson=$(echo "$imageJson" | jq ".history |= . + [{\"created\": \"$(jq -r .created ${baseJson})\"}]")
763 # diff_ids order is from the bottom-most to top-most layer
764 imageJson=$(echo "$imageJson" | jq ".rootfs.diff_ids |= . + [\"sha256:$layerChecksum\"]")
765 manifestJson=$(echo "$manifestJson" | jq ".[0].Layers |= . + [\"$layerTar\"]")
766 done
767
768 imageJsonChecksum=$(echo "$imageJson" | sha256sum | cut -d ' ' -f1)
769 echo "$imageJson" > "image/$imageJsonChecksum.json"
770 manifestJson=$(echo "$manifestJson" | jq ".[0].Config = \"$imageJsonChecksum.json\"")
771 echo "$manifestJson" > image/manifest.json
772
773 # Store the json under the name image/repositories.
774 jshon -n object \
775 -n object -s "$layerID" -i "$imageTag" \
776 -i "$imageName" > image/repositories
777
778 # Make the image read-only.
779 chmod -R a-w image
780
781 echo "Cooking the image..."
782 tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | ${compress.compress} > $out
783
784 echo "Finished."
785 '';
786
787 in
788 checked result
789 );
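
  # Usage sketch for buildImage, combining copyToRoot, runAsRoot and config
  # (package names and port are illustrative; `pkgs` is assumed to be in scope
  # at the call site):
  #
  #   buildImage {
  #     name = "redis-example";
  #     tag = "latest";
  #     copyToRoot = pkgs.buildEnv {
  #       name = "image-root";
  #       paths = [ pkgs.redis ];
  #       pathsToLink = [ "/bin" ];
  #     };
  #     runAsRoot = ''
  #       ${shadowSetup}
  #       groupadd -r redis
  #       useradd -r -g redis redis
  #     '';
  #     config = {
  #       Cmd = [ "/bin/redis-server" ];
  #       ExposedPorts."6379/tcp" = { };
  #     };
  #   }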
790
791 # Merge the tarballs of images built with buildImage into a single
792 # tarball that contains all images. Running `docker load` on the resulting
793 # tarball will load the images into the docker daemon.
794 mergeImages = images: runCommand "merge-docker-images"
795 {
796 inherit images;
797 nativeBuildInputs = [ file jq ]
798 ++ compressors.none.nativeInputs
799 ++ compressors.gz.nativeInputs
800 ++ compressors.zstd.nativeInputs;
801 } ''
802 mkdir image inputs
803 # Extract images
804 repos=()
805 manifests=()
806 last_image_mime="application/gzip"
807 for item in $images; do
808 name=$(basename $item)
809 mkdir inputs/$name
810
811 last_image_mime=$(file --mime-type -b $item)
812 case $last_image_mime in
813 "application/x-tar") ${compressors.none.decompress};;
814 "application/zstd") ${compressors.zstd.decompress};;
815 "application/gzip") ${compressors.gz.decompress};;
816 *) echo "error: unexpected layer type $last_image_mime" >&2; exit 1;;
817 esac < $item | tar -xC inputs/$name
818
819 if [ -f inputs/$name/repositories ]; then
820 repos+=(inputs/$name/repositories)
821 fi
822 if [ -f inputs/$name/manifest.json ]; then
823 manifests+=(inputs/$name/manifest.json)
824 fi
825 done
826 # Copy all layers from input images to output image directory
827 cp -R --update=none inputs/*/* image/
828 # Merge repositories objects and manifests
829 jq -s add "''${repos[@]}" > repositories
830 jq -s add "''${manifests[@]}" > manifest.json
831 # Replace output image repositories and manifest with merged versions
832 mv repositories image/repositories
833 mv manifest.json image/manifest.json
834 # Create tarball and gzip
835 tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | (
836 case $last_image_mime in
837 "application/x-tar") ${compressors.none.compress};;
838 "application/zstd") ${compressors.zstd.compress};;
839 "application/gzip") ${compressors.gz.compress};;
840 # `*)` not needed; already checked.
841 esac
842 ) > $out
843 '';
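
  # Usage sketch (imageA and imageB are hypothetical results of buildImage or
  # buildLayeredImage; the merged tarball is loaded with `docker load -i`):
  #
  #   mergeImages [ imageA imageB ]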
844
845
846 # Provide a /etc/passwd and /etc/group that contain root and nobody.
847 # Useful when packaging binaries that insist on using nss to look up
848 # username/groups (like nginx).
  # It is fine for /bin/sh not to exist here; it is provided by the separate binSh shim.
850 inherit fakeNss; # alias
851
852 # This provides a /usr/bin/env, for shell scripts using the
853 # "#!/usr/bin/env executable" shebang.
854 usrBinEnv = runCommand "usr-bin-env" { } ''
855 mkdir -p $out/usr/bin
856 ln -s ${coreutils}/bin/env $out/usr/bin
857 '';
858
859 # This provides /bin/sh, pointing to bashInteractive.
860 # The use of bashInteractive here is intentional to support cases like `docker run -it <image_name>`, so keep these use cases in mind if making any changes to how this works.
861 binSh = runCommand "bin-sh" { } ''
862 mkdir -p $out/bin
863 ln -s ${bashInteractive}/bin/bash $out/bin/sh
864 '';
865
866 # This provides the ca bundle in common locations
867 caCertificates = runCommand "ca-certificates" { } ''
868 mkdir -p $out/etc/ssl/certs $out/etc/pki/tls/certs
869 # Old NixOS compatibility.
870 ln -s ${cacert}/etc/ssl/certs/ca-bundle.crt $out/etc/ssl/certs/ca-bundle.crt
871 # NixOS canonical location + Debian/Ubuntu/Arch/Gentoo compatibility.
872 ln -s ${cacert}/etc/ssl/certs/ca-bundle.crt $out/etc/ssl/certs/ca-certificates.crt
873 # CentOS/Fedora compatibility.
874 ln -s ${cacert}/etc/ssl/certs/ca-bundle.crt $out/etc/pki/tls/certs/ca-bundle.crt
875 '';
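
  # The helpers above (usrBinEnv, binSh, caCertificates, fakeNss) are meant to
  # be composed into an image's contents. A sketch, with an illustrative
  # package and `pkgs` assumed in scope:
  #
  #   buildLayeredImage {
  #     name = "curl-with-certs";
  #     contents = [ binSh usrBinEnv caCertificates fakeNss pkgs.curl ];
  #     config.Cmd = [ "/bin/sh" ];
  #   }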
876
877 # Build an image and populate its nix database with the provided
878 # contents. The main purpose is to be able to use nix commands in
879 # the container.
  # Be careful: this does not work well with multi-layer images.
881 # TODO: add the dependencies of the config json.
882 buildImageWithNixDb = args@{ copyToRoot ? contents, contents ? null, extraCommands ? "", ... }: (
883 buildImage (args // {
884 extraCommands = (mkDbExtraCommand copyToRoot) + extraCommands;
885 })
886 );
887
888 # TODO: add the dependencies of the config json.
889 buildLayeredImageWithNixDb = args@{ contents ? null, extraCommands ? "", ... }: (
890 buildLayeredImage (args // {
891 extraCommands = (mkDbExtraCommand contents) + extraCommands;
892 })
893 );
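
  # A sketch of building an image in which nix itself is usable (the package
  # selection is illustrative; `pkgs` is assumed to be in scope):
  #
  #   buildImageWithNixDb {
  #     name = "nix-container";
  #     copyToRoot = pkgs.buildEnv {
  #       name = "image-root";
  #       paths = [ pkgs.nix pkgs.coreutils pkgs.bashInteractive ];
  #       pathsToLink = [ "/bin" ];
  #     };
  #     config.Cmd = [ "/bin/bash" ];
  #   }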
894
895 # Arguments are documented in ../../../doc/build-helpers/images/dockertools.section.md
896 streamLayeredImage = lib.makeOverridable (
897 {
898 name
899 , tag ? null
900 , fromImage ? null
901 , contents ? [ ]
902 , config ? { }
903 , architecture ? defaultArchitecture
904 , created ? "1970-01-01T00:00:01Z"
905 , uid ? 0
906 , gid ? 0
907 , uname ? "root"
908 , gname ? "root"
909 , maxLayers ? 100
910 , extraCommands ? ""
911 , fakeRootCommands ? ""
912 , enableFakechroot ? false
913 , includeStorePaths ? true
914 , passthru ? {}
915 ,
916 }:
917 assert
918 (lib.assertMsg (maxLayers > 1)
919 "the maxLayers argument of dockerTools.buildLayeredImage function must be greather than 1 (current value: ${toString maxLayers})");
920 let
921 baseName = baseNameOf name;
922
923 streamScript = writePython3 "stream" { } ./stream_layered_image.py;
924 baseJson = writeText "${baseName}-base.json" (builtins.toJSON {
925 inherit config architecture;
926 os = "linux";
927 });
928
929 contentsList = if builtins.isList contents then contents else [ contents ];
930 bind-paths = builtins.toString (builtins.map (path: "--bind=${path}:${path}!") [
931 "/dev/"
932 "/proc/"
933 "/sys/"
934 "${builtins.storeDir}/"
935 "$out/layer.tar"
936 ]);
937
938 # We store the customisation layer as a tarball, to make sure that
939 # things like permissions set on 'extraCommands' are not overridden
940 # by Nix. Then we precompute the sha256 for performance.
941 customisationLayer = symlinkJoin {
942 name = "${baseName}-customisation-layer";
943 paths = contentsList;
944 inherit extraCommands fakeRootCommands;
945 nativeBuildInputs = [
946 fakeroot
947 ] ++ optionals enableFakechroot [
948 proot
949 ];
950 postBuild = ''
951 mv $out old_out
952 (cd old_out; eval "$extraCommands" )
953
954 mkdir $out
955 ${if enableFakechroot then ''
956 proot -r $PWD/old_out ${bind-paths} --pwd=/ fakeroot bash -c '
957 source $stdenv/setup
958 eval "$fakeRootCommands"
959 tar \
960 --sort name \
961 --exclude=./proc \
962 --exclude=./sys \
963 --exclude=.${builtins.storeDir} \
964 --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
965 --hard-dereference \
966 -cf $out/layer.tar .
967 '
968 '' else ''
969 fakeroot bash -c '
970 source $stdenv/setup
971 cd old_out
972 eval "$fakeRootCommands"
973 tar \
974 --sort name \
975 --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
976 --hard-dereference \
977 -cf $out/layer.tar .
978 '
979 ''}
980 sha256sum $out/layer.tar \
981 | cut -f 1 -d ' ' \
982 > $out/checksum
983 '';
984 };
985
986 closureRoots = lib.optionals includeStorePaths /* normally true */ (
987 [ baseJson customisationLayer ]
988 );
989 overallClosure = writeText "closure" (lib.concatStringsSep " " closureRoots);
990
991 # These derivations are only created as implementation details of docker-tools,
992 # so they'll be excluded from the created images.
993 unnecessaryDrvs = [ baseJson overallClosure customisationLayer ];
994
995 conf = runCommand "${baseName}-conf.json"
996 {
997 inherit fromImage maxLayers created uid gid uname gname;
998 imageName = lib.toLower name;
999 preferLocalBuild = true;
1000 passthru.imageTag =
1001 if tag != null
1002 then tag
1003 else
1004 lib.head (lib.strings.splitString "-" (baseNameOf (builtins.unsafeDiscardStringContext conf.outPath)));
1005 paths = buildPackages.referencesByPopularity overallClosure;
1006 nativeBuildInputs = [ jq ];
1007 } ''
1008 ${if (tag == null) then ''
1009 outName="$(basename "$out")"
1010 outHash=$(echo "$outName" | cut -d - -f 1)
1011
1012 imageTag=$outHash
1013 '' else ''
1014 imageTag="${tag}"
1015 ''}
1016
1017 # convert "created" to iso format
1018 if [[ "$created" != "now" ]]; then
1019 created="$(date -Iseconds -d "$created")"
1020 fi
1021
1022 paths() {
1023 cat $paths ${lib.concatMapStringsSep " "
1024 (path: "| (grep -v ${path} || true)")
1025 unnecessaryDrvs}
1026 }
1027
1028 # Compute the number of layers that are already used by a potential
1029 # 'fromImage' as well as the customization layer. Ensure that there is
1030 # still at least one layer available to store the image contents.
1031 usedLayers=0
1032
1033 # subtract number of base image layers
1034 if [[ -n "$fromImage" ]]; then
1035 (( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
1036 fi
1037
1038 # one layer will be taken up by the customisation layer
1039 (( usedLayers += 1 ))
1040
1041 if ! (( $usedLayers < $maxLayers )); then
          echo >&2 "Error: $usedLayers layers are needed to store 'fromImage' and" \
            "'extraCommands', but only maxLayers=$maxLayers were allowed." \
            "At least 1 layer is required to store the image contents."
1045 exit 1
1046 fi
1047 availableLayers=$(( maxLayers - usedLayers ))
1048
1049 # Create $maxLayers worth of Docker Layers, one layer per store path
1050 # unless there are more paths than $maxLayers. In that case, create
        # $maxLayers-1 layers for the most popular store paths, and pack the
        # remaining store paths into one final layer.
1053 #
1054 # The following code is fiddly w.r.t. ensuring every layer is
1055 # created, and that no paths are missed. If you change the
1056 # following lines, double-check that your code behaves properly
1057 # when the number of layers equals:
        # 0, maxLayers-1, maxLayers, and maxLayers+1
1059 paths |
1060 jq -sR '
1061 rtrimstr("\n") | split("\n")
1062 | (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
1063 | map(select(length > 0))
1064 ' \
1065 --argjson maxLayers "$availableLayers" > store_layers.json
1066
1067 # The index on $store_layers is necessary because the --slurpfile
1068 # automatically reads the file as an array.
1069 cat ${baseJson} | jq '
1070 . + {
1071 "store_dir": $store_dir,
1072 "from_image": $from_image,
1073 "store_layers": $store_layers[0],
1074 "customisation_layer", $customisation_layer,
1075 "repo_tag": $repo_tag,
1076 "created": $created,
1077 "uid": $uid,
1078 "gid": $gid,
1079 "uname": $uname,
1080 "gname": $gname
1081 }
1082 ' --arg store_dir "${storeDir}" \
1083 --argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
1084 --slurpfile store_layers store_layers.json \
1085 --arg customisation_layer ${customisationLayer} \
1086 --arg repo_tag "$imageName:$imageTag" \
1087 --arg created "$created" \
1088 --arg uid "$uid" \
1089 --arg gid "$gid" \
1090 --arg uname "$uname" \
1091 --arg gname "$gname" |
1092 tee $out
1093 '';
1094
1095 result = runCommand "stream-${baseName}"
1096 {
1097 inherit (conf) imageName;
1098 preferLocalBuild = true;
1099 passthru = passthru // {
1100 inherit (conf) imageTag;
1101
1102 # Distinguish tarballs and exes at the Nix level so functions that
1103 # take images can know in advance how the image is supposed to be used.
1104 isExe = true;
1105 };
1106 nativeBuildInputs = [ makeWrapper ];
1107 } ''
1108 makeWrapper ${streamScript} $out --add-flags ${conf}
1109 '';
1110 in
1111 result
1112 );
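
  # The result of streamLayeredImage is an executable that writes the image
  # tarball to stdout, so it can be piped straight into the Docker daemon
  # without ever storing the tarball in the Nix store. A sketch (attribute
  # names are illustrative):
  #
  #   $ nix-build -E 'with import <nixpkgs> { }; dockerTools.streamLayeredImage {
  #       name = "hello-stream";
  #       contents = [ hello ];
  #       config.Cmd = [ "/bin/hello" ];
  #     }'
  #   $ ./result | docker load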
1113
1114 # This function streams a docker image that behaves like a nix-shell for a derivation
1115 streamNixShellImage =
1116 { # The derivation whose environment this docker image should be based on
1117 drv
    , # Image name
      name ? drv.name + "-env"
    , # Image tag; when null, the Nix output hash will be used
1121 tag ? null
1122 , # User id to run the container as. Defaults to 1000, because many
1123 # binaries don't like to be run as root
1124 uid ? 1000
1125 , # Group id to run the container as, see also uid
1126 gid ? 1000
1127 , # The home directory of the user
1128 homeDirectory ? "/build"
1129 , # The path to the bash binary to use as the shell. See `NIX_BUILD_SHELL` in `man nix-shell`
1130 shell ? bashInteractive + "/bin/bash"
1131 , # Run this command in the environment of the derivation, in an interactive shell. See `--command` in `man nix-shell`
1132 command ? null
1133 , # Same as `command`, but runs the command in a non-interactive shell instead. See `--run` in `man nix-shell`
1134 run ? null
1135 }:
1136 assert lib.assertMsg (! (drv.drvAttrs.__structuredAttrs or false))
1137 "streamNixShellImage: Does not work with the derivation ${drv.name} because it uses __structuredAttrs";
1138 assert lib.assertMsg (command == null || run == null)
1139 "streamNixShellImage: Can't specify both command and run";
1140 let
1141
1142 # A binary that calls the command to build the derivation
1143 builder = writeShellScriptBin "buildDerivation" ''
1144 exec ${lib.escapeShellArg (stringValue drv.drvAttrs.builder)} ${lib.escapeShellArgs (map stringValue drv.drvAttrs.args)}
1145 '';
1146
1147 staticPath = "${dirOf shell}:${lib.makeBinPath [ builder ]}";
1148
1149 # https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L493-L526
1150 rcfile = writeText "nix-shell-rc" ''
1151 unset PATH
1152 dontAddDisableDepTrack=1
1153 # TODO: https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L506
1154 [ -e $stdenv/setup ] && source $stdenv/setup
1155 PATH=${staticPath}:"$PATH"
1156 SHELL=${lib.escapeShellArg shell}
1157 BASH=${lib.escapeShellArg shell}
1158 set +e
1159 [ -n "$PS1" -a -z "$NIX_SHELL_PRESERVE_PROMPT" ] && PS1='\n\[\033[1;32m\][nix-shell:\w]\$\[\033[0m\] '
1160 if [ "$(type -t runHook)" = function ]; then
1161 runHook shellHook
1162 fi
1163 unset NIX_ENFORCE_PURITY
1164 shopt -u nullglob
1165 shopt -s execfail
1166 ${optionalString (command != null || run != null) ''
1167 ${optionalString (command != null) command}
1168 ${optionalString (run != null) run}
1169 exit
1170 ''}
1171 '';
1172
1173 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/globals.hh#L464-L465
1174 sandboxBuildDir = "/build";
1175
1176 # This function closely mirrors what this Nix code does:
1177 # https://github.com/NixOS/nix/blob/2.8.0/src/libexpr/primops.cc#L1102
1178 # https://github.com/NixOS/nix/blob/2.8.0/src/libexpr/eval.cc#L1981-L2036
1179 stringValue = value:
1180 # We can't just use `toString` on all derivation attributes because that
1181 # would not put path literals in the closure. So we explicitly copy
1182 # those into the store here
1183 if builtins.typeOf value == "path" then "${value}"
1184 else if builtins.typeOf value == "list" then toString (map stringValue value)
1185 else toString value;
1186
1187 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L992-L1004
1188 drvEnv = lib.mapAttrs' (name: value:
1189 let str = stringValue value;
1190 in if lib.elem name (drv.drvAttrs.passAsFile or [])
1191 then lib.nameValuePair "${name}Path" (writeText "pass-as-text-${name}" str)
1192 else lib.nameValuePair name str
1193 ) drv.drvAttrs //
1194 # A mapping from output name to the nix store path where they should end up
1195 # https://github.com/NixOS/nix/blob/2.8.0/src/libexpr/primops.cc#L1253
1196 lib.genAttrs drv.outputs (output: builtins.unsafeDiscardStringContext drv.${output}.outPath);
1197
1198 # Environment variables set in the image
1199 envVars = {
1200
1201 # Root certificates for internet access
1202 SSL_CERT_FILE = "${cacert}/etc/ssl/certs/ca-bundle.crt";
1203 NIX_SSL_CERT_FILE = "${cacert}/etc/ssl/certs/ca-bundle.crt";
1204
1205 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1027-L1030
1206 # PATH = "/path-not-set";
1207 # Allows calling bash and `buildDerivation` as the Cmd
1208 PATH = staticPath;
1209
1210 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1032-L1038
1211 HOME = homeDirectory;
1212
1213 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1040-L1044
1214 NIX_STORE = storeDir;
1215
1216 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1046-L1047
1217 # TODO: Make configurable?
1218 NIX_BUILD_CORES = "1";
1219
1220 } // drvEnv // {
1221
1222 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1008-L1010
1223 NIX_BUILD_TOP = sandboxBuildDir;
1224
1225 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1012-L1013
1226 TMPDIR = sandboxBuildDir;
1227 TEMPDIR = sandboxBuildDir;
1228 TMP = sandboxBuildDir;
1229 TEMP = sandboxBuildDir;
1230
1231 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1015-L1019
1232 PWD = sandboxBuildDir;
1233
1234 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1071-L1074
1235 # We don't set it here because the output here isn't handled in any special way
1236 # NIX_LOG_FD = "2";
1237
1238 # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1076-L1077
1239 TERM = "xterm-256color";
1240 };
1241
1242
1243 in streamLayeredImage {
1244 inherit name tag;
1245 contents = [
1246 binSh
1247 usrBinEnv
1248 (fakeNss.override {
1249 # Allows programs to look up the build user's home directory
1250 # https://github.com/NixOS/nix/blob/ffe155abd36366a870482625543f9bf924a58281/src/libstore/build/local-derivation-goal.cc#L906-L910
1251 # Slightly differs however: We use the passed-in homeDirectory instead of sandboxBuildDir.
1252 # We're doing this because it's arguably a bug in Nix that sandboxBuildDir is used here: https://github.com/NixOS/nix/issues/6379
1253 extraPasswdLines = [
1254 "nixbld:x:${toString uid}:${toString gid}:Build user:${homeDirectory}:/noshell"
1255 ];
1256 extraGroupLines = [
1257 "nixbld:!:${toString gid}:"
1258 ];
1259 })
1260 ];
1261
1262 fakeRootCommands = ''
1263 # Effectively a single-user installation of Nix, giving the user full
1264 # control over the Nix store. Needed for building the derivation this
1265 # shell is for, but also in case one wants to use Nix inside the
1266 # image
1267 mkdir -p ./nix/{store,var/nix} ./etc/nix
1268 chown -R ${toString uid}:${toString gid} ./nix ./etc/nix
1269
1270 # Gives the user control over the build directory
1271 mkdir -p .${sandboxBuildDir}
1272 chown -R ${toString uid}:${toString gid} .${sandboxBuildDir}
1273 '';
1274
1275 # Run this image as the given uid/gid
1276 config.User = "${toString uid}:${toString gid}";
1277 config.Cmd =
1278 # https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L185-L186
1279 # https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L534-L536
1280 if run == null
1281 then [ shell "--rcfile" rcfile ]
1282 else [ shell rcfile ];
1283 config.WorkingDir = sandboxBuildDir;
1284 config.Env = lib.mapAttrsToList (name: value: "${name}=${value}") envVars;
1285 };
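
  # A sketch of streaming a nix-shell-like image for a derivation (`pkgs.hello`
  # is an arbitrary example; `pkgs` is assumed to be in scope). Piping the
  # resulting script into `docker load` and running the loaded image
  # interactively gives an environment roughly equivalent to `nix-shell -A hello`.
  #
  #   streamNixShellImage {
  #     drv = pkgs.hello;
  #     tag = "latest";
  #   }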
1286
1287 # Wrapper around streamNixShellImage to build an image from the result
1288 buildNixShellImage = { drv, compressor ? "gz", ... }@args:
1289 let
1290 stream = streamNixShellImage (builtins.removeAttrs args ["compressor"]);
1291 compress = compressorForImage compressor drv.name;
1292 in
1293 runCommand "${drv.name}-env.tar${compress.ext}"
1294 {
1295 inherit (stream) imageName;
1296 passthru = { inherit (stream) imageTag; };
1297 nativeBuildInputs = compress.nativeInputs;
1298 } "${stream} | ${compress.compress} > $out";
1299}