1{ bashInteractive
2, buildPackages
3, cacert
4, callPackage
5, closureInfo
6, coreutils
7, devShellTools
8, e2fsprogs
9, proot
10, fakeNss
11, fakeroot
12, file
13, go
14, jq
15, jshon
16, lib
17, makeWrapper
18, moreutils
19, nix
20, nixosTests
21, pigz
22, rsync
23, runCommand
24, runtimeShell
25, shadow
26, skopeo
27, stdenv
28, storeDir ? builtins.storeDir
29, substituteAll
30, symlinkJoin
31, tarsum
32, util-linux
33, vmTools
34, writeClosure
35, writeScript
36, writeShellScriptBin
37, writeText
38, writeTextDir
39, writePython3
40, zstd
41}:
42
43let
  # Bring frequently used helpers into scope for the definitions below.
  inherit (lib)
    optionals
    optionalString
    ;

  inherit (lib)
    escapeShellArgs
    toList
    ;

  inherit (devShellTools)
    valueToString
    ;
57
58 mkDbExtraCommand = contents:
59 let
60 contentsList = if builtins.isList contents then contents else [ contents ];
61 in
62 ''
63 echo "Generating the nix database..."
64 echo "Warning: only the database of the deepest Nix layer is loaded."
65 echo " If you want to use nix commands in the container, it would"
66 echo " be better to only have one layer that contains a nix store."
67
68 export NIX_REMOTE=local?root=$PWD
69 # A user is required by nix
70 # https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
71 export USER=nobody
72 ${buildPackages.nix}/bin/nix-store --load-db < ${closureInfo {rootPaths = contentsList;}}/registration
73 # Reset registration times to make the image reproducible
74 ${buildPackages.sqlite}/bin/sqlite3 nix/var/nix/db/db.sqlite "UPDATE ValidPaths SET registrationTime = ''${SOURCE_DATE_EPOCH}"
75
76 mkdir -p nix/var/nix/gcroots/docker/
77 for i in ${lib.concatStringsSep " " contentsList}; do
78 ln -s $i nix/var/nix/gcroots/docker/$(basename $i)
79 done;
80 '';
81
  # The OCI Image specification recommends that configurations use values listed
  # in the Go Language document for GOARCH.
  # Reference: https://github.com/opencontainers/image-spec/blob/master/config.md#properties
  # For the mapping from Nixpkgs system parameters to GOARCH, we can reuse the
  # mapping from the go package.
  # (Presumably e.g. "amd64" on x86_64-linux, "arm64" on aarch64-linux.)
  defaultArchitecture = go.GOARCH;
88
  # Supported image compressors, keyed by the name accepted by the
  # `compressor` argument of buildImage/buildLayeredImage.
  # Each entry provides:
  #   ext          - file-name suffix for the produced tarball
  #   nativeInputs - packages that must be on PATH for the commands below
  #   compress     - command reading a tar stream on stdin, writing compressed data to stdout
  #   decompress   - the inverse command
  compressors = {
    none = {
      ext = "";
      nativeInputs = [ ];
      compress = "cat";
      decompress = "cat";
    };
    gz = {
      ext = ".gz";
      nativeInputs = [ pigz ];
      # -n/-T drop name/timestamp metadata for reproducible output
      compress = "pigz -p$NIX_BUILD_CORES -nTR";
      decompress = "pigz -d -p$NIX_BUILD_CORES";
    };
    zstd = {
      ext = ".zst";
      nativeInputs = [ zstd ];
      compress = "zstd -T$NIX_BUILD_CORES";
      decompress = "zstd -d -T$NIX_BUILD_CORES";
    };
  };
109
110 compressorForImage = compressor: imageName: compressors.${compressor} or
111 (throw "in docker image ${imageName}: compressor must be one of: [${toString builtins.attrNames compressors}]");
112
113in
114rec {
  # Worked example images, used by documentation and the test suite;
  # see ./examples.nix for the individual definitions.
  examples = callPackage ./examples.nix {
    inherit buildImage buildLayeredImage fakeNss pullImage shadowSetup buildImageWithNixDb streamNixShellImage;
  };
118
  # NixOS VM tests exercising dockerTools.
  tests = {
    inherit (nixosTests)
      docker-tools
      docker-tools-overlay
      # requires remote builder
      # docker-tools-cross
      ;
  };
127
  # Pull an image from a registry with skopeo and save it as a
  # docker-archive tarball. This is a fixed-output derivation: the result is
  # pinned by `sha256`, and the image is addressed by digest so the fetch is
  # reproducible even though it requires network access.
  pullImage =
    let
      # Registry references contain "/" and ":", which are not allowed in
      # store path names; replace both with "-".
      fixName = name: builtins.replaceStrings [ "/" ":" ] [ "-" "-" ] name;
    in
    { imageName
      # To find the digest of an image, you can use skopeo:
      # see doc/functions.xml
    , imageDigest
      # Expected sha256 of the resulting tarball (the fixed-output hash).
    , sha256
    , os ? "linux"
    , # Image architecture, defaults to the architecture of the `hostPlatform` when unset
      arch ? defaultArchitecture
      # This is used to set name to the pulled image
    , finalImageName ? imageName
      # This is used to set a tag to the pulled image
    , finalImageTag ? "latest"
      # This is used to disable TLS certificate verification, allowing access to http registries on (hopefully) trusted networks
    , tlsVerify ? true

    , name ? fixName "docker-image-${finalImageName}-${finalImageTag}.tar"
    }:

    runCommand name
      {
        inherit imageDigest;
        imageName = finalImageName;
        imageTag = finalImageTag;
        # Allow proxy settings through; required for network access in a
        # fixed-output derivation behind a proxy.
        impureEnvVars = lib.fetchers.proxyImpureEnvVars;
        outputHashMode = "flat";
        outputHashAlgo = "sha256";
        outputHash = sha256;

        nativeBuildInputs = [ skopeo ];
        SSL_CERT_FILE = "${cacert.out}/etc/ssl/certs/ca-bundle.crt";

        sourceURL = "docker://${imageName}@${imageDigest}";
        destNameTag = "${finalImageName}:${finalImageTag}";
      } ''
      skopeo \
        --insecure-policy \
        --tmpdir=$TMPDIR \
        --override-os ${os} \
        --override-arch ${arch} \
        copy \
        --src-tls-verify=${lib.boolToString tlsVerify} \
        "$sourceURL" "docker-archive://$out:$destNameTag" \
        | cat # pipe through cat to force-disable progress bar
    '';
176
  # We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
  # And we cannot untar it, because then we cannot preserve permissions etc.
  # Re-exported so consumers can hash layer tarballs themselves.
  inherit tarsum; # pkgs.dockerTools.tarsum
180
181 # buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
182 mergeDrvs =
183 { derivations
184 , onlyDeps ? false
185 }:
186 runCommand "merge-drvs"
187 {
188 inherit derivations onlyDeps;
189 } ''
190 if [[ -n "$onlyDeps" ]]; then
191 echo $derivations > $out
192 exit 0
193 fi
194
195 mkdir $out
196 for derivation in $derivations; do
197 echo "Merging $derivation..."
198 if [[ -d "$derivation" ]]; then
199 # If it's a directory, copy all of its contents into $out.
200 cp -drf --preserve=mode -f $derivation/* $out/
201 else
202 # Otherwise treat the derivation as a tarball and extract it
203 # into $out.
204 tar -C $out -xpf $drv || true
205 fi
206 done
207 '';
208
  # Helper for setting up the base files for managing users and
  # groups, only if such files don't exist already. It is suitable for
  # being used in a runAsRoot script.
  # Puts the `shadow` utilities (useradd, groupadd, ...) on PATH and seeds
  # minimal /etc/passwd, /etc/shadow, /etc/group, /etc/gshadow, a permissive
  # PAM "other" service, and an empty /etc/login.defs so those utilities can
  # run inside the image build.
  shadowSetup = ''
    export PATH=${shadow}/bin:$PATH
    mkdir -p /etc/pam.d
    if [[ ! -f /etc/passwd ]]; then
      echo "root:x:0:0::/root:${runtimeShell}" > /etc/passwd
      echo "root:!x:::::::" > /etc/shadow
    fi
    if [[ ! -f /etc/group ]]; then
      echo "root:x:0:" > /etc/group
      echo "root:x::" > /etc/gshadow
    fi
    if [[ ! -f /etc/pam.d/other ]]; then
      cat > /etc/pam.d/other <<EOF
    account sufficient pam_unix.so
    auth sufficient pam_rootok.so
    password requisite pam_unix.so nullok yescrypt
    session required pam_unix.so
    EOF
    fi
    if [[ ! -f /etc/login.defs ]]; then
      touch /etc/login.defs
    fi
  '';
235
236 # Run commands in a virtual machine.
237 runWithOverlay =
238 { name
239 , fromImage ? null
240 , fromImageName ? null
241 , fromImageTag ? null
242 , diskSize ? 1024
243 , buildVMMemorySize ? 512
244 , preMount ? ""
245 , postMount ? ""
246 , postUmount ? ""
247 }:
248 vmTools.runInLinuxVM (
249 runCommand name
250 {
251 preVM = vmTools.createEmptyImage {
252 size = diskSize;
253 fullName = "docker-run-disk";
254 destination = "./image";
255 };
256 inherit fromImage fromImageName fromImageTag;
257 memSize = buildVMMemorySize;
258
259 nativeBuildInputs = [ util-linux e2fsprogs jshon rsync jq ];
260 } ''
261 mkdir disk
262 mkfs /dev/${vmTools.hd}
263 mount /dev/${vmTools.hd} disk
264 cd disk
265
266 function dedup() {
267 declare -A seen
268 while read ln; do
269 if [[ -z "''${seen["$ln"]:-}" ]]; then
270 echo "$ln"; seen["$ln"]=1
271 fi
272 done
273 }
274
275 if [[ -n "$fromImage" ]]; then
276 echo "Unpacking base image..."
277 mkdir image
278 tar -C image -xpf "$fromImage"
279
280 if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
281 parentID="$(
282 cat "image/manifest.json" |
283 jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | rtrimstr(".json")' \
284 --arg desiredTag "$fromImageName:$fromImageTag"
285 )"
286 else
287 echo "From-image name or tag wasn't set. Reading the first ID."
288 parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
289 fi
290
291 # In case of repeated layers, unpack only the last occurrence of each
292 cat ./image/manifest.json | jq -r '.[0].Layers | .[]' | tac | dedup | tac > layer-list
293 else
294 touch layer-list
295 fi
296
297 # Unpack all of the parent layers into the image.
298 lowerdir=""
299 extractionID=0
300 for layerTar in $(cat layer-list); do
301 echo "Unpacking layer $layerTar"
302 extractionID=$((extractionID + 1))
303
304 mkdir -p image/$extractionID/layer
305 tar -C image/$extractionID/layer -xpf image/$layerTar
306 rm image/$layerTar
307
308 find image/$extractionID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;
309
310 # Get the next lower directory and continue the loop.
311 lowerdir=image/$extractionID/layer''${lowerdir:+:}$lowerdir
312 done
313
314 mkdir work
315 mkdir layer
316 mkdir mnt
317
318 ${lib.optionalString (preMount != "") ''
319 # Execute pre-mount steps
320 echo "Executing pre-mount steps..."
321 ${preMount}
322 ''}
323
324 if [ -n "$lowerdir" ]; then
325 mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
326 else
327 mount --bind layer mnt
328 fi
329
330 ${lib.optionalString (postMount != "") ''
331 # Execute post-mount steps
332 echo "Executing post-mount steps..."
333 ${postMount}
334 ''}
335
336 umount mnt
337
338 (
339 cd layer
340 cmd='name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"'
341 find . -type c -exec bash -c "$cmd" \;
342 )
343
344 ${postUmount}
345 '');
346
  # Flatten an image into a single raw filesystem tarball: mount all layers
  # with runWithOverlay, pack the merged tree, and make that tarball $out.
  exportImage = { name ? fromImage.name, fromImage, fromImageName ? null, fromImageTag ? null, diskSize ? 1024 }:
    runWithOverlay {
      inherit name fromImage fromImageName fromImageTag diskSize;

      postMount = ''
        echo "Packing raw image..."
        tar -C mnt --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf $out/layer.tar .
      '';

      # Replace the $out directory with the tarball itself, so the output is
      # a single file.
      postUmount = ''
        mv $out/layer.tar .
        rm -rf $out
        mv layer.tar $out
      '';
    };
362
  # Create an executable shell script which has the coreutils in its
  # PATH. Since root scripts are executed in a blank environment, even
  # things like `ls` or `echo` will be missing.
  # The script runs under `set -e`, so any failing command aborts it.
  shellScript = name: text:
    writeScript name ''
      #!${runtimeShell}
      set -e
      export PATH=${coreutils}/bin:/bin
      ${text}
    '';
373
  # Create a "layer" (set of files).
  # Builds a docker layer without root privileges: the given store paths are
  # rsynced into a scratch directory, `extraCommands` may post-process it,
  # and the result is packed into $out/layer.tar alongside the v1.0 schema
  # metadata (json with the tarsum checksum, and VERSION).
  mkPureLayer =
    {
      # Name of the layer
      name
    , # JSON containing configuration and metadata for this layer.
      baseJson
    , # Files to add to the layer.
      copyToRoot ? null
    , # When copying the contents into the image, preserve symlinks to
      # directories (see `rsync -K`). Otherwise, transform those symlinks
      # into directories.
      keepContentsDirlinks ? false
    , # Additional commands to run on the layer before it is tar'd up.
      extraCommands ? ""
    , # Numeric owner/group recorded for every file in the tarball.
      uid ? 0
    , gid ? 0
    }:
    runCommand "docker-layer-${name}"
      {
        inherit baseJson extraCommands;
        contents = copyToRoot;
        nativeBuildInputs = [ jshon rsync tarsum ];
      }
      ''
        mkdir layer
        if [[ -n "$contents" ]]; then
          echo "Adding contents..."
          for item in $contents; do
            echo "Adding $item"
            rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
          done
        else
          echo "No contents to add to layer."
        fi

        chmod ug+w layer

        if [[ -n "$extraCommands" ]]; then
          (cd layer; eval "$extraCommands")
        fi

        # Tar up the layer and throw it into 'layer.tar'.
        # --sort/--mtime/--owner/--group pin the metadata for reproducibility;
        # tee -p both writes the tarball and feeds tarsum for the checksum.
        echo "Packing layer..."
        mkdir $out
        tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf - . | tee -p $out/layer.tar | tarsum)

        # Add a 'checksum' field to the JSON, with the value set to the
        # checksum of the tarball.
        cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json

        # Indicate to docker that we're using schema version 1.0.
        echo -n "1.0" > $out/VERSION

        echo "Finished building layer '${name}'"
      '';
430
  # Make a "root" layer; required if we need to execute commands as a
  # privileged user on the image. The commands themselves will be
  # performed in a virtual machine sandbox.
  mkRootLayer =
    {
      # Name of the image.
      name
    , # Script to run as root. Bash.
      runAsRoot
    , # Files to add to the layer. If null, an empty layer will be created.
      # To add packages to /bin, use `buildEnv` or similar.
      copyToRoot ? null
    , # When copying the contents into the image, preserve symlinks to
      # directories (see `rsync -K`). Otherwise, transform those symlinks
      # into directories.
      keepContentsDirlinks ? false
    , # JSON containing configuration and metadata for this layer.
      baseJson
    , # Existing image onto which to append the new layer.
      fromImage ? null
    , # Name of the image we're appending onto.
      fromImageName ? null
    , # Tag of the image we're appending onto.
      fromImageTag ? null
    , # How much disk to allocate for the temporary virtual machine.
      diskSize ? 1024
    , # How much memory to allocate for the temporary virtual machine.
      buildVMMemorySize ? 512
    , # Commands (bash) to run on the layer; these do not require sudo.
      extraCommands ? ""
    }:
    # Generate an executable script from the `runAsRoot` text.
    let
      runAsRootScript = shellScript "run-as-root.sh" runAsRoot;
      extraCommandsScript = shellScript "extra-commands.sh" extraCommands;
    in
    runWithOverlay {
      name = "docker-layer-${name}";

      inherit fromImage fromImageName fromImageTag diskSize buildVMMemorySize;

      # Before mounting the overlay: copy the requested contents into the
      # upper layer so runAsRoot sees them.
      preMount = lib.optionalString (copyToRoot != null && copyToRoot != [ ]) ''
        echo "Adding contents..."
        for item in ${escapeShellArgs (map (c: "${c}") (toList copyToRoot))}; do
          echo "Adding $item..."
          rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
        done

        chmod ug+w layer
      '';

      postMount = ''
        mkdir -p mnt/{dev,proc,sys,tmp} mnt${storeDir}

        # Mount /dev, /sys and the nix store as shared folders.
        mount --rbind /dev mnt/dev
        mount --rbind /sys mnt/sys
        mount --rbind ${storeDir} mnt${storeDir}

        # Execute the run as root script. See 'man unshare' for
        # details on what's going on here; basically this command
        # means that the runAsRootScript will be executed in a nearly
        # completely isolated environment.
        #
        # Ideally we would use --mount-proc=mnt/proc or similar, but this
        # doesn't work. The workaround is to setup proc after unshare.
        # See: https://github.com/karelzak/util-linux/issues/648
        unshare -imnpuf --mount-proc sh -c 'mount --rbind /proc mnt/proc && chroot mnt ${runAsRootScript}'

        # Unmount directories and remove them.
        umount -R mnt/dev mnt/sys mnt${storeDir}
        rmdir --ignore-fail-on-non-empty \
          mnt/dev mnt/proc mnt/sys mnt${storeDir} \
          mnt$(dirname ${storeDir})
      '';

      # After unmounting: run the unprivileged extra commands, then pack the
      # layer plus its v1.0 schema metadata, as mkPureLayer does.
      postUmount = ''
        (cd layer; ${extraCommandsScript})

        echo "Packing layer..."
        mkdir -p $out
        tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf - . |
                    tee -p $out/layer.tar |
                    ${tarsum}/bin/tarsum)

        cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json
        # Indicate to docker that we're using schema version 1.0.
        echo -n "1.0" > $out/VERSION

        echo "Finished building layer '${name}'"
      '';
    };
523
524 buildLayeredImage = lib.makeOverridable ({ name, compressor ? "gz", ... }@args:
525 let
526 stream = streamLayeredImage (builtins.removeAttrs args ["compressor"]);
527 compress = compressorForImage compressor name;
528 in
529 runCommand "${baseNameOf name}.tar${compress.ext}"
530 {
531 inherit (stream) imageName;
532 passthru = { inherit (stream) imageTag; inherit stream; };
533 nativeBuildInputs = compress.nativeInputs;
534 } "${stream} | ${compress.compress} > $out"
535 );
536
537 # 1. extract the base image
538 # 2. create the layer
539 # 3. add layer deps to the layer itself, diffing with the base image
540 # 4. compute the layer id
541 # 5. put the layer in the image
542 # 6. repack the image
543 buildImage = lib.makeOverridable (
544 args@{
545 # Image name.
546 name
547 , # Image tag, when null then the nix output hash will be used.
548 tag ? null
549 , # Parent image, to append to.
550 fromImage ? null
551 , # Name of the parent image; will be read from the image otherwise.
552 fromImageName ? null
553 , # Tag of the parent image; will be read from the image otherwise.
554 fromImageTag ? null
555 , # Files to put on the image (a nix store path or list of paths).
556 copyToRoot ? null
557 , # When copying the contents into the image, preserve symlinks to
558 # directories (see `rsync -K`). Otherwise, transform those symlinks
559 # into directories.
560 keepContentsDirlinks ? false
561 , # Docker config; e.g. what command to run on the container.
562 config ? null
563 , # Image architecture, defaults to the architecture of the `hostPlatform` when unset
564 architecture ? defaultArchitecture
565 , # Optional bash script to run on the files prior to fixturizing the layer.
566 extraCommands ? ""
567 , uid ? 0
568 , gid ? 0
569 , # Optional bash script to run as root on the image when provisioning.
570 runAsRoot ? null
571 , # Size of the virtual machine disk to provision when building the image.
572 diskSize ? 1024
573 , # Size of the virtual machine memory to provision when building the image.
574 buildVMMemorySize ? 512
575 , # Time of creation of the image.
576 created ? "1970-01-01T00:00:01Z"
577 , # Compressor to use. One of: none, gz, zstd.
578 compressor ? "gz"
579 # Populate the nix database in the image with the dependencies of `copyToRoot`.
580 , includeNixDB ? false
581 , # Deprecated.
582 contents ? null
583 ,
584 }:
585
586 let
587 checked =
588 lib.warnIf (contents != null)
589 "in docker image ${name}: The contents parameter is deprecated. Change to copyToRoot if the contents are designed to be copied to the root filesystem, such as when you use `buildEnv` or similar between contents and your packages. Use copyToRoot = buildEnv { ... }; or similar if you intend to add packages to /bin."
590 lib.throwIf (contents != null && copyToRoot != null) "in docker image ${name}: You can not specify both contents and copyToRoot."
591 ;
592
593 rootContents = if copyToRoot == null then contents else copyToRoot;
594
595 baseName = baseNameOf name;
596
597 # Create a JSON blob of the configuration. Set the date to unix zero.
598 baseJson =
599 let
600 pure = writeText "${baseName}-config.json" (builtins.toJSON {
601 inherit created config architecture;
602 preferLocalBuild = true;
603 os = "linux";
604 });
605 impure = runCommand "${baseName}-config.json"
606 {
607 nativeBuildInputs = [ jq ];
608 preferLocalBuild = true;
609 }
610 ''
611 jq ".created = \"$(TZ=utc date --iso-8601="seconds")\"" ${pure} > $out
612 '';
613 in
614 if created == "now" then impure else pure;
615
616 compress = compressorForImage compressor name;
617
618 # TODO: add the dependencies of the config json.
619 extraCommandsWithDB =
620 if includeNixDB then (mkDbExtraCommand rootContents) + extraCommands
621 else extraCommands;
622
623 layer =
624 if runAsRoot == null
625 then
626 mkPureLayer
627 {
628 name = baseName;
629 inherit baseJson keepContentsDirlinks uid gid;
630 extraCommands = extraCommandsWithDB;
631 copyToRoot = rootContents;
632 } else
633 mkRootLayer {
634 name = baseName;
635 inherit baseJson fromImage fromImageName fromImageTag
636 keepContentsDirlinks runAsRoot diskSize buildVMMemorySize;
637 extraCommands = extraCommandsWithDB;
638 copyToRoot = rootContents;
639 };
640 result = runCommand "docker-image-${baseName}.tar${compress.ext}"
641 {
642 nativeBuildInputs = [ jshon jq moreutils ] ++ compress.nativeInputs;
643 # Image name must be lowercase
644 imageName = lib.toLower name;
645 imageTag = lib.optionalString (tag != null) tag;
646 inherit fromImage baseJson;
647 layerClosure = writeClosure [ layer ];
648 passthru.buildArgs = args;
649 passthru.layer = layer;
650 passthru.imageTag =
651 if tag != null
652 then tag
653 else
654 lib.head (lib.strings.splitString "-" (baseNameOf (builtins.unsafeDiscardStringContext result.outPath)));
655 } ''
656 ${lib.optionalString (tag == null) ''
657 outName="$(basename "$out")"
658 outHash=$(echo "$outName" | cut -d - -f 1)
659
660 imageTag=$outHash
661 ''}
662
663 # Print tar contents:
664 # 1: Interpreted as relative to the root directory
665 # 2: With no trailing slashes on directories
666 # This is useful for ensuring that the output matches the
667 # values generated by the "find" command
668 ls_tar() {
669 for f in $(tar -tf $1 | xargs realpath -ms --relative-to=.); do
670 if [[ "$f" != "." ]]; then
671 echo "/$f"
672 fi
673 done
674 }
675
676 mkdir image
677 touch baseFiles
678 baseEnvs='[]'
679 if [[ -n "$fromImage" ]]; then
680 echo "Unpacking base image..."
681 tar -C image -xpf "$fromImage"
682
683 # Store the layers and the environment variables from the base image
684 cat ./image/manifest.json | jq -r '.[0].Layers | .[]' > layer-list
685 configName="$(cat ./image/manifest.json | jq -r '.[0].Config')"
686 baseEnvs="$(cat "./image/$configName" | jq '.config.Env // []')"
687
688 # Extract the parentID from the manifest
689 if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then
690 parentID="$(
691 cat "image/manifest.json" |
692 jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | rtrimstr(".json")' \
693 --arg desiredTag "$fromImageName:$fromImageTag"
694 )"
695 else
696 echo "From-image name or tag wasn't set. Reading the first ID."
697 parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')"
698 fi
699
700 # Otherwise do not import the base image configuration and manifest
701 chmod a+w image image/*.json
702 rm -f image/*.json
703
704 for l in image/*/layer.tar; do
705 ls_tar $l >> baseFiles
706 done
707 else
708 touch layer-list
709 fi
710
711 chmod -R ug+rw image
712
713 mkdir temp
714 cp ${layer}/* temp/
715 chmod ug+w temp/*
716
717 for dep in $(cat $layerClosure); do
718 find $dep >> layerFiles
719 done
720
721 echo "Adding layer..."
722 # Record the contents of the tarball with ls_tar.
723 ls_tar temp/layer.tar >> baseFiles
724
725 # Append nix/store directory to the layer so that when the layer is loaded in the
726 # image /nix/store has read permissions for non-root users.
727 # nix/store is added only if the layer has /nix/store paths in it.
728 if [ $(wc -l < $layerClosure) -gt 1 ] && [ $(grep -c -e "^/nix/store$" baseFiles) -eq 0 ]; then
729 mkdir -p nix/store
730 chmod -R 555 nix
731 echo "./nix" >> layerFiles
732 echo "./nix/store" >> layerFiles
733 fi
734
735 # Get the files in the new layer which were *not* present in
736 # the old layer, and record them as newFiles.
737 comm <(sort -n baseFiles|uniq) \
738 <(sort -n layerFiles|uniq|grep -v ${layer}) -1 -3 > newFiles
739 # Append the new files to the layer.
740 tar -rpf temp/layer.tar --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" \
741 --owner=0 --group=0 --no-recursion --verbatim-files-from --files-from newFiles
742
743 echo "Adding meta..."
744
745 # If we have a parentID, add it to the json metadata.
746 if [[ -n "$parentID" ]]; then
747 cat temp/json | jshon -s "$parentID" -i parent > tmpjson
748 mv tmpjson temp/json
749 fi
750
751 # Take the sha256 sum of the generated json and use it as the layer ID.
752 # Compute the size and add it to the json under the 'Size' field.
753 layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
754 size=$(stat --printf="%s" temp/layer.tar)
755 cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
756 mv tmpjson temp/json
757
758 # Use the temp folder we've been working on to create a new image.
759 mv temp image/$layerID
760
761 # Add the new layer ID to the end of the layer list
762 (
763 cat layer-list
764 # originally this used `sed -i "1i$layerID" layer-list`, but
765 # would fail if layer-list was completely empty.
766 echo "$layerID/layer.tar"
767 ) | sponge layer-list
768
769 # Create image json and image manifest
770 imageJson=$(cat ${baseJson} | jq '.config.Env = $baseenv + .config.Env' --argjson baseenv "$baseEnvs")
771 imageJson=$(echo "$imageJson" | jq ". + {\"rootfs\": {\"diff_ids\": [], \"type\": \"layers\"}}")
772 manifestJson=$(jq -n "[{\"RepoTags\":[\"$imageName:$imageTag\"]}]")
773
774 for layerTar in $(cat ./layer-list); do
775 layerChecksum=$(sha256sum image/$layerTar | cut -d ' ' -f1)
776 imageJson=$(echo "$imageJson" | jq ".history |= . + [{\"created\": \"$(jq -r .created ${baseJson})\"}]")
777 # diff_ids order is from the bottom-most to top-most layer
778 imageJson=$(echo "$imageJson" | jq ".rootfs.diff_ids |= . + [\"sha256:$layerChecksum\"]")
779 manifestJson=$(echo "$manifestJson" | jq ".[0].Layers |= . + [\"$layerTar\"]")
780 done
781
782 imageJsonChecksum=$(echo "$imageJson" | sha256sum | cut -d ' ' -f1)
783 echo "$imageJson" > "image/$imageJsonChecksum.json"
784 manifestJson=$(echo "$manifestJson" | jq ".[0].Config = \"$imageJsonChecksum.json\"")
785 echo "$manifestJson" > image/manifest.json
786
787 # Store the json under the name image/repositories.
788 jshon -n object \
789 -n object -s "$layerID" -i "$imageTag" \
790 -i "$imageName" > image/repositories
791
792 # Make the image read-only.
793 chmod -R a-w image
794
795 echo "Cooking the image..."
796 tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | ${compress.compress} > $out
797
798 echo "Finished."
799 '';
800
801 in
802 checked result
803 );
804
  # Merge the tarballs of images built with buildImage into a single
  # tarball that contains all images. Running `docker load` on the resulting
  # tarball will load the images into the docker daemon.
  # Each input's compression is detected by MIME type; the merged output is
  # recompressed with the codec of the *last* input image.
  mergeImages = images: runCommand "merge-docker-images"
    {
      inherit images;
      nativeBuildInputs = [ file jq ]
        ++ compressors.none.nativeInputs
        ++ compressors.gz.nativeInputs
        ++ compressors.zstd.nativeInputs;
    } ''
    mkdir image inputs
    # Extract images
    repos=()
    manifests=()
    last_image_mime="application/gzip"
    for item in $images; do
      name=$(basename $item)
      mkdir inputs/$name

      last_image_mime=$(file --mime-type -b $item)
      case $last_image_mime in
        "application/x-tar") ${compressors.none.decompress};;
        "application/zstd") ${compressors.zstd.decompress};;
        "application/gzip") ${compressors.gz.decompress};;
        *) echo "error: unexpected layer type $last_image_mime" >&2; exit 1;;
      esac < $item | tar -xC inputs/$name

      if [ -f inputs/$name/repositories ]; then
        repos+=(inputs/$name/repositories)
      fi
      if [ -f inputs/$name/manifest.json ]; then
        manifests+=(inputs/$name/manifest.json)
      fi
    done
    # Copy all layers from input images to output image directory
    cp -R --update=none inputs/*/* image/
    # Merge repositories objects and manifests
    jq -s add "''${repos[@]}" > repositories
    jq -s add "''${manifests[@]}" > manifest.json
    # Replace output image repositories and manifest with merged versions
    mv repositories image/repositories
    mv manifest.json image/manifest.json
    # Create tarball and gzip
    tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | (
      case $last_image_mime in
        "application/x-tar") ${compressors.none.compress};;
        "application/zstd") ${compressors.zstd.compress};;
        "application/gzip") ${compressors.gz.compress};;
        # `*)` not needed; already checked.
      esac
    ) > $out
  '';
858
859
  # Provide a /etc/passwd and /etc/group that contain root and nobody.
  # Useful when packaging binaries that insist on using nss to look up
  # username/groups (like nginx).
  # /bin/sh is fine to not exist, and provided by another shim.
  # Re-exported unchanged from the top-level `fakeNss` package.
  inherit fakeNss; # alias
865
  # This provides a /usr/bin/env, for shell scripts using the
  # "#!/usr/bin/env executable" shebang.
  # The symlink points at coreutils' env binary.
  usrBinEnv = runCommand "usr-bin-env" { } ''
    mkdir -p $out/usr/bin
    ln -s ${coreutils}/bin/env $out/usr/bin
  '';
872
  # This provides /bin/sh, pointing to bashInteractive.
  # The use of bashInteractive here is intentional to support cases like `docker run -it <image_name>`, so keep these use cases in mind if making any changes to how this works.
  binSh = runCommand "bin-sh" { } ''
    mkdir -p $out/bin
    ln -s ${bashInteractive}/bin/bash $out/bin/sh
  '';
879
  # This provides the ca bundle in common locations.
  # All three paths are symlinks to the same cacert bundle, covering the
  # conventions of the major distribution families.
  caCertificates = runCommand "ca-certificates" { } ''
    mkdir -p $out/etc/ssl/certs $out/etc/pki/tls/certs
    # Old NixOS compatibility.
    ln -s ${cacert}/etc/ssl/certs/ca-bundle.crt $out/etc/ssl/certs/ca-bundle.crt
    # NixOS canonical location + Debian/Ubuntu/Arch/Gentoo compatibility.
    ln -s ${cacert}/etc/ssl/certs/ca-bundle.crt $out/etc/ssl/certs/ca-certificates.crt
    # CentOS/Fedora compatibility.
    ln -s ${cacert}/etc/ssl/certs/ca-bundle.crt $out/etc/pki/tls/certs/ca-bundle.crt
  '';
890
  # Build an image and populate its nix database with the provided
  # contents. The main purpose is to be able to use nix commands in
  # the container.
  # Be careful since this doesn't work well with multilayer.
  # TODO: add the dependencies of the config json.
  # Both are thin wrappers that force includeNixDB = true.
  buildImageWithNixDb = args: buildImage (args // { includeNixDB = true; });

  buildLayeredImageWithNixDb = args: buildLayeredImage (args // { includeNixDB = true; });
899
900 # Arguments are documented in ../../../doc/build-helpers/images/dockertools.section.md
901 streamLayeredImage = lib.makeOverridable (
902 {
903 name
904 , tag ? null
905 , fromImage ? null
906 , contents ? [ ]
907 , config ? { }
908 , architecture ? defaultArchitecture
909 , created ? "1970-01-01T00:00:01Z"
910 , uid ? 0
911 , gid ? 0
912 , uname ? "root"
913 , gname ? "root"
914 , maxLayers ? 100
915 , extraCommands ? ""
916 , fakeRootCommands ? ""
917 , enableFakechroot ? false
918 , includeStorePaths ? true
919 , includeNixDB ? false
920 , passthru ? {}
921 ,
922 }:
923 assert
924 (lib.assertMsg (maxLayers > 1)
925 "the maxLayers argument of dockerTools.buildLayeredImage function must be greather than 1 (current value: ${toString maxLayers})");
926 assert
927 (lib.assertMsg (enableFakechroot -> !stdenv.isDarwin) ''
928 cannot use `enableFakechroot` because `proot` is not portable to Darwin. Workarounds:
929 - use `fakeRootCommands` with the restricted `fakeroot` environment
930 - cross-compile your packages
931 - run your packages in a virtual machine
932 Discussion: https://github.com/NixOS/nixpkgs/issues/327311'');
933 let
934 baseName = baseNameOf name;
935
936 streamScript = writePython3 "stream" { } ./stream_layered_image.py;
937 baseJson = writeText "${baseName}-base.json" (builtins.toJSON {
938 inherit config architecture;
939 os = "linux";
940 });
941
942 contentsList = if builtins.isList contents then contents else [ contents ];
943 bind-paths = builtins.toString (builtins.map (path: "--bind=${path}:${path}!") [
944 "/dev/"
945 "/proc/"
946 "/sys/"
947 "${builtins.storeDir}/"
948 "$out/layer.tar"
949 ]);
950
      # We store the customisation layer as a tarball, to make sure that
      # things like permissions set on 'extraCommands' are not overridden
      # by Nix. Then we precompute the sha256 for performance.
      customisationLayer = symlinkJoin {
        name = "${baseName}-customisation-layer";
        paths = contentsList;
        # Optionally seed a Nix database first (so images built with
        # includeNixDB can run nix commands); user extraCommands run after it.
        extraCommands =
          (lib.optionalString includeNixDB (mkDbExtraCommand contents)) + extraCommands;
        inherit fakeRootCommands;
        nativeBuildInputs = [
          fakeroot
        ] ++ optionals enableFakechroot [
          proot
        ];
        postBuild = ''
          mv $out old_out
          (cd old_out; eval "$extraCommands" )

          mkdir $out
          ${if enableFakechroot then ''
            # Run fakeRootCommands under proot with the layer as the root
            # (-r, --pwd=/), so they can use absolute paths; /proc, /sys and
            # the store are bind-mounted from the host and are excluded from
            # the resulting tarball.
            proot -r $PWD/old_out ${bind-paths} --pwd=/ fakeroot bash -c '
              source $stdenv/setup
              eval "$fakeRootCommands"
              tar \
                --sort name \
                --exclude=./proc \
                --exclude=./sys \
                --exclude=.${builtins.storeDir} \
                --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
                --hard-dereference \
                -cf $out/layer.tar .
            '
          '' else ''
            # Without fakechroot, fakeRootCommands run relative to the layer
            # directory rather than an emulated /.
            fakeroot bash -c '
              source $stdenv/setup
              cd old_out
              eval "$fakeRootCommands"
              tar \
                --sort name \
                --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
                --hard-dereference \
                -cf $out/layer.tar .
            '
          ''}
          # Precompute the layer checksum so the stream script can reuse it.
          sha256sum $out/layer.tar \
            | cut -f 1 -d ' ' \
            > $out/checksum
        '';
      };
1000
      # Roots whose runtime closure is split into the image layers. When
      # includeStorePaths is disabled, only the customisation layer itself is
      # shipped and its store dependencies are left out of the image.
      closureRoots = lib.optionals includeStorePaths /* normally true */ (
        [ baseJson customisationLayer ]
      );
      overallClosure = writeText "closure" (lib.concatStringsSep " " closureRoots);

      # These derivations are only created as implementation details of docker-tools,
      # so they'll be excluded from the created images.
      unnecessaryDrvs = [ baseJson overallClosure customisationLayer ];
1009
1010 conf = runCommand "${baseName}-conf.json"
1011 {
1012 inherit fromImage maxLayers created uid gid uname gname;
1013 imageName = lib.toLower name;
1014 preferLocalBuild = true;
1015 passthru.imageTag =
1016 if tag != null
1017 then tag
1018 else
1019 lib.head (lib.strings.splitString "-" (baseNameOf (builtins.unsafeDiscardStringContext conf.outPath)));
1020 paths = buildPackages.referencesByPopularity overallClosure;
1021 nativeBuildInputs = [ jq ];
1022 } ''
1023 ${if (tag == null) then ''
1024 outName="$(basename "$out")"
1025 outHash=$(echo "$outName" | cut -d - -f 1)
1026
1027 imageTag=$outHash
1028 '' else ''
1029 imageTag="${tag}"
1030 ''}
1031
1032 # convert "created" to iso format
1033 if [[ "$created" != "now" ]]; then
1034 created="$(date -Iseconds -d "$created")"
1035 fi
1036
1037 paths() {
1038 cat $paths ${lib.concatMapStringsSep " "
1039 (path: "| (grep -v ${path} || true)")
1040 unnecessaryDrvs}
1041 }
1042
1043 # Compute the number of layers that are already used by a potential
1044 # 'fromImage' as well as the customization layer. Ensure that there is
1045 # still at least one layer available to store the image contents.
1046 usedLayers=0
1047
1048 # subtract number of base image layers
1049 if [[ -n "$fromImage" ]]; then
1050 (( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
1051 fi
1052
1053 # one layer will be taken up by the customisation layer
1054 (( usedLayers += 1 ))
1055
1056 if ! (( $usedLayers < $maxLayers )); then
1057 echo >&2 "Error: usedLayers $usedLayers layers to store 'fromImage' and" \
1058 "'extraCommands', but only maxLayers=$maxLayers were" \
1059 "allowed. At least 1 layer is required to store contents."
1060 exit 1
1061 fi
1062 availableLayers=$(( maxLayers - usedLayers ))
1063
1064 # Create $maxLayers worth of Docker Layers, one layer per store path
1065 # unless there are more paths than $maxLayers. In that case, create
1066 # $maxLayers-1 for the most popular layers, and smush the remainaing
1067 # store paths in to one final layer.
1068 #
1069 # The following code is fiddly w.r.t. ensuring every layer is
1070 # created, and that no paths are missed. If you change the
1071 # following lines, double-check that your code behaves properly
1072 # when the number of layers equals:
1073 # maxLayers-1, maxLayers, and maxLayers+1, 0
1074 paths |
1075 jq -sR '
1076 rtrimstr("\n") | split("\n")
1077 | (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
1078 | map(select(length > 0))
1079 ' \
1080 --argjson maxLayers "$availableLayers" > store_layers.json
1081
1082 # The index on $store_layers is necessary because the --slurpfile
1083 # automatically reads the file as an array.
1084 cat ${baseJson} | jq '
1085 . + {
1086 "store_dir": $store_dir,
1087 "from_image": $from_image,
1088 "store_layers": $store_layers[0],
1089 "customisation_layer", $customisation_layer,
1090 "repo_tag": $repo_tag,
1091 "created": $created,
1092 "uid": $uid,
1093 "gid": $gid,
1094 "uname": $uname,
1095 "gname": $gname
1096 }
1097 ' --arg store_dir "${storeDir}" \
1098 --argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
1099 --slurpfile store_layers store_layers.json \
1100 --arg customisation_layer ${customisationLayer} \
1101 --arg repo_tag "$imageName:$imageTag" \
1102 --arg created "$created" \
1103 --arg uid "$uid" \
1104 --arg gid "$gid" \
1105 --arg uname "$uname" \
1106 --arg gname "$gname" |
1107 tee $out
1108 '';
1109
      # The value users receive: an executable that, when run, streams the
      # image tarball to stdout (streamScript wrapped with the conf file as
      # its argument).
      result = runCommand "stream-${baseName}"
        {
          inherit conf;
          inherit (conf) imageName;
          inherit streamScript;
          preferLocalBuild = true;
          passthru = passthru // {
            inherit (conf) imageTag;

            # Distinguish tarballs and exes at the Nix level so functions that
            # take images can know in advance how the image is supposed to be used.
            isExe = true;
          };
          nativeBuildInputs = [ makeWrapper ];
        } ''
        makeWrapper $streamScript $out --add-flags $conf
      '';
1127 in
1128 result
1129 );
1130
  # This function streams a docker image that behaves like a nix-shell for a derivation
  # Docs: doc/build-helpers/images/dockertools.section.md
  # Tests: nixos/tests/docker-tools-nix-shell.nix
  #
  # Arguments:
  #   drv           — derivation whose build environment the image replicates
  #   name, tag     — image name (default "<drv.name>-env") and tag
  #   uid, gid      — numeric ids the container runs as
  #   homeDirectory — HOME inside the container
  #   shell         — interactive shell used as the image Cmd
  #   command, run  — optional command executed in the shell; `run` exits
  #                   non-interactively, `command` keeps the shell. Mutually
  #                   exclusive (second assert below).
  streamNixShellImage =
    { drv
    , name ? drv.name + "-env"
    , tag ? null
    , uid ? 1000
    , gid ? 1000
    , homeDirectory ? "/build"
    , shell ? bashInteractive + "/bin/bash"
    , command ? null
    , run ? null
    }:
    assert lib.assertMsg (! (drv.drvAttrs.__structuredAttrs or false))
      "streamNixShellImage: Does not work with the derivation ${drv.name} because it uses __structuredAttrs";
    assert lib.assertMsg (command == null || run == null)
      "streamNixShellImage: Can't specify both command and run";
    let

      # A binary that calls the command to build the derivation
      builder = writeShellScriptBin "buildDerivation" ''
        exec ${lib.escapeShellArg (valueToString drv.drvAttrs.builder)} ${lib.escapeShellArgs (map valueToString drv.drvAttrs.args)}
      '';

      # Minimal PATH: the directory of the chosen shell plus `buildDerivation`.
      staticPath = "${dirOf shell}:${lib.makeBinPath [ builder ]}";

      # Shell rc file mimicking what nix-shell sets up; see
      # https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L493-L526
      rcfile = writeText "nix-shell-rc" ''
        unset PATH
        dontAddDisableDepTrack=1
        # TODO: https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L506
        [ -e $stdenv/setup ] && source $stdenv/setup
        PATH=${staticPath}:"$PATH"
        SHELL=${lib.escapeShellArg shell}
        BASH=${lib.escapeShellArg shell}
        set +e
        [ -n "$PS1" -a -z "$NIX_SHELL_PRESERVE_PROMPT" ] && PS1='\n\[\033[1;32m\][nix-shell:\w]\$\[\033[0m\] '
        if [ "$(type -t runHook)" = function ]; then
          runHook shellHook
        fi
        unset NIX_ENFORCE_PURITY
        shopt -u nullglob
        shopt -s execfail
        ${optionalString (command != null || run != null) ''
          ${optionalString (command != null) command}
          ${optionalString (run != null) run}
          exit
        ''}
      '';

      # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/globals.hh#L464-L465
      sandboxBuildDir = "/build";

      # Environment variables the derivation's build would see: its inputs
      # plus the output placeholder variables.
      drvEnv =
        devShellTools.unstructuredDerivationInputEnv { inherit (drv) drvAttrs; }
        // devShellTools.derivationOutputEnv { outputList = drv.outputs; outputMap = drv; };

      # Environment variables set in the image
      envVars = {

        # Root certificates for internet access
        SSL_CERT_FILE = "${cacert}/etc/ssl/certs/ca-bundle.crt";
        NIX_SSL_CERT_FILE = "${cacert}/etc/ssl/certs/ca-bundle.crt";

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1027-L1030
        # PATH = "/path-not-set";
        # Allows calling bash and `buildDerivation` as the Cmd
        PATH = staticPath;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1032-L1038
        HOME = homeDirectory;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1040-L1044
        NIX_STORE = storeDir;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1046-L1047
        # TODO: Make configurable?
        NIX_BUILD_CORES = "1";

      # drvEnv is merged in the middle so the sandbox variables below win over
      # anything the derivation attributes might set.
      } // drvEnv // {

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1008-L1010
        NIX_BUILD_TOP = sandboxBuildDir;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1012-L1013
        TMPDIR = sandboxBuildDir;
        TEMPDIR = sandboxBuildDir;
        TMP = sandboxBuildDir;
        TEMP = sandboxBuildDir;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1015-L1019
        PWD = sandboxBuildDir;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1071-L1074
        # We don't set it here because the output here isn't handled in any special way
        # NIX_LOG_FD = "2";

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1076-L1077
        TERM = "xterm-256color";
      };


    in streamLayeredImage {
      inherit name tag;
      contents = [
        binSh
        usrBinEnv
        (fakeNss.override {
          # Allows programs to look up the build user's home directory
          # https://github.com/NixOS/nix/blob/ffe155abd36366a870482625543f9bf924a58281/src/libstore/build/local-derivation-goal.cc#L906-L910
          # Slightly differs however: We use the passed-in homeDirectory instead of sandboxBuildDir.
          # We're doing this because it's arguably a bug in Nix that sandboxBuildDir is used here: https://github.com/NixOS/nix/issues/6379
          extraPasswdLines = [
            "nixbld:x:${toString uid}:${toString gid}:Build user:${homeDirectory}:/noshell"
          ];
          extraGroupLines = [
            "nixbld:!:${toString gid}:"
          ];
        })
      ];

      fakeRootCommands = ''
        # Effectively a single-user installation of Nix, giving the user full
        # control over the Nix store. Needed for building the derivation this
        # shell is for, but also in case one wants to use Nix inside the
        # image
        mkdir -p ./nix/{store,var/nix} ./etc/nix
        chown -R ${toString uid}:${toString gid} ./nix ./etc/nix

        # Gives the user control over the build directory
        mkdir -p .${sandboxBuildDir}
        chown -R ${toString uid}:${toString gid} .${sandboxBuildDir}
      '';

      # Run this image as the given uid/gid
      config.User = "${toString uid}:${toString gid}";
      config.Cmd =
        # https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L185-L186
        # https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L534-L536
        if run == null
        then [ shell "--rcfile" rcfile ]
        else [ shell rcfile ];
      config.WorkingDir = sandboxBuildDir;
      config.Env = lib.mapAttrsToList (name: value: "${name}=${value}") envVars;
    };
1277
1278 # Wrapper around streamNixShellImage to build an image from the result
1279 # Docs: doc/build-helpers/images/dockertools.section.md
1280 # Tests: nixos/tests/docker-tools-nix-shell.nix
1281 buildNixShellImage = { drv, compressor ? "gz", ... }@args:
1282 let
1283 stream = streamNixShellImage (builtins.removeAttrs args ["compressor"]);
1284 compress = compressorForImage compressor drv.name;
1285 in
1286 runCommand "${drv.name}-env.tar${compress.ext}"
1287 {
1288 inherit (stream) imageName;
1289 passthru = { inherit (stream) imageTag; };
1290 nativeBuildInputs = compress.nativeInputs;
1291 } "${stream} | ${compress.compress} > $out";
1292}