# dockerTools: helpers for building, pulling, merging and streaming
# Docker/OCI images with Nix (nixpkgs 23.11).
{ bashInteractive
, buildPackages
, cacert
, callPackage
, closureInfo
, coreutils
, e2fsprogs
, proot
, fakeNss
, fakeroot
, go
, jq
, jshon
, lib
, makeWrapper
, moreutils
, nix
, nixosTests
, pigz
, rsync
, runCommand
, runtimeShell
, shadow
, skopeo
, storeDir ? builtins.storeDir
, substituteAll
, symlinkJoin
, tarsum
, util-linux
, vmTools
, writeReferencesToFile
, writeScript
, writeShellScriptBin
, writeText
, writeTextDir
, writePython3
}:

let
  inherit (lib)
    optionals
    optionalString
    ;

  inherit (lib)
    escapeShellArgs
    toList
    ;

  # Shell snippet that loads the Nix database for `contents` (a path or list
  # of paths) into a store rooted at the current directory, and registers
  # each content path as a GC root so it is not collected inside the image.
  mkDbExtraCommand = contents:
    let
      contentsList = if builtins.isList contents then contents else [ contents ];
    in
    ''
      echo "Generating the nix database..."
      echo "Warning: only the database of the deepest Nix layer is loaded."
      echo "         If you want to use nix commands in the container, it would"
      echo "         be better to only have one layer that contains a nix store."

      export NIX_REMOTE=local?root=$PWD
      # A user is required by nix
      # https://github.com/NixOS/nix/blob/9348f9291e5d9e4ba3c4347ea1b235640f54fd79/src/libutil/util.cc#L478
      export USER=nobody
      ${buildPackages.nix}/bin/nix-store --load-db < ${closureInfo {rootPaths = contentsList;}}/registration

      mkdir -p nix/var/nix/gcroots/docker/
      for i in ${lib.concatStringsSep " " contentsList}; do
        ln -s $i nix/var/nix/gcroots/docker/$(basename $i)
      done;
    '';

  # The OCI Image specification recommends that configurations use values listed
  # in the Go Language document for GOARCH.
  # Reference: https://github.com/opencontainers/image-spec/blob/master/config.md#properties
  # For the mapping from Nixpkgs system parameters to GOARCH, we can reuse the
  # mapping from the go package.
  defaultArchitecture = go.GOARCH;

in
rec {
  examples = callPackage ./examples.nix {
    inherit buildImage buildLayeredImage fakeNss pullImage shadowSetup buildImageWithNixDb streamNixShellImage;
  };

  tests = {
    inherit (nixosTests)
      docker-tools
      docker-tools-overlay
      # requires remote builder
      # docker-tools-cross
      ;
  };

  # Fetch a Docker image from a registry by digest (fixed-output derivation)
  # and store it as a docker-archive tarball via skopeo.
  pullImage =
    let
      fixName = name: builtins.replaceStrings [ "/" ":" ] [ "-" "-" ] name;
    in
    { imageName
      # To find the digest of an image, you can use skopeo:
      # see doc/functions.xml
    , imageDigest
    , sha256
    , os ? "linux"
    , # Image architecture, defaults to the architecture of the `hostPlatform` when unset
      arch ? defaultArchitecture
      # This is used to set name to the pulled image
    , finalImageName ? imageName
      # This used to set a tag to the pulled image
    , finalImageTag ? "latest"
      # This is used to disable TLS certificate verification, allowing access to http registries on (hopefully) trusted networks
    , tlsVerify ? true

    , name ? fixName "docker-image-${finalImageName}-${finalImageTag}.tar"
    }:

    runCommand name
      {
        inherit imageDigest;
        imageName = finalImageName;
        imageTag = finalImageTag;
        impureEnvVars = lib.fetchers.proxyImpureEnvVars;
        outputHashMode = "flat";
        outputHashAlgo = "sha256";
        outputHash = sha256;

        nativeBuildInputs = [ skopeo ];
        SSL_CERT_FILE = "${cacert.out}/etc/ssl/certs/ca-bundle.crt";

        sourceURL = "docker://${imageName}@${imageDigest}";
        destNameTag = "${finalImageName}:${finalImageTag}";
      } ''
      skopeo \
        --insecure-policy \
        --tmpdir=$TMPDIR \
        --override-os ${os} \
        --override-arch ${arch} \
        copy \
        --src-tls-verify=${lib.boolToString tlsVerify} \
        "$sourceURL" "docker-archive://$out:$destNameTag" \
        | cat  # pipe through cat to force-disable progress bar
    '';

  # We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
  # And we cannot untar it, because then we cannot preserve permissions etc.
  inherit tarsum; # pkgs.dockerTools.tarsum

  # buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
  mergeDrvs =
    { derivations
    , onlyDeps ? false
    }:
    runCommand "merge-drvs"
      {
        inherit derivations onlyDeps;
      } ''
      if [[ -n "$onlyDeps" ]]; then
        echo $derivations > $out
        exit 0
      fi

      mkdir $out
      for derivation in $derivations; do
        echo "Merging $derivation..."
        if [[ -d "$derivation" ]]; then
          # If it's a directory, copy all of its contents into $out.
          cp -drf --preserve=mode -f $derivation/* $out/
        else
          # Otherwise treat the derivation as a tarball and extract it
          # into $out.
          # NOTE(fix): this previously referenced the undefined variable
          # $drv (the loop variable is $derivation), so the tar branch
          # silently did nothing thanks to `|| true`.
          tar -C $out -xpf $derivation || true
        fi
      done
    '';

  # Helper for setting up the base files for managing users and
  # groups, only if such files don't exist already. It is suitable for
  # being used in a runAsRoot script.
178 shadowSetup = '' 179 export PATH=${shadow}/bin:$PATH 180 mkdir -p /etc/pam.d 181 if [[ ! -f /etc/passwd ]]; then 182 echo "root:x:0:0::/root:${runtimeShell}" > /etc/passwd 183 echo "root:!x:::::::" > /etc/shadow 184 fi 185 if [[ ! -f /etc/group ]]; then 186 echo "root:x:0:" > /etc/group 187 echo "root:x::" > /etc/gshadow 188 fi 189 if [[ ! -f /etc/pam.d/other ]]; then 190 cat > /etc/pam.d/other <<EOF 191 account sufficient pam_unix.so 192 auth sufficient pam_rootok.so 193 password requisite pam_unix.so nullok yescrypt 194 session required pam_unix.so 195 EOF 196 fi 197 if [[ ! -f /etc/login.defs ]]; then 198 touch /etc/login.defs 199 fi 200 ''; 201 202 # Run commands in a virtual machine. 203 runWithOverlay = 204 { name 205 , fromImage ? null 206 , fromImageName ? null 207 , fromImageTag ? null 208 , diskSize ? 1024 209 , buildVMMemorySize ? 512 210 , preMount ? "" 211 , postMount ? "" 212 , postUmount ? "" 213 }: 214 vmTools.runInLinuxVM ( 215 runCommand name 216 { 217 preVM = vmTools.createEmptyImage { 218 size = diskSize; 219 fullName = "docker-run-disk"; 220 destination = "./image"; 221 }; 222 inherit fromImage fromImageName fromImageTag; 223 memSize = buildVMMemorySize; 224 225 nativeBuildInputs = [ util-linux e2fsprogs jshon rsync jq ]; 226 } '' 227 mkdir disk 228 mkfs /dev/${vmTools.hd} 229 mount /dev/${vmTools.hd} disk 230 cd disk 231 232 function dedup() { 233 declare -A seen 234 while read ln; do 235 if [[ -z "''${seen["$ln"]:-}" ]]; then 236 echo "$ln"; seen["$ln"]=1 237 fi 238 done 239 } 240 241 if [[ -n "$fromImage" ]]; then 242 echo "Unpacking base image..." 243 mkdir image 244 tar -C image -xpf "$fromImage" 245 246 if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then 247 parentID="$( 248 cat "image/manifest.json" | 249 jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | rtrimstr(".json")' \ 250 --arg desiredTag "$fromImageName:$fromImageTag" 251 )" 252 else 253 echo "From-image name or tag wasn't set. Reading the first ID." 
254 parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')" 255 fi 256 257 # In case of repeated layers, unpack only the last occurrence of each 258 cat ./image/manifest.json | jq -r '.[0].Layers | .[]' | tac | dedup | tac > layer-list 259 else 260 touch layer-list 261 fi 262 263 # Unpack all of the parent layers into the image. 264 lowerdir="" 265 extractionID=0 266 for layerTar in $(cat layer-list); do 267 echo "Unpacking layer $layerTar" 268 extractionID=$((extractionID + 1)) 269 270 mkdir -p image/$extractionID/layer 271 tar -C image/$extractionID/layer -xpf image/$layerTar 272 rm image/$layerTar 273 274 find image/$extractionID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \; 275 276 # Get the next lower directory and continue the loop. 277 lowerdir=image/$extractionID/layer''${lowerdir:+:}$lowerdir 278 done 279 280 mkdir work 281 mkdir layer 282 mkdir mnt 283 284 ${lib.optionalString (preMount != "") '' 285 # Execute pre-mount steps 286 echo "Executing pre-mount steps..." 287 ${preMount} 288 ''} 289 290 if [ -n "$lowerdir" ]; then 291 mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt 292 else 293 mount --bind layer mnt 294 fi 295 296 ${lib.optionalString (postMount != "") '' 297 # Execute post-mount steps 298 echo "Executing post-mount steps..." 299 ${postMount} 300 ''} 301 302 umount mnt 303 304 ( 305 cd layer 306 cmd='name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"' 307 find . -type c -exec bash -c "$cmd" \; 308 ) 309 310 ${postUmount} 311 ''); 312 313 exportImage = { name ? fromImage.name, fromImage, fromImageName ? null, fromImageTag ? null, diskSize ? 1024 }: 314 runWithOverlay { 315 inherit name fromImage fromImageName fromImageTag diskSize; 316 317 postMount = '' 318 echo "Packing raw image..." 319 tar -C mnt --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf $out/layer.tar . 
320 ''; 321 322 postUmount = '' 323 mv $out/layer.tar . 324 rm -rf $out 325 mv layer.tar $out 326 ''; 327 }; 328 329 # Create an executable shell script which has the coreutils in its 330 # PATH. Since root scripts are executed in a blank environment, even 331 # things like `ls` or `echo` will be missing. 332 shellScript = name: text: 333 writeScript name '' 334 #!${runtimeShell} 335 set -e 336 export PATH=${coreutils}/bin:/bin 337 ${text} 338 ''; 339 340 # Create a "layer" (set of files). 341 mkPureLayer = 342 { 343 # Name of the layer 344 name 345 , # JSON containing configuration and metadata for this layer. 346 baseJson 347 , # Files to add to the layer. 348 copyToRoot ? null 349 , # When copying the contents into the image, preserve symlinks to 350 # directories (see `rsync -K`). Otherwise, transform those symlinks 351 # into directories. 352 keepContentsDirlinks ? false 353 , # Additional commands to run on the layer before it is tar'd up. 354 extraCommands ? "" 355 , uid ? 0 356 , gid ? 0 357 }: 358 runCommand "docker-layer-${name}" 359 { 360 inherit baseJson extraCommands; 361 contents = copyToRoot; 362 nativeBuildInputs = [ jshon rsync tarsum ]; 363 } 364 '' 365 mkdir layer 366 if [[ -n "$contents" ]]; then 367 echo "Adding contents..." 368 for item in $contents; do 369 echo "Adding $item" 370 rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/ 371 done 372 else 373 echo "No contents to add to layer." 374 fi 375 376 chmod ug+w layer 377 378 if [[ -n "$extraCommands" ]]; then 379 (cd layer; eval "$extraCommands") 380 fi 381 382 # Tar up the layer and throw it into 'layer.tar'. 383 echo "Packing layer..." 384 mkdir $out 385 tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf - . | tee -p $out/layer.tar | tarsum) 386 387 # Add a 'checksum' field to the JSON, with the value set to the 388 # checksum of the tarball. 
389 cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json 390 391 # Indicate to docker that we're using schema version 1.0. 392 echo -n "1.0" > $out/VERSION 393 394 echo "Finished building layer '${name}'" 395 ''; 396 397 # Make a "root" layer; required if we need to execute commands as a 398 # privileged user on the image. The commands themselves will be 399 # performed in a virtual machine sandbox. 400 mkRootLayer = 401 { 402 # Name of the image. 403 name 404 , # Script to run as root. Bash. 405 runAsRoot 406 , # Files to add to the layer. If null, an empty layer will be created. 407 # To add packages to /bin, use `buildEnv` or similar. 408 copyToRoot ? null 409 , # When copying the contents into the image, preserve symlinks to 410 # directories (see `rsync -K`). Otherwise, transform those symlinks 411 # into directories. 412 keepContentsDirlinks ? false 413 , # JSON containing configuration and metadata for this layer. 414 baseJson 415 , # Existing image onto which to append the new layer. 416 fromImage ? null 417 , # Name of the image we're appending onto. 418 fromImageName ? null 419 , # Tag of the image we're appending onto. 420 fromImageTag ? null 421 , # How much disk to allocate for the temporary virtual machine. 422 diskSize ? 1024 423 , # How much memory to allocate for the temporary virtual machine. 424 buildVMMemorySize ? 512 425 , # Commands (bash) to run on the layer; these do not require sudo. 426 extraCommands ? "" 427 }: 428 # Generate an executable script from the `runAsRoot` text. 429 let 430 runAsRootScript = shellScript "run-as-root.sh" runAsRoot; 431 extraCommandsScript = shellScript "extra-commands.sh" extraCommands; 432 in 433 runWithOverlay { 434 name = "docker-layer-${name}"; 435 436 inherit fromImage fromImageName fromImageTag diskSize buildVMMemorySize; 437 438 preMount = lib.optionalString (copyToRoot != null && copyToRoot != [ ]) '' 439 echo "Adding contents..." 
440 for item in ${escapeShellArgs (map (c: "${c}") (toList copyToRoot))}; do 441 echo "Adding $item..." 442 rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/ 443 done 444 445 chmod ug+w layer 446 ''; 447 448 postMount = '' 449 mkdir -p mnt/{dev,proc,sys,tmp} mnt${storeDir} 450 451 # Mount /dev, /sys and the nix store as shared folders. 452 mount --rbind /dev mnt/dev 453 mount --rbind /sys mnt/sys 454 mount --rbind ${storeDir} mnt${storeDir} 455 456 # Execute the run as root script. See 'man unshare' for 457 # details on what's going on here; basically this command 458 # means that the runAsRootScript will be executed in a nearly 459 # completely isolated environment. 460 # 461 # Ideally we would use --mount-proc=mnt/proc or similar, but this 462 # doesn't work. The workaround is to setup proc after unshare. 463 # See: https://github.com/karelzak/util-linux/issues/648 464 unshare -imnpuf --mount-proc sh -c 'mount --rbind /proc mnt/proc && chroot mnt ${runAsRootScript}' 465 466 # Unmount directories and remove them. 467 umount -R mnt/dev mnt/sys mnt${storeDir} 468 rmdir --ignore-fail-on-non-empty \ 469 mnt/dev mnt/proc mnt/sys mnt${storeDir} \ 470 mnt$(dirname ${storeDir}) 471 ''; 472 473 postUmount = '' 474 (cd layer; ${extraCommandsScript}) 475 476 echo "Packing layer..." 477 mkdir -p $out 478 tarhash=$(tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf - . | 479 tee -p $out/layer.tar | 480 ${tarsum}/bin/tarsum) 481 482 cat ${baseJson} | jshon -s "$tarhash" -i checksum > $out/json 483 # Indicate to docker that we're using schema version 1.0. 484 echo -n "1.0" > $out/VERSION 485 486 echo "Finished building layer '${name}'" 487 ''; 488 }; 489 490 buildLayeredImage = lib.makeOverridable ({ name, ... 
}@args: 491 let 492 stream = streamLayeredImage args; 493 in 494 runCommand "${baseNameOf name}.tar.gz" 495 { 496 inherit (stream) imageName; 497 passthru = { inherit (stream) imageTag; }; 498 nativeBuildInputs = [ pigz ]; 499 } "${stream} | pigz -nTR > $out" 500 ); 501 502 # 1. extract the base image 503 # 2. create the layer 504 # 3. add layer deps to the layer itself, diffing with the base image 505 # 4. compute the layer id 506 # 5. put the layer in the image 507 # 6. repack the image 508 buildImage = lib.makeOverridable ( 509 args@{ 510 # Image name. 511 name 512 , # Image tag, when null then the nix output hash will be used. 513 tag ? null 514 , # Parent image, to append to. 515 fromImage ? null 516 , # Name of the parent image; will be read from the image otherwise. 517 fromImageName ? null 518 , # Tag of the parent image; will be read from the image otherwise. 519 fromImageTag ? null 520 , # Files to put on the image (a nix store path or list of paths). 521 copyToRoot ? null 522 , # When copying the contents into the image, preserve symlinks to 523 # directories (see `rsync -K`). Otherwise, transform those symlinks 524 # into directories. 525 keepContentsDirlinks ? false 526 , # Docker config; e.g. what command to run on the container. 527 config ? null 528 , # Image architecture, defaults to the architecture of the `hostPlatform` when unset 529 architecture ? defaultArchitecture 530 , # Optional bash script to run on the files prior to fixturizing the layer. 531 extraCommands ? "" 532 , uid ? 0 533 , gid ? 0 534 , # Optional bash script to run as root on the image when provisioning. 535 runAsRoot ? null 536 , # Size of the virtual machine disk to provision when building the image. 537 diskSize ? 1024 538 , # Size of the virtual machine memory to provision when building the image. 539 buildVMMemorySize ? 512 540 , # Time of creation of the image. 541 created ? "1970-01-01T00:00:01Z" 542 , # Deprecated. 543 contents ? 
null 544 , 545 }: 546 547 let 548 checked = 549 lib.warnIf (contents != null) 550 "in docker image ${name}: The contents parameter is deprecated. Change to copyToRoot if the contents are designed to be copied to the root filesystem, such as when you use `buildEnv` or similar between contents and your packages. Use copyToRoot = buildEnv { ... }; or similar if you intend to add packages to /bin." 551 lib.throwIf (contents != null && copyToRoot != null) "in docker image ${name}: You can not specify both contents and copyToRoot." 552 ; 553 554 rootContents = if copyToRoot == null then contents else copyToRoot; 555 556 baseName = baseNameOf name; 557 558 # Create a JSON blob of the configuration. Set the date to unix zero. 559 baseJson = 560 let 561 pure = writeText "${baseName}-config.json" (builtins.toJSON { 562 inherit created config architecture; 563 preferLocalBuild = true; 564 os = "linux"; 565 }); 566 impure = runCommand "${baseName}-config.json" 567 { 568 nativeBuildInputs = [ jq ]; 569 preferLocalBuild = true; 570 } 571 '' 572 jq ".created = \"$(TZ=utc date --iso-8601="seconds")\"" ${pure} > $out 573 ''; 574 in 575 if created == "now" then impure else pure; 576 577 layer = 578 if runAsRoot == null 579 then 580 mkPureLayer 581 { 582 name = baseName; 583 inherit baseJson keepContentsDirlinks extraCommands uid gid; 584 copyToRoot = rootContents; 585 } else 586 mkRootLayer { 587 name = baseName; 588 inherit baseJson fromImage fromImageName fromImageTag 589 keepContentsDirlinks runAsRoot diskSize buildVMMemorySize 590 extraCommands; 591 copyToRoot = rootContents; 592 }; 593 result = runCommand "docker-image-${baseName}.tar.gz" 594 { 595 nativeBuildInputs = [ jshon pigz jq moreutils ]; 596 # Image name must be lowercase 597 imageName = lib.toLower name; 598 imageTag = lib.optionalString (tag != null) tag; 599 inherit fromImage baseJson; 600 layerClosure = writeReferencesToFile layer; 601 passthru.buildArgs = args; 602 passthru.layer = layer; 603 passthru.imageTag = 
604 if tag != null 605 then tag 606 else 607 lib.head (lib.strings.splitString "-" (baseNameOf result.outPath)); 608 } '' 609 ${lib.optionalString (tag == null) '' 610 outName="$(basename "$out")" 611 outHash=$(echo "$outName" | cut -d - -f 1) 612 613 imageTag=$outHash 614 ''} 615 616 # Print tar contents: 617 # 1: Interpreted as relative to the root directory 618 # 2: With no trailing slashes on directories 619 # This is useful for ensuring that the output matches the 620 # values generated by the "find" command 621 ls_tar() { 622 for f in $(tar -tf $1 | xargs realpath -ms --relative-to=.); do 623 if [[ "$f" != "." ]]; then 624 echo "/$f" 625 fi 626 done 627 } 628 629 mkdir image 630 touch baseFiles 631 baseEnvs='[]' 632 if [[ -n "$fromImage" ]]; then 633 echo "Unpacking base image..." 634 tar -C image -xpf "$fromImage" 635 636 # Store the layers and the environment variables from the base image 637 cat ./image/manifest.json | jq -r '.[0].Layers | .[]' > layer-list 638 configName="$(cat ./image/manifest.json | jq -r '.[0].Config')" 639 baseEnvs="$(cat "./image/$configName" | jq '.config.Env // []')" 640 641 # Extract the parentID from the manifest 642 if [[ -n "$fromImageName" ]] && [[ -n "$fromImageTag" ]]; then 643 parentID="$( 644 cat "image/manifest.json" | 645 jq -r '.[] | select(.RepoTags | contains([$desiredTag])) | rtrimstr(".json")' \ 646 --arg desiredTag "$fromImageName:$fromImageTag" 647 )" 648 else 649 echo "From-image name or tag wasn't set. Reading the first ID." 
650 parentID="$(cat "image/manifest.json" | jq -r '.[0].Config | rtrimstr(".json")')" 651 fi 652 653 # Otherwise do not import the base image configuration and manifest 654 chmod a+w image image/*.json 655 rm -f image/*.json 656 657 for l in image/*/layer.tar; do 658 ls_tar $l >> baseFiles 659 done 660 else 661 touch layer-list 662 fi 663 664 chmod -R ug+rw image 665 666 mkdir temp 667 cp ${layer}/* temp/ 668 chmod ug+w temp/* 669 670 for dep in $(cat $layerClosure); do 671 find $dep >> layerFiles 672 done 673 674 echo "Adding layer..." 675 # Record the contents of the tarball with ls_tar. 676 ls_tar temp/layer.tar >> baseFiles 677 678 # Append nix/store directory to the layer so that when the layer is loaded in the 679 # image /nix/store has read permissions for non-root users. 680 # nix/store is added only if the layer has /nix/store paths in it. 681 if [ $(wc -l < $layerClosure) -gt 1 ] && [ $(grep -c -e "^/nix/store$" baseFiles) -eq 0 ]; then 682 mkdir -p nix/store 683 chmod -R 555 nix 684 echo "./nix" >> layerFiles 685 echo "./nix/store" >> layerFiles 686 fi 687 688 # Get the files in the new layer which were *not* present in 689 # the old layer, and record them as newFiles. 690 comm <(sort -n baseFiles|uniq) \ 691 <(sort -n layerFiles|uniq|grep -v ${layer}) -1 -3 > newFiles 692 # Append the new files to the layer. 693 tar -rpf temp/layer.tar --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" \ 694 --owner=0 --group=0 --no-recursion --verbatim-files-from --files-from newFiles 695 696 echo "Adding meta..." 697 698 # If we have a parentID, add it to the json metadata. 699 if [[ -n "$parentID" ]]; then 700 cat temp/json | jshon -s "$parentID" -i parent > tmpjson 701 mv tmpjson temp/json 702 fi 703 704 # Take the sha256 sum of the generated json and use it as the layer ID. 705 # Compute the size and add it to the json under the 'Size' field. 
706 layerID=$(sha256sum temp/json|cut -d ' ' -f 1) 707 size=$(stat --printf="%s" temp/layer.tar) 708 cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson 709 mv tmpjson temp/json 710 711 # Use the temp folder we've been working on to create a new image. 712 mv temp image/$layerID 713 714 # Add the new layer ID to the end of the layer list 715 ( 716 cat layer-list 717 # originally this used `sed -i "1i$layerID" layer-list`, but 718 # would fail if layer-list was completely empty. 719 echo "$layerID/layer.tar" 720 ) | sponge layer-list 721 722 # Create image json and image manifest 723 imageJson=$(cat ${baseJson} | jq '.config.Env = $baseenv + .config.Env' --argjson baseenv "$baseEnvs") 724 imageJson=$(echo "$imageJson" | jq ". + {\"rootfs\": {\"diff_ids\": [], \"type\": \"layers\"}}") 725 manifestJson=$(jq -n "[{\"RepoTags\":[\"$imageName:$imageTag\"]}]") 726 727 for layerTar in $(cat ./layer-list); do 728 layerChecksum=$(sha256sum image/$layerTar | cut -d ' ' -f1) 729 imageJson=$(echo "$imageJson" | jq ".history |= . + [{\"created\": \"$(jq -r .created ${baseJson})\"}]") 730 # diff_ids order is from the bottom-most to top-most layer 731 imageJson=$(echo "$imageJson" | jq ".rootfs.diff_ids |= . + [\"sha256:$layerChecksum\"]") 732 manifestJson=$(echo "$manifestJson" | jq ".[0].Layers |= . + [\"$layerTar\"]") 733 done 734 735 imageJsonChecksum=$(echo "$imageJson" | sha256sum | cut -d ' ' -f1) 736 echo "$imageJson" > "image/$imageJsonChecksum.json" 737 manifestJson=$(echo "$manifestJson" | jq ".[0].Config = \"$imageJsonChecksum.json\"") 738 echo "$manifestJson" > image/manifest.json 739 740 # Store the json under the name image/repositories. 741 jshon -n object \ 742 -n object -s "$layerID" -i "$imageTag" \ 743 -i "$imageName" > image/repositories 744 745 # Make the image read-only. 746 chmod -R a-w image 747 748 echo "Cooking the image..." 
749 tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | pigz -nTR > $out 750 751 echo "Finished." 752 ''; 753 754 in 755 checked result 756 ); 757 758 # Merge the tarballs of images built with buildImage into a single 759 # tarball that contains all images. Running `docker load` on the resulting 760 # tarball will load the images into the docker daemon. 761 mergeImages = images: runCommand "merge-docker-images" 762 { 763 inherit images; 764 nativeBuildInputs = [ pigz jq ]; 765 } '' 766 mkdir image inputs 767 # Extract images 768 repos=() 769 manifests=() 770 for item in $images; do 771 name=$(basename $item) 772 mkdir inputs/$name 773 tar -I pigz -xf $item -C inputs/$name 774 if [ -f inputs/$name/repositories ]; then 775 repos+=(inputs/$name/repositories) 776 fi 777 if [ -f inputs/$name/manifest.json ]; then 778 manifests+=(inputs/$name/manifest.json) 779 fi 780 done 781 # Copy all layers from input images to output image directory 782 cp -R --update=none inputs/*/* image/ 783 # Merge repositories objects and manifests 784 jq -s add "''${repos[@]}" > repositories 785 jq -s add "''${manifests[@]}" > manifest.json 786 # Replace output image repositories and manifest with merged versions 787 mv repositories image/repositories 788 mv manifest.json image/manifest.json 789 # Create tarball and gzip 790 tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'^./':: -c . | pigz -nTR > $out 791 ''; 792 793 794 # Provide a /etc/passwd and /etc/group that contain root and nobody. 795 # Useful when packaging binaries that insist on using nss to look up 796 # username/groups (like nginx). 797 # /bin/sh is fine to not exist, and provided by another shim. 798 inherit fakeNss; # alias 799 800 # This provides a /usr/bin/env, for shell scripts using the 801 # "#!/usr/bin/env executable" shebang. 
802 usrBinEnv = runCommand "usr-bin-env" { } '' 803 mkdir -p $out/usr/bin 804 ln -s ${coreutils}/bin/env $out/usr/bin 805 ''; 806 807 # This provides /bin/sh, pointing to bashInteractive. 808 binSh = runCommand "bin-sh" { } '' 809 mkdir -p $out/bin 810 ln -s ${bashInteractive}/bin/bash $out/bin/sh 811 ''; 812 813 # This provides the ca bundle in common locations 814 caCertificates = runCommand "ca-certificates" { } '' 815 mkdir -p $out/etc/ssl/certs $out/etc/pki/tls/certs 816 # Old NixOS compatibility. 817 ln -s ${cacert}/etc/ssl/certs/ca-bundle.crt $out/etc/ssl/certs/ca-bundle.crt 818 # NixOS canonical location + Debian/Ubuntu/Arch/Gentoo compatibility. 819 ln -s ${cacert}/etc/ssl/certs/ca-bundle.crt $out/etc/ssl/certs/ca-certificates.crt 820 # CentOS/Fedora compatibility. 821 ln -s ${cacert}/etc/ssl/certs/ca-bundle.crt $out/etc/pki/tls/certs/ca-bundle.crt 822 ''; 823 824 # Build an image and populate its nix database with the provided 825 # contents. The main purpose is to be able to use nix commands in 826 # the container. 827 # Be careful since this doesn't work well with multilayer. 828 # TODO: add the dependencies of the config json. 829 buildImageWithNixDb = args@{ copyToRoot ? contents, contents ? null, extraCommands ? "", ... }: ( 830 buildImage (args // { 831 extraCommands = (mkDbExtraCommand copyToRoot) + extraCommands; 832 }) 833 ); 834 835 # TODO: add the dependencies of the config json. 836 buildLayeredImageWithNixDb = args@{ contents ? null, extraCommands ? "", ... }: ( 837 buildLayeredImage (args // { 838 extraCommands = (mkDbExtraCommand contents) + extraCommands; 839 }) 840 ); 841 842 streamLayeredImage = lib.makeOverridable ( 843 { 844 # Image Name 845 name 846 , # Image tag, the Nix's output hash will be used if null 847 tag ? null 848 , # Parent image, to append to. 849 fromImage ? null 850 , # Files to put on the image (a nix store path or list of paths). 851 contents ? [ ] 852 , # Docker config; e.g. what command to run on the container. 
853 config ? { } 854 , # Image architecture, defaults to the architecture of the `hostPlatform` when unset 855 architecture ? defaultArchitecture 856 , # Time of creation of the image. Passing "now" will make the 857 # created date be the time of building. 858 created ? "1970-01-01T00:00:01Z" 859 , # Optional bash script to run on the files prior to fixturizing the layer. 860 extraCommands ? "" 861 , # Optional bash script to run inside fakeroot environment. 862 # Could be used for changing ownership of files in customisation layer. 863 fakeRootCommands ? "" 864 , # Whether to run fakeRootCommands in fakechroot as well, so that they 865 # appear to run inside the image, but have access to the normal Nix store. 866 # Perhaps this could be enabled on by default on pkgs.stdenv.buildPlatform.isLinux 867 enableFakechroot ? false 868 , # We pick 100 to ensure there is plenty of room for extension. I 869 # believe the actual maximum is 128. 870 maxLayers ? 100 871 , # Whether to include store paths in the image. You generally want to leave 872 # this on, but tooling may disable this to insert the store paths more 873 # efficiently via other means, such as bind mounting the host store. 874 includeStorePaths ? true 875 , # Passthru arguments for the underlying derivation. 876 passthru ? 
{}
,
}:
  # At least one layer is always needed for the image contents, so a
  # maxLayers of 1 (or less) can never work: fail early and loudly.
  assert
    (lib.assertMsg (maxLayers > 1)
      "the maxLayers argument of dockerTools.buildLayeredImage function must be greater than 1 (current value: ${toString maxLayers})");
  let
    baseName = baseNameOf name;

    # Python script that streams the assembled image tarball to stdout.
    streamScript = writePython3 "stream" { } ./stream_layered_image.py;

    # Minimal OCI image configuration; the stream script fills in the rest.
    baseJson = writeText "${baseName}-base.json" (builtins.toJSON {
      inherit config architecture;
      os = "linux";
    });

    contentsList = if builtins.isList contents then contents else [ contents ];

    # Host paths that must stay visible inside the proot environment when
    # enableFakechroot is set. The trailing "!" tells proot the binding is
    # allowed to not exist.
    bind-paths = builtins.toString (builtins.map (path: "--bind=${path}:${path}!") [
      "/dev/"
      "/proc/"
      "/sys/"
      "${builtins.storeDir}/"
      "$out/layer.tar"
    ]);

    # We store the customisation layer as a tarball, to make sure that
    # things like permissions set on 'extraCommands' are not overridden
    # by Nix. Then we precompute the sha256 for performance.
    customisationLayer = symlinkJoin {
      name = "${baseName}-customisation-layer";
      paths = contentsList;
      inherit extraCommands fakeRootCommands;
      nativeBuildInputs = [
        fakeroot
      ] ++ optionals enableFakechroot [
        proot
      ];
      postBuild = ''
        mv $out old_out
        (cd old_out; eval "$extraCommands" )

        mkdir $out
        ${if enableFakechroot then ''
          proot -r $PWD/old_out ${bind-paths} --pwd=/ --root-id bash -c '
            source $stdenv/setup
            eval "$fakeRootCommands"
            tar \
              --sort name \
              --exclude=./proc \
              --exclude=./sys \
              --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
              --hard-dereference \
              -cf $out/layer.tar .
          '
        '' else ''
          fakeroot bash -c '
            source $stdenv/setup
            cd old_out
            eval "$fakeRootCommands"
            tar \
              --sort name \
              --numeric-owner --mtime "@$SOURCE_DATE_EPOCH" \
              --hard-dereference \
              -cf $out/layer.tar .
          '
        ''}
        sha256sum $out/layer.tar \
          | cut -f 1 -d ' ' \
          > $out/checksum
      '';
    };

    closureRoots = lib.optionals includeStorePaths /* normally true */ (
      [ baseJson customisationLayer ]
    );
    overallClosure = writeText "closure" (lib.concatStringsSep " " closureRoots);

    # These derivations are only created as implementation details of docker-tools,
    # so they'll be excluded from the created images.
    unnecessaryDrvs = [ baseJson overallClosure customisationLayer ];

    # JSON configuration consumed by the stream script: image metadata plus
    # the store paths grouped into layers by popularity.
    conf = runCommand "${baseName}-conf.json"
      {
        inherit fromImage maxLayers created;
        imageName = lib.toLower name;
        preferLocalBuild = true;
        passthru.imageTag =
          if tag != null
          then tag
          else
            lib.head (lib.strings.splitString "-" (baseNameOf conf.outPath));
        paths = buildPackages.referencesByPopularity overallClosure;
        nativeBuildInputs = [ jq ];
      } ''
      ${if (tag == null) then ''
        outName="$(basename "$out")"
        outHash=$(echo "$outName" | cut -d - -f 1)

        imageTag=$outHash
      '' else ''
        imageTag="${tag}"
      ''}

      # convert "created" to iso format
      if [[ "$created" != "now" ]]; then
        created="$(date -Iseconds -d "$created")"
      fi

      paths() {
        cat $paths ${lib.concatMapStringsSep " "
                      (path: "| (grep -v ${path} || true)")
                      unnecessaryDrvs}
      }

      # Compute the number of layers that are already used by a potential
      # 'fromImage' as well as the customization layer. Ensure that there is
      # still at least one layer available to store the image contents.
      usedLayers=0

      # subtract number of base image layers
      if [[ -n "$fromImage" ]]; then
        (( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
      fi

      # one layer will be taken up by the customisation layer
      (( usedLayers += 1 ))

      if ! (( $usedLayers < $maxLayers )); then
        echo >&2 "Error: usedLayers $usedLayers layers to store 'fromImage' and" \
                  "'extraCommands', but only maxLayers=$maxLayers were" \
                  "allowed. At least 1 layer is required to store contents."
        exit 1
      fi
      availableLayers=$(( maxLayers - usedLayers ))

      # Create $maxLayers worth of Docker Layers, one layer per store path
      # unless there are more paths than $maxLayers. In that case, create
      # $maxLayers-1 for the most popular layers, and smush the remaining
      # store paths in to one final layer.
      #
      # The following code is fiddly w.r.t. ensuring every layer is
      # created, and that no paths are missed. If you change the
      # following lines, double-check that your code behaves properly
      # when the number of layers equals:
      #      maxLayers-1, maxLayers, and maxLayers+1, 0
      paths |
        jq -sR '
          rtrimstr("\n") | split("\n")
            | (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
            | map(select(length > 0))
        ' \
          --argjson maxLayers "$availableLayers" > store_layers.json

      # The index on $store_layers is necessary because the --slurpfile
      # automatically reads the file as an array.
      cat ${baseJson} | jq '
        . + {
          "store_dir": $store_dir,
          "from_image": $from_image,
          "store_layers": $store_layers[0],
          "customisation_layer": $customisation_layer,
          "repo_tag": $repo_tag,
          "created": $created
        }
        ' --arg store_dir "${storeDir}" \
          --argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
          --slurpfile store_layers store_layers.json \
          --arg customisation_layer ${customisationLayer} \
          --arg repo_tag "$imageName:$imageTag" \
          --arg created "$created" |
        tee $out
    '';

    # The streamed image: an executable that writes the image tarball to
    # stdout when run (hence isExe below).
    result = runCommand "stream-${baseName}"
      {
        inherit (conf) imageName;
        preferLocalBuild = true;
        passthru = passthru // {
          inherit (conf) imageTag;

          # Distinguish tarballs and exes at the Nix level so functions that
          # take images can know in advance how the image is supposed to be used.
          isExe = true;
        };
        nativeBuildInputs = [ makeWrapper ];
      } ''
      makeWrapper ${streamScript} $out --add-flags ${conf}
    '';
  in
  result
);

# This function streams a docker image that behaves like a nix-shell for a derivation
streamNixShellImage =
  { # The derivation whose environment this docker image should be based on
    drv
  , # Image Name
    name ? drv.name + "-env"
  , # Image tag, the Nix's output hash will be used if null
    tag ? null
  , # User id to run the container as. Defaults to 1000, because many
    # binaries don't like to be run as root
    uid ? 1000
  , # Group id to run the container as, see also uid
    gid ? 1000
  , # The home directory of the user
    homeDirectory ? "/build"
  , # The path to the bash binary to use as the shell. See `NIX_BUILD_SHELL` in `man nix-shell`
    shell ? bashInteractive + "/bin/bash"
  , # Run this command in the environment of the derivation, in an interactive shell. See `--command` in `man nix-shell`
    command ?
null
  , # Same as `command`, but runs the command in a non-interactive shell instead. See `--run` in `man nix-shell`
    run ? null
  }:
    # Structured attrs change how the builder receives its environment, which
    # this reconstruction does not model.
    assert lib.assertMsg (! (drv.drvAttrs.__structuredAttrs or false))
      "streamNixShellImage: Does not work with the derivation ${drv.name} because it uses __structuredAttrs";
    assert lib.assertMsg (command == null || run == null)
      "streamNixShellImage: Can't specify both command and run";
    let

      # A binary that calls the command to build the derivation
      builder = writeShellScriptBin "buildDerivation" ''
        exec ${lib.escapeShellArg (stringValue drv.drvAttrs.builder)} ${lib.escapeShellArgs (map stringValue drv.drvAttrs.args)}
      '';

      # PATH with just the shell's directory and the buildDerivation wrapper.
      staticPath = "${dirOf shell}:${lib.makeBinPath [ builder ]}";

      # Mirrors the rcfile that nix-shell generates:
      # https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L493-L526
      rcfile = writeText "nix-shell-rc" ''
        unset PATH
        dontAddDisableDepTrack=1
        # TODO: https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L506
        [ -e $stdenv/setup ] && source $stdenv/setup
        PATH=${staticPath}:"$PATH"
        SHELL=${lib.escapeShellArg shell}
        BASH=${lib.escapeShellArg shell}
        set +e
        [ -n "$PS1" -a -z "$NIX_SHELL_PRESERVE_PROMPT" ] && PS1='\n\[\033[1;32m\][nix-shell:\w]\$\[\033[0m\] '
        if [ "$(type -t runHook)" = function ]; then
          runHook shellHook
        fi
        unset NIX_ENFORCE_PURITY
        shopt -u nullglob
        shopt -s execfail
        ${optionalString (command != null || run != null) ''
          ${optionalString (command != null) command}
          ${optionalString (run != null) run}
          exit
        ''}
      '';

      # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/globals.hh#L464-L465
      sandboxBuildDir = "/build";

      # This function closely mirrors what this Nix code does:
      # https://github.com/NixOS/nix/blob/2.8.0/src/libexpr/primops.cc#L1102
      # https://github.com/NixOS/nix/blob/2.8.0/src/libexpr/eval.cc#L1981-L2036
      stringValue = value:
        # We can't just use `toString` on all derivation attributes because that
        # would not put path literals in the closure. So we explicitly copy
        # those into the store here
        if builtins.typeOf value == "path" then "${value}"
        else if builtins.typeOf value == "list" then toString (map stringValue value)
        else toString value;

      # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L992-L1004
      drvEnv = lib.mapAttrs' (name: value:
        let str = stringValue value;
        in if lib.elem name (drv.drvAttrs.passAsFile or [])
          then lib.nameValuePair "${name}Path" (writeText "pass-as-text-${name}" str)
          else lib.nameValuePair name str
      ) drv.drvAttrs //
        # A mapping from output name to the nix store path where they should end up
        # https://github.com/NixOS/nix/blob/2.8.0/src/libexpr/primops.cc#L1253
        lib.genAttrs drv.outputs (output: builtins.unsafeDiscardStringContext drv.${output}.outPath);

      # Environment variables set in the image
      envVars = {

        # Root certificates for internet access
        SSL_CERT_FILE = "${cacert}/etc/ssl/certs/ca-bundle.crt";

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1027-L1030
        # PATH = "/path-not-set";
        # Allows calling bash and `buildDerivation` as the Cmd
        PATH = staticPath;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1032-L1038
        HOME = homeDirectory;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1040-L1044
        NIX_STORE = storeDir;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1046-L1047
        # TODO: Make configurable?
        NIX_BUILD_CORES = "1";

      } // drvEnv // {

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1008-L1010
        NIX_BUILD_TOP = sandboxBuildDir;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1012-L1013
        TMPDIR = sandboxBuildDir;
        TEMPDIR = sandboxBuildDir;
        TMP = sandboxBuildDir;
        TEMP = sandboxBuildDir;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1015-L1019
        PWD = sandboxBuildDir;

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1071-L1074
        # We don't set it here because the output here isn't handled in any special way
        # NIX_LOG_FD = "2";

        # https://github.com/NixOS/nix/blob/2.8.0/src/libstore/build/local-derivation-goal.cc#L1076-L1077
        TERM = "xterm-256color";
      };


    in streamLayeredImage {
      inherit name tag;
      contents = [
        # binSh and usrBinEnv are defined elsewhere in this file.
        binSh
        usrBinEnv
        (fakeNss.override {
          # Allows programs to look up the build user's home directory
          # https://github.com/NixOS/nix/blob/ffe155abd36366a870482625543f9bf924a58281/src/libstore/build/local-derivation-goal.cc#L906-L910
          # Slightly differs however: We use the passed-in homeDirectory instead of sandboxBuildDir.
          # We're doing this because it's arguably a bug in Nix that sandboxBuildDir is used here: https://github.com/NixOS/nix/issues/6379
          extraPasswdLines = [
            "nixbld:x:${toString uid}:${toString gid}:Build user:${homeDirectory}:/noshell"
          ];
          extraGroupLines = [
            "nixbld:!:${toString gid}:"
          ];
        })
      ];

      fakeRootCommands = ''
        # Effectively a single-user installation of Nix, giving the user full
        # control over the Nix store. Needed for building the derivation this
        # shell is for, but also in case one wants to use Nix inside the
        # image
        mkdir -p ./nix/{store,var/nix} ./etc/nix
        chown -R ${toString uid}:${toString gid} ./nix ./etc/nix

        # Gives the user control over the build directory
        mkdir -p .${sandboxBuildDir}
        chown -R ${toString uid}:${toString gid} .${sandboxBuildDir}
      '';

      # Run this image as the given uid/gid
      config.User = "${toString uid}:${toString gid}";
      config.Cmd =
        # https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L185-L186
        # https://github.com/NixOS/nix/blob/2.8.0/src/nix-build/nix-build.cc#L534-L536
        if run == null
        then [ shell "--rcfile" rcfile ]
        else [ shell rcfile ];
      config.WorkingDir = sandboxBuildDir;
      config.Env = lib.mapAttrsToList (name: value: "${name}=${value}") envVars;
    };

  # Wrapper around streamNixShellImage to build an image from the result
  buildNixShellImage = { drv, ... }@args:
    let
      stream = streamNixShellImage args;
    in
    runCommand "${drv.name}-env.tar.gz"
      {
        inherit (stream) imageName;
        passthru = { inherit (stream) imageTag; };
        nativeBuildInputs = [ pigz ];
      } "${stream} | pigz -nTR > $out";
}