···
  *Default:* the output path's hash

+`fromImage` _optional_
+
+: The repository tarball containing the base image. It must be a valid Docker image, such as one exported by `docker save`.
+
+  *Default:* `null`, which can be seen as equivalent to `FROM scratch` of a `Dockerfile`.
+
`contents` _optional_

: Top level paths in the container. Either a single derivation, or a list of derivations.
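
For illustration, a minimal sketch of how the new `fromImage` argument could be used (image names here are hypothetical, and the base is itself a dockerTools image rather than a `docker save` export):

    pkgs.dockerTools.buildLayeredImage {
      name = "hello-on-base";   # hypothetical name
      tag = "latest";
      # Any repository tarball works as the base; omitting fromImage
      # behaves like `FROM scratch`.
      fromImage = pkgs.dockerTools.buildLayeredImage {
        name = "base";
        tag = "latest";
        contents = [ pkgs.coreutils ];
      };
      config.Cmd = [ "${pkgs.hello}/bin/hello" ];
    }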
···
Type = "oneshot";
User = "acme";
Group = mkDefault "acme";
+UMask = 0022;
StateDirectoryMode = 750;
ProtectSystem = "full";
PrivateTmp = true;
···
}

${optionalString (data.webroot != null) ''
+  # Ensure the webroot exists. Fixing group is required in case configuration was changed between runs.
+  # Lego will fail if the webroot does not exist at all.
+  (
+    mkdir -p '${data.webroot}/.well-known/acme-challenge' \
+      && chgrp '${data.group}' ${data.webroot}/.well-known/acme-challenge
+  ) || (
+    echo 'Please ensure ${data.webroot}/.well-known/acme-challenge exists and is writable by acme:${data.group}' \
+      && exit 1
+  )
''}

echo '${domainHash}' > domainhash.txt
+44 -10
nixos/tests/acme.nix
···
def check_connection(node, domain, retries=3):
-    assert retries >= 0
+    assert retries >= 0, f"Failed to connect to https://{domain}"

    result = node.succeed(
        "openssl s_client -brief -verify 2 -CAfile /tmp/ca.crt"
···
    for line in result.lower().split("\n"):
        if "verification" in line and "error" in line:
-            time.sleep(1)
+            time.sleep(3)
            return check_connection(node, domain, retries - 1)


def check_connection_key_bits(node, domain, bits, retries=3):
-    assert retries >= 0
+    assert retries >= 0, f"Did not find expected number of bits ({bits}) in key"

    result = node.succeed(
        "openssl s_client -CAfile /tmp/ca.crt"
···
    print("Key type:", result)

    if bits not in result:
-        time.sleep(1)
+        time.sleep(3)
        return check_connection_key_bits(node, domain, bits, retries - 1)


def check_stapling(node, domain, retries=3):
-    assert retries >= 0
+    assert retries >= 0, "OCSP Stapling check failed"

    # Pebble doesn't provide a full OCSP responder, so just check the URL
    result = node.succeed(
···
    print("OCSP Responder URL:", result)

    if "${caDomain}:4002" not in result.lower():
-        time.sleep(1)
+        time.sleep(3)
        return check_stapling(node, domain, retries - 1)


+def download_ca_certs(node, retries=5):
+    assert retries >= 0, "Failed to connect to pebble to download root CA certs"
+
+    exit_code, _ = node.execute("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
+    exit_code_2, _ = node.execute(
+        "curl https://${caDomain}:15000/intermediate-keys/0 >> /tmp/ca.crt"
+    )
+
+    if exit_code + exit_code_2 > 0:
+        time.sleep(3)
+        return download_ca_certs(node, retries - 1)
+
+
client.start()
dnsserver.start()
···
acme.wait_for_unit("network-online.target")
acme.wait_for_unit("pebble.service")

-client.succeed("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
-client.succeed("curl https://${caDomain}:15000/intermediate-keys/0 >> /tmp/ca.crt")
+download_ca_certs(client)

with subtest("Can request certificate with HTTPS-01 challenge"):
    webserver.wait_for_unit("acme-finished-a.example.test.target")
    check_fullchain(webserver, "a.example.test")
    check_issuer(webserver, "a.example.test", "pebble")
    check_connection(client, "a.example.test")

+with subtest("Certificates and accounts have safe + valid permissions"):
+    group = "${nodes.webserver.config.security.acme.certs."a.example.test".group}"
+    webserver.succeed(
+        f"test $(stat -L -c \"%a %U %G\" /var/lib/acme/a.example.test/* | tee /dev/stderr | grep '640 acme {group}' | wc -l) -eq 5"
+    )
+    webserver.succeed(
+        f"test $(stat -L -c \"%a %U %G\" /var/lib/acme/.lego/a.example.test/**/* | tee /dev/stderr | grep '640 acme {group}' | wc -l) -eq 5"
+    )
+    webserver.succeed(
+        f"test $(stat -L -c \"%a %U %G\" /var/lib/acme/a.example.test | tee /dev/stderr | grep '750 acme {group}' | wc -l) -eq 1"
+    )
+    webserver.succeed(
+        f"test $(find /var/lib/acme/accounts -type f -exec stat -L -c \"%a %U %G\" {{}} \\; | tee /dev/stderr | grep -v '600 acme {group}' | wc -l) -eq 0"
+    )

with subtest("Can generate valid selfsigned certs"):
    webserver.succeed("systemctl clean acme-a.example.test.service --what=state")
···
assert keyhash_old == keyhash_new

with subtest("Can request certificates for vhost + aliases (apache-httpd)"):
-    switch_to(webserver, "httpd-aliases")
-    webserver.wait_for_unit("acme-finished-c.example.test.target")
+    try:
+        switch_to(webserver, "httpd-aliases")
+        webserver.wait_for_unit("acme-finished-c.example.test.target")
+    except Exception as err:
+        _, output = webserver.execute(
+            "cat /var/log/httpd/*.log && ls -al /var/lib/acme/acme-challenge"
+        )
+        print(output)
+        raise err
    check_issuer(webserver, "c.example.test", "pebble")
    check_connection(client, "c.example.test")
    check_connection(client, "d.example.test")
+29 -1
nixos/tests/docker-tools.nix
···161 "docker run --rm ${examples.layered-image.imageName} cat extraCommands",
162 )
163164- with subtest("Ensure building an image on top of a layered Docker images work"):
165 docker.succeed(
166 "docker load --input='${examples.layered-on-top}'",
167 "docker run --rm ${examples.layered-on-top.imageName}",
000000168 )
169170···205 assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
206 assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
2070000000000208 with subtest("Ensure image with only 2 layers can be loaded"):
209 docker.succeed(
210 "docker load --input='${examples.two-layered-image}'"
···218 # Ensure the two output paths (ls and hello) are in the layer
219 "docker run bulk-layer ls /bin/hello",
220 )
000000000000221222 with subtest("Ensure correct behavior when no store is needed"):
223 # This check tests that buildLayeredImage can build images that don't need a store.
···161 "docker run --rm ${examples.layered-image.imageName} cat extraCommands",
162 )
163164+ with subtest("Ensure images built on top of layered Docker images work"):
165 docker.succeed(
166 "docker load --input='${examples.layered-on-top}'",
167 "docker run --rm ${examples.layered-on-top.imageName}",
168+ )
169+170+ with subtest("Ensure layered images built on top of layered Docker images work"):
171+ docker.succeed(
172+ "docker load --input='${examples.layered-on-top-layered}'",
173+ "docker run --rm ${examples.layered-on-top-layered.imageName}",
174 )
175176···211 assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
212 assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
213214+ with subtest("Ensure environment variables of layered images are correctly inherited"):
215+ docker.succeed(
216+ "docker load --input='${examples.environmentVariablesLayered}'"
217+ )
218+ out = docker.succeed("docker run --rm ${examples.environmentVariablesLayered.imageName} env")
219+ env = out.splitlines()
220+ assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
221+ assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
222+ assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
223+224 with subtest("Ensure image with only 2 layers can be loaded"):
225 docker.succeed(
226 "docker load --input='${examples.two-layered-image}'"
···234 # Ensure the two output paths (ls and hello) are in the layer
235 "docker run bulk-layer ls /bin/hello",
236 )
237+238+ with subtest(
239+ "Ensure the bulk layer with a base image respects the number of maxLayers"
240+ ):
241+ docker.succeed(
242+ "docker load --input='${pkgs.dockerTools.examples.layered-bulk-layer}'",
243+ # Ensure the image runs correctly
244+ "docker run layered-bulk-layer ls /bin/hello",
245+ )
246+247+ # Ensure the image has the correct number of layers
248+ assert len(set_of_layers("layered-bulk-layer")) == 4
249250 with subtest("Ensure correct behavior when no store is needed"):
251 # This check tests that buildLayeredImage can build images that don't need a store.
···
  name,
  # Image tag, the Nix's output hash will be used if null
  tag ? null,
+  # Parent image, to append to.
+  fromImage ? null,
  # Files to put on the image (a nix store path or list of paths).
  contents ? [],
  # Docker config; e.g. what command to run on the container.
···
  unnecessaryDrvs = [ baseJson overallClosure ];

  conf = runCommand "${baseName}-conf.json" {
-    inherit maxLayers created;
+    inherit fromImage maxLayers created;
    imageName = lib.toLower name;
    passthru.imageTag =
      if tag != null
···
      unnecessaryDrvs}
    }

+    # Compute the number of layers that are already used by a potential
+    # 'fromImage' as well as the customization layer. Ensure that there is
+    # still at least one layer available to store the image contents.
+    usedLayers=0
+
+    # subtract number of base image layers
+    if [[ -n "$fromImage" ]]; then
+      (( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
+    fi
+
+    # one layer will be taken up by the customisation layer
+    (( usedLayers += 1 ))
+
+    if ! (( $usedLayers < $maxLayers )); then
+      echo >&2 "Error: usedLayers $usedLayers layers to store 'fromImage' and" \
+                "'extraCommands', but only maxLayers=$maxLayers were" \
+                "allowed. At least 1 layer is required to store contents."
+      exit 1
+    fi
+    availableLayers=$(( maxLayers - usedLayers ))
+
    # Create $maxLayers worth of Docker Layers, one layer per store path
    # unless there are more paths than $maxLayers. In that case, create
    # $maxLayers-1 for the most popular layers, and smush the remaining
···
      | (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
      | map(select(length > 0))
      ' \
-      --argjson maxLayers "$(( maxLayers - 1 ))" # one layer will be taken up by the customisation layer
+      --argjson maxLayers "$availableLayers"
    )"

    cat ${baseJson} | jq '
      . + {
        "store_dir": $store_dir,
+       "from_image": $from_image,
        "store_layers": $store_layers,
        "customisation_layer": $customisation_layer,
        "repo_tag": $repo_tag,
        "created": $created
      }
    ' --arg store_dir "${storeDir}" \
+     --argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
      --argjson store_layers "$store_layers" \
      --arg customisation_layer ${customisationLayer} \
      --arg repo_tag "$imageName:$imageTag" \
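
To make the layer budget above concrete, a hypothetical sketch (names invented): if the base tarball's manifest lists three layers and `maxLayers = 8`, the script reserves 3 (base) + 1 (customisation) = 4 layers, so `availableLayers = 4` remain for grouping the store paths:

    pkgs.dockerTools.buildLayeredImage {
      name = "child-on-base";      # invented name
      tag = "latest";
      fromImage = someBaseImage;   # assume its manifest lists 3 layers
      contents = [ pkgs.hello pkgs.coreutils ];
      maxLayers = 8;               # usedLayers = 4, availableLayers = 4
    }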
+64 -21
pkgs/build-support/docker/examples.nix
···
  };
};

-# 12. example of running something as root on top of a parent image
+# 12. Create a layered image on top of a layered image
+layered-on-top-layered = pkgs.dockerTools.buildLayeredImage {
+  name = "layered-on-top-layered";
+  tag = "latest";
+  fromImage = layered-image;
+  extraCommands = ''
+    mkdir ./example-output
+    chmod 777 ./example-output
+  '';
+  config = {
+    Env = [ "PATH=${pkgs.coreutils}/bin/" ];
+    WorkingDir = "/example-output";
+    Cmd = [
+      "${pkgs.bash}/bin/bash" "-c" "echo hello > foo; cat foo"
+    ];
+  };
+};
+
+# 13. example of running something as root on top of a parent image
# Regression test related to PR #52109
runAsRootParentImage = buildImage {
  name = "runAsRootParentImage";
···
  fromImage = bash;
};

-# 13. example of 3 layers images This image is used to verify the
+# 14. example of 3 layers images This image is used to verify the
# order of layers is correct.
# It allows to validate
# - the layer of parent are below
···
  '';
};

-# 14. Environment variable inheritance.
+# 15. Environment variable inheritance.
# Child image should inherit parents environment variables,
# optionally overriding them.
-environmentVariables = let
-  parent = pkgs.dockerTools.buildImage {
-    name = "parent";
-    tag = "latest";
-    config = {
-      Env = [
-        "FROM_PARENT=true"
-        "LAST_LAYER=parent"
-      ];
-    };
+environmentVariablesParent = pkgs.dockerTools.buildImage {
+  name = "parent";
+  tag = "latest";
+  config = {
+    Env = [
+      "FROM_PARENT=true"
+      "LAST_LAYER=parent"
+    ];
+  };
+};
+
+environmentVariables = pkgs.dockerTools.buildImage {
+  name = "child";
+  fromImage = environmentVariablesParent;
+  tag = "latest";
+  contents = [ pkgs.coreutils ];
+  config = {
+    Env = [
+      "FROM_CHILD=true"
+      "LAST_LAYER=child"
+    ];
  };
-in pkgs.dockerTools.buildImage {
+};
+
+environmentVariablesLayered = pkgs.dockerTools.buildLayeredImage {
  name = "child";
-  fromImage = parent;
+  fromImage = environmentVariablesParent;
  tag = "latest";
  contents = [ pkgs.coreutils ];
  config = {
···
  };
};

-# 15. Create another layered image, for comparing layers with image 10.
+# 16. Create another layered image, for comparing layers with image 10.
another-layered-image = pkgs.dockerTools.buildLayeredImage {
  name = "another-layered-image";
  tag = "latest";
  config.Cmd = [ "${pkgs.hello}/bin/hello" ];
};

-# 16. Create a layered image with only 2 layers
+# 17. Create a layered image with only 2 layers
two-layered-image = pkgs.dockerTools.buildLayeredImage {
  name = "two-layered-image";
  tag = "latest";
···
  maxLayers = 2;
};

-# 17. Create a layered image with more packages than max layers.
+# 18. Create a layered image with more packages than max layers.
# coreutils and hello are part of the same layer
bulk-layer = pkgs.dockerTools.buildLayeredImage {
  name = "bulk-layer";
···
  maxLayers = 2;
};

-# 18. Create a "layered" image without nix store layers. This is not
+# 19. Create a layered image with a base image and more packages than max
+# layers. coreutils and hello are part of the same layer
+layered-bulk-layer = pkgs.dockerTools.buildLayeredImage {
+  name = "layered-bulk-layer";
+  tag = "latest";
+  fromImage = two-layered-image;
+  contents = with pkgs; [
+    coreutils hello
+  ];
+  maxLayers = 4;
+};
+
+# 20. Create a "layered" image without nix store layers. This is not
# recommended, but can be useful for base images in rare cases.
no-store-paths = pkgs.dockerTools.buildLayeredImage {
  name = "no-store-paths";
···
  };
};

-# 19. Support files in the store on buildLayeredImage
+# 21. Support files in the store on buildLayeredImage
# See: https://github.com/NixOS/nixpkgs/pull/91084#issuecomment-653496223
filesInStore = pkgs.dockerTools.buildLayeredImageWithNixDb {
  name = "file-in-store";
···
  };
};

-# 20. Ensure that setting created to now results in a date which
+# 22. Ensure that setting created to now results in a date which
# isn't the epoch + 1 for layered images.
unstableDateLayered = pkgs.dockerTools.buildLayeredImage {
  name = "unstable-date-layered";
+87 -7
pkgs/build-support/docker/stream_layered_image.py
···
import io
import os
+import re
import sys
import json
import hashlib
···
        return (self._digest.hexdigest(), self._size)


+FromImage = namedtuple("FromImage", ["tar", "manifest_json", "image_json"])
# Some metadata for a layer
LayerInfo = namedtuple("LayerInfo", ["size", "checksum", "path", "paths"])


+def load_from_image(from_image_str):
+    """
+    Loads the given base image, if any.
+
+    from_image_str: Path to the base image archive.
+
+    Returns: A 'FromImage' object with references to the loaded base image,
+    or 'None' if no base image was provided.
+    """
+    if from_image_str is None:
+        return None
+
+    base_tar = tarfile.open(from_image_str)
+
+    manifest_json_tarinfo = base_tar.getmember("manifest.json")
+    with base_tar.extractfile(manifest_json_tarinfo) as f:
+        manifest_json = json.load(f)
+
+    image_json_tarinfo = base_tar.getmember(manifest_json[0]["Config"])
+    with base_tar.extractfile(image_json_tarinfo) as f:
+        image_json = json.load(f)
+
+    return FromImage(base_tar, manifest_json, image_json)
+
+
+def add_base_layers(tar, from_image):
+    """
+    Adds the layers from the given base image to the final image.
+
+    tar: 'tarfile.TarFile' object for new layers to be added to.
+    from_image: 'FromImage' object with references to the loaded base image.
+    """
+    if from_image is None:
+        print("No 'fromImage' provided", file=sys.stderr)
+        return []
+
+    layers = from_image.manifest_json[0]["Layers"]
+    checksums = from_image.image_json["rootfs"]["diff_ids"]
+    layers_checksums = zip(layers, checksums)
+
+    for num, (layer, checksum) in enumerate(layers_checksums, start=1):
+        layer_tarinfo = from_image.tar.getmember(layer)
+        checksum = re.sub(r"^sha256:", "", checksum)
+
+        tar.addfile(layer_tarinfo, from_image.tar.extractfile(layer_tarinfo))
+        path = layer_tarinfo.path
+        size = layer_tarinfo.size
+
+        print("Adding base layer", num, "from", path, file=sys.stderr)
+        yield LayerInfo(size=size, checksum=checksum, path=path, paths=[path])
+
+    from_image.tar.close()
+
+
+def overlay_base_config(from_image, final_config):
+    """
+    Overlays the final image 'config' JSON on top of selected defaults from the
+    base image 'config' JSON.
+
+    from_image: 'FromImage' object with references to the loaded base image.
+    final_config: 'dict' object of the final image 'config' JSON.
+    """
+    if from_image is None:
+        return final_config
+
+    base_config = from_image.image_json["config"]
+
+    # Preserve environment from base image
+    final_env = base_config.get("Env", []) + final_config.get("Env", [])
+    if final_env:
+        final_config["Env"] = final_env
+    return final_config
+
+
def add_layer_dir(tar, paths, store_dir, mtime):
    """
    Appends given store paths to a TarFile object as a new layer.
···
    mtime = int(created.timestamp())
    store_dir = conf["store_dir"]

+    from_image = load_from_image(conf["from_image"])
+
    with tarfile.open(mode="w|", fileobj=sys.stdout.buffer) as tar:
        layers = []
-        for num, store_layer in enumerate(conf["store_layers"]):
-            print(
-                "Creating layer", num,
-                "from paths:", store_layer,
-                file=sys.stderr)
+        layers.extend(add_base_layers(tar, from_image))
+
+        start = len(layers) + 1
+        for num, store_layer in enumerate(conf["store_layers"], start=start):
+            print("Creating layer", num, "from paths:", store_layer,
+                  file=sys.stderr)
            info = add_layer_dir(tar, store_layer, store_dir, mtime=mtime)
            layers.append(info)

-        print("Creating the customisation layer...", file=sys.stderr)
+        print("Creating layer", len(layers) + 1, "with customisation...",
+              file=sys.stderr)
        layers.append(
            add_customisation_layer(
                tar,
···
        "created": datetime.isoformat(created),
        "architecture": conf["architecture"],
        "os": "linux",
-        "config": conf["config"],
+        "config": overlay_base_config(from_image, conf["config"]),
        "rootfs": {
            "diff_ids": [f"sha256:{layer.checksum}" for layer in layers],
            "type": "layers",
···
+{ lib
+, stdenv
, fetchFromGitHub
+, argp-standalone
+, curl
, meson
, ninja
+, pkg-config
, zstd
}:

stdenv.mkDerivation rec {
  pname = "zchunk";
+  version = "1.1.9";

  outputs = [ "out" "lib" "dev" ];
···
    owner = "zchunk";
    repo = pname;
    rev = version;
+    hash = "sha256-MqnHtqOjLl6R5GZ4f2UX1iLoO9FUT2IfZlSN58wW8JA=";
  };

  nativeBuildInputs = [
···
  ];

  buildInputs = [
+    curl
    zstd
  ] ++ lib.optional stdenv.isDarwin argp-standalone;

  meta = with lib; {
+    homepage = "https://github.com/zchunk/zchunk";
    description = "File format designed for highly efficient deltas while maintaining good compression";
+    longDescription = ''
+      zchunk is a compressed file format that splits the file into independent
+      chunks. This allows you to only download changed chunks when downloading a
+      new version of the file, and also makes zchunk files efficient over rsync.
+
+      zchunk files are protected with strong checksums to verify that the file
+      you downloaded is, in fact, the file you wanted.
+    '';
    license = licenses.bsd2;
+    maintainers = with maintainers; [ AndersonTorres ];
    platforms = platforms.unix;
  };
}
+29 -45
pkgs/development/libraries/zziplib/default.nix
···
-{ lib, stdenv
+{ lib
+, stdenv
+, fetchFromGitHub
+, cmake
, perl
, pkg-config
-, fetchFromGitHub
-, fetchpatch
-, zip
-, unzip
, python3
, xmlto
+, zip
, zlib
}:

stdenv.mkDerivation rec {
  pname = "zziplib";
-  version = "0.13.71";
+  version = "0.13.72";

  src = fetchFromGitHub {
    owner = "gdraheim";
-    repo = "zziplib";
+    repo = pname;
    rev = "v${version}";
-    sha256 = "P+7D57sc2oIABhk3k96aRILpGnsND5SLXHh2lqr9O4E=";
+    hash = "sha256-Ht3fBgdrTm4mCi5uhgQPNtpGzADoRVOpSuGPsIS6y0Q=";
  };

-  patches = [
-    # Install man pages
-    (fetchpatch {
-      url = "https://github.com/gdraheim/zziplib/commit/5583ccc7a247ee27556ede344e93d3ac1dc72e9b.patch";
-      sha256 = "wVExEZN8Ml1/3GicB0ZYsLVS3KJ8BSz8i4Gu46naz1Y=";
-      excludes = [ "GNUmakefile" ];
-    })
-
-    # Fix man page formatting
-    (fetchpatch {
-      url = "https://github.com/gdraheim/zziplib/commit/22ed64f13dc239f86664c60496261f544bce1088.patch";
-      sha256 = "ScFVWLc4LQPqkcHn9HK/VkLula4b5HzuYl0b5vi4Ikc=";
-    })
-  ];
-
  nativeBuildInputs = [
+    cmake
    perl
    pkg-config
-    zip
    python3
    xmlto
+    zip
  ];

  buildInputs = [
    zlib
  ];

-  checkInputs = [
-    unzip
+  # test/zziptests.py requires network access
+  # (https://github.com/gdraheim/zziplib/issues/24)
+  cmakeFlags = [
+    "-DZZIP_TESTCVE=OFF"
+    "-DBUILD_SHARED_LIBS=True"
+    "-DBUILD_STATIC_LIBS=False"
+    "-DBUILD_TESTS=OFF"
+    "-DMSVC_STATIC_RUNTIME=OFF"
+    "-DZZIPSDL=OFF"
+    "-DZZIPTEST=OFF"
+    "-DZZIPWRAP=OFF"
+    "-DBUILDTESTS=OFF"
  ];

-  # tests are broken (https://github.com/gdraheim/zziplib/issues/20),
-  # and test/zziptests.py requires network access
-  # (https://github.com/gdraheim/zziplib/issues/24)
-  doCheck = false;
-  checkTarget = "check";
-
  meta = with lib; {
+    homepage = "https://github.com/gdraheim/zziplib";
    description = "Library to extract data from files archived in a zip file";
    longDescription = ''
-      The zziplib library is intentionally lightweight, it offers the ability
-      to easily extract data from files archived in a single zip
-      file. Applications can bundle files into a single zip archive and
-      access them. The implementation is based only on the (free) subset of
-      compression with the zlib algorithm which is actually used by the
-      zip/unzip tools.
+      The zziplib library is intentionally lightweight, it offers the ability to
+      easily extract data from files archived in a single zip file.
+      Applications can bundle files into a single zip archive and access them.
+      The implementation is based only on the (free) subset of compression with
+      the zlib algorithm which is actually used by the zip/unzip tools.
    '';
    license = with licenses; [ lgpl2Plus mpl11 ];
-    homepage = "http://zziplib.sourceforge.net/";
-    maintainers = [ ];
+    maintainers = with maintainers; [ AndersonTorres ];
    platforms = python3.meta.platforms;
  };
}