···
   *Default:* the output path's hash

+`fromImage` _optional_
+
+: The repository tarball containing the base image. It must be a valid Docker image, such as one exported by `docker save`.
+
+  *Default:* `null`, which can be seen as equivalent to `FROM scratch` in a `Dockerfile`.
+
 `contents` _optional_

 : Top level paths in the container. Either a single derivation, or a list of derivations.
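A minimal usage sketch for the new `fromImage` parameter, modeled on the `layered-on-top-layered` example added later in this diff; the image names and the `coreutils`/`hello` contents are illustrative, not part of the change:

```nix
{ pkgs }:

let
  # Base image: any valid Docker image tarball works here, e.g. the result
  # of buildImage/buildLayeredImage or an archive from `docker save`.
  base = pkgs.dockerTools.buildLayeredImage {
    name = "base";
    tag = "latest";
    contents = [ pkgs.coreutils ];
  };
in
pkgs.dockerTools.buildLayeredImage {
  name = "on-top-of-base"; # illustrative name
  tag = "latest";
  fromImage = base;        # defaults to null, i.e. FROM scratch
  config.Cmd = [ "${pkgs.hello}/bin/hello" ];
}
```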
+10-4
nixos/modules/security/acme.nix
···
         Type = "oneshot";
         User = "acme";
         Group = mkDefault "acme";
-        UMask = 0023;
+        UMask = 0022;
         StateDirectoryMode = 750;
         ProtectSystem = "full";
         PrivateTmp = true;
···
       }

       ${optionalString (data.webroot != null) ''
-        # Ensure the webroot exists
-        mkdir -p '${data.webroot}/.well-known/acme-challenge'
-        chown 'acme:${data.group}' ${data.webroot}/{.well-known,.well-known/acme-challenge}
+        # Ensure the webroot exists. Fixing the group is required in case the configuration was changed between runs.
+        # Lego will fail if the webroot does not exist at all.
+        (
+          mkdir -p '${data.webroot}/.well-known/acme-challenge' \
+          && chgrp '${data.group}' ${data.webroot}/.well-known/acme-challenge
+        ) || (
+          echo 'Please ensure ${data.webroot}/.well-known/acme-challenge exists and is writable by acme:${data.group}' \
+          && exit 1
+        )
       ''}

       echo '${domainHash}' > domainhash.txt
+44-10
nixos/tests/acme.nix
···
 def check_connection(node, domain, retries=3):
-    assert retries >= 0
+    assert retries >= 0, f"Failed to connect to https://{domain}"

     result = node.succeed(
         "openssl s_client -brief -verify 2 -CAfile /tmp/ca.crt"
···
     for line in result.lower().split("\n"):
         if "verification" in line and "error" in line:
-            time.sleep(1)
+            time.sleep(3)
             return check_connection(node, domain, retries - 1)


 def check_connection_key_bits(node, domain, bits, retries=3):
-    assert retries >= 0
+    assert retries >= 0, f"Did not find expected number of bits ({bits}) in key"

     result = node.succeed(
         "openssl s_client -CAfile /tmp/ca.crt"
···
     print("Key type:", result)

     if bits not in result:
-        time.sleep(1)
+        time.sleep(3)
         return check_connection_key_bits(node, domain, bits, retries - 1)


 def check_stapling(node, domain, retries=3):
-    assert retries >= 0
+    assert retries >= 0, "OCSP Stapling check failed"

     # Pebble doesn't provide a full OCSP responder, so just check the URL
     result = node.succeed(
···
     print("OCSP Responder URL:", result)

     if "${caDomain}:4002" not in result.lower():
-        time.sleep(1)
+        time.sleep(3)
         return check_stapling(node, domain, retries - 1)


+def download_ca_certs(node, retries=5):
+    assert retries >= 0, "Failed to connect to pebble to download root CA certs"
+
+    exit_code, _ = node.execute("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
+    exit_code_2, _ = node.execute(
+        "curl https://${caDomain}:15000/intermediate-keys/0 >> /tmp/ca.crt"
+    )
+
+    if exit_code + exit_code_2 > 0:
+        time.sleep(3)
+        return download_ca_certs(node, retries - 1)
+
+
 client.start()
 dnsserver.start()
···
 acme.wait_for_unit("network-online.target")
 acme.wait_for_unit("pebble.service")

-client.succeed("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
-client.succeed("curl https://${caDomain}:15000/intermediate-keys/0 >> /tmp/ca.crt")
+download_ca_certs(client)

 with subtest("Can request certificate with HTTPS-01 challenge"):
     webserver.wait_for_unit("acme-finished-a.example.test.target")
     check_fullchain(webserver, "a.example.test")
     check_issuer(webserver, "a.example.test", "pebble")
     check_connection(client, "a.example.test")
+
+with subtest("Certificates and accounts have safe + valid permissions"):
+    group = "${nodes.webserver.config.security.acme.certs."a.example.test".group}"
+    webserver.succeed(
+        f"test $(stat -L -c \"%a %U %G\" /var/lib/acme/a.example.test/* | tee /dev/stderr | grep '640 acme {group}' | wc -l) -eq 5"
+    )
+    webserver.succeed(
+        f"test $(stat -L -c \"%a %U %G\" /var/lib/acme/.lego/a.example.test/**/* | tee /dev/stderr | grep '640 acme {group}' | wc -l) -eq 5"
+    )
+    webserver.succeed(
+        f"test $(stat -L -c \"%a %U %G\" /var/lib/acme/a.example.test | tee /dev/stderr | grep '750 acme {group}' | wc -l) -eq 1"
+    )
+    webserver.succeed(
+        f"test $(find /var/lib/acme/accounts -type f -exec stat -L -c \"%a %U %G\" {{}} \\; | tee /dev/stderr | grep -v '600 acme {group}' | wc -l) -eq 0"
+    )

 with subtest("Can generate valid selfsigned certs"):
     webserver.succeed("systemctl clean acme-a.example.test.service --what=state")
···
     assert keyhash_old == keyhash_new

 with subtest("Can request certificates for vhost + aliases (apache-httpd)"):
-    switch_to(webserver, "httpd-aliases")
-    webserver.wait_for_unit("acme-finished-c.example.test.target")
+    try:
+        switch_to(webserver, "httpd-aliases")
+        webserver.wait_for_unit("acme-finished-c.example.test.target")
+    except Exception as err:
+        _, output = webserver.execute(
+            "cat /var/log/httpd/*.log && ls -al /var/lib/acme/acme-challenge"
+        )
+        print(output)
+        raise err
     check_issuer(webserver, "c.example.test", "pebble")
     check_connection(client, "c.example.test")
     check_connection(client, "d.example.test")
+29-1
nixos/tests/docker-tools.nix
···
         "docker run --rm ${examples.layered-image.imageName} cat extraCommands",
     )

-with subtest("Ensure building an image on top of a layered Docker images work"):
+with subtest("Ensure images built on top of layered Docker images work"):
     docker.succeed(
         "docker load --input='${examples.layered-on-top}'",
         "docker run --rm ${examples.layered-on-top.imageName}",
+    )
+
+with subtest("Ensure layered images built on top of layered Docker images work"):
+    docker.succeed(
+        "docker load --input='${examples.layered-on-top-layered}'",
+        "docker run --rm ${examples.layered-on-top-layered.imageName}",
     )

···
     assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
     assert "LAST_LAYER=child" in env, "envvars from the child should take priority"

+with subtest("Ensure environment variables of layered images are correctly inherited"):
+    docker.succeed(
+        "docker load --input='${examples.environmentVariablesLayered}'"
+    )
+    out = docker.succeed("docker run --rm ${examples.environmentVariablesLayered.imageName} env")
+    env = out.splitlines()
+    assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
+    assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
+    assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
+
 with subtest("Ensure image with only 2 layers can be loaded"):
     docker.succeed(
         "docker load --input='${examples.two-layered-image}'"
···
         # Ensure the two output paths (ls and hello) are in the layer
         "docker run bulk-layer ls /bin/hello",
     )
+
+with subtest(
+    "Ensure the bulk layer with a base image respects the number of maxLayers"
+):
+    docker.succeed(
+        "docker load --input='${pkgs.dockerTools.examples.layered-bulk-layer}'",
+        # Ensure the image runs correctly
+        "docker run layered-bulk-layer ls /bin/hello",
+    )
+
+    # Ensure the image has the correct number of layers
+    assert len(set_of_layers("layered-bulk-layer")) == 4

 with subtest("Ensure correct behavior when no store is needed"):
     # This check tests that buildLayeredImage can build images that don't need a store.
pkgs/build-support/docker/default.nix
···
     name,
     # Image tag, the Nix's output hash will be used if null
     tag ? null,
+    # Parent image, to append to.
+    fromImage ? null,
     # Files to put on the image (a nix store path or list of paths).
     contents ? [],
     # Docker config; e.g. what command to run on the container.
···
     unnecessaryDrvs = [ baseJson overallClosure ];

     conf = runCommand "${baseName}-conf.json" {
-      inherit maxLayers created;
+      inherit fromImage maxLayers created;
       imageName = lib.toLower name;
       passthru.imageTag =
         if tag != null
···
         unnecessaryDrvs}
     }


+    # Compute the number of layers that are already used by a potential
+    # 'fromImage' as well as the customisation layer. Ensure that there is
+    # still at least one layer available to store the image contents.
+    usedLayers=0
+
+    # add the number of base image layers
+    if [[ -n "$fromImage" ]]; then
+      (( usedLayers += $(tar -xOf "$fromImage" manifest.json | jq '.[0].Layers | length') ))
+    fi
+
+    # one layer will be taken up by the customisation layer
+    (( usedLayers += 1 ))
+
+    if ! (( $usedLayers < $maxLayers )); then
+      echo >&2 "Error: $usedLayers layers are needed to store 'fromImage' and" \
+        "'extraCommands', but only maxLayers=$maxLayers were allowed." \
+        "At least 1 layer is required to store the image contents."
+      exit 1
+    fi
+    availableLayers=$(( maxLayers - usedLayers ))
+
     # Create $maxLayers worth of Docker Layers, one layer per store path
     # unless there are more paths than $maxLayers. In that case, create
     # $maxLayers-1 for the most popular layers, and smush the remaining
···
       | (.[:$maxLayers-1] | map([.])) + [ .[$maxLayers-1:] ]
       | map(select(length > 0))
       ' \
-      --argjson maxLayers "$(( maxLayers - 1 ))" # one layer will be taken up by the customisation layer
+      --argjson maxLayers "$availableLayers"
     )"

     cat ${baseJson} | jq '
       . + {
         "store_dir": $store_dir,
+        "from_image": $from_image,
         "store_layers": $store_layers,
         "customisation_layer": $customisation_layer,
         "repo_tag": $repo_tag,
         "created": $created
       }
     ' --arg store_dir "${storeDir}" \
+      --argjson from_image ${if fromImage == null then "null" else "'\"${fromImage}\"'"} \
       --argjson store_layers "$store_layers" \
       --arg customisation_layer ${customisationLayer} \
       --arg repo_tag "$imageName:$imageTag" \
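To make the layer budget above concrete: in the `layered-bulk-layer` example added below, the `two-layered-image` base contributes 2 layers and the customisation layer takes 1, so `usedLayers = 3`; with `maxLayers = 4` a single layer remains, into which coreutils and hello are smushed together (the test above asserts 4 layers in total). A sketch of that arithmetic, reusing names from examples.nix:

```nix
# usedLayers      = 2 (base image) + 1 (customisation layer) = 3
# availableLayers = maxLayers - usedLayers = 4 - 3 = 1
pkgs.dockerTools.buildLayeredImage {
  name = "layered-bulk-layer";
  tag = "latest";
  fromImage = two-layered-image; # itself built with maxLayers = 2
  contents = with pkgs; [ coreutils hello ];
  maxLayers = 4;
}
```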
+64-21
pkgs/build-support/docker/examples.nix
···
     };
   };

-  # 12. example of running something as root on top of a parent image
+  # 12. Create a layered image on top of a layered image
+  layered-on-top-layered = pkgs.dockerTools.buildLayeredImage {
+    name = "layered-on-top-layered";
+    tag = "latest";
+    fromImage = layered-image;
+    extraCommands = ''
+      mkdir ./example-output
+      chmod 777 ./example-output
+    '';
+    config = {
+      Env = [ "PATH=${pkgs.coreutils}/bin/" ];
+      WorkingDir = "/example-output";
+      Cmd = [
+        "${pkgs.bash}/bin/bash" "-c" "echo hello > foo; cat foo"
+      ];
+    };
+  };
+
+  # 13. example of running something as root on top of a parent image
   # Regression test related to PR #52109
   runAsRootParentImage = buildImage {
     name = "runAsRootParentImage";
···
     fromImage = bash;
   };

-  # 13. example of 3 layers images This image is used to verify the
+  # 14. example of 3 layers images. This image is used to verify the
   # order of layers is correct.
   # It allows to validate
   # - the layer of parent are below
···
     '';
   };

-  # 14. Environment variable inheritance.
+  # 15. Environment variable inheritance.
   # Child image should inherit parents environment variables,
   # optionally overriding them.
-  environmentVariables = let
-    parent = pkgs.dockerTools.buildImage {
-      name = "parent";
-      tag = "latest";
-      config = {
-        Env = [
-          "FROM_PARENT=true"
-          "LAST_LAYER=parent"
-        ];
-      };
+  environmentVariablesParent = pkgs.dockerTools.buildImage {
+    name = "parent";
+    tag = "latest";
+    config = {
+      Env = [
+        "FROM_PARENT=true"
+        "LAST_LAYER=parent"
+      ];
+    };
+  };
+
+  environmentVariables = pkgs.dockerTools.buildImage {
+    name = "child";
+    fromImage = environmentVariablesParent;
+    tag = "latest";
+    contents = [ pkgs.coreutils ];
+    config = {
+      Env = [
+        "FROM_CHILD=true"
+        "LAST_LAYER=child"
+      ];
     };
-  in pkgs.dockerTools.buildImage {
+  };
+
+  environmentVariablesLayered = pkgs.dockerTools.buildLayeredImage {
     name = "child";
-    fromImage = parent;
+    fromImage = environmentVariablesParent;
     tag = "latest";
     contents = [ pkgs.coreutils ];
     config = {
···
     };
   };

-  # 15. Create another layered image, for comparing layers with image 10.
+  # 16. Create another layered image, for comparing layers with image 10.
   another-layered-image = pkgs.dockerTools.buildLayeredImage {
     name = "another-layered-image";
     tag = "latest";
     config.Cmd = [ "${pkgs.hello}/bin/hello" ];
   };

-  # 16. Create a layered image with only 2 layers
+  # 17. Create a layered image with only 2 layers
   two-layered-image = pkgs.dockerTools.buildLayeredImage {
     name = "two-layered-image";
     tag = "latest";
···
     maxLayers = 2;
   };

-  # 17. Create a layered image with more packages than max layers.
+  # 18. Create a layered image with more packages than max layers.
   # coreutils and hello are part of the same layer
   bulk-layer = pkgs.dockerTools.buildLayeredImage {
     name = "bulk-layer";
···
     maxLayers = 2;
   };

-  # 18. Create a "layered" image without nix store layers. This is not
+  # 19. Create a layered image with a base image and more packages than max
+  # layers. coreutils and hello are part of the same layer
+  layered-bulk-layer = pkgs.dockerTools.buildLayeredImage {
+    name = "layered-bulk-layer";
+    tag = "latest";
+    fromImage = two-layered-image;
+    contents = with pkgs; [
+      coreutils hello
+    ];
+    maxLayers = 4;
+  };
+
+  # 20. Create a "layered" image without nix store layers. This is not
   # recommended, but can be useful for base images in rare cases.
   no-store-paths = pkgs.dockerTools.buildLayeredImage {
     name = "no-store-paths";
···
     };
   };

-  # 19. Support files in the store on buildLayeredImage
+  # 21. Support files in the store on buildLayeredImage
   # See: https://github.com/NixOS/nixpkgs/pull/91084#issuecomment-653496223
   filesInStore = pkgs.dockerTools.buildLayeredImageWithNixDb {
     name = "file-in-store";
···
     };
   };

-  # 20. Ensure that setting created to now results in a date which
+  # 22. Ensure that setting created to now results in a date which
   # isn't the epoch + 1 for layered images.
   unstableDateLayered = pkgs.dockerTools.buildLayeredImage {
     name = "unstable-date-layered";
+87-7
pkgs/build-support/docker/stream_layered_image.py
···

 import io
 import os
+import re
 import sys
 import json
 import hashlib
···
         return (self._digest.hexdigest(), self._size)


+FromImage = namedtuple("FromImage", ["tar", "manifest_json", "image_json"])
 # Some metadata for a layer
 LayerInfo = namedtuple("LayerInfo", ["size", "checksum", "path", "paths"])


+def load_from_image(from_image_str):
+    """
+    Loads the given base image, if any.
+
+    from_image_str: Path to the base image archive.
+
+    Returns: A 'FromImage' object with references to the loaded base image,
+    or 'None' if no base image was provided.
+    """
+    if from_image_str is None:
+        return None
+
+    base_tar = tarfile.open(from_image_str)
+
+    manifest_json_tarinfo = base_tar.getmember("manifest.json")
+    with base_tar.extractfile(manifest_json_tarinfo) as f:
+        manifest_json = json.load(f)
+
+    image_json_tarinfo = base_tar.getmember(manifest_json[0]["Config"])
+    with base_tar.extractfile(image_json_tarinfo) as f:
+        image_json = json.load(f)
+
+    return FromImage(base_tar, manifest_json, image_json)
+
+
+def add_base_layers(tar, from_image):
+    """
+    Adds the layers from the given base image to the final image.
+
+    tar: 'tarfile.TarFile' object for new layers to be added to.
+    from_image: 'FromImage' object with references to the loaded base image.
+    """
+    if from_image is None:
+        print("No 'fromImage' provided", file=sys.stderr)
+        return []
+
+    layers = from_image.manifest_json[0]["Layers"]
+    checksums = from_image.image_json["rootfs"]["diff_ids"]
+    layers_checksums = zip(layers, checksums)
+
+    for num, (layer, checksum) in enumerate(layers_checksums, start=1):
+        layer_tarinfo = from_image.tar.getmember(layer)
+        checksum = re.sub(r"^sha256:", "", checksum)
+
+        tar.addfile(layer_tarinfo, from_image.tar.extractfile(layer_tarinfo))
+        path = layer_tarinfo.path
+        size = layer_tarinfo.size
+
+        print("Adding base layer", num, "from", path, file=sys.stderr)
+        yield LayerInfo(size=size, checksum=checksum, path=path, paths=[path])
+
+    from_image.tar.close()
+
+
+def overlay_base_config(from_image, final_config):
+    """
+    Overlays the final image 'config' JSON on top of selected defaults from
+    the base image 'config' JSON.
+
+    from_image: 'FromImage' object with references to the loaded base image.
+    final_config: 'dict' object of the final image 'config' JSON.
+    """
+    if from_image is None:
+        return final_config
+
+    base_config = from_image.image_json["config"]
+
+    # Preserve environment from base image
+    final_env = base_config.get("Env", []) + final_config.get("Env", [])
+    if final_env:
+        final_config["Env"] = final_env
+    return final_config
+
+
 def add_layer_dir(tar, paths, store_dir, mtime):
     """
     Appends given store paths to a TarFile object as a new layer.
···
     mtime = int(created.timestamp())
     store_dir = conf["store_dir"]

+    from_image = load_from_image(conf["from_image"])
+
     with tarfile.open(mode="w|", fileobj=sys.stdout.buffer) as tar:
         layers = []
-        for num, store_layer in enumerate(conf["store_layers"]):
-            print(
-                "Creating layer", num,
-                "from paths:", store_layer,
-                file=sys.stderr)
+        layers.extend(add_base_layers(tar, from_image))
+
+        start = len(layers) + 1
+        for num, store_layer in enumerate(conf["store_layers"], start=start):
+            print("Creating layer", num, "from paths:", store_layer,
+                  file=sys.stderr)
             info = add_layer_dir(tar, store_layer, store_dir, mtime=mtime)
             layers.append(info)

-        print("Creating the customisation layer...", file=sys.stderr)
+        print("Creating layer", len(layers) + 1, "with customisation...",
+              file=sys.stderr)
         layers.append(
             add_customisation_layer(
                 tar,
···
         "created": datetime.isoformat(created),
         "architecture": conf["architecture"],
         "os": "linux",
-        "config": conf["config"],
+        "config": overlay_base_config(from_image, conf["config"]),
         "rootfs": {
             "diff_ids": [f"sha256:{layer.checksum}" for layer in layers],
             "type": "layers",
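Note that `overlay_base_config` carries only `Env` over from the base image; every other config key (`Cmd`, `WorkingDir`, and so on) comes from the final image's own `config`. A minimal sketch of the observable behavior, following the `environmentVariablesLayered` example and its test; `parentImage` stands in for a hypothetical base whose `Env` sets `FROM_PARENT=true` and `LAST_LAYER=parent`:

```nix
# The resulting image config concatenates the parent's Env before the
# child's, and Docker resolves duplicate variables to the last occurrence:
#   FROM_PARENT=true, FROM_CHILD=true, LAST_LAYER=child
pkgs.dockerTools.buildLayeredImage {
  name = "child";
  tag = "latest";
  fromImage = parentImage; # hypothetical base image
  config.Env = [ "FROM_CHILD=true" "LAST_LAYER=child" ];
}
```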
pkgs/development/libraries/zchunk/default.nix
···
-{ lib, stdenv
+{ lib
+, stdenv
 , fetchFromGitHub
-, fetchpatch
-, pkg-config
+, argp-standalone
+, curl
 , meson
 , ninja
+, pkg-config
 , zstd
-, curl
-, argp-standalone
 }:

 stdenv.mkDerivation rec {
   pname = "zchunk";
-  version = "1.1.8";
+  version = "1.1.9";

   outputs = [ "out" "lib" "dev" ];

···
     owner = "zchunk";
     repo = pname;
     rev = version;
-    sha256 = "0q1jafxh5nqgn2w5ciljkh8h46xma0qia8a5rj9m0pxixcacqj6q";
+    hash = "sha256-MqnHtqOjLl6R5GZ4f2UX1iLoO9FUT2IfZlSN58wW8JA=";
   };

   nativeBuildInputs = [
···
   ];

   buildInputs = [
+    curl
     zstd
-    curl
   ] ++ lib.optional stdenv.isDarwin argp-standalone;

-  # Darwin needs a patch for argp-standalone usage and differing endian.h location on macOS
-  # https://github.com/zchunk/zchunk/pull/35
-  patches = [
-    (fetchpatch {
-      name = "darwin-support.patch";
-      url = "https://github.com/zchunk/zchunk/commit/f7db2ac0a95028a7f82ecb89862426bf53a69232.patch";
-      sha256 = "0cm84gyii4ly6nsmagk15g9kbfa13rw395nqk3fdcwm0dpixlkh4";
-    })
-  ];
-
   meta = with lib; {
+    homepage = "https://github.com/zchunk/zchunk";
     description = "File format designed for highly efficient deltas while maintaining good compression";
-    homepage = "https://github.com/zchunk/zchunk";
+    longDescription = ''
+      zchunk is a compressed file format that splits the file into independent
+      chunks. This allows you to only download changed chunks when downloading
+      a new version of the file, and also makes zchunk files efficient over
+      rsync.
+
+      zchunk files are protected with strong checksums to verify that the file
+      you downloaded is, in fact, the file you wanted.
+    '';
     license = licenses.bsd2;
-    maintainers = with maintainers; [];
+    maintainers = with maintainers; [ AndersonTorres ];
     platforms = platforms.unix;
   };
 }
+29-45
pkgs/development/libraries/zziplib/default.nix
···
-{ lib, stdenv
+{ lib
+, stdenv
+, fetchFromGitHub
+, cmake
 , perl
 , pkg-config
-, fetchFromGitHub
-, fetchpatch
-, zip
-, unzip
 , python3
 , xmlto
+, zip
 , zlib
 }:

 stdenv.mkDerivation rec {
   pname = "zziplib";
-  version = "0.13.71";
+  version = "0.13.72";

   src = fetchFromGitHub {
     owner = "gdraheim";
-    repo = "zziplib";
+    repo = pname;
     rev = "v${version}";
-    sha256 = "P+7D57sc2oIABhk3k96aRILpGnsND5SLXHh2lqr9O4E=";
+    hash = "sha256-Ht3fBgdrTm4mCi5uhgQPNtpGzADoRVOpSuGPsIS6y0Q=";
   };

-  patches = [
-    # Install man pages
-    (fetchpatch {
-      url = "https://github.com/gdraheim/zziplib/commit/5583ccc7a247ee27556ede344e93d3ac1dc72e9b.patch";
-      sha256 = "wVExEZN8Ml1/3GicB0ZYsLVS3KJ8BSz8i4Gu46naz1Y=";
-      excludes = [ "GNUmakefile" ];
-    })
-
-    # Fix man page formatting
-    (fetchpatch {
-      url = "https://github.com/gdraheim/zziplib/commit/22ed64f13dc239f86664c60496261f544bce1088.patch";
-      sha256 = "ScFVWLc4LQPqkcHn9HK/VkLula4b5HzuYl0b5vi4Ikc=";
-    })
-  ];
-
   nativeBuildInputs = [
+    cmake
     perl
     pkg-config
-    zip
     python3
     xmlto
+    zip
   ];

   buildInputs = [
     zlib
   ];

-  checkInputs = [
-    unzip
+  # test/zziptests.py requires network access
+  # (https://github.com/gdraheim/zziplib/issues/24)
+  cmakeFlags = [
+    "-DZZIP_TESTCVE=OFF"
+    "-DBUILD_SHARED_LIBS=True"
+    "-DBUILD_STATIC_LIBS=False"
+    "-DBUILD_TESTS=OFF"
+    "-DMSVC_STATIC_RUNTIME=OFF"
+    "-DZZIPSDL=OFF"
+    "-DZZIPTEST=OFF"
+    "-DZZIPWRAP=OFF"
+    "-DBUILDTESTS=OFF"
   ];

-  # tests are broken (https://github.com/gdraheim/zziplib/issues/20),
-  # and test/zziptests.py requires network access
-  # (https://github.com/gdraheim/zziplib/issues/24)
-  doCheck = false;
-  checkTarget = "check";
-
   meta = with lib; {
+    homepage = "https://github.com/gdraheim/zziplib";
     description = "Library to extract data from files archived in a zip file";
-
     longDescription = ''
-      The zziplib library is intentionally lightweight, it offers the ability
-      to easily extract data from files archived in a single zip
-      file. Applications can bundle files into a single zip archive and
-      access them. The implementation is based only on the (free) subset of
-      compression with the zlib algorithm which is actually used by the
-      zip/unzip tools.
+      The zziplib library is intentionally lightweight; it offers the ability
+      to easily extract data from files archived in a single zip file.
+      Applications can bundle files into a single zip archive and access them.
+      The implementation is based only on the (free) subset of compression
+      with the zlib algorithm which is actually used by the zip/unzip tools.
     '';
-
     license = with licenses; [ lgpl2Plus mpl11 ];
-
-    homepage = "http://zziplib.sourceforge.net/";
-
-    maintainers = [ ];
+    maintainers = with maintainers; [ AndersonTorres ];
     platforms = python3.meta.platforms;
   };
 }