#!/usr/bin/env python3

# usage: auto-layer.py graph_file [ignore_file] [layer_limit]

# graph_file: Path to a json file as generated by writeReferencesGraph
# ignore_file: Path to a file with a list of store paths that should not appear in the output
# layer_limit: Maximum number of layers to generate, default 100
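
# Example invocation (hypothetical file names):
#   ./auto-layer.py graph.json ignore-paths.txt 120 > layers.json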

# This module tries to split a dependency graph of Nix store paths into a
# limited set of layers that together cover all mentioned paths. It tries to
# choose the layers so that different inputs tend to have their largest layers
# in common and most layers can be shared, while the differences between
# inputs end up in smaller layers.

# It does this by splitting off the N largest store paths (by nar size) into
# their own layers, including some of their dependencies.
# Specifically, for a large store path L, it creates a layer with L and any
# store path D that L depends on and for which there is no store path in the
# input that depends on D but not on L.
# Then, if any store paths are depended on by multiple of the chosen large
# store paths, those common dependencies get their own layer, one per set of
# large store paths that depends on them.
# N is iteratively increased until the layer limit is reached.
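
# As a small, hypothetical example: suppose the input contains two large paths
# A and B, where both depend on C and only A depends on D. With A and B as
# layer roots, the split becomes:
#   layer {A}:    A and D (nothing outside A's closure uses D)
#   layer {B}:    B
#   layer {A, B}: C (shared by both, so it gets its own layer)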

# The reasoning for this algorithm is as follows:

# Most closures contain a few large store paths and many small store paths. If
# we want to share as many bytes as possible with other layered images, we
# should focus on putting the largest paths in their own layer.

# If we had data on how much each store path is used and how likely each
# combination of store paths is, we might be able to infer which large store
# paths are better off being combined into a single layer. However, getting
# that information, let alone keeping it up to date, is very difficult. If we
# can't tell that two large store paths are often going to appear together,
# then we're better off giving each of them their own layer.

# This leaves a lot of smaller store paths to be assigned to layers. Anything
# that depends on a large store path L will also depend on all the store
# paths that L depends on, so it makes sense to move the dependencies of L into
# the same layer as L.

# Possible improvements:
# - Specifying a size limit below which the algorithm stops using large store
#   paths as new layer roots might further improve sharing, as the layer
#   boundaries would depend less on the number of larger store paths in the
#   input.
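#   A minimal sketch of that idea, with min_root_size as a hypothetical new
#   parameter, would be an extra check at the top of the while loop below:
#     if nodes_by_size[-1]["narSize"] < min_root_size:
#         break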

import json
import os
import sys

def layer_count(layer_split):
    """
    The number of distinct layers in a split.
    """
    return len(set(layer_split.values()))

def path_key(path):
    """
    Sort key that orders store paths by name first and hash second, for
    stable, human-readable output.
    """
    # store_hash rather than hash, to avoid shadowing the builtin.
    store_hash, name = path.split('-', 1)
    return name, store_hash
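
# e.g. sorted(["abc-gcc-13", "xyz-bash-5.2", "def-bash-5.2"], key=path_key)
# == ["def-bash-5.2", "xyz-bash-5.2", "abc-gcc-13"]: same-named paths group
# together regardless of hash.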

def closure(*todo, key):
    """
    Find all dependencies of the arguments including the arguments themselves.
    """
    todo = set(todo)
    done = set()
    while todo:
        x = todo.pop()
        if x not in done:
            done.add(x)
            todo.update(key(x))
    return done
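
# A tiny illustrative example (hypothetical three-node graph):
#   refs = {"a": {"b"}, "b": {"c"}, "c": set()}
#   closure("a", key=lambda x: refs[x]) == {"a", "b", "c"}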

def dependencies(*todo, key):
    """
    Find all dependencies of the arguments excluding the arguments themselves.
    """
    return closure(*todo, key=key) - set(todo)
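
# e.g. dependencies("a", key=lambda x: refs[x]) == {"b", "c"} with the refs above.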

def minimal_cover(paths, key):
    """
    The minimal set of paths that together cover all input paths with their
    closure. None of the result paths depend on each other.
    """
    paths = set(paths)
    # set().union(...) rather than set.union(*...) so an empty input works.
    paths_deps = set().union(*(dependencies(d, key=key) for d in paths))
    return paths - paths_deps
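
# e.g. minimal_cover({"a", "b", "c"}, key=lambda x: refs[x]) == {"a"} with the
# refs above: the closure of "a" already covers "b" and "c".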

def auto_layer(graph, ignore_paths, layer_limit):
    # Compute all direct users of each path by inverting the reference edges.
    nodes = {x["path"]: x | {"users": set()} for x in graph}
    for user in nodes:
        for ref in nodes[user]["references"]:
            nodes[ref]["users"] |= {user}

    def node_deps(path):
        nonlocal nodes
        return nodes[path]["references"]

    def node_users(path):
        nonlocal nodes
        return nodes[path]["users"]

    # Sorted ascending, so .pop() yields the largest remaining path.
    nodes_by_size = sorted(graph, key=lambda node: node["narSize"])

    # Here starts the main algorithm:
    # The goal is to split the set of store paths into layers such that the layers are likely to be
    # reusable and the closure size is spread out over the layers. We do this by iteratively taking
    # the largest store path and giving it its own layer. This primary store path becomes the identity
    # of the layer. We also add every dependency of the identifying store path to the same layer unless
    # it is also used by something that doesn't depend on the identifying store path. More generally, we
    # put store paths together in the same layer when the set of primary paths that depend on them is
    # the same.

    # layer_split defines how the layers are currently split. We start with a single layer with no
    # primary path. This is encoded as every store path mapping to the empty set of primary paths.
    # In general, layer_split maps each store path to the set of primary paths that depend on it, and
    # that set defines and identifies the layer.
    layer_split = {path: frozenset() for path in nodes}
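
    # For example, once two primary paths P1 and P2 have been chosen, a store
    # path reached only through P1 maps to frozenset({P1}), one shared by both
    # maps to frozenset({P1, P2}), and anything not below any primary path
    # still maps to frozenset().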

    primary_paths = set()
    while nodes_by_size:
        # Every iteration, we choose the next-biggest path to be the root of a new layer.
        new_primary_path = nodes_by_size.pop()["path"]
        primary_paths.add(new_primary_path)
        new_layer_split = layer_split.copy()
        new_layer_split[new_primary_path] = frozenset({new_primary_path})
        new_primary_path_deps = dependencies(new_primary_path, key=node_deps)
        # Following the reversed (user) edges yields the transitive users.
        new_primary_path_users = dependencies(new_primary_path, key=node_users)

        # Update the set of primary users for every dependency of the new primary path.
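        # Two things happen per dependency: primary paths that themselves use
        # the new primary path are dropped (they now reach this dependency
        # through the new layer), and the new primary path is added unless the
        # dependency is already covered by a primary path inside the new
        # primary path's own closure.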
        for dep in new_primary_path_deps:
            new_layer_split[dep] -= new_primary_path_users
            if not new_layer_split[dep] & new_primary_path_deps:
                new_layer_split[dep] |= {new_primary_path}

        # If we exceed the layer limit, we give up. The previous split should be good enough.
        if layer_count(new_layer_split) > layer_limit:
            break
        layer_split = new_layer_split

    # Main algorithm done, the layers have been chosen.
    # Now, let's give each layer some metadata, mostly for debugging.

    def layer_info(layer_id):
        nonlocal nodes
        nonlocal layer_split
        # The full set of paths in this layer is all the paths that were assigned to it.
        paths = {path
                 for path, layer_id_2 in layer_split.items()
                 if layer_id == layer_id_2}
        layerSize = sum(nodes[path]["narSize"] for path in paths)
        return {
            "usedBy": sorted(layer_id, key=path_key),
            "paths": sorted(paths, key=path_key),
            "layerSize": layerSize,
            "closureSize": sum(nodes[path]["narSize"] for path in closure(*paths, key=node_deps)),
        }
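
    # One resulting entry looks roughly like this (hypothetical paths and sizes):
    #   {"usedBy": ["hash1-big-pkg"],
    #    "paths": ["hash1-big-pkg", "hash2-small-dep"],
    #    "layerSize": 123456,
    #    "closureSize": 234567}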

    layers = {layer_id: layer_info(layer_id)
              for layer_id in set(layer_split.values())}

    # The layer order doesn't actually matter for Docker, but it's still kind of neat to have layers
    # come after all of their dependencies. The easiest way to do that is to order by closure size: a
    # layer's closure is necessarily larger than that of each of its dependencies, since it includes
    # them.
    layer_order = sorted(layers.values(), key=lambda info: info["closureSize"])

    if os.environ.get("DEBUG"):
        print(json.dumps(layer_order, indent=2), file=sys.stderr)

    # Sanity check that no store path ends up in multiple layers (or is dropped).
    total_layer_size = sum(layer["layerSize"] for layer in layer_order)
    total_nar_size = sum(node["narSize"] for node in graph)
    assert total_layer_size == total_nar_size, (total_layer_size, total_nar_size)

    # Format as a list of layers, each defined as a list of store paths.
    return [[path
             for path in layer["paths"]
             if path not in ignore_paths]
            for layer in layer_order
            if set(layer["paths"]) - ignore_paths]

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        prog='auto-layer',
        description='Split store paths into Docker layers.'
    )
    parser.add_argument('graph_file')
    # nargs='?' makes these positionals optional, as the usage line at the top
    # of this file promises; without it the defaults would never apply.
    parser.add_argument('ignore_file', nargs='?', default="/dev/null")
    parser.add_argument('layer_limit', nargs='?', type=int, default=100)
    args = parser.parse_args()

    with open(args.graph_file) as f:
        graph = json.load(f)

    with open(args.ignore_file) as f:
        # Skip blank lines so an empty string never ends up in the ignore set.
        ignore_paths = {line.strip() for line in f if line.strip()}

    print(json.dumps(auto_layer(graph, ignore_paths, args.layer_limit)))