streamLayeredImage: add dynamic tagging of docker image (#329425)

* streamLayeredImage: self-document the script

'podman load' doesn't let me override the name/tag of the image.
The name and tag are dynamic in a CI environment, so I would like to
be able to adjust the tag dynamically.
Since the image is streamed by stream_layered_image.py, there is no need
to stick with the Nix-hardcoded image name/tag: the Python script can
accept another name instead.

I've added argparse to expose the feature. It has the added benefit of
adding `--help` support to the script, which makes its usage
self-explanatory.

* blackified file
This commit is contained in:
Matthieu Coudron 2024-07-29 19:45:58 +02:00 committed by GitHub
parent 9d3d0e669c
commit ca1657217d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 67 additions and 35 deletions

View File

@ -1115,6 +1115,8 @@ rec {
preferLocalBuild = true;
passthru = passthru // {
inherit (conf) imageTag;
inherit conf;
inherit streamScript;
# Distinguish tarballs and exes at the Nix level so functions that
# take images can know in advance how the image is supposed to be used.

View File

@ -32,7 +32,7 @@ function does all this.
[2]: https://github.com/moby/moby/blob/4fb59c20a4fb54f944fe170d0ff1d00eb4a24d6f/image/spec/v1.2.md#image-json-field-descriptions
""" # noqa: E501
import argparse
import io
import os
import re
@ -225,16 +225,13 @@ def add_layer_dir(tar, paths, store_dir, mtime, uid, gid, uname, gname):
"""
invalid_paths = [i for i in paths if not i.startswith(store_dir)]
assert len(invalid_paths) == 0, \
f"Expecting absolute paths from {store_dir}, but got: {invalid_paths}"
assert (
len(invalid_paths) == 0
), f"Expecting absolute paths from {store_dir}, but got: {invalid_paths}"
# First, calculate the tarball checksum and the size.
extract_checksum = ExtractChecksum()
archive_paths_to(
extract_checksum,
paths,
mtime, uid, gid, uname, gname
)
archive_paths_to(extract_checksum, paths, mtime, uid, gid, uname, gname)
(checksum, size) = extract_checksum.extract()
path = f"{checksum}/layer.tar"
@ -245,12 +242,9 @@ def add_layer_dir(tar, paths, store_dir, mtime, uid, gid, uname, gname):
# Then actually stream the contents to the outer tarball.
read_fd, write_fd = os.pipe()
with open(read_fd, "rb") as read, open(write_fd, "wb") as write:
def producer():
archive_paths_to(
write,
paths,
mtime, uid, gid, uname, gname
)
archive_paths_to(write, paths, mtime, uid, gid, uname, gname)
write.close()
# Closing the write end of the fifo also closes the read end,
@ -292,10 +286,7 @@ def add_customisation_layer(target_tar, customisation_layer, mtime):
target_tar.addfile(tarinfo, f)
return LayerInfo(
size=None,
checksum=checksum,
path=path,
paths=[customisation_layer]
size=None, checksum=checksum, path=path, paths=[customisation_layer]
)
@ -317,13 +308,44 @@ def add_bytes(tar, path, content, mtime):
def main():
with open(sys.argv[1], "r") as f:
arg_parser = argparse.ArgumentParser(
description="""
This script generates a Docker image from a set of store paths. Uses
Docker Image Specification v1.2 as reference [1].
[1]: https://github.com/moby/moby/blob/master/image/spec/v1.2.md
"""
)
arg_parser.add_argument(
"conf",
type=str,
help="""
JSON file with the following properties and writes the
image as an uncompressed tarball to stdout:
* "architecture", "config", "os", "created", "repo_tag" correspond to
the fields with the same name on the image spec [2].
* "created" can be "now".
* "created" is also used as mtime for files added to the image.
* "uid", "gid", "uname", "gname" is the file ownership, for example,
0, 0, "root", "root".
* "store_layers" is a list of layers in ascending order, where each
layer is the list of store paths to include in that layer.
""",
)
arg_parser.add_argument(
"--repo_tag", "-t", type=str,
help="Override the RepoTags from the configuration"
)
args = arg_parser.parse_args()
with open(args.conf, "r") as f:
conf = json.load(f)
created = (
datetime.now(tz=timezone.utc)
if conf["created"] == "now"
else datetime.fromisoformat(conf["created"])
datetime.now(tz=timezone.utc)
if conf["created"] == "now"
else datetime.fromisoformat(conf["created"])
)
mtime = int(created.timestamp())
uid = int(conf["uid"])
@ -340,20 +362,28 @@ def main():
start = len(layers) + 1
for num, store_layer in enumerate(conf["store_layers"], start=start):
print("Creating layer", num, "from paths:", store_layer,
file=sys.stderr)
info = add_layer_dir(tar, store_layer, store_dir,
mtime, uid, gid, uname, gname)
print(
"Creating layer",
num,
"from paths:",
store_layer,
file=sys.stderr,
)
info = add_layer_dir(
tar, store_layer, store_dir, mtime, uid, gid, uname, gname
)
layers.append(info)
print("Creating layer", len(layers) + 1, "with customisation...",
file=sys.stderr)
print(
"Creating layer",
len(layers) + 1,
"with customisation...",
file=sys.stderr,
)
layers.append(
add_customisation_layer(
tar,
conf["customisation_layer"],
mtime=mtime
)
add_customisation_layer(
tar, conf["customisation_layer"], mtime=mtime
)
)
print("Adding manifests...", file=sys.stderr)
@ -369,8 +399,8 @@ def main():
},
"history": [
{
"created": datetime.isoformat(created),
"comment": f"store paths: {layer.paths}"
"created": datetime.isoformat(created),
"comment": f"store paths: {layer.paths}",
}
for layer in layers
],
@ -384,7 +414,7 @@ def main():
manifest_json = [
{
"Config": image_json_path,
"RepoTags": [conf["repo_tag"]],
"RepoTags": [args.repo_tag or conf["repo_tag"]],
"Layers": [layer.path for layer in layers],
}
]