-rw-r--r--  releng/__init__.py        | 62
-rw-r--r--  releng/cli.py             | 10
-rw-r--r--  releng/create_release.xsh | 15
-rw-r--r--  releng/docker.xsh         | 29
-rw-r--r--  releng/docker_assemble.py | 14
-rw-r--r--  releng/gitutils.xsh       |  2
-rw-r--r--  releng/release-jobs.nix   | 57
-rw-r--r--  version.json              |  2
8 files changed, 134 insertions(+), 57 deletions(-)
diff --git a/releng/__init__.py b/releng/__init__.py
index 59d1709b9..179ea3e2b 100644
--- a/releng/__init__.py
+++ b/releng/__init__.py
@@ -1,8 +1,12 @@
 from xonsh.main import setup
+
 setup()
 del setup
 
 import logging
+import sys
+
+import xonsh.base_shell
 
 from . import environment
 from . import create_release
@@ -13,19 +17,55 @@ from . import docker
 from . import docker_assemble
 from . import gitutils
 
-rootLogger = logging.getLogger()
-rootLogger.setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-log.setLevel(logging.DEBUG)
-fmt = logging.Formatter('{asctime} {levelname} {name}: {message}',
-                        datefmt='%b %d %H:%M:%S',
-                        style='{')
+def setup_logging():
+    """
+    Sets up logging to work properly. The following are intended to work:
+    - ipython/xonsh configuration files adding log handlers out of band
+    - Reloading the module in xonsh/ipython not causing Bonus Loggers (which is
+      why we check if there is already a handler. This also helps the previous
+      case)
+    - Importing the releng module from xonsh and poking at it interactively
+    """
+
+    LEVELS = {
+        # Root logger must be DEBUG so that anything else can be DEBUG
+        None: logging.DEBUG,
+        # Everything in releng
+        __name__: logging.DEBUG,
+        # Log spam caused by prompt_toolkit
+        'asyncio': logging.INFO,
+    }
+
+    for name, level in LEVELS.items():
+        logger = logging.getLogger(name)
+        logger.setLevel(level)
+
+    root_logger = logging.getLogger()
+
+    fmt = logging.Formatter('{asctime} {levelname} {name}: {message}',
+                            datefmt='%b %d %H:%M:%S',
+                            style='{')
+
+    if not any(
+            isinstance(h, logging.StreamHandler) for h in root_logger.handlers):
+        stderr = sys.stderr
+        # XXX: Horrible hack required by the virtual stderr xonsh uses for each entered
+        # command getting closed after the command is run: we need to pull out
+        # the real stderr because this survives across multiple command runs.
+        #
+        # This only applies when running xonsh in interactive mode and importing releng.
+        if isinstance(sys.stderr, xonsh.base_shell._TeeStd):
+            stderr = stderr.std  # type: ignore
+
+        hand = logging.StreamHandler(stream=stderr)
+        hand.set_name('releng root handler')
+        hand.setFormatter(fmt)
+        root_logger.addHandler(hand)
+
+
+setup_logging()
 
-if not any(isinstance(h, logging.StreamHandler) for h in rootLogger.handlers):
-    hand = logging.StreamHandler()
-    hand.setFormatter(fmt)
-    rootLogger.addHandler(hand)
 
 def reload():
     import importlib
diff --git a/releng/cli.py b/releng/cli.py
index 89391e0a7..f78d4b12d 100644
--- a/releng/cli.py
+++ b/releng/cli.py
@@ -2,16 +2,16 @@ from . import create_release
 from . import docker
 from .environment import RelengEnvironment
 from . import environment
-import functools
 import argparse
 import sys
 
 
 def do_build(args):
     if args.target == 'all':
-        create_release.build_artifacts(no_check_git=args.no_check_git)
+        create_release.build_artifacts(args.profile, no_check_git=args.no_check_git)
     elif args.target == 'manual':
-        eval_result = create_release.eval_jobs()
+        # n.b. args.profile does nothing here, you will just get the x86_64-linux manual no matter what.
+        eval_result = create_release.eval_jobs(args.profile)
         create_release.build_manual(eval_result)
     else:
         raise ValueError('invalid target, unreachable')
@@ -80,6 +80,10 @@ def main():
     build.add_argument('--target',
                        choices=['manual', 'all'],
                        help='Whether to build everything or just the manual')
+    build.add_argument('--profile',
+                       default='all',
+                       choices=('all', 'x86_64-linux-only'),
+                       help='Which systems to build targets for.')
     build.set_defaults(cmd=do_build)
 
     upload = sps.add_parser(
diff --git a/releng/create_release.xsh b/releng/create_release.xsh
index b51a3ad23..96b13ae4c 100644
--- a/releng/create_release.xsh
+++ b/releng/create_release.xsh
@@ -27,9 +27,6 @@ RELENG_MSG = "Release created with releng/create_release.xsh"
 BUILD_CORES = 16
 MAX_JOBS = 2
 
-# TODO
-RELEASE_SYSTEMS = ["x86_64-linux"]
-
 
 def setup_creds(env: RelengEnvironment):
     key = keys.get_ephemeral_key(env)
@@ -82,11 +79,9 @@ def realise(paths: list[str]):
     nix-store @(args) @(paths)
 
 
-def eval_jobs():
-    nej_output = $(nix-eval-jobs --workers 4 --gc-roots-dir @(GCROOTS_DIR) --force-recurse --flake '.#release-jobs')
-    return [x for x in (json.loads(s) for s in nej_output.strip().split('\n'))
-            if x['system'] in RELEASE_SYSTEMS
-    ]
+def eval_jobs(build_profile):
+    nej_output = $(nix-eval-jobs --workers 4 --gc-roots-dir @(GCROOTS_DIR) --force-recurse --flake f'.#release-jobs.{build_profile}')
+    return [json.loads(s) for s in nej_output.strip().split('\n')]
 
 
 def upload_drv_paths_and_outputs(env: RelengEnvironment, paths: list[str]):
@@ -295,14 +290,14 @@ def upload_manual(env: RelengEnvironment):
     aws s3 sync @(MANUAL)/ @(env.docs_bucket)/manual/lix/stable/
 
 
-def build_artifacts(no_check_git=False):
+def build_artifacts(build_profile, no_check_git=False):
     rm -rf release/
     if not no_check_git:
         verify_are_on_tag()
         git_preconditions()
 
     print('[+] Evaluating')
-    eval_result = eval_jobs()
+    eval_result = eval_jobs(build_profile)
     drv_paths = [x['drvPath'] for x in eval_result]
 
     print('[+] Building')
diff --git a/releng/docker.xsh b/releng/docker.xsh
index 20fb30cd3..13bdd7868 100644
--- a/releng/docker.xsh
+++ b/releng/docker.xsh
@@ -19,6 +19,7 @@ def check_all_logins(env: RelengEnvironment):
         check_login(target)
 
 def check_login(target: DockerTarget):
+    log.info('Checking login for %s', target.registry_name)
     skopeo login @(target.registry_name())
 
 def upload_docker_images(target: DockerTarget, paths: list[Path]):
@@ -43,7 +44,23 @@ def upload_docker_images(target: DockerTarget, paths: list[Path]):
 
     for path in paths:
        digest_file = tmp / (path.name + '.digest')
-        inspection = json.loads($(skopeo inspect docker-archive:@(path)))
+        tmp_image = tmp / 'tmp-image.tar.gz'
+
+        # insecure-policy: we don't have any signature policy, we are just uploading an image
+        #
+        # Absurd: we copy it into an OCI image first so we can get the hash
+        # we need to upload it untagged, because skopeo has no "don't tag
+        # this" option.
+        # The reason for this is that forgejo's container registry throws
+        # away old versions of tags immediately, so we cannot use a temp
+        # tag, and it *does* reduce confusion to not upload tags that
+        # should not be used.
+        #
+        # Workaround for: https://github.com/containers/skopeo/issues/2354
+        log.info('skopeo copy to temp oci-archive %s', tmp_image)
+        skopeo --insecure-policy copy --format oci --all --digestfile @(digest_file) docker-archive:@(path) oci-archive:@(tmp_image)
+
+        inspection = json.loads($(skopeo inspect oci-archive:@(tmp_image)))
 
         docker_arch = inspection['Architecture']
         docker_os = inspection['Os']
@@ -51,21 +68,13 @@ def upload_docker_images(target: DockerTarget, paths: list[Path]):
 
         log.info('Pushing image %s for %s to %s', path, docker_arch, target.registry_path)
 
-        # insecure-policy: we don't have any signature policy, we are just uploading an image
-        # We upload to a junk tag, because otherwise it will upload to `latest`, which is undesirable
-        skopeo --insecure-policy copy --format oci --digestfile @(digest_file) docker-archive:@(path) docker://@(target.registry_path):temp
-
         digest = digest_file.read_text().strip()
+        skopeo --insecure-policy copy --preserve-digests --all oci-archive:@(tmp_image) f'docker://{target.registry_path}@{digest}'
 
         # skopeo doesn't give us the manifest size directly, so we just ask the registry
         metadata = reg.image_info(target.registry_path, digest)
 
         manifests.append(OCIIndexItem(metadata=metadata, architecture=docker_arch, os=docker_os))
 
-        # delete the temp tag, which we only have to create because of skopeo
-        # limitations anyhow (it seems to not have a way to say "don't tag it, find
-        # your checksum and put it there")
-        # FIXME: this is not possible because GitHub only has a proprietary API for it. amazing. 11/10.
-        # reg.delete_tag(target.registry_path, 'temp')
     log.info('Pushed images to %r, building a bigger and more menacing manifest from %r with metadata %r',
              target, manifests, meta)
 
     # send the multiarch manifest to each tag
diff --git a/releng/docker_assemble.py b/releng/docker_assemble.py
index ef1d8c4e6..d5b47c328 100644
--- a/releng/docker_assemble.py
+++ b/releng/docker_assemble.py
@@ -49,8 +49,8 @@ if DEBUG_REQUESTS:
 # fix that. Thus, a little bit of homebrew containers code.
 #
 # Essentially what we are doing in here is splatting a bunch of images into the
-# registry without tagging them (except as "temp", due to podman issues), then
-# simply sending a new composite manifest ourselves.
+# registry without tagging them (with a silly workaround to skopeo issues),
+# then simply sending a new composite manifest ourselves.
 
 DockerArchitecture = Literal['amd64'] | Literal['arm64']
 MANIFEST_MIME = 'application/vnd.oci.image.manifest.v1+json'
@@ -100,14 +100,6 @@ class OCIIndex:
         }
 
 
-def docker_architecture_from_nix_system(system: str) -> DockerArchitecture:
-    MAP = {
-        'x86_64-linux': 'amd64',
-        'aarch64-linux': 'arm64',
-    }
-    return MAP[system]  # type: ignore
-
-
 @dataclasses.dataclass
 class TaggingOperation:
     manifest: OCIIndex
@@ -284,7 +276,7 @@ class AuthState:
                 'Authorization': 'Basic ' + creds
             }).json()
         token = resp['token']
-        self.token_cache[service] = token
+        self.token_cache[authority] = token
         return token
 
     def find_credential_for(self, image_path: str):
diff --git a/releng/gitutils.xsh b/releng/gitutils.xsh
index 3352a6b21..e18b4da5f 100644
--- a/releng/gitutils.xsh
+++ b/releng/gitutils.xsh
@@ -1,6 +1,8 @@
 import subprocess
 import json
 
+from .version import VERSION
+
 
 def version_compare(v1: str, v2: str):
     return json.loads($(nix-instantiate --eval --json --argstr v1 @(v1) --argstr v2 @(v2) --expr '{v1, v2}: builtins.compareVersions v1 v2'))
diff --git a/releng/release-jobs.nix b/releng/release-jobs.nix
index 4db0baed3..7f1017002 100644
--- a/releng/release-jobs.nix
+++ b/releng/release-jobs.nix
@@ -3,8 +3,27 @@ let
   inherit (pkgs) lib;
 
   lix = hydraJobs.build.x86_64-linux;
-  systems = [ "x86_64-linux" ];
-  dockerSystems = [ "x86_64-linux" ];
+  # This is all so clumsy because we can't use arguments to functions in
+  # flakes, and certainly not with n-e-j.
+  profiles = {
+    # Used for testing
+    x86_64-linux-only = {
+      systems = [ "x86_64-linux" ];
+      dockerSystems = [ "x86_64-linux" ];
+    };
+    all = {
+      systems = [
+        "x86_64-linux"
+        "aarch64-linux"
+        "aarch64-darwin"
+        "x86_64-darwin"
+      ];
+      dockerSystems = [
+        "x86_64-linux"
+        "aarch64-linux"
+      ];
+    };
+  };
 
   doTarball =
     {
@@ -27,7 +46,8 @@
       sha256sum --binary $filename | cut -f1 -d' ' > $out/$basename.sha256
     '';
 
-  targets =
+  targetsFor =
+    { systems, dockerSystems }:
     builtins.map (system: {
       target = hydraJobs.binaryTarball.${system};
       targetName = "*.tar.xz";
@@ -44,14 +64,29 @@
     tar -cvzf "$out/lix-${lix.version}-manual.tar.gz" lix-${lix.version}-manual
   '';
 
-  tarballs = pkgs.runCommand "lix-release-tarballs" { } ''
-    mkdir -p $out
-    ${lib.concatMapStringsSep "\n" doTarball targets}
-    cp ${manualTar}/*.tar.gz $out
-    cp -r ${lix.doc}/share/doc/nix/manual $out
-  '';
+  tarballsFor =
+    { systems, dockerSystems }:
+    pkgs.runCommand "lix-release-tarballs" { } ''
+      mkdir -p $out
+      ${lib.concatMapStringsSep "\n" doTarball (targetsFor {
+        inherit systems dockerSystems;
+      })}
+      ${doTarball {
+        target = manualTar;
+        targetName = "lix-*.tar.gz";
+      }}
+      cp -r ${lix.doc}/share/doc/nix/manual $out
+    '';
 in
-{
+(builtins.mapAttrs (
+  _:
+  { systems, dockerSystems }:
+  {
+    build = lib.filterAttrs (x: _: builtins.elem x systems) hydraJobs.build;
+    tarballs = tarballsFor { inherit systems dockerSystems; };
+  }
+) profiles)
+// {
   inherit (hydraJobs) build;
-  inherit tarballs;
+  inherit tarballsFor;
 }
diff --git a/version.json b/version.json
index 877e7a20b..1481a97fd 100644
--- a/version.json
+++ b/version.json
@@ -1,4 +1,4 @@
 {
-    "version": "2.90.0",
+    "version": "2.90.0-rc1",
     "release_name": "Vanilla Ice Cream"
 }
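
For illustration, a minimal sketch (not part of the patch above) of the evaluation step this change enables: each build profile is an attribute of release-jobs.nix, so selecting a profile only changes which flake attribute nix-eval-jobs walks. The helper below is hypothetical and omits the --gc-roots-dir handling and xonsh syntax of the real create_release.xsh.

    import json
    import subprocess

    def eval_jobs(build_profile: str) -> list[dict]:
        # 'all' covers every release system; 'x86_64-linux-only' is the testing profile.
        out = subprocess.run(
            ['nix-eval-jobs', '--workers', '4', '--force-recurse',
             '--flake', f'.#release-jobs.{build_profile}'],
            check=True, capture_output=True, text=True,
        ).stdout
        # nix-eval-jobs emits one JSON object per line, one per derivation.
        return [json.loads(line) for line in out.splitlines() if line.strip()]

    # e.g. eval_jobs('x86_64-linux-only') for a quick test, eval_jobs('all') for a release.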