Diffstat (limited to 'releng')
-rw-r--r--   releng/__init__.py         |  32
-rw-r--r--   releng/cli.py              |  28
-rw-r--r--   releng/create_release.xsh  |  65
-rw-r--r--   releng/docker.xsh          |  74
-rw-r--r--   releng/docker_assemble.py  | 399
-rw-r--r--   releng/environment.py      |  92
-rw-r--r--   releng/gitutils.xsh        |  37
-rw-r--r--   releng/release-jobs.nix    |   2
8 files changed, 677 insertions, 52 deletions
diff --git a/releng/__init__.py b/releng/__init__.py
index 401b8e322..59d1709b9 100644
--- a/releng/__init__.py
+++ b/releng/__init__.py
@@ -2,11 +2,30 @@ from xonsh.main import setup
 setup()
 del setup
 
-from releng import environment
-from releng import create_release
-from releng import keys
-from releng import version
-from releng import cli
+import logging
+
+from . import environment
+from . import create_release
+from . import keys
+from . import version
+from . import cli
+from . import docker
+from . import docker_assemble
+from . import gitutils
+
+rootLogger = logging.getLogger()
+rootLogger.setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+log.setLevel(logging.DEBUG)
+
+fmt = logging.Formatter('{asctime} {levelname} {name}: {message}',
+                        datefmt='%b %d %H:%M:%S',
+                        style='{')
+
+if not any(isinstance(h, logging.StreamHandler) for h in rootLogger.handlers):
+    hand = logging.StreamHandler()
+    hand.setFormatter(fmt)
+    rootLogger.addHandler(hand)
 
 def reload():
     import importlib
@@ -15,3 +34,6 @@ def reload():
     importlib.reload(keys)
     importlib.reload(version)
     importlib.reload(cli)
+    importlib.reload(docker)
+    importlib.reload(docker_assemble)
+    importlib.reload(gitutils)
diff --git a/releng/cli.py b/releng/cli.py
index bba50f534..89391e0a7 100644
--- a/releng/cli.py
+++ b/releng/cli.py
@@ -1,4 +1,8 @@
 from . import create_release
+from . import docker
+from .environment import RelengEnvironment
+from . import environment
+import functools
 import argparse
 import sys
 
@@ -18,13 +22,16 @@ def do_tag(args):
                            no_check_git=args.no_check_git)
 
 
-def do_upload(args):
-    create_release.setup_creds()
+def do_upload(env: RelengEnvironment, args):
+    create_release.setup_creds(env)
     if args.target == 'all':
-        create_release.upload_artifacts(force_push_tag=args.force_push_tag,
-                                        noconfirm=args.noconfirm)
+        docker.check_all_logins(env)
+        create_release.upload_artifacts(env,
+                                        force_push_tag=args.force_push_tag,
+                                        noconfirm=args.noconfirm,
+                                        no_check_git=args.no_check_git)
     elif args.target == 'manual':
-        create_release.upload_manual()
+        create_release.upload_manual(env)
     else:
         raise ValueError('invalid target, unreachable')
 
@@ -77,6 +84,10 @@ def main():
     upload = sps.add_parser(
         'upload', help='Upload artifacts to cache and releases bucket')
+    upload.add_argument(
+        '--no-check-git',
+        action='store_true',
+        help="Don't check git state before uploading. For testing.")
     upload.add_argument('--force-push-tag',
                         action='store_true',
                         help='Force push the tag. For testing.')
@@ -90,7 +101,12 @@ def main():
         '--noconfirm',
         action='store_true',
         help="Don't ask for confirmation. For testing/automation.")
-    upload.set_defaults(cmd=do_upload)
+    upload.add_argument('--environment',
+                        choices=list(environment.ENVIRONMENTS.keys()),
+                        default='staging',
+                        help='Environment to release to')
+    upload.set_defaults(cmd=lambda args: do_upload(
+        environment.ENVIRONMENTS[args.environment], args))
 
     args = ap.parse_args()
     args.cmd(args)
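Illustration only, not part of the patch: with the new --environment flag, the upload subcommand looks up a RelengEnvironment before dispatching. A rough sketch of what the lambda in set_defaults ends up doing (values taken from environment.py; running it outside the releng package is hypothetical):

    from releng.environment import ENVIRONMENTS

    env = ENVIRONMENTS['staging']          # 'staging' is the default choice
    print(env.name, env.releases_bucket)   # staging s3://staging-releases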
diff --git a/releng/create_release.xsh b/releng/create_release.xsh
index c57a92b2f..94c78a83f 100644
--- a/releng/create_release.xsh
+++ b/releng/create_release.xsh
@@ -7,19 +7,15 @@ import tempfile
 import hashlib
 import datetime
 from . import environment
+from .environment import RelengEnvironment
 from . import keys
+from . import docker
 from .version import VERSION, RELEASE_NAME, MAJOR
+from .gitutils import verify_are_on_tag, git_preconditions
 
 $RAISE_SUBPROC_ERROR = True
 $XONSH_SHOW_TRACEBACK = True
 
-RELENG_ENV = environment.STAGING
-
-RELEASES_BUCKET = RELENG_ENV.releases_bucket
-DOCS_BUCKET = RELENG_ENV.docs_bucket
-CACHE_STORE = RELENG_ENV.cache_store_uri()
-REPO = RELENG_ENV.git_repo
-
 GCROOTS_DIR = Path('./release/gcroots')
 BUILT_GCROOTS_DIR = Path('./release/gcroots-build')
 DRVS_TXT = Path('./release/drvs.txt')
@@ -35,23 +31,14 @@ MAX_JOBS = 2
 RELEASE_SYSTEMS = ["x86_64-linux"]
 
 
-def setup_creds():
-    key = keys.get_ephemeral_key(RELENG_ENV)
+def setup_creds(env: RelengEnvironment):
+    key = keys.get_ephemeral_key(env)
     $AWS_SECRET_ACCESS_KEY = key.secret_key
     $AWS_ACCESS_KEY_ID = key.id
     $AWS_DEFAULT_REGION = 'garage'
     $AWS_ENDPOINT_URL = environment.S3_ENDPOINT
 
 
-def git_preconditions():
-    # verify there is nothing in index ready to stage
-    proc = !(git diff-index --quiet --cached HEAD --)
-    assert proc.rtn == 0
-    # verify there is nothing *stageable* and tracked
-    proc = !(git diff-files --quiet)
-    assert proc.rtn == 0
-
-
 def official_release_commit_tag(force_tag=False):
     print('[+] Setting officialRelease in flake.nix and tagging')
     prev_branch = $(git symbolic-ref --short HEAD).strip()
@@ -102,13 +89,13 @@ def eval_jobs():
     ]
 
 
-def upload_drv_paths_and_outputs(paths: list[str]):
+def upload_drv_paths_and_outputs(env: RelengEnvironment, paths: list[str]):
     proc = subprocess.Popen([
         'nix',
         'copy',
         '-v',
         '--to',
-        CACHE_STORE,
+        env.cache_store_uri(),
         '--stdin',
     ],
                             stdin=subprocess.PIPE,
@@ -245,33 +232,38 @@ def prepare_release_notes():
     git commit -m @(commit_msg)
 
 
-def verify_are_on_tag():
-    current_tag = $(git describe --tag).strip()
-    assert current_tag == VERSION
-
-
-def upload_artifacts(noconfirm=False, force_push_tag=False):
+def upload_artifacts(env: RelengEnvironment, noconfirm=False, no_check_git=False, force_push_tag=False):
+    if not no_check_git:
+        verify_are_on_tag()
+        git_preconditions()
     assert 'AWS_SECRET_ACCESS_KEY' in __xonsh__.env
 
     tree @(ARTIFACTS)
 
+    env_part = f'environment {env.name}'
     not noconfirm and confirm(
-        f'Would you like to release {ARTIFACTS} as {VERSION}? Type "I want to release this" to confirm\n',
-        'I want to release this'
+        f'Would you like to release {ARTIFACTS} as {VERSION} in {env.colour(env_part)}? Type "I want to release this to {env.name}" to confirm\n',
+        f'I want to release this to {env.name}'
     )
 
+    docker_images = list((ARTIFACTS / f'lix/lix-{VERSION}').glob(f'lix-{VERSION}-docker-image-*.tar.gz'))
+    assert docker_images
+
     print('[+] Upload to cache')
     with open(DRVS_TXT) as fh:
-        upload_drv_paths_and_outputs([x.strip() for x in fh.readlines() if x])
+        upload_drv_paths_and_outputs(env, [x.strip() for x in fh.readlines() if x])
+
+    print('[+] Upload docker images')
+    for target in env.docker_targets:
+        docker.upload_docker_images(target, docker_images)
 
     print('[+] Upload to release bucket')
-    aws s3 cp --recursive @(ARTIFACTS)/ @(RELEASES_BUCKET)/
+    aws s3 cp --recursive @(ARTIFACTS)/ @(env.releases_bucket)/
     print('[+] Upload manual')
-    upload_manual()
+    upload_manual(env)
 
     print('[+] git push tag')
-    git push @(['-f'] if force_push_tag else []) @(REPO) f'{VERSION}:refs/tags/{VERSION}'
+    git push @(['-f'] if force_push_tag else []) @(env.git_repo) f'{VERSION}:refs/tags/{VERSION}'
 
 
 def do_tag_merge(force_tag=False, no_check_git=False):
@@ -290,7 +282,7 @@ def build_manual(eval_result):
     cp --no-preserve=mode -vr @(manual)/share/doc/nix @(MANUAL)
 
 
-def upload_manual():
+def upload_manual(env: RelengEnvironment):
     stable = json.loads($(nix eval --json '.#nix.officialRelease'))
     if stable:
         version = MAJOR
@@ -298,9 +290,9 @@ def upload_manual():
         version = 'nightly'
 
     print('[+] aws s3 sync manual')
-    aws s3 sync @(MANUAL)/ @(DOCS_BUCKET)/manual/lix/@(version)/
+    aws s3 sync @(MANUAL)/ @(env.docs_bucket)/manual/lix/@(version)/
     if stable:
-        aws s3 sync @(MANUAL)/ @(DOCS_BUCKET)/manual/lix/stable/
+        aws s3 sync @(MANUAL)/ @(env.docs_bucket)/manual/lix/stable/
 
 
 def build_artifacts(no_check_git=False):
@@ -318,7 +310,8 @@ def build_artifacts(no_check_git=False):
     build_manual(eval_result)
 
     with open(DRVS_TXT, 'w') as fh:
-        fh.write('\n'.join(drv_paths))
+        # don't bother putting the release tarballs themselves because they are duplicate and huge
+        fh.write('\n'.join(x['drvPath'] for x in eval_result if x['attr'] != 'lix-release-tarballs'))
 
     make_artifacts_dir(eval_result, ARTIFACTS)
     print(f'[+] Done! See {ARTIFACTS}')
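Illustration only, not part of the patch: the new drvs.txt filter assumes each eval_jobs() entry carries 'attr' and 'drvPath' keys; the concrete values below are made up to show the effect of excluding the lix-release-tarballs job:

    example_jobs = [
        {'attr': 'build.x86_64-linux', 'drvPath': '/nix/store/aaaa-lix.drv'},
        {'attr': 'lix-release-tarballs', 'drvPath': '/nix/store/bbbb-lix-release-tarballs.drv'},
    ]
    kept = [x['drvPath'] for x in example_jobs if x['attr'] != 'lix-release-tarballs']
    # kept == ['/nix/store/aaaa-lix.drv']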
Type "I want to release this to {env.name}" to confirm\n', + f'I want to release this to {env.name}' ) + docker_images = list((ARTIFACTS / f'lix/lix-{VERSION}').glob(f'lix-{VERSION}-docker-image-*.tar.gz')) + assert docker_images + print('[+] Upload to cache') with open(DRVS_TXT) as fh: - upload_drv_paths_and_outputs([x.strip() for x in fh.readlines() if x]) + upload_drv_paths_and_outputs(env, [x.strip() for x in fh.readlines() if x]) + print('[+] Upload docker images') + for target in env.docker_targets: + docker.upload_docker_images(target, docker_images) print('[+] Upload to release bucket') - aws s3 cp --recursive @(ARTIFACTS)/ @(RELEASES_BUCKET)/ + aws s3 cp --recursive @(ARTIFACTS)/ @(env.releases_bucket)/ print('[+] Upload manual') - upload_manual() + upload_manual(env) print('[+] git push tag') - git push @(['-f'] if force_push_tag else []) @(REPO) f'{VERSION}:refs/tags/{VERSION}' + git push @(['-f'] if force_push_tag else []) @(env.git_repo) f'{VERSION}:refs/tags/{VERSION}' def do_tag_merge(force_tag=False, no_check_git=False): @@ -290,7 +282,7 @@ def build_manual(eval_result): cp --no-preserve=mode -vr @(manual)/share/doc/nix @(MANUAL) -def upload_manual(): +def upload_manual(env: RelengEnvironment): stable = json.loads($(nix eval --json '.#nix.officialRelease')) if stable: version = MAJOR @@ -298,9 +290,9 @@ def upload_manual(): version = 'nightly' print('[+] aws s3 sync manual') - aws s3 sync @(MANUAL)/ @(DOCS_BUCKET)/manual/lix/@(version)/ + aws s3 sync @(MANUAL)/ @(env.docs_bucket)/manual/lix/@(version)/ if stable: - aws s3 sync @(MANUAL)/ @(DOCS_BUCKET)/manual/lix/stable/ + aws s3 sync @(MANUAL)/ @(env.docs_bucket)/manual/lix/stable/ def build_artifacts(no_check_git=False): @@ -318,7 +310,8 @@ def build_artifacts(no_check_git=False): build_manual(eval_result) with open(DRVS_TXT, 'w') as fh: - fh.write('\n'.join(drv_paths)) + # don't bother putting the release tarballs themselves because they are duplicate and huge + fh.write('\n'.join(x['drvPath'] for x in eval_result if x['attr'] != 'lix-release-tarballs')) make_artifacts_dir(eval_result, ARTIFACTS) print(f'[+] Done! See {ARTIFACTS}') diff --git a/releng/docker.xsh b/releng/docker.xsh new file mode 100644 index 000000000..20fb30cd3 --- /dev/null +++ b/releng/docker.xsh @@ -0,0 +1,74 @@ +import json +import logging +from pathlib import Path +import tempfile + +import requests + +from .environment import DockerTarget, RelengEnvironment +from .version import VERSION, MAJOR +from . import gitutils +from .docker_assemble import Registry, OCIIndex, OCIIndexItem +from . 
diff --git a/releng/docker_assemble.py b/releng/docker_assemble.py
new file mode 100644
index 000000000..ef1d8c4e6
--- /dev/null
+++ b/releng/docker_assemble.py
@@ -0,0 +1,399 @@
+from typing import Any, Literal, Optional
+import re
+from pathlib import Path
+import json
+import dataclasses
+import time
+from urllib.parse import unquote
+import urllib.request
+import logging
+
+import requests.auth
+import requests
+import xdg_base_dirs
+
+log = logging.getLogger(__name__)
+log.setLevel(logging.INFO)
+
+DEBUG_REQUESTS = False
+if DEBUG_REQUESTS:
+    urllib3_logger = logging.getLogger('requests.packages.urllib3')
+    urllib3_logger.setLevel(logging.DEBUG)
+    urllib3_logger.propagate = True
+
+# So, there is a bunch of confusing stuff happening in this file. The gist of why it's Like This is:
+#
+# nix2container does not concern itself with tags (reasonably enough):
+# https://github.com/nlewo/nix2container/issues/59
+#
+# This is fine. But then we noticed: docker images don't play nice if you have
+# multiple architectures you want to abstract over if you don't do special
+# things. Those special things are images with manifests containing multiple
+# images.
+#
+# Docker has a data model vaguely analogous to git: you have higher level
+# objects referring to a bunch of content-addressed blobs.
+#
+# A multiarch image is more or less just a manifest that refers to more
+# manifests; in OCI it is an Index.
+#
+# See the API spec here: https://github.com/opencontainers/distribution-spec/blob/v1.0.1/spec.md#definitions
+# And the Index spec here: https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md
+#
+# skopeo doesn't *know* how to make multiarch *manifests*:
+# https://github.com/containers/skopeo/issues/1136
+#
+# There is a tool called manifest-tool that is supposed to do this
+# (https://github.com/estesp/manifest-tool) but it doesn't support putting in
+# annotations on the outer image, and I *really* didn't want to write golang to
+# fix that. Thus, a little bit of homebrew containers code.
+#
+# Essentially what we are doing in here is splatting a bunch of images into the
+# registry without tagging them (except as "temp", due to podman issues), then
+# simply sending a new composite manifest ourselves.
+
+DockerArchitecture = Literal['amd64'] | Literal['arm64']
+MANIFEST_MIME = 'application/vnd.oci.image.manifest.v1+json'
+INDEX_MIME = 'application/vnd.oci.image.index.v1+json'
+
+
+@dataclasses.dataclass(frozen=True, order=True)
+class ImageMetadata:
+    size: int
+    digest: str
+    """sha256:SOMEHEX"""
+
+
+@dataclasses.dataclass(frozen=True, order=True)
+class OCIIndexItem:
+    """Information about an untagged uploaded image."""
+
+    metadata: ImageMetadata
+
+    architecture: DockerArchitecture
+
+    os: str = 'linux'
+
+    def serialize(self):
+        return {
+            'mediaType': MANIFEST_MIME,
+            'size': self.metadata.size,
+            'digest': self.metadata.digest,
+            'platform': {
+                'architecture': self.architecture,
+                'os': self.os,
+            }
+        }
+
+
+@dataclasses.dataclass(frozen=True)
+class OCIIndex:
+    manifests: list[OCIIndexItem]
+
+    annotations: dict[str, str]
+
+    def serialize(self):
+        return {
+            'schemaVersion': 2,
+            'manifests': [item.serialize() for item in sorted(self.manifests)],
+            'annotations': self.annotations
+        }
+
+
+def docker_architecture_from_nix_system(system: str) -> DockerArchitecture:
+    MAP = {
+        'x86_64-linux': 'amd64',
+        'aarch64-linux': 'arm64',
+    }
+    return MAP[system]  # type: ignore
+
+
+@dataclasses.dataclass
+class TaggingOperation:
+    manifest: OCIIndex
+    tags: list[str]
+    """Tags this image is uploaded under"""
+
+
+runtime_dir = xdg_base_dirs.xdg_runtime_dir()
+config_dir = xdg_base_dirs.xdg_config_home()
+
+AUTH_FILES = ([runtime_dir / 'containers/auth.json'] if runtime_dir else []) + \
+    [config_dir / 'containers/auth.json', Path.home() / '.docker/config.json']
+
+
+# Copied from Werkzeug https://github.com/pallets/werkzeug/blob/62e3ea45846d06576199a2f8470be7fe44c867c1/src/werkzeug/http.py#L300-L325
+def parse_list_header(value: str) -> list[str]:
+    """Parse a header value that consists of a list of comma separated items according
+    to `RFC 9110 <https://httpwg.org/specs/rfc9110.html#abnf.extension>`__.
+
+    This extends :func:`urllib.request.parse_http_list` to remove surrounding quotes
+    from values.
+
+    .. code-block:: python
+
+        parse_list_header('token, "quoted value"')
+        ['token', 'quoted value']
+
+    This is the reverse of :func:`dump_header`.
+
+    :param value: The header value to parse.
+    """
+    result = []
+
+    for item in urllib.request.parse_http_list(value):
+        if len(item) >= 2 and item[0] == item[-1] == '"':
+            item = item[1:-1]
+
+        result.append(item)
+
+    return result
+ """ + result = [] + + for item in urllib.request.parse_http_list(value): + if len(item) >= 2 and item[0] == item[-1] == '"': + item = item[1:-1] + + result.append(item) + + return result + + +# https://www.rfc-editor.org/rfc/rfc2231#section-4 +_charset_value_re = re.compile( + r""" + ([\w!#$%&*+\-.^`|~]*)' # charset part, could be empty + [\w!#$%&*+\-.^`|~]*' # don't care about language part, usually empty + ([\w!#$%&'*+\-.^`|~]+) # one or more token chars with percent encoding + """, + re.ASCII | re.VERBOSE, +) + + +# Copied from: https://github.com/pallets/werkzeug/blob/62e3ea45846d06576199a2f8470be7fe44c867c1/src/werkzeug/http.py#L327-L394 +def parse_dict_header(value: str) -> dict[str, str | None]: + """Parse a list header using :func:`parse_list_header`, then parse each item as a + ``key=value`` pair. + + .. code-block:: python + + parse_dict_header('a=b, c="d, e", f') + {"a": "b", "c": "d, e", "f": None} + + This is the reverse of :func:`dump_header`. + + If a key does not have a value, it is ``None``. + + This handles charsets for values as described in + `RFC 2231 <https://www.rfc-editor.org/rfc/rfc2231#section-3>`__. Only ASCII, UTF-8, + and ISO-8859-1 charsets are accepted, otherwise the value remains quoted. + + :param value: The header value to parse. + + .. versionchanged:: 3.0 + Passing bytes is not supported. + + .. versionchanged:: 3.0 + The ``cls`` argument is removed. + + .. versionchanged:: 2.3 + Added support for ``key*=charset''value`` encoded items. + + .. versionchanged:: 0.9 + The ``cls`` argument was added. + """ + result: dict[str, str | None] = {} + + for item in parse_list_header(value): + key, has_value, value = item.partition("=") + key = key.strip() + + if not has_value: + result[key] = None + continue + + value = value.strip() + encoding: str | None = None + + if key[-1] == "*": + # key*=charset''value becomes key=value, where value is percent encoded + # adapted from parse_options_header, without the continuation handling + key = key[:-1] + match = _charset_value_re.match(value) + + if match: + # If there is a charset marker in the value, split it off. + encoding, value = match.groups() + assert encoding + encoding = encoding.lower() + + # A safe list of encodings. Modern clients should only send ASCII or UTF-8. + # This list will not be extended further. An invalid encoding will leave the + # value quoted. 
+ if encoding in {"ascii", "us-ascii", "utf-8", "iso-8859-1"}: + # invalid bytes are replaced during unquoting + value = unquote(value, encoding=encoding) + + if len(value) >= 2 and value[0] == value[-1] == '"': + value = value[1:-1] + + result[key] = value + + return result + + +def parse_www_authenticate(www_authenticate): + scheme, _, rest = www_authenticate.partition(' ') + scheme = scheme.lower() + rest = rest.strip() + + parsed = parse_dict_header(rest.rstrip('=')) + return parsed + + +class AuthState: + + def __init__(self, auth_files: list[Path] = AUTH_FILES): + self.auth_map: dict[str, str] = {} + for f in auth_files: + self.auth_map.update(AuthState.load_auth_file(f)) + self.token_cache: dict[str, str] = {} + + @staticmethod + def load_auth_file(path: Path) -> dict[str, str]: + if path.exists(): + with path.open() as fh: + try: + json_obj = json.load(fh) + return {k: v['auth'] for k, v in json_obj['auths'].items()} + except (json.JSONDecodeError, KeyError) as e: + log.exception('JSON decode error in %s', path, exc_info=e) + return {} + + def get_token(self, hostname: str) -> Optional[str]: + return self.token_cache.get(hostname) + + def obtain_token(self, session: requests.Session, token_endpoint: str, + scope: str, service: str, image_path: str) -> str: + authority, _, _ = image_path.partition('/') + if tok := self.get_token(authority): + return tok + + creds = self.find_credential_for(image_path) + if not creds: + raise ValueError('No credentials available for ' + image_path) + + resp = session.get(token_endpoint, + params={ + 'client_id': 'lix-releng', + 'scope': scope, + 'service': service, + }, + headers={ + 'Authorization': 'Basic ' + creds + }).json() + token = resp['token'] + self.token_cache[service] = token + return token + + def find_credential_for(self, image_path: str): + trails = image_path.split('/') + for i in range(len(trails)): + prefix = '/'.join(trails[:len(trails) - i]) + if prefix in self.auth_map: + return self.auth_map[prefix] + + return None + + +class RegistryAuthenticator(requests.auth.AuthBase): + """Authenticates to an OCI compliant registry""" + + def __init__(self, auth_state: AuthState, session: requests.Session, + image: str): + self.auth_map: dict[str, str] = {} + self.image = image + self.session = session + self.auth_state = auth_state + + def response_hook(self, r: requests.Response, + **kwargs: Any) -> requests.Response: + if r.status_code == 401: + www_authenticate = r.headers.get('www-authenticate', '').lower() + parsed = parse_www_authenticate(www_authenticate) + assert parsed + + tok = self.auth_state.obtain_token( + self.session, + parsed['realm'], # type: ignore + parsed['scope'], # type: ignore + parsed['service'], # type: ignore + self.image) + + new_req = r.request.copy() + new_req.headers['Authorization'] = 'Bearer ' + tok + + return self.session.send(new_req) + else: + return r + + def __call__(self, + r: requests.PreparedRequest) -> requests.PreparedRequest: + authority, _, _ = self.image.partition('/') + auth_may = self.auth_state.get_token(authority) + + if auth_may: + r.headers['Authorization'] = 'Bearer ' + auth_may + + r.register_hook('response', self.response_hook) + return r + + +class Registry: + + def __init__(self, session: requests.Session): + self.auth_state = AuthState() + self.session = session + + def image_info(self, image_path: str, manifest_id: str) -> ImageMetadata: + authority, _, path = image_path.partition('/') + resp = self.session.head( + f'https://{authority}/v2/{path}/manifests/{manifest_id}', + 
+
+
+class Registry:
+
+    def __init__(self, session: requests.Session):
+        self.auth_state = AuthState()
+        self.session = session
+
+    def image_info(self, image_path: str, manifest_id: str) -> ImageMetadata:
+        authority, _, path = image_path.partition('/')
+        resp = self.session.head(
+            f'https://{authority}/v2/{path}/manifests/{manifest_id}',
+            headers={'Accept': MANIFEST_MIME},
+            auth=RegistryAuthenticator(self.auth_state, self.session,
+                                       image_path))
+        resp.raise_for_status()
+        return ImageMetadata(int(resp.headers['content-length']),
+                             resp.headers['docker-content-digest'])
+
+    def delete_tag(self, image_path: str, tag: str):
+        authority, _, path = image_path.partition('/')
+        resp = self.session.delete(
+            f'https://{authority}/v2/{path}/manifests/{tag}',
+            headers={'Content-Type': INDEX_MIME},
+            auth=RegistryAuthenticator(self.auth_state, self.session,
+                                       image_path))
+        resp.raise_for_status()
+
+    def _upload_index(self, image_path: str, tag: str, index: OCIIndex):
+        authority, _, path = image_path.partition('/')
+        body = json.dumps(index.serialize(),
+                          separators=(',', ':'),
+                          sort_keys=True)
+
+        resp = self.session.put(
+            f'https://{authority}/v2/{path}/manifests/{tag}',
+            data=body,
+            headers={'Content-Type': INDEX_MIME},
+            auth=RegistryAuthenticator(self.auth_state, self.session,
+                                       image_path))
+        resp.raise_for_status()
+
+        return resp.headers['Location']
+
+    def upload_index(self,
+                     image_path: str,
+                     tag: str,
+                     index: OCIIndex,
+                     retries=20,
+                     retry_delay=1):
+        # eventual consistency lmao
+        for _ in range(retries):
+            try:
+                return self._upload_index(image_path, tag, index)
+            except requests.HTTPError as e:
+                if e.response.status_code != 404:
+                    raise
+
+                time.sleep(retry_delay)
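Illustration only, not part of the patch: the composite manifest that OCIIndex.serialize() produces and Registry.upload_index() pushes to each tag. The size, digest and annotation values below are placeholders; in the release flow the annotations come from the image's Labels:

    from releng.docker_assemble import ImageMetadata, OCIIndexItem, OCIIndex

    item = OCIIndexItem(metadata=ImageMetadata(size=1234, digest='sha256:0123abcd'),
                        architecture='amd64')
    index = OCIIndex(manifests=[item], annotations={'org.opencontainers.image.version': '2.90.0'})
    index.serialize()
    # {'schemaVersion': 2,
    #  'manifests': [{'mediaType': 'application/vnd.oci.image.manifest.v1+json',
    #                 'size': 1234, 'digest': 'sha256:0123abcd',
    #                 'platform': {'architecture': 'amd64', 'os': 'linux'}}],
    #  'annotations': {'org.opencontainers.image.version': '2.90.0'}}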
diff --git a/releng/environment.py b/releng/environment.py
index 58633d548..ca8194fe5 100644
--- a/releng/environment.py
+++ b/releng/environment.py
@@ -1,5 +1,9 @@
-import dataclasses
+from typing import Callable
 import urllib.parse
+import re
+import functools
+import subprocess
+import dataclasses
 
 S3_HOST = 's3.lix.systems'
 S3_ENDPOINT = 'https://s3.lix.systems'
@@ -17,8 +21,31 @@ DEFAULT_STORE_URI_BITS = {
 
 
 @dataclasses.dataclass
+class DockerTarget:
+    registry_path: str
+    """Registry path without the tag, e.g. ghcr.io/lix-project/lix"""
+
+    tags: list[str]
+    """List of tags this image should take. There must be at least one."""
+
+    @staticmethod
+    def resolve(item: str, version: str, major: str) -> str:
+        """
+        Applies templates:
+        - version: the Lix version e.g. 2.90.0
+        - major: the major Lix version e.g. 2.90
+        """
+        return item.format(version=version, major=major)
+
+    def registry_name(self) -> str:
+        [a, _, _] = self.registry_path.partition('/')
+        return a
+
+
+@dataclasses.dataclass
 class RelengEnvironment:
     name: str
+    colour: Callable[[str], str]
 
     cache_store_overlay: dict[str, str]
     cache_bucket: str
@@ -26,22 +53,79 @@ class RelengEnvironment:
     docs_bucket: str
     git_repo: str
 
+    docker_targets: list[DockerTarget]
+
     def cache_store_uri(self):
         qs = DEFAULT_STORE_URI_BITS.copy()
         qs.update(self.cache_store_overlay)
         return self.cache_bucket + "?" + urllib.parse.urlencode(qs)
+
+
+SGR = '\x1b['
+RED = '31;1m'
+GREEN = '32;1m'
+RESET = '0m'
+
+
+def sgr(colour: str, text: str) -> str:
+    return f'{SGR}{colour}{text}{SGR}{RESET}'
+
+
 STAGING = RelengEnvironment(
     name='staging',
+    colour=functools.partial(sgr, GREEN),
     docs_bucket='s3://staging-docs',
     cache_bucket='s3://staging-cache',
-    cache_store_overlay={
-        'secret-key': 'staging.key'
-    },
+    cache_store_overlay={'secret-key': 'staging.key'},
     releases_bucket='s3://staging-releases',
     git_repo='ssh://git@git.lix.systems/lix-project/lix-releng-staging',
+    docker_targets=[
+        # latest will be auto tagged if appropriate
+        DockerTarget('git.lix.systems/lix-project/lix-releng-staging',
+                     tags=['{version}', '{major}']),
+        DockerTarget('ghcr.io/lix-project/lix-releng-staging',
+                     tags=['{version}', '{major}']),
+    ],
 )
 
+GERRIT_REMOTE_RE = re.compile(r'^ssh://(\w+@)?gerrit.lix.systems:2022/lix$')
+
+
+def guess_gerrit_remote():
+    """
+    Deals with people having unknown gerrit username.
+    """
+    out = [
+        x.split()[1] for x in subprocess.check_output(
+            ['git', 'remote', '-v']).decode().splitlines()
+    ]
+    return next(x for x in out if GERRIT_REMOTE_RE.match(x))
+
+
+PROD = RelengEnvironment(
+    name='production',
+    colour=functools.partial(sgr, RED),
+    docs_bucket='s3://docs',
+    cache_bucket='s3://cache',
+    # FIXME: we should decrypt this with age into a tempdir in the future, but
+    # the issue is how to deal with the recipients file. For now, we should
+    # just delete it after doing a release.
+    cache_store_overlay={'secret-key': 'prod.key'},
+    releases_bucket='s3://releases',
+    git_repo=guess_gerrit_remote(),
+    docker_targets=[
+        # latest will be auto tagged if appropriate
+        DockerTarget('git.lix.systems/lix-project/lix',
+                     tags=['{version}', '{major}']),
+        DockerTarget('ghcr.io/lix-project/lix', tags=['{version}', '{major}']),
+    ],
+)
+
+ENVIRONMENTS = {
+    'staging': STAGING,
+    'production': PROD,
+}
+
 
 @dataclasses.dataclass
 class S3Credentials:
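Illustration only, not part of the patch: env.colour wraps the confirmation text in the environment's ANSI colour, so staging and production prompts are visually distinct:

    from releng.environment import sgr, GREEN, RED

    sgr(GREEN, 'environment staging')      # '\x1b[32;1menvironment staging\x1b[0m'
    sgr(RED, 'environment production')     # '\x1b[31;1menvironment production\x1b[0m'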
diff --git a/releng/gitutils.xsh b/releng/gitutils.xsh
new file mode 100644
index 000000000..3352a6b21
--- /dev/null
+++ b/releng/gitutils.xsh
@@ -0,0 +1,37 @@
+import subprocess
+import json
+
+
+def version_compare(v1: str, v2: str):
+    return json.loads($(nix-instantiate --eval --json --argstr v1 @(v1) --argstr v2 @(v2) --expr '{v1, v2}: builtins.compareVersions v1 v2'))
+
+
+def latest_tag_on_branch(branch: str) -> str:
+    return $(git describe --abbrev=0 @(branch) e>/dev/null).strip()
+
+
+def is_maintenance_branch(branch: str) -> bool:
+    try:
+        main_tag = latest_tag_on_branch('main')
+        current_tag = latest_tag_on_branch(branch)
+
+        return version_compare(current_tag, main_tag) < 0
+    except subprocess.CalledProcessError:
+        # This is the case before Lix releases 2.90, since main *has* no
+        # release tag on it.
+        # FIXME: delete this case after 2.91
+        return False
+
+
+def verify_are_on_tag():
+    current_tag = $(git describe --tag).strip()
+    assert current_tag == VERSION
+
+
+def git_preconditions():
+    # verify there is nothing in index ready to stage
+    proc = !(git diff-index --quiet --cached HEAD --)
+    assert proc.rtn == 0
+    # verify there is nothing *stageable* and tracked
+    proc = !(git diff-files --quiet)
+    assert proc.rtn == 0
diff --git a/releng/release-jobs.nix b/releng/release-jobs.nix
index e693a3a81..4db0baed3 100644
--- a/releng/release-jobs.nix
+++ b/releng/release-jobs.nix
@@ -33,7 +33,7 @@ let
       targetName = "*.tar.xz";
     }) systems
     ++ builtins.map (system: {
-      target = hydraJobs.dockerImage.${system};
+      target = hydraJobs.dockerImage.${system}.tarball;
       targetName = "image.tar.gz";
       rename = "lix-${lix.version}-docker-image-${system}.tar.gz";
    }) dockerSystems;
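Illustration only, not part of the patch: the maintenance-branch check in gitutils.xsh compares the newest tag reachable from the branch against the newest tag on main via Nix's builtins.compareVersions, so for example:

    version_compare('2.89.1', '2.90.0')   # -1: a 2.89 branch is a maintenance branch, no 'latest' tag
    version_compare('2.90.0', '2.90.0')   #  0: the current release branch still gets 'latest'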