117 files changed, 2156 insertions, 735 deletions
diff --git a/.github/STALE-BOT.md b/.github/STALE-BOT.md index 5e8f5d929..383717bfc 100644 --- a/.github/STALE-BOT.md +++ b/.github/STALE-BOT.md @@ -3,7 +3,7 @@ - Thanks for your contribution! - To remove the stale label, just leave a new comment. - _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.) -- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on the [#nixos IRC channel](https://webchat.freenode.net/#nixos). +- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org). ## Suggestions for PRs diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 17a79dc97..b2b1f07fb 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -20,8 +20,7 @@ jobs: name: '${{ env.CACHIX_NAME }}' signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' - #- run: nix flake check - - run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi) + - run: nix-build -A checks.$(nix-instantiate --eval -E '(builtins.currentSystem)') check_cachix: name: Cachix secret present for installer tests runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 37aada307..2e14561fe 100644 --- a/.gitignore +++ b/.gitignore @@ -82,6 +82,7 @@ perl/Makefile.config /tests/shell /tests/shell.drv /tests/config.nix +/tests/ca/config.nix # /tests/lang/ /tests/lang/*.out @@ -12,6 +12,8 @@ makefiles = \ src/resolve-system-dependencies/local.mk \ scripts/local.mk \ misc/bash/local.mk \ + misc/fish/local.mk \ + misc/zsh/local.mk \ misc/systemd/local.mk \ misc/launchd/local.mk \ misc/upstart/local.mk \ @@ -28,7 +28,8 @@ build nix from source with nix-build or how to get a development environment. - [Nix manual](https://nixos.org/nix/manual) - [Nix jobsets on hydra.nixos.org](https://hydra.nixos.org/project/nix) - [NixOS Discourse](https://discourse.nixos.org/) -- [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos) +- [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org) +- [IRC - #nixos on libera.chat](irc://irc.libera.chat/#nixos) ## License diff --git a/boehmgc-coroutine-sp-fallback.diff b/boehmgc-coroutine-sp-fallback.diff new file mode 100644 index 000000000..fa8dd0325 --- /dev/null +++ b/boehmgc-coroutine-sp-fallback.diff @@ -0,0 +1,42 @@ +diff --git a/pthread_stop_world.c b/pthread_stop_world.c +index 1cee6a0b..46c3acd9 100644 +--- a/pthread_stop_world.c ++++ b/pthread_stop_world.c +@@ -674,6 +674,8 @@ GC_INNER void GC_push_all_stacks(void) + struct GC_traced_stack_sect_s *traced_stack_sect; + pthread_t self = pthread_self(); + word total_size = 0; ++ size_t stack_limit; ++ pthread_attr_t pattr; + + if (!EXPECT(GC_thr_initialized, TRUE)) + GC_thr_init(); +@@ -723,6 +725,28 @@ GC_INNER void GC_push_all_stacks(void) + hi = p->altstack + p->altstack_size; + /* FIXME: Need to scan the normal stack too, but how ? */ + /* FIXME: Assume stack grows down */ ++ } else { ++ if (pthread_getattr_np(p->id, &pattr)) { ++ ABORT("GC_push_all_stacks: pthread_getattr_np failed!"); ++ } ++ if (pthread_attr_getstacksize(&pattr, &stack_limit)) { ++ ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!"); ++ } ++ // When a thread goes into a coroutine, we lose its original sp until ++ // control flow returns to the thread. 
++ // While in the coroutine, the sp points outside the thread stack, ++ // so we can detect this and push the entire thread stack instead, ++ // as an approximation. ++ // We assume that the coroutine has similarly added its entire stack. ++ // This could be made accurate by cooperating with the application ++ // via new functions and/or callbacks. ++ #ifndef STACK_GROWS_UP ++ if (lo >= hi || lo < hi - stack_limit) { // sp outside stack ++ lo = hi - stack_limit; ++ } ++ #else ++ #error "STACK_GROWS_UP not supported in boost_coroutine2 (as of june 2021), so we don't support it in Nix." ++ #endif + } + GC_push_all_stack_sections(lo, hi, traced_stack_sect); + # ifdef STACK_GROWS_UP diff --git a/config/config.guess b/config/config.guess index 699b3a10b..1972fda8e 100755 --- a/config/config.guess +++ b/config/config.guess @@ -1,8 +1,8 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright 1992-2020 Free Software Foundation, Inc. +# Copyright 1992-2021 Free Software Foundation, Inc. -timestamp='2020-11-19' +timestamp='2021-01-25' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -50,7 +50,7 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright 1992-2020 Free Software Foundation, Inc. +Copyright 1992-2021 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -188,10 +188,9 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". - sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \ - "/sbin/$sysctl" 2>/dev/null || \ - "/usr/sbin/$sysctl" 2>/dev/null || \ + /sbin/sysctl -n hw.machine_arch 2>/dev/null || \ + /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \ echo unknown)) case "$UNAME_MACHINE_ARCH" in aarch64eb) machine=aarch64_be-unknown ;; @@ -996,6 +995,9 @@ EOF k1om:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; + loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; m32r*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; @@ -1084,7 +1086,7 @@ EOF ppcle:Linux:*:*) echo powerpcle-unknown-linux-"$LIBC" exit ;; - riscv32:Linux:*:* | riscv64:Linux:*:*) + riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; s390:Linux:*:* | s390x:Linux:*:*) @@ -1480,8 +1482,8 @@ EOF i*86:rdos:*:*) echo "$UNAME_MACHINE"-pc-rdos exit ;; - i*86:AROS:*:*) - echo "$UNAME_MACHINE"-pc-aros + *:AROS:*:*) + echo "$UNAME_MACHINE"-unknown-aros exit ;; x86_64:VMkernel:*:*) echo "$UNAME_MACHINE"-unknown-esx diff --git a/config/config.sub b/config/config.sub index 19c9553b1..63c1f1c8b 100755 --- a/config/config.sub +++ b/config/config.sub @@ -1,8 +1,8 @@ #! /bin/sh # Configuration validation subroutine script. -# Copyright 1992-2020 Free Software Foundation, Inc. +# Copyright 1992-2021 Free Software Foundation, Inc. -timestamp='2020-12-02' +timestamp='2021-01-08' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -67,7 +67,7 @@ Report bugs and patches to <config-patches@gnu.org>." 
version="\ GNU config.sub ($timestamp) -Copyright 1992-2020 Free Software Foundation, Inc. +Copyright 1992-2021 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -1185,6 +1185,7 @@ case $cpu-$vendor in | k1om \ | le32 | le64 \ | lm32 \ + | loongarch32 | loongarch64 | loongarchx32 \ | m32c | m32r | m32rle \ | m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \ | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \ @@ -1229,7 +1230,7 @@ case $cpu-$vendor in | powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \ | pru \ | pyramid \ - | riscv | riscv32 | riscv64 \ + | riscv | riscv32 | riscv32be | riscv64 | riscv64be \ | rl78 | romp | rs6000 | rx \ | s390 | s390x \ | score \ @@ -1682,11 +1683,14 @@ fi # Now, validate our (potentially fixed-up) OS. case $os in - # Sometimes we do "kernel-abi", so those need to count as OSes. + # Sometimes we do "kernel-libc", so those need to count as OSes. musl* | newlib* | uclibc*) ;; - # Likewise for "kernel-libc" - eabi | eabihf | gnueabi | gnueabihf) + # Likewise for "kernel-abi" + eabi* | gnueabi*) + ;; + # VxWorks passes extra cpu info in the 4th filed. + simlinux | simwindows | spe) ;; # Now accept the basic system types. # The portable systems comes first. @@ -1750,6 +1754,8 @@ case $kernel-$os in ;; kfreebsd*-gnu* | kopensolaris*-gnu*) ;; + vxworks-simlinux | vxworks-simwindows | vxworks-spe) + ;; nto-qnx*) ;; os2-emx) diff --git a/configure.ac b/configure.ac index 6c36787f3..6e563eec3 100644 --- a/configure.ac +++ b/configure.ac @@ -1,4 +1,4 @@ -AC_INIT(nix, m4_esyscmd([bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX"])) +AC_INIT([nix],[m4_esyscmd(bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX")]) AC_CONFIG_MACRO_DIRS([m4]) AC_CONFIG_SRCDIR(README.md) AC_CONFIG_AUX_DIR(config) @@ -9,8 +9,7 @@ AC_PROG_SED AC_CANONICAL_HOST AC_MSG_CHECKING([for the canonical Nix system name]) -AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], - [Platform identifier (e.g., `i686-linux').]), +AC_ARG_WITH(system, AS_HELP_STRING([--with-system=SYSTEM],[Platform identifier (e.g., `i686-linux').]), [system=$withval], [case "$host_cpu" in i*86) @@ -66,7 +65,7 @@ AC_SYS_LARGEFILE AC_STRUCT_DIRENT_D_TYPE if test "$sys_name" = sunos; then # Solaris requires -lsocket -lnsl for network functions - LIBS="-lsocket -lnsl $LIBS" + LDFLAGS="-lsocket -lnsl $LDFLAGS" fi @@ -127,8 +126,7 @@ NEED_PROG(jq, jq) AC_SUBST(coreutils, [$(dirname $(type -p cat))]) -AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH], - [path of the Nix store (defaults to /nix/store)]), +AC_ARG_WITH(store-dir, AS_HELP_STRING([--with-store-dir=PATH],[path of the Nix store (defaults to /nix/store)]), storedir=$withval, storedir='/nix/store') AC_SUBST(storedir) @@ -152,13 +150,12 @@ int main() { }]])], GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=no, GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes) AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC) if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then - LIBS="-latomic $LIBS" + LDFLAGS="-latomic $LDFLAGS" fi PKG_PROG_PKG_CONFIG -AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared], - [Build shared libraries for Nix [default=yes]]), +AC_ARG_ENABLE(shared, AS_HELP_STRING([--enable-shared],[Build shared libraries for Nix [default=yes]]), shared=$enableval, shared=yes) if test "$shared" = yes; then AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.]) @@ -215,9 +212,8 @@ AC_SUBST(HAVE_LIBCPUID, 
[$have_libcpuid]) # Look for libseccomp, required for Linux sandboxing. if test "$sys_name" = linux; then AC_ARG_ENABLE([seccomp-sandboxing], - AC_HELP_STRING([--disable-seccomp-sandboxing], - [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)] - )) + AS_HELP_STRING([--disable-seccomp-sandboxing],[Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!) + ])) if test "x$enable_seccomp_sandboxing" != "xno"; then PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp], [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"]) @@ -235,8 +231,8 @@ AC_SUBST(HAVE_SECCOMP, [$have_seccomp]) # Look for aws-cpp-sdk-s3. AC_LANG_PUSH(C++) AC_CHECK_HEADERS([aws/s3/S3Client.h], - [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) - enable_s3=1], [enable_s3=]) + [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1], + [AC_DEFINE([ENABLE_S3], [0], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=]) AC_SUBST(ENABLE_S3, [$enable_s3]) AC_LANG_POP(C++) @@ -249,8 +245,7 @@ fi # Whether to use the Boehm garbage collector. -AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc], - [enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]), +AC_ARG_ENABLE(gc, AS_HELP_STRING([--enable-gc],[enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]), gc=$enableval, gc=yes) if test "$gc" = yes; then PKG_CHECK_MODULES([BDW_GC], [bdw-gc]) @@ -264,8 +259,7 @@ PKG_CHECK_MODULES([GTEST], [gtest_main]) # documentation generation switch -AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen], - [disable documentation generation]), +AC_ARG_ENABLE(doc-gen, AS_HELP_STRING([--disable-doc-gen],[disable documentation generation]), doc_generate=$enableval, doc_generate=yes) AC_SUBST(doc_generate) @@ -285,8 +279,7 @@ if test "$(uname)" = "Darwin"; then fi -AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH], - [path of a statically-linked shell to use as /bin/sh in sandboxes]), +AC_ARG_WITH(sandbox-shell, AS_HELP_STRING([--with-sandbox-shell=PATH],[path of a statically-linked shell to use as /bin/sh in sandboxes]), sandbox_shell=$withval) AC_SUBST(sandbox_shell) @@ -301,6 +294,6 @@ done rm -f Makefile.config -AC_CONFIG_HEADER([config.h]) +AC_CONFIG_HEADERS([config.h]) AC_CONFIG_FILES([]) AC_OUTPUT diff --git a/doc/manual/src/command-ref/env-common.md b/doc/manual/src/command-ref/env-common.md index b709ca9d1..6e2403461 100644 --- a/doc/manual/src/command-ref/env-common.md +++ b/doc/manual/src/command-ref/env-common.md @@ -10,35 +10,39 @@ Most Nix commands interpret the following environment variables: A colon-separated list of directories used to look up Nix expressions enclosed in angle brackets (i.e., `<path>`). For instance, the value - + /home/eelco/Dev:/etc/nixos - + will cause Nix to look for paths relative to `/home/eelco/Dev` and `/etc/nixos`, in this order. It is also possible to match paths against a prefix. For example, the value - + nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos - + will cause Nix to search for `<nixpkgs/path>` in `/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`. - + If a path in the Nix search path starts with `http://` or `https://`, it is interpreted as the URL of a tarball that will be downloaded and unpacked to a temporary location. The tarball must consist of a single top-level directory. 
For example, setting `NIX_PATH` to - - nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-15.09.tar.gz - - tells Nix to download the latest revision in the Nixpkgs/NixOS 15.09 - channel. - - A following shorthand can be used to refer to the official channels: - - nixpkgs=channel:nixos-15.09 - - The search path can be extended using the `-I` option, which takes - precedence over `NIX_PATH`. + + nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz + + tells Nix to download and use the current contents of the + `master` branch in the `nixpkgs` repository. + + The URLs of the tarballs from the official nixos.org channels (see + [the manual for `nix-channel`](nix-channel.md)) can be abbreviated + as `channel:<channel-name>`. For instance, the following two + values of `NIX_PATH` are equivalent: + + nixpkgs=channel:nixos-21.05 + nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz + + The Nix search path can also be extended using the `-I` option to + many Nix commands, which takes precedence over `NIX_PATH`. - `NIX_IGNORE_SYMLINK_STORE`\ Normally, the Nix store directory (typically `/nix/store`) is not @@ -50,7 +54,7 @@ Most Nix commands interpret the following environment variables: builds are deployed to machines where `/nix/store` resolves differently. If you are sure that you’re not going to do that, you can set `NIX_IGNORE_SYMLINK_STORE` to `1`. - + Note that if you’re symlinking the Nix store so that you can put it on another file system than the root file system, on Linux you’re better off using `bind` mount points, e.g., @@ -59,7 +63,7 @@ Most Nix commands interpret the following environment variables: $ mkdir /nix $ mount -o bind /mnt/otherdisk/nix /nix ``` - + Consult the mount 8 manual page for details. - `NIX_STORE_DIR`\ diff --git a/doc/manual/src/command-ref/nix-shell.md b/doc/manual/src/command-ref/nix-shell.md index dcd7cc70c..72f6730f1 100644 --- a/doc/manual/src/command-ref/nix-shell.md +++ b/doc/manual/src/command-ref/nix-shell.md @@ -78,9 +78,7 @@ All options not listed here are passed to `nix-store cleared before the interactive shell is started, so you get an environment that more closely corresponds to the “real” Nix build. A few variables, in particular `HOME`, `USER` and `DISPLAY`, are - retained. Note that (depending on your Bash - installation) `/etc/bashrc` is still sourced, so any variables set - there will affect the interactive shell. + retained. - `--packages` / `-p` *packages*…\ Set up an environment in which the specified packages are present. 
diff --git a/flake.lock b/flake.lock index 06c507e7d..5fc969d7b 100644 --- a/flake.lock +++ b/flake.lock @@ -19,16 +19,16 @@ }, "nixpkgs": { "locked": { - "lastModified": 1614309161, - "narHash": "sha256-93kRxDPyEW9QIpxU71kCaV1r+hgOgP6/aVgC7vvO8IU=", + "lastModified": 1624862269, + "narHash": "sha256-JFcsh2+7QtfKdJFoPibLFPLgIW6Ycnv8Bts9a7RYme0=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "0e499fde7af3c28d63e9b13636716b86c3162b93", + "rev": "f77036342e2b690c61c97202bf48f2ce13acc022", "type": "github" }, "original": { "id": "nixpkgs", - "ref": "nixos-20.09-small", + "ref": "nixos-21.05-small", "type": "indirect" } }, @@ -1,7 +1,7 @@ { description = "The purely functional package manager"; - inputs.nixpkgs.url = "nixpkgs/nixos-20.09-small"; + inputs.nixpkgs.url = "nixpkgs/nixos-21.05-small"; inputs.lowdown-src = { url = "github:kristapsdz/lowdown/VERSION_0_8_4"; flake = false; }; outputs = { self, nixpkgs, lowdown-src }: @@ -18,7 +18,9 @@ linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ]; linuxSystems = linux64BitSystems ++ [ "i686-linux" ]; - systems = linuxSystems ++ [ "x86_64-darwin" ]; + systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ]; + + crossSystems = [ "armv6l-linux" "armv7l-linux" ]; forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system); @@ -79,7 +81,7 @@ buildPackages.mercurial buildPackages.jq ] - ++ lib.optionals stdenv.isLinux [(pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)]; + ++ lib.optionals stdenv.hostPlatform.isLinux [(buildPackages.util-linuxMinimal or buildPackages.utillinuxMinimal)]; buildDeps = [ curl @@ -93,7 +95,7 @@ ] ++ lib.optionals stdenv.isLinux [libseccomp] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium - ++ lib.optional stdenv.isx86_64 libcpuid; + ++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid; awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin) (aws-sdk-cpp.override { @@ -102,7 +104,13 @@ }); propagatedDeps = - [ (boehmgc.override { enableLargeConfig = true; }) + [ ((boehmgc.override { + enableLargeConfig = true; + }).overrideAttrs(o: { + patches = (o.patches or []) ++ [ + ./boehmgc-coroutine-sp-fallback.diff + ]; + })) ]; perlDeps = @@ -133,10 +141,11 @@ substitute ${./scripts/install.in} $out/install \ ${pkgs.lib.concatMapStrings - (system: - '' \ - --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \ - --replace '@tarballPath_${system}@' $(tarballPath ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \ + (system: let + tarball = if builtins.elem system crossSystems then self.hydraJobs.binaryTarballCross.x86_64-linux.${system} else self.hydraJobs.binaryTarball.${system}; + in '' \ + --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \ + --replace '@tarballPath_${system}@' $(tarballPath ${tarball}/*.tar.xz) \ '' ) systems @@ -174,6 +183,77 @@ }; + binaryTarball = buildPackages: nix: pkgs: let + inherit (pkgs) cacert; + installerClosureInfo = buildPackages.closureInfo { rootPaths = [ nix cacert ]; }; + in + + buildPackages.runCommand "nix-binary-tarball-${version}" + { #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck; + meta.description = "Distribution-independent Nix bootstrap binaries for ${pkgs.system}"; + } + '' + cp ${installerClosureInfo}/registration $TMPDIR/reginfo + cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh + substitute 
${./scripts/install-nix-from-closure.sh} $TMPDIR/install \ + --subst-var-by nix ${nix} \ + --subst-var-by cacert ${cacert} + + substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \ + --subst-var-by nix ${nix} \ + --subst-var-by cacert ${cacert} + substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \ + --subst-var-by nix ${nix} \ + --subst-var-by cacert ${cacert} + substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \ + --subst-var-by nix ${nix} \ + --subst-var-by cacert ${cacert} + + if type -p shellcheck; then + # SC1090: Don't worry about not being able to find + # $nix/etc/profile.d/nix.sh + shellcheck --exclude SC1090 $TMPDIR/install + shellcheck $TMPDIR/create-darwin-volume.sh + shellcheck $TMPDIR/install-darwin-multi-user.sh + shellcheck $TMPDIR/install-systemd-multi-user.sh + + # SC1091: Don't panic about not being able to source + # /etc/profile + # SC2002: Ignore "useless cat" "error", when loading + # .reginfo, as the cat is a much cleaner + # implementation, even though it is "useless" + # SC2116: Allow ROOT_HOME=$(echo ~root) for resolving + # root's home directory + shellcheck --external-sources \ + --exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user + fi + + chmod +x $TMPDIR/install + chmod +x $TMPDIR/create-darwin-volume.sh + chmod +x $TMPDIR/install-darwin-multi-user.sh + chmod +x $TMPDIR/install-systemd-multi-user.sh + chmod +x $TMPDIR/install-multi-user + dir=nix-${version}-${pkgs.system} + fn=$out/$dir.tar.xz + mkdir -p $out/nix-support + echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products + tar cvfJ $fn \ + --owner=0 --group=0 --mode=u+rw,uga+r \ + --absolute-names \ + --hard-dereference \ + --transform "s,$TMPDIR/install,$dir/install," \ + --transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \ + --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \ + --transform "s,$NIX_STORE,$dir/store,S" \ + $TMPDIR/install \ + $TMPDIR/create-darwin-volume.sh \ + $TMPDIR/install-darwin-multi-user.sh \ + $TMPDIR/install-systemd-multi-user.sh \ + $TMPDIR/install-multi-user \ + $TMPDIR/reginfo \ + $(cat ${installerClosureInfo}/store-paths) + ''; + in { # A Nixpkgs overlay that overrides the 'nix' and @@ -256,7 +336,8 @@ boost nlohmann_json ] - ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium; + ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium + ++ lib.optional stdenv.isDarwin darwin.apple_sdk.frameworks.Security; configureFlags = '' --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix} @@ -284,10 +365,10 @@ outputs = [ "out" "bin" "dev" ]; - nativeBuildInputs = [ which ]; + nativeBuildInputs = [ buildPackages.which ]; - configurePhase = - '' + configurePhase = '' + ${if (stdenv.isDarwin && stdenv.isAarch64) then "echo \"HAVE_SANDBOX_INIT=false\" > configure.local" else ""} ./configure \ PREFIX=${placeholder "dev"} \ BINDIR=${placeholder "bin"}/bin @@ -303,92 +384,33 @@ buildStatic = nixpkgs.lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static); + buildCross = nixpkgs.lib.genAttrs crossSystems (crossSystem: + nixpkgs.lib.genAttrs ["x86_64-linux"] (system: self.packages.${system}."nix-${crossSystem}")); + # Perl bindings for various platforms. perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings); # Binary tarball for various platforms, containing a Nix store # with the closure of 'nix' package, and the second half of # the installation script. 
- binaryTarball = nixpkgs.lib.genAttrs systems (system: - - with nixpkgsFor.${system}; - - let - installerClosureInfo = closureInfo { rootPaths = [ nix cacert ]; }; - in - - runCommand "nix-binary-tarball-${version}" - { #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck; - meta.description = "Distribution-independent Nix bootstrap binaries for ${system}"; - } - '' - cp ${installerClosureInfo}/registration $TMPDIR/reginfo - cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh - substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \ - --subst-var-by nix ${nix} \ - --subst-var-by cacert ${cacert} - - substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \ - --subst-var-by nix ${nix} \ - --subst-var-by cacert ${cacert} - substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \ - --subst-var-by nix ${nix} \ - --subst-var-by cacert ${cacert} - substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \ - --subst-var-by nix ${nix} \ - --subst-var-by cacert ${cacert} - - if type -p shellcheck; then - # SC1090: Don't worry about not being able to find - # $nix/etc/profile.d/nix.sh - shellcheck --exclude SC1090 $TMPDIR/install - shellcheck $TMPDIR/create-darwin-volume.sh - shellcheck $TMPDIR/install-darwin-multi-user.sh - shellcheck $TMPDIR/install-systemd-multi-user.sh - - # SC1091: Don't panic about not being able to source - # /etc/profile - # SC2002: Ignore "useless cat" "error", when loading - # .reginfo, as the cat is a much cleaner - # implementation, even though it is "useless" - # SC2116: Allow ROOT_HOME=$(echo ~root) for resolving - # root's home directory - shellcheck --external-sources \ - --exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user - fi - - chmod +x $TMPDIR/install - chmod +x $TMPDIR/create-darwin-volume.sh - chmod +x $TMPDIR/install-darwin-multi-user.sh - chmod +x $TMPDIR/install-systemd-multi-user.sh - chmod +x $TMPDIR/install-multi-user - dir=nix-${version}-${system} - fn=$out/$dir.tar.xz - mkdir -p $out/nix-support - echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products - tar cvfJ $fn \ - --owner=0 --group=0 --mode=u+rw,uga+r \ - --absolute-names \ - --hard-dereference \ - --transform "s,$TMPDIR/install,$dir/install," \ - --transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \ - --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \ - --transform "s,$NIX_STORE,$dir/store,S" \ - $TMPDIR/install \ - $TMPDIR/create-darwin-volume.sh \ - $TMPDIR/install-darwin-multi-user.sh \ - $TMPDIR/install-systemd-multi-user.sh \ - $TMPDIR/install-multi-user \ - $TMPDIR/reginfo \ - $(cat ${installerClosureInfo}/store-paths) - ''); + binaryTarball = nixpkgs.lib.genAttrs systems (system: binaryTarball nixpkgsFor.${system} nixpkgsFor.${system}.nix nixpkgsFor.${system}); + + binaryTarballCross = nixpkgs.lib.genAttrs ["x86_64-linux"] (system: builtins.listToAttrs (map (crossSystem: { + name = crossSystem; + value = let + nixpkgsCross = import nixpkgs { + inherit system crossSystem; + overlays = [ self.overlay ]; + }; + in binaryTarball nixpkgsFor.${system} self.packages.${system}."nix-${crossSystem}" nixpkgsCross; + }) crossSystems)); # The first half of the installation script. This is uploaded # to https://nixos.org/nix/install. It downloads the binary # tarball for the user's system and calls the second half of the # installation script. 
- installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]; - installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" ]; + installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "armv6l-linux" "armv7l-linux" ]; + installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" "armv6l-linux" "armv7l-linux"]; # Line coverage analysis. coverage = @@ -482,11 +504,14 @@ # `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work # againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable; } "touch $out"; - }); + } // (if system == "x86_64-linux" then (builtins.listToAttrs (map (crossSystem: { + name = "binaryTarball-${crossSystem}"; + value = self.hydraJobs.binaryTarballCross.${system}.${crossSystem}; + }) crossSystems)) else {})); packages = forAllSystems (system: { inherit (nixpkgsFor.${system}) nix; - } // nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) { + } // (nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) { nix-static = let nixpkgs = nixpkgsFor.${system}.pkgsStatic; in with commonDeps nixpkgs; nixpkgs.stdenv.mkDerivation { @@ -524,8 +549,49 @@ stripAllList = ["bin"]; strictDeps = true; + + hardeningDisable = [ "pie" ]; + }; + } // builtins.listToAttrs (map (crossSystem: { + name = "nix-${crossSystem}"; + value = let + nixpkgsCross = import nixpkgs { + inherit system crossSystem; + overlays = [ self.overlay ]; + }; + in with commonDeps nixpkgsCross; nixpkgsCross.stdenv.mkDerivation { + name = "nix-${version}"; + + src = self; + + VERSION_SUFFIX = versionSuffix; + + outputs = [ "out" "dev" "doc" ]; + + nativeBuildInputs = nativeBuildDeps; + buildInputs = buildDeps ++ propagatedDeps; + + configureFlags = [ "--sysconfdir=/etc" "--disable-doc-gen" ]; + + enableParallelBuilding = true; + + makeFlags = "profiledir=$(out)/etc/profile.d"; + + doCheck = true; + + installFlags = "sysconfdir=$(out)/etc"; + + postInstall = '' + mkdir -p $doc/nix-support + echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products + mkdir -p $out/nix-support + echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products + ''; + + doInstallCheck = true; + installCheckFlags = "sysconfdir=$(out)/etc"; }; - }); + }) crossSystems))); defaultPackage = forAllSystems (system: self.packages.${system}.nix); diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl index 6f3882a12..3a8d27499 100755 --- a/maintainers/upload-release.pl +++ b/maintainers/upload-release.pl @@ -110,6 +110,9 @@ downloadFile("binaryTarball.i686-linux", "1"); downloadFile("binaryTarball.x86_64-linux", "1"); downloadFile("binaryTarball.aarch64-linux", "1"); downloadFile("binaryTarball.x86_64-darwin", "1"); +downloadFile("binaryTarball.aarch64-darwin", "1"); +downloadFile("binaryTarballCross.x86_64-linux.armv6l-linux", "1"); +downloadFile("binaryTarballCross.x86_64-linux.armv7l-linux", "1"); downloadFile("installerScript", "1"); for my $fn (glob "$tmpDir/*") { @@ -133,20 +136,8 @@ for my $fn (glob "$tmpDir/*") { exit if $version =~ /pre/; -# Update Nixpkgs in a very hacky way. +# Update nix-fallback-paths.nix. 
system("cd $nixpkgsDir && git pull") == 0 or die; -my $oldName = `nix-instantiate --eval $nixpkgsDir -A nix.name`; chomp $oldName; -my $oldHash = `nix-instantiate --eval $nixpkgsDir -A nix.src.outputHash`; chomp $oldHash; -print STDERR "old stable version in Nixpkgs = $oldName / $oldHash\n"; - -my $fn = "$nixpkgsDir/pkgs/tools/package-management/nix/default.nix"; -my $oldFile = read_file($fn); -$oldFile =~ s/$oldName/"$releaseName"/g; -$oldFile =~ s/$oldHash/"$tarballHash"/g; -write_file($fn, $oldFile); - -$oldName =~ s/nix-//g; -$oldName =~ s/"//g; sub getStorePath { my ($jobName) = @_; @@ -165,9 +156,10 @@ write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix", " i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" . " aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" . " x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" . + " aarch64-darwin = \"" . getStorePath("build.aarch64-darwin") . "\";\n" . "}\n"); -system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die; +system("cd $nixpkgsDir && git commit -a -m 'nix-fallback-paths.nix: Update to $version'") == 0 or die; # Update the "latest" symlink. $channelsBucket->add_key( diff --git a/misc/fish/completion.fish b/misc/fish/completion.fish new file mode 100644 index 000000000..bedbefaf8 --- /dev/null +++ b/misc/fish/completion.fish @@ -0,0 +1,37 @@ +function _nix_complete + # Get the current command up to a cursor. + # - Behaves correctly even with pipes and nested in commands like env. + # - TODO: Returns the command verbatim (does not interpolate variables). + # That might not be optimal for arguments like -f. + set -l nix_args (commandline --current-process --tokenize --cut-at-cursor) + # --cut-at-cursor with --tokenize removes the current token so we need to add it separately. + # https://github.com/fish-shell/fish-shell/issues/7375 + # Can be an empty string. + set -l current_token (commandline --current-token --cut-at-cursor) + + # Nix wants the index of the argv item to complete but the $nix_args variable + # also contains the program name (argv[0]) so we would need to subtract 1. + # But the variable also misses the current token so it cancels out. + set -l nix_arg_to_complete (count $nix_args) + + env NIX_GET_COMPLETIONS=$nix_arg_to_complete $nix_args $current_token +end + +function _nix_accepts_files + set -l response (_nix_complete) + # First line is either filenames or no-filenames. + test $response[1] = 'filenames' +end + +function _nix + set -l response (_nix_complete) + # Skip the first line since it handled by _nix_accepts_files. + # Tail lines each contain a command followed by a tab character and, optionally, a description. + # This is also the format fish expects. + string collect -- $response[2..-1] +end + +# Disable file path completion if paths do not belong in the current context. 
+complete --command nix --condition 'not _nix_accepts_files' --no-files + +complete --command nix --arguments '(_nix)' diff --git a/misc/fish/local.mk b/misc/fish/local.mk new file mode 100644 index 000000000..ece899fc3 --- /dev/null +++ b/misc/fish/local.mk @@ -0,0 +1 @@ +$(eval $(call install-file-as, $(d)/completion.fish, $(datarootdir)/fish/vendor_completions.d/nix.fish, 0644)) diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in index c334639e2..f1b439840 100644 --- a/misc/launchd/org.nixos.nix-daemon.plist.in +++ b/misc/launchd/org.nixos.nix-daemon.plist.in @@ -19,7 +19,7 @@ <array> <string>/bin/sh</string> <string>-c</string> - <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon && /nix/var/nix/profiles/default/bin/nix-daemon</string> + <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon && exec /nix/var/nix/profiles/default/bin/nix-daemon</string> </array> <key>StandardErrorPath</key> <string>/var/log/nix-daemon.log</string> diff --git a/misc/zsh/completion.zsh b/misc/zsh/completion.zsh index d4df6447e..a902e37dc 100644 --- a/misc/zsh/completion.zsh +++ b/misc/zsh/completion.zsh @@ -1,3 +1,5 @@ +#compdef nix + function _nix() { local ifs_bk="$IFS" local input=("${(Q)words[@]}") @@ -18,4 +20,4 @@ function _nix() { _describe 'nix' suggestions } -compdef _nix nix +_nix "$@" diff --git a/misc/zsh/local.mk b/misc/zsh/local.mk new file mode 100644 index 000000000..418fb1377 --- /dev/null +++ b/misc/zsh/local.mk @@ -0,0 +1 @@ +$(eval $(call install-file-as, $(d)/completion.zsh, $(datarootdir)/zsh/site-functions/_nix, 0644)) diff --git a/nix-rust/local.mk b/nix-rust/local.mk index 50db4783c..9650cdf93 100644 --- a/nix-rust/local.mk +++ b/nix-rust/local.mk @@ -8,8 +8,13 @@ endif libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT) libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT) -libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust -ldl -libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust -ldl +libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust +libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust + +ifeq ($(OS), Linux) +libnixrust_LDFLAGS_USE += -ldl +libnixrust_LDFLAGS_USE_INSTALLED += -ldl +endif ifeq ($(OS), Darwin) libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup" diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index 9b62e708f..e1046c19c 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -63,7 +63,8 @@ contact_us() { echo "You can open an issue at https://github.com/nixos/nix/issues" echo "" echo "Or feel free to contact the team:" - echo " - IRC: in #nixos on irc.freenode.net" + echo " - Matrix: #nix:nixos.org" + echo " - IRC: in #nixos on irc.libera.chat" echo " - twitter: @nixos_org" echo " - forum: https://discourse.nixos.org" } diff --git a/scripts/install.in b/scripts/install.in index 7d25f7bd7..e801d4268 100755 --- a/scripts/install.in +++ b/scripts/install.in @@ -40,21 +40,25 @@ case "$(uname -s).$(uname -m)" in path=@tarballPath_aarch64-linux@ system=aarch64-linux ;; + Linux.armv6l_linux) + hash=@tarballHash_armv6l-linux@ + path=@tarballPath_armv6l-linux@ + system=armv6l-linux + ;; + Linux.armv7l_linux) + hash=@tarballHash_armv7l-linux@ + path=@tarballPath_armv7l-linux@ + system=armv7l-linux + ;; Darwin.x86_64) hash=@tarballHash_x86_64-darwin@ path=@tarballPath_x86_64-darwin@ system=x86_64-darwin ;; Darwin.arm64|Darwin.aarch64) - # check for Rosetta 2 support - if ! 
[ -f /Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist ]; then - oops "Rosetta 2 is not installed on this ARM64 macOS machine. Run softwareupdate --install-rosetta then restart installation" - fi - - hash=@binaryTarball_x86_64-darwin@ - path=@tarballPath_x86_64-darwin@ - # eventually maybe: aarch64-darwin - system=x86_64-darwin + hash=@binaryTarball_aarch64-darwin@ + path=@tarballPath_aarch64-darwin@ + system=aarch64-darwin ;; *) oops "sorry, there is no binary distribution of Nix for your platform";; esac diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc index 57f2cd32d..0904f2ce9 100644 --- a/src/build-remote/build-remote.cc +++ b/src/build-remote/build-remote.cc @@ -277,7 +277,16 @@ connected: auto drv = store->readDerivation(*drvPath); auto outputHashes = staticOutputHashes(*store, drv); - drv.inputSrcs = store->parseStorePathSet(inputs); + + // Hijack the inputs paths of the derivation to include all the paths + // that come from the `inputDrvs` set. + // We don’t do that for the derivations whose `inputDrvs` is empty + // because + // 1. It’s not needed + // 2. Changing the `inputSrcs` set changes the associated output ids, + // which break CA derivations + if (!drv.inputDrvs.empty()) + drv.inputSrcs = store->parseStorePathSet(inputs); auto result = sshStore->buildDerivation(*drvPath, drv); diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc index 9da470c15..6777b23be 100644 --- a/src/libcmd/command.cc +++ b/src/libcmd/command.cc @@ -54,7 +54,7 @@ void StoreCommand::run() run(getStore()); } -RealisedPathsCommand::RealisedPathsCommand(bool recursive) +BuiltPathsCommand::BuiltPathsCommand(bool recursive) : recursive(recursive) { if (recursive) @@ -81,39 +81,45 @@ RealisedPathsCommand::RealisedPathsCommand(bool recursive) }); } -void RealisedPathsCommand::run(ref<Store> store) +void BuiltPathsCommand::run(ref<Store> store) { - std::vector<RealisedPath> paths; + BuiltPaths paths; if (all) { if (installables.size()) throw UsageError("'--all' does not expect arguments"); // XXX: Only uses opaque paths, ignores all the realisations for (auto & p : store->queryAllValidPaths()) - paths.push_back(p); + paths.push_back(BuiltPath::Opaque{p}); } else { - auto pathSet = toRealisedPaths(store, realiseMode, operateOn, installables); + paths = toBuiltPaths(store, realiseMode, operateOn, installables); if (recursive) { - auto roots = std::move(pathSet); - pathSet = {}; - RealisedPath::closure(*store, roots, pathSet); + // XXX: This only computes the store path closure, ignoring + // intermediate realisations + StorePathSet pathsRoots, pathsClosure; + for (auto & root: paths) { + auto rootFromThis = root.outPaths(); + pathsRoots.insert(rootFromThis.begin(), rootFromThis.end()); + } + store->computeFSClosure(pathsRoots, pathsClosure); + for (auto & path : pathsClosure) + paths.push_back(BuiltPath::Opaque{path}); } - for (auto & path : pathSet) - paths.push_back(path); } run(store, std::move(paths)); } StorePathsCommand::StorePathsCommand(bool recursive) - : RealisedPathsCommand(recursive) + : BuiltPathsCommand(recursive) { } -void StorePathsCommand::run(ref<Store> store, std::vector<RealisedPath> paths) +void StorePathsCommand::run(ref<Store> store, BuiltPaths paths) { StorePaths storePaths; - for (auto & p : paths) - storePaths.push_back(p.path()); + for (auto& builtPath : paths) + for (auto& p : builtPath.outPaths()) + storePaths.push_back(p); run(store, std::move(storePaths)); } @@ -162,7 +168,7 @@ void MixProfile::updateProfile(const StorePath & 
storePath) profile2, storePath)); } -void MixProfile::updateProfile(const DerivedPathsWithHints & buildables) +void MixProfile::updateProfile(const BuiltPaths & buildables) { if (!profile) return; @@ -170,22 +176,19 @@ void MixProfile::updateProfile(const DerivedPathsWithHints & buildables) for (auto & buildable : buildables) { std::visit(overloaded { - [&](DerivedPathWithHints::Opaque bo) { + [&](BuiltPath::Opaque bo) { result.push_back(bo.path); }, - [&](DerivedPathWithHints::Built bfd) { + [&](BuiltPath::Built bfd) { for (auto & output : bfd.outputs) { - /* Output path should be known because we just tried to - build it. */ - assert(output.second); - result.push_back(*output.second); + result.push_back(output.second); } }, }, buildable.raw()); } if (result.size() != 1) - throw Error("'--profile' requires that the arguments produce a single store path, but there are %d", result.size()); + throw UsageError("'--profile' requires that the arguments produce a single store path, but there are %d", result.size()); updateProfile(result[0]); } diff --git a/src/libcmd/command.hh b/src/libcmd/command.hh index 9e18c6e51..35b3a384b 100644 --- a/src/libcmd/command.hh +++ b/src/libcmd/command.hh @@ -143,7 +143,7 @@ private: }; /* A command that operates on zero or more store paths. */ -struct RealisedPathsCommand : public InstallablesCommand +struct BuiltPathsCommand : public InstallablesCommand { private: @@ -156,26 +156,26 @@ protected: public: - RealisedPathsCommand(bool recursive = false); + BuiltPathsCommand(bool recursive = false); using StoreCommand::run; - virtual void run(ref<Store> store, std::vector<RealisedPath> paths) = 0; + virtual void run(ref<Store> store, BuiltPaths paths) = 0; void run(ref<Store> store) override; bool useDefaultInstallables() override { return !all; } }; -struct StorePathsCommand : public RealisedPathsCommand +struct StorePathsCommand : public BuiltPathsCommand { StorePathsCommand(bool recursive = false); - using RealisedPathsCommand::run; + using BuiltPathsCommand::run; virtual void run(ref<Store> store, std::vector<StorePath> storePaths) = 0; - void run(ref<Store> store, std::vector<RealisedPath> paths) override; + void run(ref<Store> store, BuiltPaths paths) override; }; /* A command that operates on exactly one store path. */ @@ -216,7 +216,7 @@ static RegisterCommand registerCommand2(std::vector<std::string> && name) return RegisterCommand(std::move(name), [](){ return make_ref<T>(); }); } -DerivedPathsWithHints build(ref<Store> store, Realise mode, +BuiltPaths build(ref<Store> store, Realise mode, std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode = bmNormal); std::set<StorePath> toStorePaths(ref<Store> store, @@ -231,7 +231,7 @@ std::set<StorePath> toDerivations(ref<Store> store, std::vector<std::shared_ptr<Installable>> installables, bool useDeriver = false); -std::set<RealisedPath> toRealisedPaths( +BuiltPaths toBuiltPaths( ref<Store> store, Realise mode, OperateOn operateOn, @@ -252,7 +252,7 @@ struct MixProfile : virtual StoreCommand /* If 'profile' is set, make it point at the store path produced by 'buildables'. 
*/ - void updateProfile(const DerivedPathsWithHints & buildables); + void updateProfile(const BuiltPaths & buildables); }; struct MixDefaultProfile : MixProfile diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 06ef4c669..fe52912cf 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -285,9 +285,9 @@ void completeFlakeRef(ref<Store> store, std::string_view prefix) } } -DerivedPathWithHints Installable::toDerivedPathWithHints() +DerivedPath Installable::toDerivedPath() { - auto buildables = toDerivedPathsWithHints(); + auto buildables = toDerivedPaths(); if (buildables.size() != 1) throw Error("installable '%s' evaluates to %d derivations, where only one is expected", what(), buildables.size()); return std::move(buildables[0]); @@ -321,22 +321,19 @@ struct InstallableStorePath : Installable std::string what() override { return store->printStorePath(storePath); } - DerivedPathsWithHints toDerivedPathsWithHints() override + DerivedPaths toDerivedPaths() override { if (storePath.isDerivation()) { - std::map<std::string, std::optional<StorePath>> outputs; auto drv = store->readDerivation(storePath); - for (auto & [name, output] : drv.outputsAndOptPaths(*store)) - outputs.emplace(name, output.second); return { - DerivedPathWithHints::Built { + DerivedPath::Built { .drvPath = storePath, - .outputs = std::move(outputs) + .outputs = drv.outputNames(), } }; } else { return { - DerivedPathWithHints::Opaque { + DerivedPath::Opaque { .path = storePath, } }; @@ -349,22 +346,22 @@ struct InstallableStorePath : Installable } }; -DerivedPathsWithHints InstallableValue::toDerivedPathsWithHints() +DerivedPaths InstallableValue::toDerivedPaths() { - DerivedPathsWithHints res; + DerivedPaths res; - std::map<StorePath, std::map<std::string, std::optional<StorePath>>> drvsToOutputs; + std::map<StorePath, std::set<std::string>> drvsToOutputs; // Group by derivation, helps with .all in particular for (auto & drv : toDerivations()) { auto outputName = drv.outputName; if (outputName == "") throw Error("derivation '%s' lacks an 'outputName' attribute", state->store->printStorePath(drv.drvPath)); - drvsToOutputs[drv.drvPath].insert_or_assign(outputName, drv.outPath); + drvsToOutputs[drv.drvPath].insert(outputName); } for (auto & i : drvsToOutputs) - res.push_back(DerivedPathWithHints::Built { i.first, i.second }); + res.push_back(DerivedPath::Built { i.first, i.second }); return res; } @@ -675,32 +672,67 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable( return installables.front(); } -DerivedPathsWithHints build(ref<Store> store, Realise mode, +BuiltPaths getBuiltPaths(ref<Store> store, DerivedPaths hopefullyBuiltPaths) +{ + BuiltPaths res; + for (auto& b : hopefullyBuiltPaths) + std::visit( + overloaded{ + [&](DerivedPath::Opaque bo) { + res.push_back(BuiltPath::Opaque{bo.path}); + }, + [&](DerivedPath::Built bfd) { + OutputPathMap outputs; + auto drv = store->readDerivation(bfd.drvPath); + auto outputHashes = staticOutputHashes(*store, drv); + auto drvOutputs = drv.outputsAndOptPaths(*store); + for (auto& output : bfd.outputs) { + if (!outputHashes.count(output)) + throw Error( + "the derivation '%s' doesn't have an output " + "named '%s'", + store->printStorePath(bfd.drvPath), output); + if (settings.isExperimentalFeatureEnabled( + "ca-derivations")) { + auto outputId = + DrvOutput{outputHashes.at(output), output}; + auto realisation = + store->queryRealisation(outputId); + if (!realisation) + throw Error( + "cannot operate on an output of unbuilt " 
+ "content-addresed derivation '%s'", + outputId.to_string()); + outputs.insert_or_assign( + output, realisation->outPath); + } else { + // If ca-derivations isn't enabled, assume that + // the output path is statically known. + assert(drvOutputs.count(output)); + assert(drvOutputs.at(output).second); + outputs.insert_or_assign( + output, *drvOutputs.at(output).second); + } + } + res.push_back(BuiltPath::Built{bfd.drvPath, outputs}); + }, + }, + b.raw()); + + return res; +} + +BuiltPaths build(ref<Store> store, Realise mode, std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode) { if (mode == Realise::Nothing) settings.readOnlyMode = true; - DerivedPathsWithHints buildables; - std::vector<DerivedPath> pathsToBuild; for (auto & i : installables) { - for (auto & b : i->toDerivedPathsWithHints()) { - std::visit(overloaded { - [&](DerivedPathWithHints::Opaque bo) { - pathsToBuild.push_back(bo); - }, - [&](DerivedPathWithHints::Built bfd) { - StringSet outputNames; - for (auto & output : bfd.outputs) - outputNames.insert(output.first); - pathsToBuild.push_back( - DerivedPath::Built{bfd.drvPath, outputNames}); - }, - }, b.raw()); - buildables.push_back(std::move(b)); - } + auto b = i->toDerivedPaths(); + pathsToBuild.insert(pathsToBuild.end(), b.begin(), b.end()); } if (mode == Realise::Nothing) @@ -708,57 +740,26 @@ DerivedPathsWithHints build(ref<Store> store, Realise mode, else if (mode == Realise::Outputs) store->buildPaths(pathsToBuild, bMode); - return buildables; + return getBuiltPaths(store, pathsToBuild); } -std::set<RealisedPath> toRealisedPaths( +BuiltPaths toBuiltPaths( ref<Store> store, Realise mode, OperateOn operateOn, std::vector<std::shared_ptr<Installable>> installables) { - std::set<RealisedPath> res; if (operateOn == OperateOn::Output) { - for (auto & b : build(store, mode, installables)) - std::visit(overloaded { - [&](DerivedPathWithHints::Opaque bo) { - res.insert(bo.path); - }, - [&](DerivedPathWithHints::Built bfd) { - auto drv = store->readDerivation(bfd.drvPath); - auto outputHashes = staticOutputHashes(*store, drv); - for (auto & output : bfd.outputs) { - if (settings.isExperimentalFeatureEnabled("ca-derivations")) { - if (!outputHashes.count(output.first)) - throw Error( - "the derivation '%s' doesn't have an output named '%s'", - store->printStorePath(bfd.drvPath), - output.first); - auto outputId = DrvOutput{outputHashes.at(output.first), output.first}; - auto realisation = store->queryRealisation(outputId); - if (!realisation) - throw Error("cannot operate on an output of unbuilt content-addresed derivation '%s'", outputId.to_string()); - res.insert(RealisedPath{*realisation}); - } - else { - // If ca-derivations isn't enabled, behave as if - // all the paths are opaque to keep the default - // behavior - assert(output.second); - res.insert(*output.second); - } - } - }, - }, b.raw()); + return build(store, mode, installables); } else { if (mode == Realise::Nothing) settings.readOnlyMode = true; - auto drvPaths = toDerivations(store, installables, true); - res.insert(drvPaths.begin(), drvPaths.end()); + BuiltPaths res; + for (auto & drvPath : toDerivations(store, installables, true)) + res.push_back(BuiltPath::Opaque{drvPath}); + return res; } - - return res; } StorePathSet toStorePaths(ref<Store> store, @@ -766,8 +767,10 @@ StorePathSet toStorePaths(ref<Store> store, std::vector<std::shared_ptr<Installable>> installables) { StorePathSet outPaths; - for (auto & path : toRealisedPaths(store, mode, operateOn, installables)) - 
outPaths.insert(path.path()); + for (auto & path : toBuiltPaths(store, mode, operateOn, installables)) { + auto thisOutPaths = path.outPaths(); + outPaths.insert(thisOutPaths.begin(), thisOutPaths.end()); + } return outPaths; } @@ -789,9 +792,9 @@ StorePathSet toDerivations(ref<Store> store, StorePathSet drvPaths; for (auto & i : installables) - for (auto & b : i->toDerivedPathsWithHints()) + for (auto & b : i->toDerivedPaths()) std::visit(overloaded { - [&](DerivedPathWithHints::Opaque bo) { + [&](DerivedPath::Opaque bo) { if (!useDeriver) throw Error("argument '%s' did not evaluate to a derivation", i->what()); auto derivers = store->queryValidDerivers(bo.path); @@ -800,7 +803,7 @@ StorePathSet toDerivations(ref<Store> store, // FIXME: use all derivers? drvPaths.insert(*derivers.begin()); }, - [&](DerivedPathWithHints::Built bfd) { + [&](DerivedPath::Built bfd) { drvPaths.insert(bfd.drvPath); }, }, b.raw()); diff --git a/src/libcmd/installables.hh b/src/libcmd/installables.hh index 403403c07..298fd48f8 100644 --- a/src/libcmd/installables.hh +++ b/src/libcmd/installables.hh @@ -23,17 +23,23 @@ struct App // FIXME: add args, sandbox settings, metadata, ... }; +struct UnresolvedApp +{ + App unresolved; + App resolve(ref<Store>); +}; + struct Installable { virtual ~Installable() { } virtual std::string what() = 0; - virtual DerivedPathsWithHints toDerivedPathsWithHints() = 0; + virtual DerivedPaths toDerivedPaths() = 0; - DerivedPathWithHints toDerivedPathWithHints(); + DerivedPath toDerivedPath(); - App toApp(EvalState & state); + UnresolvedApp toApp(EvalState & state); virtual std::pair<Value *, Pos> toValue(EvalState & state) { @@ -74,7 +80,7 @@ struct InstallableValue : Installable virtual std::vector<DerivationInfo> toDerivations() = 0; - DerivedPathsWithHints toDerivedPathsWithHints() override; + DerivedPaths toDerivedPaths() override; }; struct InstallableFlake : InstallableValue diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc index 9dd557205..867eb13a5 100644 --- a/src/libexpr/attr-path.cc +++ b/src/libexpr/attr-path.cc @@ -19,7 +19,7 @@ static Strings parseAttrPath(std::string_view s) ++i; while (1) { if (i == s.end()) - throw Error("missing closing quote in selection path '%1%'", s); + throw ParseError("missing closing quote in selection path '%1%'", s); if (*i == '"') break; cur.push_back(*i++); } @@ -116,14 +116,14 @@ Pos findDerivationFilename(EvalState & state, Value & v, std::string what) auto colon = pos.rfind(':'); if (colon == std::string::npos) - throw Error("cannot parse meta.position attribute '%s'", pos); + throw ParseError("cannot parse meta.position attribute '%s'", pos); std::string filename(pos, 0, colon); unsigned int lineno; try { lineno = std::stoi(std::string(pos, colon + 1)); } catch (std::invalid_argument & e) { - throw Error("cannot parse line number '%s'", pos); + throw ParseError("cannot parse line number '%s'", pos); } Symbol file = state.symbols.create(filename); diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index ef9f8efca..c3206a577 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -64,7 +64,11 @@ static char * dupStringWithLen(const char * s, size_t size) RootValue allocRootValue(Value * v) { +#if HAVE_BOEHMGC return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v); +#else + return std::make_shared<Value *>(v); +#endif } @@ -233,22 +237,34 @@ static void * oomHandler(size_t requested) } class BoehmGCStackAllocator : public StackAllocator { - boost::coroutines2::protected_fixedsize_stack stack { - 
// We allocate 8 MB, the default max stack size on NixOS. - // A smaller stack might be quicker to allocate but reduces the stack - // depth available for source filter expressions etc. - std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024)) + boost::coroutines2::protected_fixedsize_stack stack { + // We allocate 8 MB, the default max stack size on NixOS. + // A smaller stack might be quicker to allocate but reduces the stack + // depth available for source filter expressions etc. + std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024)) }; + // This is specific to boost::coroutines2::protected_fixedsize_stack. + // The stack protection page is included in sctx.size, so we have to + // subtract one page size from the stack size. + std::size_t pfss_usable_stack_size(boost::context::stack_context &sctx) { + return sctx.size - boost::context::stack_traits::page_size(); + } + public: boost::context::stack_context allocate() override { auto sctx = stack.allocate(); - GC_add_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp); + + // Stacks generally start at a high address and grow to lower addresses. + // Architectures that do the opposite are rare; in fact so rare that + // boost_routine does not implement it. + // So we subtract the stack size. + GC_add_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp); return sctx; } void deallocate(boost::context::stack_context sctx) override { - GC_remove_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp); + GC_remove_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp); stack.deallocate(sctx); } @@ -908,7 +924,7 @@ void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial) // computation. if (mustBeTrivial && !(dynamic_cast<ExprAttrs *>(e))) - throw Error("file '%s' must be an attribute set", path); + throw EvalError("file '%s' must be an attribute set", path); eval(e, v); } catch (Error & e) { addErrorTrace(e, "while evaluating the file '%1%':", path2); diff --git a/src/libexpr/flake/config.cc b/src/libexpr/flake/config.cc index 63566131e..c8a5a319f 100644 --- a/src/libexpr/flake/config.cc +++ b/src/libexpr/flake/config.cc @@ -22,7 +22,9 @@ static TrustedList readTrustedList() static void writeTrustedList(const TrustedList & trustedList) { - writeFile(trustedListPath(), nlohmann::json(trustedList).dump()); + auto path = trustedListPath(); + createDirs(dirOf(path)); + writeFile(path, nlohmann::json(trustedList).dump()); } void ConfigFile::apply() diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 2e94490d4..8e6f06949 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -359,7 +359,12 @@ LockedFlake lockFlake( ancestors? */ auto i = overrides.find(inputPath); bool hasOverride = i != overrides.end(); - if (hasOverride) overridesUsed.insert(inputPath); + if (hasOverride) { + overridesUsed.insert(inputPath); + // Respect the “flakeness” of the input even if we + // override it + i->second.isFlake = input2.isFlake; + } auto & input = hasOverride ? 
i->second : input2; /* Resolve 'follows' later (since it may refer to an input diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk index 26c53d301..c40abfb78 100644 --- a/src/libexpr/local.mk +++ b/src/libexpr/local.mk @@ -16,7 +16,7 @@ libexpr_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/lib libexpr_LIBS = libutil libstore libfetchers libexpr_LDFLAGS = -lboost_context -ifneq ($(OS), FreeBSD) +ifeq ($(OS), Linux) libexpr_LDFLAGS += -ldl endif diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index fc4afaab8..e8569b654 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -21,6 +21,8 @@ #include <regex> #include <dlfcn.h> +#include <cmath> + namespace nix { @@ -714,6 +716,44 @@ static RegisterPrimOp primop_addErrorContext(RegisterPrimOp::Info { .fun = prim_addErrorContext, }); +static void prim_ceil(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + auto value = state.forceFloat(*args[0], args[0]->determinePos(pos)); + mkInt(v, ceil(value)); +} + +static RegisterPrimOp primop_ceil({ + .name = "__ceil", + .args = {"double"}, + .doc = R"( + Converts an IEEE-754 double-precision floating-point number (*double*) to + the next higher integer. + + If the datatype is neither an integer nor a "float", an evaluation error will be + thrown. + )", + .fun = prim_ceil, +}); + +static void prim_floor(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + auto value = state.forceFloat(*args[0], args[0]->determinePos(pos)); + mkInt(v, floor(value)); +} + +static RegisterPrimOp primop_floor({ + .name = "__floor", + .args = {"double"}, + .doc = R"( + Converts an IEEE-754 double-precision floating-point number (*double*) to + the next lower integer. + + If the datatype is neither an integer nor a "float", an evaluation error will be + thrown. + )", + .fun = prim_floor, +}); + /* Try evaluating the argument. 
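Note on the new __ceil/__floor primops above: they surface in the language as builtins.ceil and builtins.floor, so for example builtins.ceil 2.1 evaluates to 3 and builtins.floor 2.1 to 2; per the docstrings, integer arguments are accepted as well, while a string or other non-numeric argument triggers an evaluation error.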
Success => {success=true; value=something;}, * else => {success=false; value=false;} */ static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Value & v) diff --git a/src/libfetchers/attrs.hh b/src/libfetchers/attrs.hh index a2d53a7bf..e41037633 100644 --- a/src/libfetchers/attrs.hh +++ b/src/libfetchers/attrs.hh @@ -6,6 +6,8 @@ #include <nlohmann/json_fwd.hpp> +#include <optional> + namespace nix::fetchers { typedef std::variant<std::string, uint64_t, Explicit<bool>> Attr; diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index b8d7d2c70..257465bae 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -178,7 +178,8 @@ struct TarballInputScheme : InputScheme && !hasSuffix(url.path, ".tar") && !hasSuffix(url.path, ".tar.gz") && !hasSuffix(url.path, ".tar.xz") - && !hasSuffix(url.path, ".tar.bz2")) + && !hasSuffix(url.path, ".tar.bz2") + && !hasSuffix(url.path, ".tar.zst")) return {}; Input input; diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index 15354549a..b2a6e2a82 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -484,7 +484,7 @@ Logger * makeProgressBar(bool printBuildLogs) { return new ProgressBar( printBuildLogs, - isatty(STDERR_FILENO) && getEnv("TERM").value_or("dumb") != "dumb" + shouldANSI() ); } diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 09e1c254b..df401e6f4 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -450,18 +450,43 @@ StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s std::optional<const Realisation> BinaryCacheStore::queryRealisation(const DrvOutput & id) { + if (diskCache) { + auto [cacheOutcome, maybeCachedRealisation] = + diskCache->lookupRealisation(getUri(), id); + switch (cacheOutcome) { + case NarInfoDiskCache::oValid: + debug("Returning a cached realisation for %s", id.to_string()); + return *maybeCachedRealisation; + case NarInfoDiskCache::oInvalid: + debug("Returning a cached missing realisation for %s", id.to_string()); + return {}; + case NarInfoDiskCache::oUnknown: + break; + } + } + auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi"; auto rawOutputInfo = getFile(outputInfoFilePath); if (rawOutputInfo) { - return {Realisation::fromJSON( - nlohmann::json::parse(*rawOutputInfo), outputInfoFilePath)}; + auto realisation = Realisation::fromJSON( + nlohmann::json::parse(*rawOutputInfo), outputInfoFilePath); + + if (diskCache) + diskCache->upsertRealisation( + getUri(), realisation); + + return {realisation}; } else { + if (diskCache) + diskCache->upsertAbsentRealisation(getUri(), id); return std::nullopt; } } void BinaryCacheStore::registerDrvOutput(const Realisation& info) { + if (diskCache) + diskCache->upsertRealisation(getUri(), info); auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi"; upsertFile(filePath, info.toJSON().dump(), "application/json"); } diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 9100d3333..73d1ed7cc 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -739,6 +739,63 @@ void DerivationGoal::cleanupPostOutputsRegisteredModeNonCheck() { } +void runPostBuildHook( + Store & store, + Logger & logger, + const StorePath & drvPath, + StorePathSet outputPaths +) +{ + auto hook = settings.postBuildHook; + if (hook == "") + return; + + Activity act(logger, lvlInfo, 
actPostBuildHook, + fmt("running post-build-hook '%s'", settings.postBuildHook), + Logger::Fields{store.printStorePath(drvPath)}); + PushActivity pact(act.id); + std::map<std::string, std::string> hookEnvironment = getEnv(); + + hookEnvironment.emplace("DRV_PATH", store.printStorePath(drvPath)); + hookEnvironment.emplace("OUT_PATHS", chomp(concatStringsSep(" ", store.printStorePathSet(outputPaths)))); + + RunOptions opts(settings.postBuildHook, {}); + opts.environment = hookEnvironment; + + struct LogSink : Sink { + Activity & act; + std::string currentLine; + + LogSink(Activity & act) : act(act) { } + + void operator() (std::string_view data) override { + for (auto c : data) { + if (c == '\n') { + flushLine(); + } else { + currentLine += c; + } + } + } + + void flushLine() { + act.result(resPostBuildLogLine, currentLine); + currentLine.clear(); + } + + ~LogSink() { + if (currentLine != "") { + currentLine += '\n'; + flushLine(); + } + } + }; + LogSink sink(act); + + opts.standardOut = &sink; + opts.mergeStderrToStdout = true; + runProgram2(opts); +} void DerivationGoal::buildDone() { @@ -804,57 +861,15 @@ void DerivationGoal::buildDone() being valid. */ registerOutputs(); - if (settings.postBuildHook != "") { - Activity act(*logger, lvlInfo, actPostBuildHook, - fmt("running post-build-hook '%s'", settings.postBuildHook), - Logger::Fields{worker.store.printStorePath(drvPath)}); - PushActivity pact(act.id); - StorePathSet outputPaths; - for (auto i : drv->outputs) { - outputPaths.insert(finalOutputs.at(i.first)); - } - std::map<std::string, std::string> hookEnvironment = getEnv(); - - hookEnvironment.emplace("DRV_PATH", worker.store.printStorePath(drvPath)); - hookEnvironment.emplace("OUT_PATHS", chomp(concatStringsSep(" ", worker.store.printStorePathSet(outputPaths)))); - - RunOptions opts(settings.postBuildHook, {}); - opts.environment = hookEnvironment; - - struct LogSink : Sink { - Activity & act; - std::string currentLine; - - LogSink(Activity & act) : act(act) { } - - void operator() (std::string_view data) override { - for (auto c : data) { - if (c == '\n') { - flushLine(); - } else { - currentLine += c; - } - } - } - - void flushLine() { - act.result(resPostBuildLogLine, currentLine); - currentLine.clear(); - } - - ~LogSink() { - if (currentLine != "") { - currentLine += '\n'; - flushLine(); - } - } - }; - LogSink sink(act); - - opts.standardOut = &sink; - opts.mergeStderrToStdout = true; - runProgram2(opts); - } + StorePathSet outputPaths; + for (auto & [_, path] : finalOutputs) + outputPaths.insert(path); + runPostBuildHook( + worker.store, + *logger, + drvPath, + outputPaths + ); if (buildMode == bmCheck) { cleanupPostOutputsRegisteredModeCheck(); @@ -910,6 +925,8 @@ void DerivationGoal::resolvedFinished() { auto resolvedHashes = staticOutputHashes(worker.store, *resolvedDrv); + StorePathSet outputPaths; + // `wantedOutputs` might be empty, which means “all the outputs” auto realWantedOutputs = wantedOutputs; if (realWantedOutputs.empty()) @@ -927,8 +944,10 @@ void DerivationGoal::resolvedFinished() { auto newRealisation = *realisation; newRealisation.id = DrvOutput{initialOutputs.at(wantedOutput).outputHash, wantedOutput}; newRealisation.signatures.clear(); + newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath); signRealisation(newRealisation); worker.store.registerDrvOutput(newRealisation); + outputPaths.insert(realisation->outPath); } else { // If we don't have a realisation, then it must mean that something // failed when 
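Note on runPostBuildHook above: the hook configured via the post-build-hook setting is invoked with DRV_PATH set to the derivation's store path and OUT_PATHS set to the space-separated output paths, and its output is forwarded line by line as resPostBuildLogLine results; a typical hook just runs something along the lines of `nix copy --to <cache> $OUT_PATHS` (illustrative, not part of this patch). Factoring the logic out of buildDone() lets resolvedFinished() reuse it, so the hook also fires for builds that were realised via a resolved derivation.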
building the resolved drv @@ -936,6 +955,13 @@ void DerivationGoal::resolvedFinished() { } } + runPostBuildHook( + worker.store, + *logger, + drvPath, + outputPaths + ); + // This is potentially a bit fishy in terms of error reporting. Not sure // how to do it in a cleaner way amDone(nrFailed == 0 ? ecSuccess : ecFailed, ex); diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index a5ac4c49d..be270d079 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -17,6 +17,13 @@ DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(const DrvOutput& id, Worker void DrvOutputSubstitutionGoal::init() { trace("init"); + + /* If the derivation already exists, we’re done */ + if (worker.store.queryRealisation(id)) { + amDone(ecSuccess); + return; + } + subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>(); tryNext(); } @@ -53,6 +60,26 @@ void DrvOutputSubstitutionGoal::tryNext() return; } + for (const auto & [depId, depPath] : outputInfo->dependentRealisations) { + if (depId != id) { + if (auto localOutputInfo = worker.store.queryRealisation(depId); + localOutputInfo && localOutputInfo->outPath != depPath) { + warn( + "substituter '%s' has an incompatible realisation for '%s', ignoring.\n" + "Local: %s\n" + "Remote: %s", + sub->getUri(), + depId.to_string(), + worker.store.printStorePath(localOutputInfo->outPath), + worker.store.printStorePath(depPath) + ); + tryNext(); + return; + } + addWaitee(worker.makeDrvOutputSubstitutionGoal(depId)); + } + } + addWaitee(worker.makePathSubstitutionGoal(outputInfo->outPath)); if (waitees.empty()) outPathValid(); diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc index 4a67b3f47..8320dd1c4 100644 --- a/src/libstore/build/local-derivation-goal.cc +++ b/src/libstore/build/local-derivation-goal.cc @@ -153,6 +153,7 @@ void LocalDerivationGoal::killChild() void LocalDerivationGoal::tryLocalBuild() { unsigned int curBuilds = worker.getNrLocalBuilds(); if (curBuilds >= settings.maxBuildJobs) { + state = &DerivationGoal::tryToBuild; worker.waitForBuildSlot(shared_from_this()); outputLocks.unlock(); return; @@ -291,7 +292,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull() auto & localStore = getLocalStore(); uint64_t required = 8ULL * 1024 * 1024; // FIXME: make configurable struct statvfs st; - if (statvfs(localStore.realStoreDir.c_str(), &st) == 0 && + if (statvfs(localStore.realStoreDir.get().c_str(), &st) == 0 && (uint64_t) st.f_bavail * st.f_bsize < required) diskFull = true; if (statvfs(tmpDir.c_str(), &st) == 0 && @@ -416,7 +417,7 @@ void LocalDerivationGoal::startBuilder() } auto & localStore = getLocalStore(); - if (localStore.storeDir != localStore.realStoreDir) { + if (localStore.storeDir != localStore.realStoreDir.get()) { #if __linux__ useChroot = true; #else @@ -1332,13 +1333,18 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo std::optional<const Realisation> queryRealisation(const DrvOutput & id) override // XXX: This should probably be allowed if the realisation corresponds to // an allowed derivation - { throw Error("queryRealisation"); } + { + if (!goal.isAllowed(id)) + throw InvalidPath("cannot query an unknown output id '%s' in recursive Nix", id.to_string()); + return next->queryRealisation(id); + } void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode) override { if 
(buildMode != bmNormal) throw Error("unsupported build mode"); StorePathSet newPaths; + std::set<Realisation> newRealisations; for (auto & req : paths) { if (!goal.isAllowed(req)) @@ -1351,16 +1357,28 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo auto p = std::get_if<DerivedPath::Built>(&path); if (!p) continue; auto & bfd = *p; + auto drv = readDerivation(bfd.drvPath); + auto drvHashes = staticOutputHashes(*this, drv); auto outputs = next->queryDerivationOutputMap(bfd.drvPath); for (auto & [outputName, outputPath] : outputs) - if (wantOutput(outputName, bfd.outputs)) + if (wantOutput(outputName, bfd.outputs)) { newPaths.insert(outputPath); + if (settings.isExperimentalFeatureEnabled("ca-derivations")) { + auto thisRealisation = next->queryRealisation( + DrvOutput{drvHashes.at(outputName), outputName} + ); + assert(thisRealisation); + newRealisations.insert(*thisRealisation); + } + } } StorePathSet closure; next->computeFSClosure(newPaths, closure); for (auto & path : closure) goal.addDependency(path); + for (auto & real : Realisation::closure(*next, newRealisations)) + goal.addedDrvOutputs.insert(real.id); } BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, @@ -2300,10 +2318,6 @@ void LocalDerivationGoal::registerOutputs() sink.s = make_ref<std::string>(rewriteStrings(*sink.s, outputRewrites)); StringSource source(*sink.s); restorePath(actualPath, source); - - /* FIXME: set proper permissions in restorePath() so - we don't have to do another traversal. */ - canonicalisePathMetaData(actualPath, -1, inodesSeen); } }; @@ -2357,32 +2371,19 @@ void LocalDerivationGoal::registerOutputs() } auto got = caSink.finish().first; auto refs = rewriteRefs(); - HashModuloSink narSink { htSHA256, oldHashPart }; - dumpPath(actualPath, narSink); - auto narHashAndSize = narSink.finish(); - ValidPathInfo newInfo0 { - worker.store.makeFixedOutputPath( + + auto finalPath = worker.store.makeFixedOutputPath( outputHash.method, got, outputPathName(drv->name, outputName), refs.second, - refs.first), - narHashAndSize.first, - }; - newInfo0.narSize = narHashAndSize.second; - newInfo0.ca = FixedOutputHash { - .method = outputHash.method, - .hash = got, - }; - newInfo0.references = refs.second; - if (refs.first) - newInfo0.references.insert(newInfo0.path); - if (scratchPath != newInfo0.path) { + refs.first); + if (scratchPath != finalPath) { // Also rewrite the output path auto source = sinkToSource([&](Sink & nextSink) { StringSink sink; dumpPath(actualPath, sink); - RewritingSink rsink2(oldHashPart, std::string(newInfo0.path.hashPart()), nextSink); + RewritingSink rsink2(oldHashPart, std::string(finalPath.hashPart()), nextSink); rsink2(*sink.s); rsink2.flush(); }); @@ -2392,6 +2393,21 @@ void LocalDerivationGoal::registerOutputs() movePath(tmpPath, actualPath); } + HashResult narHashAndSize = hashPath(htSHA256, actualPath); + ValidPathInfo newInfo0 { + finalPath, + narHashAndSize.first, + }; + + newInfo0.narSize = narHashAndSize.second; + newInfo0.ca = FixedOutputHash { + .method = outputHash.method, + .hash = got, + }; + newInfo0.references = refs.second; + if (refs.first) + newInfo0.references.insert(newInfo0.path); + assert(newInfo0.ca); return newInfo0; }; @@ -2452,6 +2468,10 @@ void LocalDerivationGoal::registerOutputs() }, }, output.output); + /* FIXME: set proper permissions in restorePath() so + we don't have to do another traversal. 
*/ + canonicalisePathMetaData(actualPath, -1, inodesSeen); + /* Calculate where we'll move the output files. In the checking case we will leave leave them where they are, for now, rather than move to their usual "final destination" */ @@ -2461,6 +2481,7 @@ void LocalDerivationGoal::registerOutputs() floating CA derivations and hash-mismatching fixed-output derivations. */ PathLocks dynamicOutputLock; + dynamicOutputLock.setDeletion(true); auto optFixedPath = output.path(worker.store, drv->name, outputName); if (!optFixedPath || worker.store.printStorePath(*optFixedPath) != finalDestPath) @@ -2484,6 +2505,7 @@ void LocalDerivationGoal::registerOutputs() assert(newInfo.ca); } else { auto destPath = worker.store.toRealPath(finalDestPath); + deletePath(destPath); movePath(actualPath, destPath); actualPath = destPath; } diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh index d30be2351..088a57209 100644 --- a/src/libstore/build/local-derivation-goal.hh +++ b/src/libstore/build/local-derivation-goal.hh @@ -108,6 +108,9 @@ struct LocalDerivationGoal : public DerivationGoal /* Paths that were added via recursive Nix calls. */ StorePathSet addedPaths; + /* Realisations that were added via recursive Nix calls. */ + std::set<DrvOutput> addedDrvOutputs; + /* Recursive Nix calls are only allowed to build or realize paths in the original input closure or added via a recursive Nix call (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where @@ -116,6 +119,11 @@ struct LocalDerivationGoal : public DerivationGoal { return inputPaths.count(path) || addedPaths.count(path); } + bool isAllowed(const DrvOutput & id) + { + return addedDrvOutputs.count(id); + } + bool isAllowed(const DerivedPath & req); friend struct RestrictedStore; diff --git a/src/libstore/ca-specific-schema.sql b/src/libstore/ca-specific-schema.sql index 20ee046a1..08af0cc1f 100644 --- a/src/libstore/ca-specific-schema.sql +++ b/src/libstore/ca-specific-schema.sql @@ -3,10 +3,19 @@ -- is enabled create table if not exists Realisations ( + id integer primary key autoincrement not null, drvPath text not null, outputName text not null, -- symbolic output id, usually "out" outputPath integer not null, signatures text, -- space-separated list - primary key (drvPath, outputName), foreign key (outputPath) references ValidPaths(id) on delete cascade ); + +create index if not exists IndexRealisations on Realisations(drvPath, outputName); + +create table if not exists RealisationsRefs ( + referrer integer not null, + realisationReference integer, + foreign key (referrer) references Realisations(id) on delete cascade, + foreign key (realisationReference) references Realisations(id) on delete restrict +); diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 72b3e3e13..e06fb9ce2 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -885,10 +885,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store, case wopRegisterDrvOutput: { logger->startWork(); - auto outputId = DrvOutput::parse(readString(from)); - auto outputPath = StorePath(readString(from)); - store->registerDrvOutput(Realisation{ - .id = outputId, .outPath = outputPath}); + if (GET_PROTOCOL_MINOR(clientVersion) < 31) { + auto outputId = DrvOutput::parse(readString(from)); + auto outputPath = StorePath(readString(from)); + store->registerDrvOutput(Realisation{ + .id = outputId, .outPath = outputPath}); + } else { + auto realisation = worker_proto::read(*store, from, Phantom<Realisation>()); + 
store->registerDrvOutput(realisation); + } logger->stopWork(); break; } @@ -898,9 +903,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store, auto outputId = DrvOutput::parse(readString(from)); auto info = store->queryRealisation(outputId); logger->stopWork(); - std::set<StorePath> outPaths; - if (info) outPaths.insert(info->outPath); - worker_proto::write(*store, to, outPaths); + if (GET_PROTOCOL_MINOR(clientVersion) < 31) { + std::set<StorePath> outPaths; + if (info) outPaths.insert(info->outPath); + worker_proto::write(*store, to, outPaths); + } else { + std::set<Realisation> realisations; + if (info) realisations.insert(*info); + worker_proto::write(*store, to, realisations); + } break; } diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc index 13833c58e..8da81d0ac 100644 --- a/src/libstore/derived-path.cc +++ b/src/libstore/derived-path.cc @@ -11,18 +11,33 @@ nlohmann::json DerivedPath::Opaque::toJSON(ref<Store> store) const { return res; } -nlohmann::json DerivedPathWithHints::Built::toJSON(ref<Store> store) const { +nlohmann::json BuiltPath::Built::toJSON(ref<Store> store) const { nlohmann::json res; res["drvPath"] = store->printStorePath(drvPath); for (const auto& [output, path] : outputs) { - res["outputs"][output] = path ? store->printStorePath(*path) : ""; + res["outputs"][output] = store->printStorePath(path); } return res; } -nlohmann::json derivedPathsWithHintsToJSON(const DerivedPathsWithHints & buildables, ref<Store> store) { +StorePathSet BuiltPath::outPaths() const +{ + return std::visit( + overloaded{ + [](BuiltPath::Opaque p) { return StorePathSet{p.path}; }, + [](BuiltPath::Built b) { + StorePathSet res; + for (auto & [_, path] : b.outputs) + res.insert(path); + return res; + }, + }, raw() + ); +} + +nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store) { auto res = nlohmann::json::array(); - for (const DerivedPathWithHints & buildable : buildables) { + for (const BuiltPath & buildable : buildables) { std::visit([&res, store](const auto & buildable) { res.push_back(buildable.toJSON(store)); }, buildable.raw()); @@ -62,7 +77,7 @@ DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_vi auto outputsS = s.substr(n + 1); std::set<string> outputs; if (outputsS != "*") - outputs = tokenizeString<std::set<string>>(outputsS); + outputs = tokenizeString<std::set<string>>(outputsS, ","); return {drvPath, outputs}; } @@ -74,4 +89,30 @@ DerivedPath DerivedPath::parse(const Store & store, std::string_view s) : (DerivedPath) DerivedPath::Built::parse(store, s); } +RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const +{ + RealisedPath::Set res; + std::visit( + overloaded{ + [&](BuiltPath::Opaque p) { res.insert(p.path); }, + [&](BuiltPath::Built p) { + auto drvHashes = + staticOutputHashes(store, store.readDerivation(p.drvPath)); + for (auto& [outputName, outputPath] : p.outputs) { + if (settings.isExperimentalFeatureEnabled( + "ca-derivations")) { + auto thisRealisation = store.queryRealisation( + DrvOutput{drvHashes.at(outputName), outputName}); + assert(thisRealisation); // We’ve built it, so we must h + // ve the realisation + res.insert(*thisRealisation); + } else { + res.insert(outputPath); + } + } + }, + }, + raw()); + return res; +} } diff --git a/src/libstore/derived-path.hh b/src/libstore/derived-path.hh index 7a2fe59de..9d6ace069 100644 --- a/src/libstore/derived-path.hh +++ b/src/libstore/derived-path.hh @@ -2,6 +2,7 @@ #include "util.hh" #include "path.hh" 
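Note on the derived-path.cc/.hh changes above: DerivedPathWithHints, whose output paths were optional, is replaced by BuiltPath, where every output name maps to a concrete StorePath. A rough sketch of how the two-armed variant is consumed, modelled on the outPaths() visitor in the patch but with simplified stand-in types:

    // Sketch only: simplified stand-ins for the types in derived-path.hh.
    #include <map>
    #include <set>
    #include <string>
    #include <type_traits>
    #include <variant>

    using StorePath = std::string;                  // stand-in for nix::StorePath

    struct Opaque { StorePath path; };              // a plain store path
    struct Built {                                  // a built derivation...
        StorePath drvPath;
        std::map<std::string, StorePath> outputs;   // ...with *known* output paths (no optionals)
    };
    using BuiltPath = std::variant<Opaque, Built>;

    std::set<StorePath> outPaths(const BuiltPath & bp)
    {
        return std::visit([](auto && p) -> std::set<StorePath> {
            if constexpr (std::is_same_v<std::decay_t<decltype(p)>, Opaque>)
                return {p.path};
            else {
                std::set<StorePath> res;
                for (auto & [name, path] : p.outputs) res.insert(path);
                return res;
            }
        }, bp);
    }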
+#include "realisation.hh" #include <optional> @@ -79,51 +80,44 @@ struct DerivedPath : _DerivedPathRaw { /** * A built derived path with hints in the form of optional concrete output paths. * - * See 'DerivedPathWithHints' for more an explanation. + * See 'BuiltPath' for more an explanation. */ -struct DerivedPathWithHintsBuilt { +struct BuiltPathBuilt { StorePath drvPath; - std::map<std::string, std::optional<StorePath>> outputs; + std::map<std::string, StorePath> outputs; nlohmann::json toJSON(ref<Store> store) const; - static DerivedPathWithHintsBuilt parse(const Store & store, std::string_view); + static BuiltPathBuilt parse(const Store & store, std::string_view); }; -using _DerivedPathWithHintsRaw = std::variant< +using _BuiltPathRaw = std::variant< DerivedPath::Opaque, - DerivedPathWithHintsBuilt + BuiltPathBuilt >; /** - * A derived path with hints in the form of optional concrete output paths in the built case. - * - * This type is currently just used by the CLI. The paths are filled in - * during evaluation for derivations that know what paths they will - * produce in advanced, i.e. input-addressed or fixed-output content - * addressed derivations. - * - * That isn't very good, because it puts floating content-addressed - * derivations "at a disadvantage". It would be better to never rely on - * the output path of unbuilt derivations, and exclusively use the - * realizations types to work with built derivations' concrete output - * paths. + * A built path. Similar to a `DerivedPath`, but enriched with the corresponding + * output path(s). */ -// FIXME Stop using and delete this, or if that is not possible move out of libstore to libcmd. -struct DerivedPathWithHints : _DerivedPathWithHintsRaw { - using Raw = _DerivedPathWithHintsRaw; +struct BuiltPath : _BuiltPathRaw { + using Raw = _BuiltPathRaw; using Raw::Raw; using Opaque = DerivedPathOpaque; - using Built = DerivedPathWithHintsBuilt; + using Built = BuiltPathBuilt; inline const Raw & raw() const { return static_cast<const Raw &>(*this); } + StorePathSet outPaths() const; + RealisedPath::Set toRealisedPaths(Store & store) const; + }; -typedef std::vector<DerivedPathWithHints> DerivedPathsWithHints; +typedef std::vector<DerivedPath> DerivedPaths; +typedef std::vector<BuiltPath> BuiltPaths; -nlohmann::json derivedPathsWithHintsToJSON(const DerivedPathsWithHints & buildables, ref<Store> store); +nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store); } diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 514ab3bf9..2cf35ec83 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -7,7 +7,7 @@ #include "finally.hh" #include "callback.hh" -#ifdef ENABLE_S3 +#if ENABLE_S3 #include <aws/core/client/ClientConfiguration.h> #endif @@ -665,7 +665,7 @@ struct curlFileTransfer : public FileTransfer writeFull(wakeupPipe.writeSide.get(), " "); } -#ifdef ENABLE_S3 +#if ENABLE_S3 std::tuple<std::string, std::string, Store::Params> parseS3Uri(std::string uri) { auto [path, params] = splitUriAndParams(uri); @@ -688,7 +688,7 @@ struct curlFileTransfer : public FileTransfer if (hasPrefix(request.uri, "s3://")) { // FIXME: do this on a worker thread try { -#ifdef ENABLE_S3 +#if ENABLE_S3 auto [bucketName, key, params] = parseS3Uri(request.uri); std::string profile = get(params, "profile").value_or(""); diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index bc692ca42..5a62c6529 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -775,7 +775,7 @@ void 
LocalStore::collectGarbage(const GCOptions & options, GCResults & results) try { - AutoCloseDir dir(opendir(realStoreDir.c_str())); + AutoCloseDir dir(opendir(realStoreDir.get().c_str())); if (!dir) throw SysError("opening directory '%1%'", realStoreDir); /* Read the store and immediately delete all paths that @@ -856,7 +856,7 @@ void LocalStore::autoGC(bool sync) return std::stoll(readFile(*fakeFreeSpaceFile)); struct statvfs st; - if (statvfs(realStoreDir.c_str(), &st)) + if (statvfs(realStoreDir.get().c_str(), &st)) throw SysError("getting filesystem info about '%s'", realStoreDir); return (uint64_t) st.f_bavail * st.f_frsize; diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 6f8749254..dd570cd63 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -617,8 +617,10 @@ public: Strings{"https://cache.nixos.org/"}, "substituters", R"( - A list of URLs of substituters, separated by whitespace. The default - is `https://cache.nixos.org`. + A list of URLs of substituters, separated by whitespace. Substituters + are tried based on their Priority value, which each substituter can set + independently. Lower value means higher priority. + The default is `https://cache.nixos.org`, with a Priority of 40. )", {"binary-caches"}}; diff --git a/src/libstore/local-fs-store.hh b/src/libstore/local-fs-store.hh index 55941b771..f8b19d00d 100644 --- a/src/libstore/local-fs-store.hh +++ b/src/libstore/local-fs-store.hh @@ -18,6 +18,9 @@ struct LocalFSStoreConfig : virtual StoreConfig const PathSetting logDir{(StoreConfig*) this, false, rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir, "log", "directory where Nix will store state"}; + const PathSetting realStoreDir{(StoreConfig*) this, false, + rootDir != "" ? rootDir + "/nix/store" : storeDir, "real", + "physical path to the Nix store"}; }; class LocalFSStore : public virtual LocalFSStoreConfig, public virtual Store @@ -34,7 +37,7 @@ public: /* Register a permanent GC root. 
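Note on the LocalFSStoreConfig change above: realStoreDir now lives next to stateDir and logDir as a PathSetting, and getRealStoreDir() returns it, so stores created with a rootDir map logical store paths onto their physical location. A sketch of that mapping, with hypothetical paths not taken from the patch and only mirroring LocalFSStore::toRealPath conceptually:

    // Sketch only: mapping a logical store path to its on-disk location
    // when the store is rooted somewhere else (e.g. a chroot store).
    #include <cassert>
    #include <string>

    std::string toRealPath(const std::string & storePath,
                           const std::string & storeDir,      // e.g. "/nix/store"
                           const std::string & realStoreDir)  // e.g. "/home/alice/chroot/nix/store"
    {
        // Assumes storePath starts with storeDir.
        return realStoreDir + storePath.substr(storeDir.size());
    }

    int main()
    {
        assert(toRealPath("/nix/store/abc-hello", "/nix/store",
                          "/home/alice/chroot/nix/store")
               == "/home/alice/chroot/nix/store/abc-hello");
    }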
*/ Path addPermRoot(const StorePath & storePath, const Path & gcRoot); - virtual Path getRealStoreDir() { return storeDir; } + virtual Path getRealStoreDir() { return realStoreDir; } Path toRealPath(const Path & storePath) override { diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 83daa7506..d7c7f8e1d 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -53,12 +53,15 @@ struct LocalStore::State::Stmts { SQLiteStmt InvalidatePath; SQLiteStmt AddDerivationOutput; SQLiteStmt RegisterRealisedOutput; + SQLiteStmt UpdateRealisedOutput; SQLiteStmt QueryValidDerivers; SQLiteStmt QueryDerivationOutputs; SQLiteStmt QueryRealisedOutput; SQLiteStmt QueryAllRealisedOutputs; SQLiteStmt QueryPathFromHashPart; SQLiteStmt QueryValidPaths; + SQLiteStmt QueryRealisationReferences; + SQLiteStmt AddRealisationReference; }; int getSchema(Path schemaPath) @@ -76,7 +79,7 @@ int getSchema(Path schemaPath) void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd) { - const int nixCASchemaVersion = 1; + const int nixCASchemaVersion = 2; int curCASchema = getSchema(schemaPath); if (curCASchema != nixCASchemaVersion) { if (curCASchema > nixCASchemaVersion) { @@ -94,7 +97,39 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd) #include "ca-specific-schema.sql.gen.hh" ; db.exec(schema); + curCASchema = nixCASchemaVersion; } + + if (curCASchema < 2) { + SQLiteTxn txn(db); + // Ugly little sql dance to add a new `id` column and make it the primary key + db.exec(R"( + create table Realisations2 ( + id integer primary key autoincrement not null, + drvPath text not null, + outputName text not null, -- symbolic output id, usually "out" + outputPath integer not null, + signatures text, -- space-separated list + foreign key (outputPath) references ValidPaths(id) on delete cascade + ); + insert into Realisations2 (drvPath, outputName, outputPath, signatures) + select drvPath, outputName, outputPath, signatures from Realisations; + drop table Realisations; + alter table Realisations2 rename to Realisations; + )"); + db.exec(R"( + create index if not exists IndexRealisations on Realisations(drvPath, outputName); + + create table if not exists RealisationsRefs ( + referrer integer not null, + realisationReference integer, + foreign key (referrer) references Realisations(id) on delete cascade, + foreign key (realisationReference) references Realisations(id) on delete restrict + ); + )"); + txn.commit(); + } + writeFile(schemaPath, fmt("%d", nixCASchemaVersion)); lockFile(lockFd.get(), ltRead, true); } @@ -106,9 +141,6 @@ LocalStore::LocalStore(const Params & params) , LocalStoreConfig(params) , Store(params) , LocalFSStore(params) - , realStoreDir_{this, false, rootDir != "" ? 
rootDir + "/nix/store" : storeDir, "real", - "physical path to the Nix store"} - , realStoreDir(realStoreDir_) , dbDir(stateDir + "/db") , linksDir(realStoreDir + "/.links") , reservedPath(dbDir + "/reserved") @@ -153,13 +185,13 @@ LocalStore::LocalStore(const Params & params) printError("warning: the group '%1%' specified in 'build-users-group' does not exist", settings.buildUsersGroup); else { struct stat st; - if (stat(realStoreDir.c_str(), &st)) + if (stat(realStoreDir.get().c_str(), &st)) throw SysError("getting attributes of path '%1%'", realStoreDir); if (st.st_uid != 0 || st.st_gid != gr->gr_gid || (st.st_mode & ~S_IFMT) != perm) { - if (chown(realStoreDir.c_str(), 0, gr->gr_gid) == -1) + if (chown(realStoreDir.get().c_str(), 0, gr->gr_gid) == -1) throw SysError("changing ownership of path '%1%'", realStoreDir); - if (chmod(realStoreDir.c_str(), perm) == -1) + if (chmod(realStoreDir.get().c_str(), perm) == -1) throw SysError("changing permissions on path '%1%'", realStoreDir); } } @@ -314,9 +346,18 @@ LocalStore::LocalStore(const Params & params) values (?, ?, (select id from ValidPaths where path = ?), ?) ; )"); + state->stmts->UpdateRealisedOutput.create(state->db, + R"( + update Realisations + set signatures = ? + where + drvPath = ? and + outputName = ? + ; + )"); state->stmts->QueryRealisedOutput.create(state->db, R"( - select Output.path, Realisations.signatures from Realisations + select Realisations.id, Output.path, Realisations.signatures from Realisations inner join ValidPaths as Output on Output.id = Realisations.outputPath where drvPath = ? and outputName = ? ; @@ -328,6 +369,19 @@ LocalStore::LocalStore(const Params & params) where drvPath = ? ; )"); + state->stmts->QueryRealisationReferences.create(state->db, + R"( + select drvPath, outputName from Realisations + join RealisationsRefs on realisationReference = Realisations.id + where referrer = ?; + )"); + state->stmts->AddRealisationReference.create(state->db, + R"( + insert or replace into RealisationsRefs (referrer, realisationReference) + values ( + ?, + (select id from Realisations where drvPath = ? and outputName = ?)); + )"); } } @@ -437,14 +491,14 @@ void LocalStore::makeStoreWritable() if (getuid() != 0) return; /* Check if /nix/store is on a read-only mount. 
*/ struct statvfs stat; - if (statvfs(realStoreDir.c_str(), &stat) != 0) + if (statvfs(realStoreDir.get().c_str(), &stat) != 0) throw SysError("getting info about the Nix store mount point"); if (stat.f_flag & ST_RDONLY) { if (unshare(CLONE_NEWNS) == -1) throw SysError("setting up a private mount namespace"); - if (mount(0, realStoreDir.c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1) + if (mount(0, realStoreDir.get().c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1) throw SysError("remounting %1% writable", realStoreDir); } #endif @@ -664,14 +718,54 @@ void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag check void LocalStore::registerDrvOutput(const Realisation & info) { settings.requireExperimentalFeature("ca-derivations"); - auto state(_state.lock()); retrySQLite<void>([&]() { - state->stmts->RegisterRealisedOutput.use() - (info.id.strHash()) - (info.id.outputName) - (printStorePath(info.outPath)) - (concatStringsSep(" ", info.signatures)) - .exec(); + auto state(_state.lock()); + if (auto oldR = queryRealisation_(*state, info.id)) { + if (info.isCompatibleWith(*oldR)) { + auto combinedSignatures = oldR->signatures; + combinedSignatures.insert(info.signatures.begin(), + info.signatures.end()); + state->stmts->UpdateRealisedOutput.use() + (concatStringsSep(" ", combinedSignatures)) + (info.id.strHash()) + (info.id.outputName) + .exec(); + } else { + throw Error("Trying to register a realisation of '%s', but we already " + "have another one locally.\n" + "Local: %s\n" + "Remote: %s", + info.id.to_string(), + printStorePath(oldR->outPath), + printStorePath(info.outPath) + ); + } + } else { + state->stmts->RegisterRealisedOutput.use() + (info.id.strHash()) + (info.id.outputName) + (printStorePath(info.outPath)) + (concatStringsSep(" ", info.signatures)) + .exec(); + } + uint64_t myId = state->db.getLastInsertedRowId(); + for (auto & [outputId, depPath] : info.dependentRealisations) { + auto localRealisation = queryRealisationCore_(*state, outputId); + if (!localRealisation) + throw Error("unable to register the derivation '%s' as it " + "depends on the non existent '%s'", + info.id.to_string(), outputId.to_string()); + if (localRealisation->second.outPath != depPath) + throw Error("unable to register the derivation '%s' as it " + "depends on a realisation of '%s' that doesn’t" + "match what we have locally", + info.id.to_string(), outputId.to_string()); + state->stmts->AddRealisationReference.use() + (myId) + (outputId.strHash()) + (outputId.outputName) + .exec(); + } }); } @@ -1152,17 +1246,13 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source, /* While restoring the path from the NAR, compute the hash of the NAR. 
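Note on LocalStore::registerDrvOutput above: re-registering a realisation that is compatible with the one already in the database (same output path for the same DrvOutput) now merges the two signature sets via UpdateRealisedOutput, while an incompatible one (a different output path) is rejected with an error. In addition, every entry in dependentRealisations must already be registered locally and must map to the same output path, otherwise registration fails; this is what keeps the RealisationsRefs foreign keys consistent.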
*/ - std::unique_ptr<AbstractHashSink> hashSink; - if (!info.ca.has_value() || !info.references.count(info.path)) - hashSink = std::make_unique<HashSink>(htSHA256); - else - hashSink = std::make_unique<HashModuloSink>(htSHA256, std::string(info.path.hashPart())); + HashSink hashSink(htSHA256); - TeeSource wrapperSource { source, *hashSink }; + TeeSource wrapperSource { source, hashSink }; restorePath(realPath, wrapperSource); - auto hashResult = hashSink->finish(); + auto hashResult = hashSink.finish(); if (hashResult.first != info.narHash) throw Error("hash mismatch importing path '%s';\n specified: %s\n got: %s", @@ -1172,6 +1262,31 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source, throw Error("size mismatch importing path '%s';\n specified: %s\n got: %s", printStorePath(info.path), info.narSize, hashResult.second); + if (info.ca) { + if (auto foHash = std::get_if<FixedOutputHash>(&*info.ca)) { + auto actualFoHash = hashCAPath( + foHash->method, + foHash->hash.type, + info.path + ); + if (foHash->hash != actualFoHash.hash) { + throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s", + printStorePath(info.path), + foHash->hash.to_string(Base32, true), + actualFoHash.hash.to_string(Base32, true)); + } + } + if (auto textHash = std::get_if<TextHash>(&*info.ca)) { + auto actualTextHash = hashString(htSHA256, readFile(realPath)); + if (textHash->hash != actualTextHash) { + throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s", + printStorePath(info.path), + textHash->hash.to_string(Base32, true), + actualTextHash.to_string(Base32, true)); + } + } + } + autoGC(); canonicalisePathMetaData(realPath, -1); @@ -1440,14 +1555,10 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) /* Check the content hash (optionally - slow). */ printMsg(lvlTalkative, "checking contents of '%s'", printStorePath(i)); - std::unique_ptr<AbstractHashSink> hashSink; - if (!info->ca || !info->references.count(info->path)) - hashSink = std::make_unique<HashSink>(info->narHash.type); - else - hashSink = std::make_unique<HashModuloSink>(info->narHash.type, std::string(info->path.hashPart())); + auto hashSink = HashSink(info->narHash.type); - dumpPath(Store::toRealPath(i), *hashSink); - auto current = hashSink->finish(); + dumpPath(Store::toRealPath(i), hashSink); + auto current = hashSink.finish(); if (info->narHash != nullHash && info->narHash != current.first) { printError("path '%s' was modified! 
expected hash '%s', got '%s'", @@ -1665,19 +1776,97 @@ void LocalStore::createUser(const std::string & userName, uid_t userId) } } -std::optional<const Realisation> LocalStore::queryRealisation( - const DrvOutput& id) { - typedef std::optional<const Realisation> Ret; - return retrySQLite<Ret>([&]() -> Ret { +std::optional<std::pair<int64_t, Realisation>> LocalStore::queryRealisationCore_( + LocalStore::State & state, + const DrvOutput & id) +{ + auto useQueryRealisedOutput( + state.stmts->QueryRealisedOutput.use() + (id.strHash()) + (id.outputName)); + if (!useQueryRealisedOutput.next()) + return std::nullopt; + auto realisationDbId = useQueryRealisedOutput.getInt(0); + auto outputPath = parseStorePath(useQueryRealisedOutput.getStr(1)); + auto signatures = + tokenizeString<StringSet>(useQueryRealisedOutput.getStr(2)); + + return {{ + realisationDbId, + Realisation{ + .id = id, + .outPath = outputPath, + .signatures = signatures, + } + }}; +} + +std::optional<const Realisation> LocalStore::queryRealisation_( + LocalStore::State & state, + const DrvOutput & id) +{ + auto maybeCore = queryRealisationCore_(state, id); + if (!maybeCore) + return std::nullopt; + auto [realisationDbId, res] = *maybeCore; + + std::map<DrvOutput, StorePath> dependentRealisations; + auto useRealisationRefs( + state.stmts->QueryRealisationReferences.use() + (realisationDbId)); + while (useRealisationRefs.next()) { + auto depId = DrvOutput { + Hash::parseAnyPrefixed(useRealisationRefs.getStr(0)), + useRealisationRefs.getStr(1), + }; + auto dependentRealisation = queryRealisationCore_(state, depId); + assert(dependentRealisation); // Enforced by the db schema + auto outputPath = dependentRealisation->second.outPath; + dependentRealisations.insert({depId, outputPath}); + } + + res.dependentRealisations = dependentRealisations; + + return { res }; +} + +std::optional<const Realisation> +LocalStore::queryRealisation(const DrvOutput & id) +{ + return retrySQLite<std::optional<const Realisation>>([&]() { auto state(_state.lock()); - auto use(state->stmts->QueryRealisedOutput.use()(id.strHash())( - id.outputName)); - if (!use.next()) - return std::nullopt; - auto outputPath = parseStorePath(use.getStr(0)); - auto signatures = tokenizeString<StringSet>(use.getStr(1)); - return Ret{Realisation{ - .id = id, .outPath = outputPath, .signatures = signatures}}; + return queryRealisation_(*state, id); }); } + +FixedOutputHash LocalStore::hashCAPath( + const FileIngestionMethod & method, const HashType & hashType, + const StorePath & path) +{ + return hashCAPath(method, hashType, Store::toRealPath(path), path.hashPart()); +} + +FixedOutputHash LocalStore::hashCAPath( + const FileIngestionMethod & method, + const HashType & hashType, + const Path & path, + const std::string_view pathHash +) +{ + HashModuloSink caSink ( hashType, std::string(pathHash) ); + switch (method) { + case FileIngestionMethod::Recursive: + dumpPath(path, caSink); + break; + case FileIngestionMethod::Flat: + readFile(path, caSink); + break; + } + auto hash = caSink.finish().first; + return FixedOutputHash{ + .method = method, + .hash = hash, + }; +} + } // namespace nix diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 26e034a82..a01d48c4b 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -83,9 +83,6 @@ private: public: - PathSetting realStoreDir_; - - const Path realStoreDir; const Path dbDir; const Path linksDir; const Path reservedPath; @@ -206,6 +203,8 @@ public: void registerDrvOutput(const Realisation 
& info, CheckSigsFlag checkSigs) override; void cacheDrvOutputMapping(State & state, const uint64_t deriver, const string & outputName, const StorePath & output); + std::optional<const Realisation> queryRealisation_(State & state, const DrvOutput & id); + std::optional<std::pair<int64_t, Realisation>> queryRealisationCore_(State & state, const DrvOutput & id); std::optional<const Realisation> queryRealisation(const DrvOutput&) override; private: @@ -279,10 +278,21 @@ private: void signPathInfo(ValidPathInfo & info); void signRealisation(Realisation &); - Path getRealStoreDir() override { return realStoreDir; } - void createUser(const std::string & userName, uid_t userId) override; + // XXX: Make a generic `Store` method + FixedOutputHash hashCAPath( + const FileIngestionMethod & method, + const HashType & hashType, + const StorePath & path); + + FixedOutputHash hashCAPath( + const FileIngestionMethod & method, + const HashType & hashType, + const Path & path, + const std::string_view pathHash + ); + friend struct LocalDerivationGoal; friend struct PathSubstitutionGoal; friend struct SubstitutionGoal; diff --git a/src/libstore/local.mk b/src/libstore/local.mk index cf0933705..b6652984c 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -9,7 +9,7 @@ libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc) libstore_LIBS = libutil libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread -ifneq ($(OS), FreeBSD) +ifeq ($(OS), Linux) libstore_LDFLAGS += -ldl endif diff --git a/src/libstore/machines.cc b/src/libstore/machines.cc index b42e5e434..9843ccf04 100644 --- a/src/libstore/machines.cc +++ b/src/libstore/machines.cc @@ -16,13 +16,18 @@ Machine::Machine(decltype(storeUri) storeUri, decltype(mandatoryFeatures) mandatoryFeatures, decltype(sshPublicHostKey) sshPublicHostKey) : storeUri( - // Backwards compatibility: if the URI is a hostname, - // prepend ssh://. + // Backwards compatibility: if the URI is schemeless, is not a path, + // and is not one of the special store connection words, prepend + // ssh://. storeUri.find("://") != std::string::npos - || hasPrefix(storeUri, "local") - || hasPrefix(storeUri, "remote") - || hasPrefix(storeUri, "auto") - || hasPrefix(storeUri, "/") + || storeUri.find("/") != std::string::npos + || storeUri == "auto" + || storeUri == "daemon" + || storeUri == "local" + || hasPrefix(storeUri, "auto?") + || hasPrefix(storeUri, "daemon?") + || hasPrefix(storeUri, "local?") + || hasPrefix(storeUri, "?") ? 
storeUri : "ssh://" + storeUri), systemTypes(systemTypes), diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index a99a2fc78..b4929b445 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -6,98 +6,73 @@ #include "thread-pool.hh" #include "topo-sort.hh" #include "callback.hh" +#include "closure.hh" namespace nix { - void Store::computeFSClosure(const StorePathSet & startPaths, StorePathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers) { - struct State - { - size_t pending; - StorePathSet & paths; - std::exception_ptr exc; - }; - - Sync<State> state_(State{0, paths_, 0}); - - std::function<void(const StorePath &)> enqueue; - - std::condition_variable done; - - enqueue = [&](const StorePath & path) -> void { - { - auto state(state_.lock()); - if (state->exc) return; - if (!state->paths.insert(path).second) return; - state->pending++; - } - - queryPathInfo(path, {[&](std::future<ref<const ValidPathInfo>> fut) { - // FIXME: calls to isValidPath() should be async - - try { - auto info = fut.get(); - - if (flipDirection) { - - StorePathSet referrers; - queryReferrers(path, referrers); - for (auto & ref : referrers) - if (ref != path) - enqueue(ref); - - if (includeOutputs) - for (auto & i : queryValidDerivers(path)) - enqueue(i); - - if (includeDerivers && path.isDerivation()) - for (auto & i : queryDerivationOutputs(path)) - if (isValidPath(i) && queryPathInfo(i)->deriver == path) - enqueue(i); - - } else { - - for (auto & ref : info->references) - if (ref != path) - enqueue(ref); - - if (includeOutputs && path.isDerivation()) - for (auto & i : queryDerivationOutputs(path)) - if (isValidPath(i)) enqueue(i); - - if (includeDerivers && info->deriver && isValidPath(*info->deriver)) - enqueue(*info->deriver); - - } - - { - auto state(state_.lock()); - assert(state->pending); - if (!--state->pending) done.notify_one(); - } - - } catch (...) 
{ - auto state(state_.lock()); - if (!state->exc) state->exc = std::current_exception(); - assert(state->pending); - if (!--state->pending) done.notify_one(); - }; - }}); - }; - - for (auto & startPath : startPaths) - enqueue(startPath); - - { - auto state(state_.lock()); - while (state->pending) state.wait(done); - if (state->exc) std::rethrow_exception(state->exc); - } + std::function<std::set<StorePath>(const StorePath & path, std::future<ref<const ValidPathInfo>> &)> queryDeps; + if (flipDirection) + queryDeps = [&](const StorePath& path, + std::future<ref<const ValidPathInfo>> & fut) { + StorePathSet res; + StorePathSet referrers; + queryReferrers(path, referrers); + for (auto& ref : referrers) + if (ref != path) + res.insert(ref); + + if (includeOutputs) + for (auto& i : queryValidDerivers(path)) + res.insert(i); + + if (includeDerivers && path.isDerivation()) + for (auto& [_, maybeOutPath] : queryPartialDerivationOutputMap(path)) + if (maybeOutPath && isValidPath(*maybeOutPath)) + res.insert(*maybeOutPath); + return res; + }; + else + queryDeps = [&](const StorePath& path, + std::future<ref<const ValidPathInfo>> & fut) { + StorePathSet res; + auto info = fut.get(); + for (auto& ref : info->references) + if (ref != path) + res.insert(ref); + + if (includeOutputs && path.isDerivation()) + for (auto& [_, maybeOutPath] : queryPartialDerivationOutputMap(path)) + if (maybeOutPath && isValidPath(*maybeOutPath)) + res.insert(*maybeOutPath); + + if (includeDerivers && info->deriver && isValidPath(*info->deriver)) + res.insert(*info->deriver); + return res; + }; + + computeClosure<StorePath>( + startPaths, paths_, + [&](const StorePath& path, + std::function<void(std::promise<std::set<StorePath>>&)> + processEdges) { + std::promise<std::set<StorePath>> promise; + std::function<void(std::future<ref<const ValidPathInfo>>)> + getDependencies = + [&](std::future<ref<const ValidPathInfo>> fut) { + try { + promise.set_value(queryDeps(path, fut)); + } catch (...) 
{ + promise.set_exception(std::current_exception()); + } + }; + queryPathInfo(path, getDependencies); + processEdges(promise); + }); } - void Store::computeFSClosure(const StorePath & startPath, StorePathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers) { @@ -279,5 +254,44 @@ StorePaths Store::topoSortPaths(const StorePathSet & paths) }}); } +std::map<DrvOutput, StorePath> drvOutputReferences( + const std::set<Realisation> & inputRealisations, + const StorePathSet & pathReferences) +{ + std::map<DrvOutput, StorePath> res; + for (const auto & input : inputRealisations) { + if (pathReferences.count(input.outPath)) { + res.insert({input.id, input.outPath}); + } + } + + return res; +} + +std::map<DrvOutput, StorePath> drvOutputReferences( + Store & store, + const Derivation & drv, + const StorePath & outputPath) +{ + std::set<Realisation> inputRealisations; + + for (const auto& [inputDrv, outputNames] : drv.inputDrvs) { + auto outputHashes = + staticOutputHashes(store, store.readDerivation(inputDrv)); + for (const auto& outputName : outputNames) { + auto thisRealisation = store.queryRealisation( + DrvOutput{outputHashes.at(outputName), outputName}); + if (!thisRealisation) + throw Error( + "output '%s' of derivation '%s' isn’t built", outputName, + store.printStorePath(inputDrv)); + inputRealisations.insert(*thisRealisation); + } + } + + auto info = store.queryPathInfo(outputPath); + + return drvOutputReferences(Realisation::closure(store, inputRealisations), info->references); +} } diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 1d8d2d57e..9dd81ddfb 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -4,6 +4,7 @@ #include "globals.hh" #include <sqlite3.h> +#include <nlohmann/json.hpp> namespace nix { @@ -38,6 +39,15 @@ create table if not exists NARs ( foreign key (cache) references BinaryCaches(id) on delete cascade ); +create table if not exists Realisations ( + cache integer not null, + outputId text not null, + content blob, -- Json serialisation of the realisation, or null if the realisation is absent + timestamp integer not null, + primary key (cache, outputId), + foreign key (cache) references BinaryCaches(id) on delete cascade +); + create table if not exists LastPurge ( dummy text primary key, value integer @@ -63,7 +73,9 @@ public: struct State { SQLite db; - SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR, purgeCache; + SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, + queryNAR, insertRealisation, insertMissingRealisation, + queryRealisation, purgeCache; std::map<std::string, Cache> caches; }; @@ -98,6 +110,26 @@ public: state->queryNAR.create(state->db, "select present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))"); + state->insertRealisation.create(state->db, + R"( + insert or replace into Realisations(cache, outputId, content, timestamp) + values (?, ?, ?, ?) + )"); + + state->insertMissingRealisation.create(state->db, + R"( + insert or replace into Realisations(cache, outputId, timestamp) + values (?, ?, ?) + )"); + + state->queryRealisation.create(state->db, + R"( + select content from Realisations + where cache = ? and outputId = ? and + ((content is null and timestamp > ?) 
or + (content is not null and timestamp > ?)) + )"); + /* Periodically purge expired entries from the database. */ retrySQLite<void>([&]() { auto now = time(0); @@ -212,6 +244,38 @@ public: }); } + std::pair<Outcome, std::shared_ptr<Realisation>> lookupRealisation( + const std::string & uri, const DrvOutput & id) override + { + return retrySQLite<std::pair<Outcome, std::shared_ptr<Realisation>>>( + [&]() -> std::pair<Outcome, std::shared_ptr<Realisation>> { + auto state(_state.lock()); + + auto & cache(getCache(*state, uri)); + + auto now = time(0); + + auto queryRealisation(state->queryRealisation.use() + (cache.id) + (id.to_string()) + (now - settings.ttlNegativeNarInfoCache) + (now - settings.ttlPositiveNarInfoCache)); + + if (!queryRealisation.next()) + return {oUnknown, 0}; + + if (queryRealisation.isNull(0)) + return {oInvalid, 0}; + + auto realisation = + std::make_shared<Realisation>(Realisation::fromJSON( + nlohmann::json::parse(queryRealisation.getStr(0)), + "Local disk cache")); + + return {oValid, realisation}; + }); + } + void upsertNarInfo( const std::string & uri, const std::string & hashPart, std::shared_ptr<const ValidPathInfo> info) override @@ -251,6 +315,39 @@ public: } }); } + + void upsertRealisation( + const std::string & uri, + const Realisation & realisation) override + { + retrySQLite<void>([&]() { + auto state(_state.lock()); + + auto & cache(getCache(*state, uri)); + + state->insertRealisation.use() + (cache.id) + (realisation.id.to_string()) + (realisation.toJSON().dump()) + (time(0)).exec(); + }); + + } + + virtual void upsertAbsentRealisation( + const std::string & uri, + const DrvOutput & id) override + { + retrySQLite<void>([&]() { + auto state(_state.lock()); + + auto & cache(getCache(*state, uri)); + state->insertMissingRealisation.use() + (cache.id) + (id.to_string()) + (time(0)).exec(); + }); + } }; ref<NarInfoDiskCache> getNarInfoDiskCache() diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh index 04de2c5eb..2dcaa76a4 100644 --- a/src/libstore/nar-info-disk-cache.hh +++ b/src/libstore/nar-info-disk-cache.hh @@ -2,6 +2,7 @@ #include "ref.hh" #include "nar-info.hh" +#include "realisation.hh" namespace nix { @@ -29,6 +30,15 @@ public: virtual void upsertNarInfo( const std::string & uri, const std::string & hashPart, std::shared_ptr<const ValidPathInfo> info) = 0; + + virtual void upsertRealisation( + const std::string & uri, + const Realisation & realisation) = 0; + virtual void upsertAbsentRealisation( + const std::string & uri, + const DrvOutput & id) = 0; + virtual std::pair<Outcome, std::shared_ptr<Realisation>> lookupRealisation( + const std::string & uri, const DrvOutput & id) = 0; }; /* Return a singleton cache object that can be used concurrently by diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 78d587139..d95e54af1 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -198,7 +198,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats, /* Make the containing directory writable, but only if it's not the store itself (we don't want or need to mess with its permissions). 
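Note on the NarInfoDiskCache additions above: realisations fetched from binary caches are now memoised in the same local SQLite cache as .narinfo data, with a null content column recording a negative ("absent") result. lookupRealisation distinguishes oValid, oInvalid and oUnknown using the existing ttlPositiveNarInfoCache and ttlNegativeNarInfoCache settings, which is what lets BinaryCacheStore::queryRealisation (earlier in this patch) skip the network round-trip for recently seen output ids.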
*/ - bool mustToggle = dirOf(path) != realStoreDir; + bool mustToggle = dirOf(path) != realStoreDir.get(); if (mustToggle) makeWritable(dirOf(path)); /* When we're done, make the directory read-only again and reset diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index c5c3ae3dc..5e383a9a4 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -91,6 +91,8 @@ StringSet ParsedDerivation::getRequiredSystemFeatures() const StringSet res; for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings())) res.insert(i); + if (!derivationHasKnownOutputPaths(drv.type())) + res.insert("ca-derivations"); return res; } diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc index 638065547..eadec594c 100644 --- a/src/libstore/realisation.cc +++ b/src/libstore/realisation.cc @@ -1,5 +1,6 @@ #include "realisation.hh" #include "store-api.hh" +#include "closure.hh" #include <nlohmann/json.hpp> namespace nix { @@ -21,11 +22,52 @@ std::string DrvOutput::to_string() const { return strHash() + "!" + outputName; } +std::set<Realisation> Realisation::closure(Store & store, const std::set<Realisation> & startOutputs) +{ + std::set<Realisation> res; + Realisation::closure(store, startOutputs, res); + return res; +} + +void Realisation::closure(Store & store, const std::set<Realisation> & startOutputs, std::set<Realisation> & res) +{ + auto getDeps = [&](const Realisation& current) -> std::set<Realisation> { + std::set<Realisation> res; + for (auto& [currentDep, _] : current.dependentRealisations) { + if (auto currentRealisation = store.queryRealisation(currentDep)) + res.insert(*currentRealisation); + else + throw Error( + "Unrealised derivation '%s'", currentDep.to_string()); + } + return res; + }; + + computeClosure<Realisation>( + startOutputs, res, + [&](const Realisation& current, + std::function<void(std::promise<std::set<Realisation>>&)> + processEdges) { + std::promise<std::set<Realisation>> promise; + try { + auto res = getDeps(current); + promise.set_value(res); + } catch (...) 
{ + promise.set_exception(std::current_exception()); + } + return processEdges(promise); + }); +} + nlohmann::json Realisation::toJSON() const { + auto jsonDependentRealisations = nlohmann::json::object(); + for (auto & [depId, depOutPath] : dependentRealisations) + jsonDependentRealisations.emplace(depId.to_string(), depOutPath.to_string()); return nlohmann::json{ {"id", id.to_string()}, {"outPath", outPath.to_string()}, {"signatures", signatures}, + {"dependentRealisations", jsonDependentRealisations}, }; } @@ -51,10 +93,16 @@ Realisation Realisation::fromJSON( if (auto signaturesIterator = json.find("signatures"); signaturesIterator != json.end()) signatures.insert(signaturesIterator->begin(), signaturesIterator->end()); + std::map <DrvOutput, StorePath> dependentRealisations; + if (auto jsonDependencies = json.find("dependentRealisations"); jsonDependencies != json.end()) + for (auto & [jsonDepId, jsonDepOutPath] : jsonDependencies->get<std::map<std::string, std::string>>()) + dependentRealisations.insert({DrvOutput::parse(jsonDepId), StorePath(jsonDepOutPath)}); + return Realisation{ .id = DrvOutput::parse(getField("id")), .outPath = StorePath(getField("outPath")), .signatures = signatures, + .dependentRealisations = dependentRealisations, }; } @@ -92,6 +140,16 @@ StorePath RealisedPath::path() const { return std::visit([](auto && arg) { return arg.getPath(); }, raw); } +bool Realisation::isCompatibleWith(const Realisation & other) const +{ + assert (id == other.id); + if (outPath == other.outPath) { + assert(dependentRealisations == other.dependentRealisations); + return true; + } + return false; +} + void RealisedPath::closure( Store& store, const RealisedPath::Set& startPaths, diff --git a/src/libstore/realisation.hh b/src/libstore/realisation.hh index f5049c9e9..05d2bc44f 100644 --- a/src/libstore/realisation.hh +++ b/src/libstore/realisation.hh @@ -28,6 +28,14 @@ struct Realisation { StringSet signatures; + /** + * The realisations that are required for the current one to be valid. 
+ * + * When importing this realisation, the store will first check that all its + * dependencies exist, and map to the correct output path + */ + std::map<DrvOutput, StorePath> dependentRealisations; + nlohmann::json toJSON() const; static Realisation fromJSON(const nlohmann::json& json, const std::string& whence); @@ -36,6 +44,11 @@ struct Realisation { bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const; size_t checkSignatures(const PublicKeys & publicKeys) const; + static std::set<Realisation> closure(Store &, const std::set<Realisation> &); + static void closure(Store &, const std::set<Realisation> &, std::set<Realisation>& res); + + bool isCompatibleWith(const Realisation & other) const; + StorePath getPath() const { return outPath; } GENERATE_CMP(Realisation, me->id, me->outPath); diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index d9b6e9488..aec243637 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -653,8 +653,12 @@ void RemoteStore::registerDrvOutput(const Realisation & info) { auto conn(getConnection()); conn->to << wopRegisterDrvOutput; - conn->to << info.id.to_string(); - conn->to << std::string(info.outPath.to_string()); + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) { + conn->to << info.id.to_string(); + conn->to << std::string(info.outPath.to_string()); + } else { + worker_proto::write(*this, conn->to, info); + } conn.processStderr(); } @@ -664,10 +668,17 @@ std::optional<const Realisation> RemoteStore::queryRealisation(const DrvOutput & conn->to << wopQueryRealisation; conn->to << id.to_string(); conn.processStderr(); - auto outPaths = worker_proto::read(*this, conn->from, Phantom<std::set<StorePath>>{}); - if (outPaths.empty()) - return std::nullopt; - return {Realisation{.id = id, .outPath = *outPaths.begin()}}; + if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) { + auto outPaths = worker_proto::read(*this, conn->from, Phantom<std::set<StorePath>>{}); + if (outPaths.empty()) + return std::nullopt; + return {Realisation{.id = id, .outPath = *outPaths.begin()}}; + } else { + auto realisations = worker_proto::read(*this, conn->from, Phantom<std::set<Realisation>>{}); + if (realisations.empty()) + return std::nullopt; + return *realisations.begin(); + } } static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, const std::vector<DerivedPath> & reqs) diff --git a/src/libstore/sandbox-defaults.sb b/src/libstore/sandbox-defaults.sb index 351037822..2bb1ea130 100644 --- a/src/libstore/sandbox-defaults.sb +++ b/src/libstore/sandbox-defaults.sb @@ -32,7 +32,9 @@ (literal "/tmp") (subpath TMPDIR)) ; Some packages like to read the system version. -(allow file-read* (literal "/System/Library/CoreServices/SystemVersion.plist")) +(allow file-read* + (literal "/System/Library/CoreServices/SystemVersion.plist") + (literal "/System/Library/CoreServices/SystemVersionCompat.plist")) ; Without this line clang cannot write to /dev/null, breaking some configure tests. 
(allow file-read-metadata (literal "/dev")) diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 93fcb068f..6736adb24 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -337,6 +337,13 @@ ValidPathInfo Store::addToStoreSlow(std::string_view name, const Path & srcPath, return info; } +StringSet StoreConfig::getDefaultSystemFeatures() +{ + auto res = settings.systemFeatures.get(); + if (settings.isExperimentalFeatureEnabled("ca-derivations")) + res.insert("ca-derivations"); + return res; +} Store::Store(const Params & params) : StoreConfig(params) @@ -780,20 +787,39 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor RepairFlag repair, CheckSigsFlag checkSigs, SubstituteFlag substitute) { StorePathSet storePaths; - std::set<Realisation> realisations; + std::set<Realisation> toplevelRealisations; for (auto & path : paths) { storePaths.insert(path.path()); if (auto realisation = std::get_if<Realisation>(&path.raw)) { settings.requireExperimentalFeature("ca-derivations"); - realisations.insert(*realisation); + toplevelRealisations.insert(*realisation); } } auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute); + + ThreadPool pool; + try { - for (auto & realisation : realisations) { - dstStore->registerDrvOutput(realisation, checkSigs); - } - } catch (MissingExperimentalFeature & e) { + // Copy the realisation closure + processGraph<Realisation>( + pool, Realisation::closure(*srcStore, toplevelRealisations), + [&](const Realisation& current) -> std::set<Realisation> { + std::set<Realisation> children; + for (const auto& [drvOutput, _] : current.dependentRealisations) { + auto currentChild = srcStore->queryRealisation(drvOutput); + if (!currentChild) + throw Error( + "Incomplete realisation closure: '%s' is a " + "dependency of '%s' but isn’t registered", + drvOutput.to_string(), current.id.to_string()); + children.insert(*currentChild); + } + return children; + }, + [&](const Realisation& current) -> void { + dstStore->registerDrvOutput(current, checkSigs); + }); + } catch (MissingExperimentalFeature& e) { // Don't fail if the remote doesn't support CA derivations is it might // not be within our control to change that, and we might still want // to at least copy the output paths. 
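The `copyPaths` change above now walks each realisation's `dependentRealisations` and registers the whole closure on the destination store, instead of only the top-level realisations. As a rough illustration of the metadata involved (a sketch only, not output produced by this patch), the serialisation added in `Realisation::toJSON` earlier in this diff can be inspected with the `nix realisation info` command that the new `ca/substitute.sh` test exercises; the attribute name comes from `tests/ca/content-addressed.nix` and all hashes are elided placeholders:

```console
# Sketch: inspect a content-addressed realisation and its dependencies
# (requires the ca-derivations experimental feature; hashes are placeholders)
$ nix realisation info --file ./content-addressed.nix dependentCA --json
[
  {
    "id": "sha256:…!out",
    "outPath": "…-dependent",
    "signatures": [],
    "dependentRealisations": {
      "sha256:…!out": "…-rootCA"
    }
  }
]
```

The keys mirror the `toJSON`/`fromJSON` pair above: `id` is a `DrvOutput` rendered as `<drv hash>!<output name>`, and `dependentRealisations` maps each dependency's `DrvOutput` to the output path it must resolve to.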
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index f66298991..9657d2adf 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -180,6 +180,8 @@ struct StoreConfig : public Config StoreConfig() = delete; + StringSet getDefaultSystemFeatures(); + virtual ~StoreConfig() { } virtual const std::string name() = 0; @@ -196,7 +198,7 @@ struct StoreConfig : public Config Setting<bool> wantMassQuery{this, false, "want-mass-query", "whether this substituter can be queried efficiently for path validity"}; - Setting<StringSet> systemFeatures{this, settings.systemFeatures, + Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(), "system-features", "Optional features that the system this store builds on implements (like \"kvm\")."}; @@ -864,4 +866,9 @@ std::pair<std::string, Store::Params> splitUriAndParams(const std::string & uri) std::optional<ContentAddress> getDerivationCA(const BasicDerivation & drv); +std::map<DrvOutput, StorePath> drvOutputReferences( + Store & store, + const Derivation & drv, + const StorePath & outputPath); + } diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index fdd692cf0..e89183d40 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -9,7 +9,7 @@ namespace nix { #define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_2 0x6478696f -#define PROTOCOL_VERSION (1 << 8 | 30) +#define PROTOCOL_VERSION (1 << 8 | 31) #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) diff --git a/src/libutil/closure.hh b/src/libutil/closure.hh new file mode 100644 index 000000000..779b9b2d5 --- /dev/null +++ b/src/libutil/closure.hh @@ -0,0 +1,69 @@ +#include <set> +#include <future> +#include "sync.hh" + +using std::set; + +namespace nix { + +template<typename T> +using GetEdgesAsync = std::function<void(const T &, std::function<void(std::promise<set<T>> &)>)>; + +template<typename T> +void computeClosure( + const set<T> startElts, + set<T> & res, + GetEdgesAsync<T> getEdgesAsync +) +{ + struct State + { + size_t pending; + set<T> & res; + std::exception_ptr exc; + }; + + Sync<State> state_(State{0, res, 0}); + + std::function<void(const T &)> enqueue; + + std::condition_variable done; + + enqueue = [&](const T & current) -> void { + { + auto state(state_.lock()); + if (state->exc) return; + if (!state->res.insert(current).second) return; + state->pending++; + } + + getEdgesAsync(current, [&](std::promise<set<T>> & prom) { + try { + auto children = prom.get_future().get(); + for (auto & child : children) + enqueue(child); + { + auto state(state_.lock()); + assert(state->pending); + if (!--state->pending) done.notify_one(); + } + } catch (...) { + auto state(state_.lock()); + if (!state->exc) state->exc = std::current_exception(); + assert(state->pending); + if (!--state->pending) done.notify_one(); + }; + }); + }; + + for (auto & startElt : startElts) + enqueue(startElt); + + { + auto state(state_.lock()); + while (state->pending) state.wait(done); + if (state->exc) std::rethrow_exception(state->exc); + } +} + +} diff --git a/src/libutil/comparator.hh b/src/libutil/comparator.hh index 0315dc506..eecd5b819 100644 --- a/src/libutil/comparator.hh +++ b/src/libutil/comparator.hh @@ -25,6 +25,8 @@ } #define GENERATE_EQUAL(args...) GENERATE_ONE_CMP(==, args) #define GENERATE_LEQ(args...) GENERATE_ONE_CMP(<, args) +#define GENERATE_NEQ(args...) GENERATE_ONE_CMP(!=, args) #define GENERATE_CMP(args...) 
\ GENERATE_EQUAL(args) \ - GENERATE_LEQ(args) + GENERATE_LEQ(args) \ + GENERATE_NEQ(args) diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index d2e801175..6b9b850ca 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -46,7 +46,7 @@ public: : printBuildLogs(printBuildLogs) { systemd = getEnv("IN_SYSTEMD") == "1"; - tty = isatty(STDERR_FILENO); + tty = shouldANSI(); } bool isVerbose() override { diff --git a/src/libutil/tests/closure.cc b/src/libutil/tests/closure.cc new file mode 100644 index 000000000..7597e7807 --- /dev/null +++ b/src/libutil/tests/closure.cc @@ -0,0 +1,70 @@ +#include "closure.hh" +#include <gtest/gtest.h> + +namespace nix { + +using namespace std; + +map<string, set<string>> testGraph = { + { "A", { "B", "C", "G" } }, + { "B", { "A" } }, // Loops back to A + { "C", { "F" } }, // Indirect reference + { "D", { "A" } }, // Not reachable, but has backreferences + { "E", {} }, // Just not reachable + { "F", {} }, + { "G", { "G" } }, // Self reference +}; + +TEST(closure, correctClosure) { + set<string> aClosure; + set<string> expectedClosure = {"A", "B", "C", "F", "G"}; + computeClosure<string>( + {"A"}, + aClosure, + [&](const string currentNode, function<void(promise<set<string>> &)> processEdges) { + promise<set<string>> promisedNodes; + promisedNodes.set_value(testGraph[currentNode]); + processEdges(promisedNodes); + } + ); + + ASSERT_EQ(aClosure, expectedClosure); +} + +TEST(closure, properlyHandlesDirectExceptions) { + struct TestExn {}; + set<string> aClosure; + EXPECT_THROW( + computeClosure<string>( + {"A"}, + aClosure, + [&](const string currentNode, function<void(promise<set<string>> &)> processEdges) { + throw TestExn(); + } + ), + TestExn + ); +} + +TEST(closure, properlyHandlesExceptionsInPromise) { + struct TestExn {}; + set<string> aClosure; + EXPECT_THROW( + computeClosure<string>( + {"A"}, + aClosure, + [&](const string currentNode, function<void(promise<set<string>> &)> processEdges) { + promise<set<string>> promise; + try { + throw TestExn(); + } catch (...) 
{ + promise.set_exception(std::current_exception()); + } + processEdges(promise); + } + ), + TestExn + ); +} + +} diff --git a/src/libutil/url.cc b/src/libutil/url.cc index c1bab866c..f6232d255 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -32,7 +32,7 @@ ParsedURL parseURL(const std::string & url) auto isFile = scheme.find("file") != std::string::npos; if (authority && *authority != "" && isFile) - throw Error("file:// URL '%s' has unexpected authority '%s'", + throw BadURL("file:// URL '%s' has unexpected authority '%s'", url, *authority); if (isFile && path.empty()) diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 5f597bf06..ee9f17228 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -155,6 +155,9 @@ Path canonPath(const Path & path, bool resolveSymlinks) s.clear(); /* restart for symlinks pointing to absolute path */ } else { s = dirOf(s); + if (s == "/") { // we don’t want trailing slashes here, which dirOf only produces if s = / + s.clear(); + } } } } @@ -1369,6 +1372,12 @@ void ignoreException() } } +bool shouldANSI() +{ + return isatty(STDERR_FILENO) + && getEnv("TERM").value_or("dumb") != "dumb" + && !getEnv("NO_COLOR").has_value(); +} std::string filterANSIEscapes(const std::string & s, bool filterAll, unsigned int width) { diff --git a/src/libutil/util.hh b/src/libutil/util.hh index f84d0fb31..a8dd4bd47 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -482,6 +482,9 @@ constexpr char treeLast[] = "└───"; constexpr char treeLine[] = "│ "; constexpr char treeNull[] = " "; +/* Determine whether ANSI escape sequences are appropriate for the + present output. */ +bool shouldANSI(); /* Truncate a string to 'width' printable characters. If 'filterAll' is true, all ANSI escape sequences are filtered out. Otherwise, diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index 9acbedda2..3fec2c06c 100755 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -387,6 +387,12 @@ static void main_nix_build(int argc, char * * argv) if (dryRun) return; + if (settings.isExperimentalFeatureEnabled("ca-derivations")) { + auto resolvedDrv = drv.tryResolve(*store); + assert(resolvedDrv && "Successfully resolved the derivation"); + drv = *resolvedDrv; + } + // Set the environment. 
auto env = getEnv(); diff --git a/src/nix/app.cc b/src/nix/app.cc index cf147c631..01a0064db 100644 --- a/src/nix/app.cc +++ b/src/nix/app.cc @@ -3,34 +3,79 @@ #include "eval-inline.hh" #include "eval-cache.hh" #include "names.hh" +#include "command.hh" namespace nix { -App Installable::toApp(EvalState & state) +struct InstallableDerivedPath : Installable { - auto [cursor, attrPath] = getCursor(state); + ref<Store> store; + const DerivedPath derivedPath; - auto type = cursor->getAttr("type")->getString(); + InstallableDerivedPath(ref<Store> store, const DerivedPath & derivedPath) + : store(store) + , derivedPath(derivedPath) + { + } + + + std::string what() override { return derivedPath.to_string(*store); } + + DerivedPaths toDerivedPaths() override + { + return {derivedPath}; + } - auto checkProgram = [&](const Path & program) + std::optional<StorePath> getStorePath() override { - if (!state.store->isInStore(program)) - throw Error("app program '%s' is not in the Nix store", program); - }; + return std::nullopt; + } +}; + +/** + * Return the rewrites that are needed to resolve a string whose context is + * included in `dependencies` + */ +StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies) +{ + StringPairs res; + for (auto & dep : dependencies) + if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep)) + for (auto & [ outputName, outputPath ] : drvDep->outputs) + res.emplace( + downstreamPlaceholder(store, drvDep->drvPath, outputName), + store.printStorePath(outputPath) + ); + return res; +} + +/** + * Resolve the given string assuming the given context + */ +std::string resolveString(Store & store, const std::string & toResolve, const BuiltPaths dependencies) +{ + auto rewrites = resolveRewrites(store, dependencies); + return rewriteStrings(toResolve, rewrites); +} + +UnresolvedApp Installable::toApp(EvalState & state) +{ + auto [cursor, attrPath] = getCursor(state); + + auto type = cursor->getAttr("type")->getString(); if (type == "app") { auto [program, context] = cursor->getAttr("program")->getStringWithContext(); - checkProgram(program); std::vector<StorePathWithOutputs> context2; for (auto & [path, name] : context) context2.push_back({state.store->parseStorePath(path), {name}}); - return App { + return UnresolvedApp{App { .context = std::move(context2), .program = program, - }; + }}; } else if (type == "derivation") { @@ -45,15 +90,32 @@ App Installable::toApp(EvalState & state) ? 
aMainProgram->getString() : DrvName(name).name; auto program = outPath + "/bin/" + mainProgram; - checkProgram(program); - return App { + return UnresolvedApp { App { .context = { { drvPath, {outputName} } }, .program = program, - }; + }}; } else throw Error("attribute '%s' has unsupported type '%s'", attrPath, type); } +App UnresolvedApp::resolve(ref<Store> store) +{ + auto res = unresolved; + + std::vector<std::shared_ptr<Installable>> installableContext; + + for (auto & ctxElt : unresolved.context) + installableContext.push_back( + std::make_shared<InstallableDerivedPath>(store, ctxElt.toDerivedPath())); + + auto builtContext = build(store, Realise::Outputs, installableContext); + res.program = resolveString(*store, unresolved.program, builtContext); + if (!store->isInStore(res.program)) + throw Error("app program '%s' is not in the Nix store", res.program); + + return res; +} + } diff --git a/src/nix/build.cc b/src/nix/build.cc index 226c551fa..15923ebc3 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -63,14 +63,13 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile for (const auto & [_i, buildable] : enumerate(buildables)) { auto i = _i; std::visit(overloaded { - [&](DerivedPathWithHints::Opaque bo) { + [&](BuiltPath::Opaque bo) { std::string symlink = outLink; if (i) symlink += fmt("-%d", i); store2->addPermRoot(bo.path, absPath(symlink)); }, - [&](DerivedPathWithHints::Built bfd) { - auto builtOutputs = store->queryDerivationOutputMap(bfd.drvPath); - for (auto & output : builtOutputs) { + [&](BuiltPath::Built bfd) { + for (auto & output : bfd.outputs) { std::string symlink = outLink; if (i) symlink += fmt("-%d", i); if (output.first != "out") symlink += fmt("-%s", output.first); diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc index 53dccc63a..88bc3d1d1 100644 --- a/src/nix/bundle.cc +++ b/src/nix/bundle.cc @@ -69,8 +69,7 @@ struct CmdBundle : InstallableCommand { auto evalState = getEvalState(); - auto app = installable->toApp(*evalState); - store->buildPaths(toDerivedPaths(app.context)); + auto app = installable->toApp(*evalState).resolve(store); auto [bundlerFlakeRef, bundlerName] = parseFlakeRefWithFragment(bundler, absPath(".")); const flake::LockFlags lockFlags{ .writeLockFile = false }; diff --git a/src/nix/copy.cc b/src/nix/copy.cc index f59f7c76b..674cce4b4 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -8,7 +8,7 @@ using namespace nix; -struct CmdCopy : RealisedPathsCommand +struct CmdCopy : BuiltPathsCommand { std::string srcUri, dstUri; @@ -16,10 +16,10 @@ struct CmdCopy : RealisedPathsCommand SubstituteFlag substitute = NoSubstitute; - using RealisedPathsCommand::run; + using BuiltPathsCommand::run; CmdCopy() - : RealisedPathsCommand(true) + : BuiltPathsCommand(true) { addFlag({ .longName = "from", @@ -75,16 +75,22 @@ struct CmdCopy : RealisedPathsCommand if (srcUri.empty() && dstUri.empty()) throw UsageError("you must pass '--from' and/or '--to'"); - RealisedPathsCommand::run(store); + BuiltPathsCommand::run(store); } - void run(ref<Store> srcStore, std::vector<RealisedPath> paths) override + void run(ref<Store> srcStore, BuiltPaths paths) override { ref<Store> dstStore = dstUri.empty() ? 
openStore() : openStore(dstUri); + RealisedPath::Set stuffToCopy; + + for (auto & builtPath : paths) { + auto theseRealisations = builtPath.toRealisedPaths(*srcStore); + stuffToCopy.insert(theseRealisations.begin(), theseRealisations.end()); + } + copyPaths( - srcStore, dstStore, RealisedPath::Set(paths.begin(), paths.end()), - NoRepair, checkSigs, substitute); + srcStore, dstStore, stuffToCopy, NoRepair, checkSigs, substitute); } }; diff --git a/src/nix/develop.cc b/src/nix/develop.cc index 498a7b45c..699ec0b99 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -54,7 +54,7 @@ BuildEnvironment readEnvironment(const Path & path) R"re((?:[a-zA-Z_][a-zA-Z0-9_]*))re"; static std::string simpleStringRegex = - R"re((?:[a-zA-Z0-9_/:\.\-\+=]*))re"; + R"re((?:[a-zA-Z0-9_/:\.\-\+=@%]*))re"; static std::string dquotedStringRegex = R"re((?:\$?"(?:[^"\\]|\\[$`"\\\n])*"))re"; @@ -144,17 +144,26 @@ StorePath getDerivationEnvironment(ref<Store> store, const StorePath & drvPath) /* Rehash and write the derivation. FIXME: would be nice to use 'buildDerivation', but that's privileged. */ drv.name += "-env"; - for (auto & output : drv.outputs) { - output.second = { .output = DerivationOutputInputAddressed { .path = StorePath::dummy } }; - drv.env[output.first] = ""; - } drv.inputSrcs.insert(std::move(getEnvShPath)); - Hash h = std::get<0>(hashDerivationModulo(*store, drv, true)); + if (settings.isExperimentalFeatureEnabled("ca-derivations")) { + for (auto & output : drv.outputs) { + output.second = { + .output = DerivationOutputDeferred{}, + }; + drv.env[output.first] = hashPlaceholder(output.first); + } + } else { + for (auto & output : drv.outputs) { + output.second = { .output = DerivationOutputInputAddressed { .path = StorePath::dummy } }; + drv.env[output.first] = ""; + } + Hash h = std::get<0>(hashDerivationModulo(*store, drv, true)); - for (auto & output : drv.outputs) { - auto outPath = store->makeOutputPath(output.first, h, drv.name); - output.second = { .output = DerivationOutputInputAddressed { .path = outPath } }; - drv.env[output.first] = store->printStorePath(outPath); + for (auto & output : drv.outputs) { + auto outPath = store->makeOutputPath(output.first, h, drv.name); + output.second = { .output = DerivationOutputInputAddressed { .path = outPath } }; + drv.env[output.first] = store->printStorePath(outPath); + } } auto shellDrvPath = writeDerivation(*store, drv); @@ -162,8 +171,7 @@ StorePath getDerivationEnvironment(ref<Store> store, const StorePath & drvPath) /* Build the derivation. 
*/ store->buildPaths({DerivedPath::Built{shellDrvPath}}); - for (auto & [_0, outputAndOptPath] : drv.outputsAndOptPaths(*store)) { - auto & [_1, optPath] = outputAndOptPath; + for (auto & [_0, optPath] : store->queryPartialDerivationOutputMap(shellDrvPath)) { assert(optPath); auto & outPath = *optPath; assert(store->isValidPath(outPath)); @@ -185,6 +193,7 @@ struct Common : InstallableCommand, MixProfile "NIX_BUILD_TOP", "NIX_ENFORCE_PURITY", "NIX_LOG_FD", + "NIX_REMOTE", "PPID", "PWD", "SHELLOPTS", @@ -265,9 +274,9 @@ struct Common : InstallableCommand, MixProfile for (auto & [installable_, dir_] : redirects) { auto dir = absPath(dir_); auto installable = parseInstallable(store, installable_); - auto buildable = installable->toDerivedPathWithHints(); - auto doRedirect = [&](const StorePath & path) - { + auto builtPaths = toStorePaths( + store, Realise::Nothing, OperateOn::Output, {installable}); + for (auto & path: builtPaths) { auto from = store->printStorePath(path); if (script.find(from) == std::string::npos) warn("'%s' (path '%s') is not used by this build environment", installable->what(), from); @@ -275,16 +284,7 @@ struct Common : InstallableCommand, MixProfile printInfo("redirecting '%s' to '%s'", from, dir); rewrites.insert({from, dir}); } - }; - std::visit(overloaded { - [&](const DerivedPathWithHints::Opaque & bo) { - doRedirect(bo.path); - }, - [&](const DerivedPathWithHints::Built & bfd) { - for (auto & [outputName, path] : bfd.outputs) - if (path) doRedirect(*path); - }, - }, buildable.raw()); + } } return rewriteStrings(script, rewrites); @@ -404,7 +404,7 @@ struct CmdDevelop : Common, MixEnvironment if (verbosity >= lvlDebug) script += "set -x\n"; - script += fmt("rm -f '%s'\n", rcFilePath); + script += fmt("command rm -f '%s'\n", rcFilePath); if (phase) { if (!command.empty()) @@ -423,7 +423,7 @@ struct CmdDevelop : Common, MixEnvironment } else { - script += "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;\n"; + script = "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;\n" + script; if (developSettings.bashPrompt != "") script += fmt("[ -n \"$PS1\" ] && PS1=%s;\n", shellEscape(developSettings.bashPrompt)); if (developSettings.bashPromptSuffix != "") @@ -443,13 +443,17 @@ struct CmdDevelop : Common, MixEnvironment try { auto state = getEvalState(); + auto nixpkgsLockFlags = lockFlags; + nixpkgsLockFlags.inputOverrides = {}; + nixpkgsLockFlags.inputUpdates = {}; + auto bashInstallable = std::make_shared<InstallableFlake>( this, state, installable->nixpkgsFlakeRef(), Strings{"bashInteractive"}, Strings{"legacyPackages." + settings.thisSystem.get() + "."}, - lockFlags); + nixpkgsLockFlags); shell = state->store->printStorePath( toStorePath(state->store, Realise::Outputs, OperateOn::Output, bashInstallable)) + "/bin/bash"; diff --git a/src/nix/flake-check.md b/src/nix/flake-check.md index dc079ba0c..8ef932954 100644 --- a/src/nix/flake-check.md +++ b/src/nix/flake-check.md @@ -22,9 +22,13 @@ This command verifies that the flake specified by flake reference that the derivations specified by the flake's `checks` output can be built successfully. +If the `keep-going` option is set to `true`, Nix will keep evaluating as much +as it can and report the errors as it encounters them. Otherwise it will stop +at the first error. 
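As a usage sketch for the `keep-going` behaviour described above (the flake path is a placeholder; the same invocation appears in the `tests/flakes.sh` change further down in this diff):

```console
# Evaluate every flake output and report all errors instead of aborting on the first
$ nix flake check --keep-going /path/to/flake
```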
+ # Evaluation checks -This following flake output attributes must be derivations: +The following flake output attributes must be derivations: * `checks.`*system*`.`*name* * `defaultPackage.`*system*` diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 62a413e27..64fcfc000 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -272,25 +272,40 @@ struct CmdFlakeCheck : FlakeCommand auto state = getEvalState(); auto flake = lockFlake(); + bool hasErrors = false; + auto reportError = [&](const Error & e) { + try { + throw e; + } catch (Error & e) { + if (settings.keepGoing) { + ignoreException(); + hasErrors = true; + } + else + throw; + } + }; + // FIXME: rewrite to use EvalCache. auto checkSystemName = [&](const std::string & system, const Pos & pos) { // FIXME: what's the format of "system"? if (system.find('-') == std::string::npos) - throw Error("'%s' is not a valid system type, at %s", system, pos); + reportError(Error("'%s' is not a valid system type, at %s", system, pos)); }; - auto checkDerivation = [&](const std::string & attrPath, Value & v, const Pos & pos) { + auto checkDerivation = [&](const std::string & attrPath, Value & v, const Pos & pos) -> std::optional<StorePath> { try { auto drvInfo = getDerivation(*state, v, false); if (!drvInfo) throw Error("flake attribute '%s' is not a derivation", attrPath); // FIXME: check meta attributes - return store->parseStorePath(drvInfo->queryDrvPath()); + return std::make_optional(store->parseStorePath(drvInfo->queryDrvPath())); } catch (Error & e) { e.addTrace(pos, hintfmt("while checking the derivation '%s'", attrPath)); - throw; + reportError(e); } + return std::nullopt; }; std::vector<DerivedPath> drvPaths; @@ -307,7 +322,7 @@ struct CmdFlakeCheck : FlakeCommand #endif } catch (Error & e) { e.addTrace(pos, hintfmt("while checking the app definition '%s'", attrPath)); - throw; + reportError(e); } }; @@ -323,7 +338,7 @@ struct CmdFlakeCheck : FlakeCommand // evaluate the overlay. } catch (Error & e) { e.addTrace(pos, hintfmt("while checking the overlay '%s'", attrPath)); - throw; + reportError(e); } }; @@ -347,7 +362,7 @@ struct CmdFlakeCheck : FlakeCommand // check the module. 
} catch (Error & e) { e.addTrace(pos, hintfmt("while checking the NixOS module '%s'", attrPath)); - throw; + reportError(e); } }; @@ -369,7 +384,7 @@ struct CmdFlakeCheck : FlakeCommand } catch (Error & e) { e.addTrace(pos, hintfmt("while checking the Hydra jobset '%s'", attrPath)); - throw; + reportError(e); } }; @@ -384,7 +399,7 @@ struct CmdFlakeCheck : FlakeCommand throw Error("attribute 'config.system.build.toplevel' is not a derivation"); } catch (Error & e) { e.addTrace(pos, hintfmt("while checking the NixOS configuration '%s'", attrPath)); - throw; + reportError(e); } }; @@ -418,7 +433,7 @@ struct CmdFlakeCheck : FlakeCommand } } catch (Error & e) { e.addTrace(pos, hintfmt("while checking the template '%s'", attrPath)); - throw; + reportError(e); } }; @@ -433,7 +448,7 @@ struct CmdFlakeCheck : FlakeCommand throw Error("bundler must take formal arguments 'program' and 'system'"); } catch (Error & e) { e.addTrace(pos, hintfmt("while checking the template '%s'", attrPath)); - throw; + reportError(e); } }; @@ -461,8 +476,8 @@ struct CmdFlakeCheck : FlakeCommand auto drvPath = checkDerivation( fmt("%s.%s.%s", name, attr.name, attr2.name), *attr2.value, *attr2.pos); - if ((std::string) attr.name == settings.thisSystem.get()) - drvPaths.push_back(DerivedPath::Built{drvPath}); + if (drvPath && (std::string) attr.name == settings.thisSystem.get()) + drvPaths.push_back(DerivedPath::Built{*drvPath}); } } } @@ -574,7 +589,7 @@ struct CmdFlakeCheck : FlakeCommand } catch (Error & e) { e.addTrace(pos, hintfmt("while checking flake output '%s'", name)); - throw; + reportError(e); } }); } @@ -583,6 +598,8 @@ struct CmdFlakeCheck : FlakeCommand Activity act(*logger, lvlInfo, actUnknown, "running flake checks"); store->buildPaths(drvPaths); } + if (hasErrors) + throw Error("Some errors were encountered during the evaluation"); } }; diff --git a/src/nix/flake.md b/src/nix/flake.md index 9e936a049..3d273100b 100644 --- a/src/nix/flake.md +++ b/src/nix/flake.md @@ -186,8 +186,8 @@ Currently the `type` attribute can be one of the following: attribute `url`. In URL form, the schema must be `http://`, `https://` or `file://` - URLs and the extension must be `.zip`, `.tar`, `.tar.gz`, `.tar.xz` - or `.tar.bz2`. + URLs and the extension must be `.zip`, `.tar`, `.tar.gz`, `.tar.xz`, + `.tar.bz2` or `.tar.zst`. * `github`: A more efficient way to fetch repositories from GitHub. 
The following attributes are required: diff --git a/src/nix/log.cc b/src/nix/log.cc index 638bb5073..962c47525 100644 --- a/src/nix/log.cc +++ b/src/nix/log.cc @@ -30,15 +30,15 @@ struct CmdLog : InstallableCommand subs.push_front(store); - auto b = installable->toDerivedPathWithHints(); + auto b = installable->toDerivedPath(); RunPager pager; for (auto & sub : subs) { auto log = std::visit(overloaded { - [&](DerivedPathWithHints::Opaque bo) { + [&](DerivedPath::Opaque bo) { return sub->getBuildLog(bo.path); }, - [&](DerivedPathWithHints::Built bfd) { + [&](DerivedPath::Built bfd) { return sub->getBuildLog(bfd.drvPath); }, }, b.raw()); diff --git a/src/nix/profile-remove.md b/src/nix/profile-remove.md index dcf825da9..ba85441d8 100644 --- a/src/nix/profile-remove.md +++ b/src/nix/profile-remove.md @@ -15,6 +15,7 @@ R""( ``` * Remove all packages: + ```console # nix profile remove '.*' ``` diff --git a/src/nix/profile-upgrade.md b/src/nix/profile-upgrade.md index 2bd5d256d..e06e74abe 100644 --- a/src/nix/profile-upgrade.md +++ b/src/nix/profile-upgrade.md @@ -18,7 +18,7 @@ R""( * Upgrade a specific profile element by number: ```console - # nix profile info + # nix profile list 0 flake:nixpkgs#legacyPackages.x86_64-linux.spotify … # nix profile upgrade 0 diff --git a/src/nix/profile.cc b/src/nix/profile.cc index 667904cd2..511771f89 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -259,11 +259,11 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile ProfileElement element; std::visit(overloaded { - [&](DerivedPathWithHints::Opaque bo) { + [&](BuiltPath::Opaque bo) { pathsToBuild.push_back(bo); element.storePaths.insert(bo.path); }, - [&](DerivedPathWithHints::Built bfd) { + [&](BuiltPath::Built bfd) { // TODO: Why are we querying if we know the output // names already? Is it just to figure out what the // default one is? 
@@ -426,7 +426,7 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf attrPath, }; - pathsToBuild.push_back(DerivedPath::Built{drv.drvPath, {"out"}}); // FIXME + pathsToBuild.push_back(DerivedPath::Built{drv.drvPath, {drv.outputName}}); } } diff --git a/src/nix/realisation.cc b/src/nix/realisation.cc index 9ee9ccb91..d59e594df 100644 --- a/src/nix/realisation.cc +++ b/src/nix/realisation.cc @@ -28,7 +28,7 @@ struct CmdRealisation : virtual NixMultiCommand static auto rCmdRealisation = registerCommand<CmdRealisation>("realisation"); -struct CmdRealisationInfo : RealisedPathsCommand, MixJSON +struct CmdRealisationInfo : BuiltPathsCommand, MixJSON { std::string description() override { @@ -44,12 +44,19 @@ struct CmdRealisationInfo : RealisedPathsCommand, MixJSON Category category() override { return catSecondary; } - void run(ref<Store> store, std::vector<RealisedPath> paths) override + void run(ref<Store> store, BuiltPaths paths) override { settings.requireExperimentalFeature("ca-derivations"); + RealisedPath::Set realisations; + + for (auto & builtPath : paths) { + auto theseRealisations = builtPath.toRealisedPaths(*store); + realisations.insert(theseRealisations.begin(), theseRealisations.end()); + } + if (json) { nlohmann::json res = nlohmann::json::array(); - for (auto & path : paths) { + for (auto & path : realisations) { nlohmann::json currentPath; if (auto realisation = std::get_if<Realisation>(&path.raw)) currentPath = realisation->toJSON(); @@ -61,7 +68,7 @@ struct CmdRealisationInfo : RealisedPathsCommand, MixJSON std::cout << res.dump(); } else { - for (auto & path : paths) { + for (auto & path : realisations) { if (auto realisation = std::get_if<Realisation>(&path.raw)) { std::cout << realisation->id.to_string() << " " << diff --git a/src/nix/repl.cc b/src/nix/repl.cc index eed79c332..0275feae7 100644 --- a/src/nix/repl.cc +++ b/src/nix/repl.cc @@ -104,6 +104,26 @@ NixRepl::~NixRepl() write_history(historyFile.c_str()); } +string runNix(Path program, const Strings & args, + const std::optional<std::string> & input = {}) +{ + auto experimentalFeatures = concatStringsSep(" ", settings.experimentalFeatures.get()); + auto nixConf = getEnv("NIX_CONFIG").value_or(""); + nixConf.append("\nexperimental-features = " + experimentalFeatures); + auto subprocessEnv = getEnv(); + subprocessEnv["NIX_CONFIG"] = nixConf; + RunOptions opts(settings.nixBinDir+ "/" + program, args); + opts.input = input; + opts.environment = subprocessEnv; + + auto res = runProgram(opts); + + if (!statusOk(res.first)) + throw ExecError(res.first, fmt("program '%1%' %2%", program, statusToString(res.first))); + + return res.second; +} + static NixRepl * curRepl; // ugly static char * completionCallback(char * s, int *match) { @@ -463,7 +483,7 @@ bool NixRepl::processLine(string line) state->callFunction(f, v, result, Pos()); StorePath drvPath = getDerivationPath(result); - runProgram(settings.nixBinDir + "/nix-shell", true, {state->store->printStorePath(drvPath)}); + runNix("nix-shell", {state->store->printStorePath(drvPath)}); } else if (command == ":b" || command == ":i" || command == ":s") { @@ -477,7 +497,7 @@ bool NixRepl::processLine(string line) but doing it in a child makes it easier to recover from problems / SIGINT. 
*/ try { - runProgram(settings.nixBinDir + "/nix", true, {"build", "--no-link", drvPathRaw}); + runNix("nix", {"build", "--no-link", drvPathRaw}); auto drv = state->store->readDerivation(drvPath); std::cout << std::endl << "this derivation produced the following outputs:" << std::endl; for (auto & i : drv.outputsAndOptPaths(*state->store)) @@ -485,9 +505,9 @@ bool NixRepl::processLine(string line) } catch (ExecError &) { } } else if (command == ":i") { - runProgram(settings.nixBinDir + "/nix-env", true, {"-i", drvPathRaw}); + runNix("nix-env", {"-i", drvPathRaw}); } else { - runProgram(settings.nixBinDir + "/nix-shell", true, {drvPathRaw}); + runNix("nix-shell", {drvPathRaw}); } } diff --git a/src/nix/run.cc b/src/nix/run.cc index b5d8ab38a..c0ba05a3e 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -43,8 +43,8 @@ struct RunCommon : virtual Command helper program (chrootHelper() below) to do the work. */ auto store2 = store.dynamic_pointer_cast<LocalStore>(); - if (store2 && store->storeDir != store2->realStoreDir) { - Strings helperArgs = { chrootHelperName, store->storeDir, store2->realStoreDir, program }; + if (store2 && store->storeDir != store2->getRealStoreDir()) { + Strings helperArgs = { chrootHelperName, store->storeDir, store2->getRealStoreDir(), program }; for (auto & arg : args) helperArgs.push_back(arg); execv(readLink("/proc/self/exe").c_str(), stringsToCharPtrs(helperArgs).data()); @@ -178,9 +178,7 @@ struct CmdRun : InstallableCommand, RunCommon { auto state = getEvalState(); - auto app = installable->toApp(*state); - - state->store->buildPaths(toDerivedPaths(app.context)); + auto app = installable->toApp(*state).resolve(store); Strings allArgs{app.program}; for (auto & i : args) allArgs.push_back(i); diff --git a/src/nix/verify.cc b/src/nix/verify.cc index 1721c7f16..f5a576064 100644 --- a/src/nix/verify.cc +++ b/src/nix/verify.cc @@ -97,15 +97,11 @@ struct CmdVerify : StorePathsCommand if (!noContents) { - std::unique_ptr<AbstractHashSink> hashSink; - if (!info->ca) - hashSink = std::make_unique<HashSink>(info->narHash.type); - else - hashSink = std::make_unique<HashModuloSink>(info->narHash.type, std::string(info->path.hashPart())); + auto hashSink = HashSink(info->narHash.type); - store->narFromPath(info->path, *hashSink); + store->narFromPath(info->path, hashSink); - auto hash = hashSink->finish(); + auto hash = hashSink.finish(); if (hash.first != info->narHash) { corrupted++; diff --git a/tests/build-remote-content-addressed-floating.sh b/tests/build-remote-content-addressed-floating.sh index 7447d92bd..13ef47d2d 100644 --- a/tests/build-remote-content-addressed-floating.sh +++ b/tests/build-remote-content-addressed-floating.sh @@ -4,4 +4,6 @@ file=build-hook-ca-floating.nix sed -i 's/experimental-features .*/& ca-derivations/' "$NIX_CONF_DIR"/nix.conf +CONTENT_ADDRESSED=true + source build-remote.sh diff --git a/tests/build-remote.sh b/tests/build-remote.sh index 70f82e939..27d85a83d 100644 --- a/tests/build-remote.sh +++ b/tests/build-remote.sh @@ -6,12 +6,17 @@ unset NIX_STATE_DIR function join_by { local d=$1; shift; echo -n "$1"; shift; printf "%s" "${@/#/$d}"; } +EXTRA_SYSTEM_FEATURES=() +if [[ -n "$CONTENT_ADDRESSED" ]]; then + EXTRA_SYSTEM_FEATURES=("ca-derivations") +fi + builders=( # system-features will automatically be added to the outer URL, but not inner # remote-store URL. 
- "ssh://localhost?remote-store=$TEST_ROOT/machine1?system-features=foo - - 1 1 foo" - "$TEST_ROOT/machine2 - - 1 1 bar" - "ssh-ng://localhost?remote-store=$TEST_ROOT/machine3?system-features=baz - - 1 1 baz" + "ssh://localhost?remote-store=$TEST_ROOT/machine1?system-features=$(join_by "%20" foo ${EXTRA_SYSTEM_FEATURES[@]}) - - 1 1 $(join_by "," foo ${EXTRA_SYSTEM_FEATURES[@]})" + "$TEST_ROOT/machine2 - - 1 1 $(join_by "," bar ${EXTRA_SYSTEM_FEATURES[@]})" + "ssh-ng://localhost?remote-store=$TEST_ROOT/machine3?system-features=$(join_by "%20" baz ${EXTRA_SYSTEM_FEATURES[@]}) - - 1 1 $(join_by "," baz ${EXTRA_SYSTEM_FEATURES[@]})" ) chmod -R +w $TEST_ROOT/machine* || true diff --git a/tests/build.sh b/tests/build.sh index aa54b88eb..c77f620f7 100644 --- a/tests/build.sh +++ b/tests/build.sh @@ -1,7 +1,7 @@ source common.sh expectedJSONRegex='\[\{"drvPath":".*multiple-outputs-a.drv","outputs":\{"first":".*multiple-outputs-a-first","second":".*multiple-outputs-a-second"}},\{"drvPath":".*multiple-outputs-b.drv","outputs":\{"out":".*multiple-outputs-b"}}]' -nix build -f multiple-outputs.nix --json a.all b.all | jq --exit-status ' +nix build -f multiple-outputs.nix --json a.all b.all --no-link | jq --exit-status ' (.[0] | (.drvPath | match(".*multiple-outputs-a.drv")) and (.outputs.first | match(".*multiple-outputs-a-first")) and @@ -10,3 +10,10 @@ nix build -f multiple-outputs.nix --json a.all b.all | jq --exit-status ' (.drvPath | match(".*multiple-outputs-b.drv")) and (.outputs.out | match(".*multiple-outputs-b"))) ' +testNormalization () { + clearStore + outPath=$(nix-build ./simple.nix --no-out-link) + test "$(stat -c %Y $outPath)" -eq 1 +} + +testNormalization diff --git a/tests/ca/build-with-garbage-path.sh b/tests/ca/build-with-garbage-path.sh new file mode 100755 index 000000000..e6f878702 --- /dev/null +++ b/tests/ca/build-with-garbage-path.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# Regression test for https://github.com/NixOS/nix/issues/4858 + +source common.sh +sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf + +# Get the output path of `rootCA`, and put some garbage instead +outPath="$(nix-build ./content-addressed.nix -A rootCA --no-out-link)" +nix-store --delete "$outPath" +touch "$outPath" + +# The build should correctly remove the garbage and put the expected path instead +nix-build ./content-addressed.nix -A rootCA --no-out-link + +# Rebuild it. This shouldn’t overwrite the existing path +oldInode=$(stat -c '%i' "$outPath") +nix-build ./content-addressed.nix -A rootCA --no-out-link --arg seed 2 +newInode=$(stat -c '%i' "$outPath") +[[ "$oldInode" == "$newInode" ]] diff --git a/tests/ca/build.sh b/tests/ca/build.sh index 35bf1dcf7..c8877f87f 100644 --- a/tests/ca/build.sh +++ b/tests/ca/build.sh @@ -59,9 +59,17 @@ testNixCommand () { nix build --experimental-features 'nix-command ca-derivations' --file ./content-addressed.nix --no-link } +# Regression test for https://github.com/NixOS/nix/issues/4775 +testNormalization () { + clearStore + outPath=$(buildAttr rootCA 1) + test "$(stat -c %Y $outPath)" -eq 1 +} + # Disabled until we have it properly working # testRemoteCache clearStore +testNormalization testDeterministicCA clearStore testCutoff diff --git a/tests/ca/config.nix.in b/tests/ca/config.nix.in new file mode 120000 index 000000000..af24ddb30 --- /dev/null +++ b/tests/ca/config.nix.in @@ -0,0 +1 @@ +../config.nix.in
\ No newline at end of file diff --git a/tests/ca/content-addressed.nix b/tests/ca/content-addressed.nix index e5b1c4de3..d328fc92c 100644 --- a/tests/ca/content-addressed.nix +++ b/tests/ca/content-addressed.nix @@ -1,4 +1,11 @@ -with import ../config.nix; +with import ./config.nix; + +let mkCADerivation = args: mkDerivation ({ + __contentAddressed = true; + outputHashMode = "recursive"; + outputHashAlgo = "sha256"; +} // args); +in { seed ? 0 }: # A simple content-addressed derivation. @@ -14,7 +21,7 @@ rec { echo "Hello World" > $out/hello ''; }; - rootCA = mkDerivation { + rootCA = mkCADerivation { name = "rootCA"; outputs = [ "out" "dev" "foo"]; buildCommand = '' @@ -27,11 +34,8 @@ rec { ln -s $out $dev ln -s $out $foo ''; - __contentAddressed = true; - outputHashMode = "recursive"; - outputHashAlgo = "sha256"; }; - dependentCA = mkDerivation { + dependentCA = mkCADerivation { name = "dependent"; buildCommand = '' echo "building a dependent derivation" @@ -39,20 +43,14 @@ rec { cat ${rootCA}/self/dep echo ${rootCA}/self/dep > $out/dep ''; - __contentAddressed = true; - outputHashMode = "recursive"; - outputHashAlgo = "sha256"; }; - transitivelyDependentCA = mkDerivation { + transitivelyDependentCA = mkCADerivation { name = "transitively-dependent"; buildCommand = '' echo "building transitively-dependent" cat ${dependentCA}/dep echo ${dependentCA} > $out ''; - __contentAddressed = true; - outputHashMode = "recursive"; - outputHashAlgo = "sha256"; }; dependentNonCA = mkDerivation { name = "dependent-non-ca"; @@ -72,6 +70,14 @@ rec { cat ${dependentCA}/dep echo foo > $out ''; - + }; + runnable = mkCADerivation rec { + name = "runnable-thing"; + buildCommand = '' + mkdir -p $out/bin + echo ${rootCA} # Just to make it depend on it + echo "" > $out/bin/${name} + chmod +x $out/bin/${name} + ''; }; } diff --git a/tests/ca/duplicate-realisation-in-closure.sh b/tests/ca/duplicate-realisation-in-closure.sh new file mode 100644 index 000000000..ca9099641 --- /dev/null +++ b/tests/ca/duplicate-realisation-in-closure.sh @@ -0,0 +1,26 @@ +source ./common.sh + +sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf + +export REMOTE_STORE_DIR="$TEST_ROOT/remote_store" +export REMOTE_STORE="file://$REMOTE_STORE_DIR" + +rm -rf $REMOTE_STORE_DIR +clearStore + +# Build dep1 and push that to the binary cache. +# This entails building (and pushing) current-time. +nix copy --to "$REMOTE_STORE" -f nondeterministic.nix dep1 +clearStore +sleep 2 # To make sure that `$(date)` will be different +# Build dep2. +# As we’ve cleared the cache, we’ll have to rebuild current-time. And because +# the current time isn’t the same as before, this will yield a new (different) +# realisation +nix build -f nondeterministic.nix dep2 --no-link + +# Build something that depends both on dep1 and dep2. +# If everything goes right, we should rebuild dep2 rather than fetch it from +# the cache (because that would mean duplicating `current-time` in the closure), +# and have `dep1 == dep2`. 
+nix build --substituters "$REMOTE_STORE" -f nondeterministic.nix toplevel --no-require-sigs --no-link diff --git a/tests/ca/flake.nix b/tests/ca/flake.nix new file mode 100644 index 000000000..332c92a67 --- /dev/null +++ b/tests/ca/flake.nix @@ -0,0 +1,3 @@ +{ + outputs = { self }: import ./content-addressed.nix {}; +} diff --git a/tests/ca/gc.sh b/tests/ca/gc.sh new file mode 100755 index 000000000..e4f9857d6 --- /dev/null +++ b/tests/ca/gc.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +# Ensure that garbage collection works properly with ca derivations + +source common.sh + +sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf + +export NIX_TESTS_CA_BY_DEFAULT=1 + +cd .. +source gc.sh diff --git a/tests/ca/nix-run.sh b/tests/ca/nix-run.sh new file mode 100755 index 000000000..81402af10 --- /dev/null +++ b/tests/ca/nix-run.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +source common.sh + +sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf + +FLAKE_PATH=path:$PWD + +nix run --no-write-lock-file $FLAKE_PATH#runnable diff --git a/tests/ca/nix-shell.sh b/tests/ca/nix-shell.sh new file mode 100755 index 000000000..7f1a3a73e --- /dev/null +++ b/tests/ca/nix-shell.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +source common.sh + +sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf + +CONTENT_ADDRESSED=true +cd .. +source ./nix-shell.sh + diff --git a/tests/ca/nondeterministic.nix b/tests/ca/nondeterministic.nix new file mode 100644 index 000000000..d6d099a3e --- /dev/null +++ b/tests/ca/nondeterministic.nix @@ -0,0 +1,35 @@ +with import ./config.nix; + +let mkCADerivation = args: mkDerivation ({ + __contentAddressed = true; + outputHashMode = "recursive"; + outputHashAlgo = "sha256"; +} // args); +in + +rec { + currentTime = mkCADerivation { + name = "current-time"; + buildCommand = '' + mkdir $out + echo $(date) > $out/current-time + ''; + }; + dep = seed: mkCADerivation { + name = "dep"; + inherit seed; + buildCommand = '' + echo ${currentTime} > $out + ''; + }; + dep1 = dep 1; + dep2 = dep 2; + toplevel = mkCADerivation { + name = "toplevel"; + buildCommand = '' + test ${dep1} == ${dep2} + touch $out + ''; + }; +} + diff --git a/tests/ca/post-hook.sh b/tests/ca/post-hook.sh new file mode 100755 index 000000000..4b8da4cd8 --- /dev/null +++ b/tests/ca/post-hook.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +source common.sh + +sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf + +export NIX_TESTS_CA_BY_DEFAULT=1 +cd .. +source ./post-hook.sh + + diff --git a/tests/ca/recursive.sh b/tests/ca/recursive.sh new file mode 100755 index 000000000..d9281d91f --- /dev/null +++ b/tests/ca/recursive.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +source common.sh + +sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf + +export NIX_TESTS_CA_BY_DEFAULT=1 +cd .. 
+source ./recursive.sh + + diff --git a/tests/ca/substitute.sh b/tests/ca/substitute.sh index 737c851a5..c80feaacf 100644 --- a/tests/ca/substitute.sh +++ b/tests/ca/substitute.sh @@ -17,11 +17,15 @@ buildDrvs () { # Populate the remote cache clearStore -buildDrvs --post-build-hook ../push-to-store.sh +nix copy --to $REMOTE_STORE --file ./content-addressed.nix # Restart the build on an empty store, ensuring that we don't build clearStore -buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0 +buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0 transitivelyDependentCA +# Check that the thing we’ve just substituted has its realisation stored +nix realisation info --file ./content-addressed.nix transitivelyDependentCA +# Check that its dependencies have it too +nix realisation info --file ./content-addressed.nix dependentCA rootCA # Same thing, but # 1. With non-ca derivations @@ -45,3 +49,16 @@ if [[ -z "$(ls "$REMOTE_STORE_DIR/realisations")" ]]; then echo "Realisations not rebuilt" exit 1 fi + +# Test the local realisation disk cache +buildDrvs --post-build-hook ../push-to-store.sh +clearStore +# Add the realisations of rootCA to the cachecache +clearCacheCache +export _NIX_FORCE_HTTP=1 +buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0 +# Try rebuilding, but remove the realisations from the remote cache to force +# using the cachecache +clearStore +rm $REMOTE_STORE_DIR/realisations/* +buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0 diff --git a/tests/config.nix.in b/tests/config.nix.in index a57a8c596..7facbdcbc 100644 --- a/tests/config.nix.in +++ b/tests/config.nix.in @@ -1,3 +1,12 @@ +let + contentAddressedByDefault = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT" == "1"; + caArgs = if contentAddressedByDefault then { + __contentAddressed = true; + outputHashMode = "recursive"; + outputHashAlgo = "sha256"; + } else {}; +in + rec { shell = "@bash@"; @@ -13,6 +22,6 @@ rec { builder = shell; args = ["-e" args.builder or (builtins.toFile "builder-${args.name}.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")]; PATH = path; - } // removeAttrs args ["builder" "meta"]) + } // caArgs // removeAttrs args ["builder" "meta"]) // { meta = args.meta or {}; }; } diff --git a/tests/flakes.sh b/tests/flakes.sh index e78e4a39d..9764e1a6c 100644 --- a/tests/flakes.sh +++ b/tests/flakes.sh @@ -535,6 +535,21 @@ EOF (! nix flake check $flake3Dir) +cat > $flake3Dir/flake.nix <<EOF +{ + outputs = { flake1, self }: { + defaultPackage = { + system-1 = "foo"; + system-2 = "bar"; + }; + }; +} +EOF + +checkRes=$(nix flake check --keep-going $flake3Dir 2>&1 && fail "nix flake check should have failed" || true) +echo "$checkRes" | grep -q "defaultPackage.system-1" +echo "$checkRes" | grep -q "defaultPackage.system-2" + # Test 'follows' inputs. 
 cat > $flake3Dir/flake.nix <<EOF
 {
diff --git a/tests/gc.sh b/tests/gc.sh
index 8b4f8d282..cf0e2c32d 100644
--- a/tests/gc.sh
+++ b/tests/gc.sh
@@ -12,7 +12,7 @@ ln -sf $outPath "$NIX_STATE_DIR"/gcroots/foo
 nix-store --gc --print-roots | grep $outPath
 nix-store --gc --print-live | grep $outPath
 nix-store --gc --print-dead | grep $drvPath
-if nix-store --gc --print-dead | grep $outPath; then false; fi
+if nix-store --gc --print-dead | grep -E $outPath$; then false; fi
 
 nix-store --gc --print-dead
 
diff --git a/tests/lang/eval-okay-floor-ceil.exp b/tests/lang/eval-okay-floor-ceil.exp
new file mode 100644
index 000000000..81f80420b
--- /dev/null
+++ b/tests/lang/eval-okay-floor-ceil.exp
@@ -0,0 +1 @@
+"23;24;23;23"
diff --git a/tests/lang/eval-okay-floor-ceil.nix b/tests/lang/eval-okay-floor-ceil.nix
new file mode 100644
index 000000000..d76a0d86e
--- /dev/null
+++ b/tests/lang/eval-okay-floor-ceil.nix
@@ -0,0 +1,9 @@
+with import ./lib.nix;
+
+let
+  n1 = builtins.floor 23.5;
+  n2 = builtins.ceil 23.5;
+  n3 = builtins.floor 23;
+  n4 = builtins.ceil 23;
+in
+  builtins.concatStringsSep ";" (map toString [ n1 n2 n3 n4 ])
diff --git a/tests/lang/parse-okay-url.nix b/tests/lang/parse-okay-url.nix
index fce3b13ee..08de27d0a 100644
--- a/tests/lang/parse-okay-url.nix
+++ b/tests/lang/parse-okay-url.nix
@@ -3,5 +3,6 @@
   http://www2.mplayerhq.hu/MPlayer/releases/fonts/font-arial-iso-8859-1.tar.bz2
   http://losser.st-lab.cs.uu.nl/~armijn/.nix/gcc-3.3.4-static-nix.tar.gz
   http://fpdownload.macromedia.com/get/shockwave/flash/english/linux/7.0r25/install_flash_player_7_linux.tar.gz
+  https://ftp5.gwdg.de/pub/linux/archlinux/extra/os/x86_64/unzip-6.0-14-x86_64.pkg.tar.zst
   ftp://ftp.gtk.org/pub/gtk/v1.2/gtk+-1.2.10.tar.gz
 ]
diff --git a/tests/local.mk b/tests/local.mk
index e2c94dde6..cbdf4efdb 100644
--- a/tests/local.mk
+++ b/tests/local.mk
@@ -2,6 +2,7 @@ nix_tests = \
   hash.sh lang.sh add.sh simple.sh dependencies.sh \
   config.sh \
   gc.sh \
+  ca/gc.sh \
   gc-concurrent.sh \
   gc-auto.sh \
   referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
@@ -11,6 +12,7 @@ nix_tests = \
   timeout.sh secure-drv-outputs.sh nix-channel.sh \
   multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \
   binary-cache.sh \
+  substitute-with-invalid-ca.sh \
   binary-cache-build-remote.sh \
   nix-profile.sh repair.sh dump-db.sh case-hack.sh \
   check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \
@@ -37,6 +39,7 @@ nix_tests = \
   search.sh \
   nix-copy-ssh.sh \
   post-hook.sh \
+  ca/post-hook.sh \
   function-trace.sh \
   recursive.sh \
   describe-stores.sh \
@@ -44,8 +47,13 @@ nix_tests = \
   build.sh \
   compute-levels.sh \
   ca/build.sh \
+  ca/build-with-garbage-path.sh \
+  ca/duplicate-realisation-in-closure.sh \
   ca/substitute.sh \
   ca/signatures.sh \
+  ca/nix-shell.sh \
+  ca/nix-run.sh \
+  ca/recursive.sh \
   ca/nix-copy.sh
   # parallel.sh
 
@@ -53,6 +61,6 @@ install-tests += $(foreach x, $(nix_tests), tests/$(x))
 
 tests-environment = NIX_REMOTE= $(bash) -e
 
-clean-files += $(d)/common.sh $(d)/config.nix
+clean-files += $(d)/common.sh $(d)/config.nix $(d)/ca/config.nix
 
-test-deps += tests/common.sh tests/config.nix tests/plugins/libplugintest.$(SO_EXT)
+test-deps += tests/common.sh tests/config.nix tests/ca/config.nix tests/plugins/libplugintest.$(SO_EXT)
diff --git a/tests/nix-shell.sh b/tests/nix-shell.sh
index 4775bafb9..3481c2c69 100644
--- a/tests/nix-shell.sh
+++ b/tests/nix-shell.sh
@@ -2,6 +2,20 @@ source common.sh
 
 clearStore
 
+if [[ -n ${CONTENT_ADDRESSED:-} ]]; then
+    nix-shell () {
+        command nix-shell --arg contentAddressed true "$@"
+    }
+
+    nix_develop() {
+        nix develop --arg contentAddressed true "$@"
+    }
+else
+    nix_develop() {
+        nix develop "$@"
+    }
+fi
+
 # Test nix-shell -A
 export IMPURE_VAR=foo
 export SELECTED_IMPURE_VAR=baz
@@ -41,7 +55,7 @@ output=$(NIX_PATH=nixpkgs=shell.nix nix-shell --pure -p foo bar --run 'echo "$(f
 [ "$output" = "foo bar" ]
 
 # Test nix-shell shebang mode
-sed -e "s|@ENV_PROG@|$(type -p env)|" shell.shebang.sh > $TEST_ROOT/shell.shebang.sh
+sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/shell.shebang.sh
 chmod a+rx $TEST_ROOT/shell.shebang.sh
 
 output=$($TEST_ROOT/shell.shebang.sh abc def)
@@ -49,7 +63,7 @@ output=$($TEST_ROOT/shell.shebang.sh abc def)
 
 # Test nix-shell shebang mode again with metacharacters in the filename.
 # First word of filename is chosen to not match any file in the test root.
-sed -e "s|@ENV_PROG@|$(type -p env)|" shell.shebang.sh > $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh
+sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh
 chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh
 
 output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.sh abc def)
@@ -58,7 +72,7 @@ output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.sh abc def)
 # Test nix-shell shebang mode for ruby
 # This uses a fake interpreter that returns the arguments passed
 # This, in turn, verifies the `rc` script is valid and the `load()` script (given using `-e`) is as expected.
-sed -e "s|@SHELL_PROG@|$(type -p nix-shell)|" shell.shebang.rb > $TEST_ROOT/shell.shebang.rb
+sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/shell.shebang.rb
 chmod a+rx $TEST_ROOT/shell.shebang.rb
 
 output=$($TEST_ROOT/shell.shebang.rb abc ruby)
@@ -66,20 +80,20 @@ output=$($TEST_ROOT/shell.shebang.rb abc ruby)
 
 # Test nix-shell shebang mode for ruby again with metacharacters in the filename.
 # Note: fake interpreter only space-separates args without adding escapes to its output.
-sed -e "s|@SHELL_PROG@|$(type -p nix-shell)|" shell.shebang.rb > $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb
+sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb
 chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb
 
 output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.rb abc ruby)
 [ "$output" = '-e load(ARGV.shift) -- '"$TEST_ROOT"'/spaced \'\''"shell.shebang.rb abc ruby' ]
 
 # Test 'nix develop'.
-nix develop -f shell.nix shellDrv -c bash -c '[[ -n $stdenv ]]'
+nix_develop -f shell.nix shellDrv -c bash -c '[[ -n $stdenv ]]'
 
 # Ensure `nix develop -c` preserves stdin
 echo foo | nix develop -f shell.nix shellDrv -c cat | grep -q foo
 
 # Ensure `nix develop -c` actually executes the command if stdout isn't a terminal
-nix develop -f shell.nix shellDrv -c echo foo |& grep -q foo
+nix_develop -f shell.nix shellDrv -c echo foo |& grep -q foo
 
 # Test 'nix print-dev-env'.
 source <(nix print-dev-env -f shell.nix shellDrv)
diff --git a/tests/post-hook.sh b/tests/post-hook.sh
index aa3e6a574..238a8f826 100644
--- a/tests/post-hook.sh
+++ b/tests/post-hook.sh
@@ -4,7 +4,7 @@ clearStore
 
 rm -f $TEST_ROOT/result
 
-export REMOTE_STORE=$TEST_ROOT/remote_store
+export REMOTE_STORE=file:$TEST_ROOT/remote_store
 
 # Build the dependencies and push them to the remote store
 nix-build -o $TEST_ROOT/result dependencies.nix --post-build-hook $PWD/push-to-store.sh
diff --git a/tests/recursive.sh b/tests/recursive.sh
index a55b061b5..b6740877d 100644
--- a/tests/recursive.sh
+++ b/tests/recursive.sh
@@ -9,9 +9,9 @@ rm -f $TEST_ROOT/result
 
 export unreachable=$(nix store add-path ./recursive.sh)
 
-NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr '
+NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr '
   with import ./config.nix;
-  mkDerivation {
+  mkDerivation rec {
     name = "recursive";
     dummy = builtins.toFile "dummy" "bla bla";
     SHELL = shell;
@@ -19,11 +19,13 @@ NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command r
     # Note: this is a string without context.
     unreachable = builtins.getEnv "unreachable";
 
+    NIX_TESTS_CA_BY_DEFAULT = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT";
+
     requiredSystemFeatures = [ "recursive-nix" ];
 
     buildCommand = '\'\''
      mkdir $out
-      opts="--experimental-features nix-command"
+      opts="--experimental-features nix-command ${if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else ""}"
 
       PATH=${builtins.getEnv "NIX_BIN_DIR"}:$PATH
 
@@ -46,16 +48,15 @@ NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command r
       # Add it to our closure.
       ln -s $foobar $out/foobar
 
-      [[ $(nix $opts path-info --all | wc -l) -eq 3 ]]
+      [[ $(nix $opts path-info --all | wc -l) -eq 4 ]]
 
       # Build a derivation.
       nix $opts build -L --impure --expr '\''
-        derivation {
+        with import ${./config.nix};
+        mkDerivation {
           name = "inner1";
-          builder = builtins.getEnv "SHELL";
-          system = builtins.getEnv "system";
+          buildCommand = "echo $fnord blaat > $out";
           fnord = builtins.toFile "fnord" "fnord";
-          args = [ "-c" "echo $fnord blaat > $out" ];
         }
       '\''
 
diff --git a/tests/shell.nix b/tests/shell.nix
index 24ebcc04c..f174db583 100644
--- a/tests/shell.nix
+++ b/tests/shell.nix
@@ -1,6 +1,18 @@
-{ inNixShell ? false }:
+{ inNixShell ? false, contentAddressed ? false }:
 
-with import ./config.nix;
+let cfg = import ./config.nix; in
+with cfg;
+
+let
+  mkDerivation =
+    if contentAddressed then
+      args: cfg.mkDerivation ({
+        __contentAddressed = true;
+        outputHashMode = "recursive";
+        outputHashAlgo = "sha256";
+      } // args)
+    else cfg.mkDerivation;
+in
 
 let pkgs = rec {
   setupSh = builtins.toFile "setup" ''
@@ -22,6 +34,8 @@ let pkgs = rec {
     name = "shellDrv";
     builder = "/does/not/exist";
     VAR_FROM_NIX = "bar";
+    ASCII_PERCENT = "%";
+    ASCII_AT = "@";
     TEST_inNixShell = if inNixShell then "true" else "false";
     inherit stdenv;
     outputs = ["dev" "out"];
diff --git a/tests/substitute-with-invalid-ca.sh b/tests/substitute-with-invalid-ca.sh
new file mode 100644
index 000000000..4d0b01e0f
--- /dev/null
+++ b/tests/substitute-with-invalid-ca.sh
@@ -0,0 +1,38 @@
+source common.sh
+
+BINARY_CACHE=file://$cacheDir
+
+getHash() {
+    basename "$1" | cut -d '-' -f 1
+}
+getRemoteNarInfo () {
+    echo "$cacheDir/$(getHash "$1").narinfo"
+}
+
+cat <<EOF > $TEST_HOME/good.txt
+I’m a good path
+EOF
+
+cat <<EOF > $TEST_HOME/bad.txt
+I’m a bad path
+EOF
+
+good=$(nix-store --add $TEST_HOME/good.txt)
+bad=$(nix-store --add $TEST_HOME/bad.txt)
+nix copy --to "$BINARY_CACHE" "$good"
+nix copy --to "$BINARY_CACHE" "$bad"
+nix-collect-garbage >/dev/null 2>&1
+
+# Falsifying the narinfo file for '$good'
+goodPathNarInfo=$(getRemoteNarInfo "$good")
+badPathNarInfo=$(getRemoteNarInfo "$bad")
+for fieldName in URL FileHash FileSize NarHash NarSize; do
+    sed -i "/^$fieldName/d" "$goodPathNarInfo"
+    grep -E "^$fieldName" "$badPathNarInfo" >> "$goodPathNarInfo"
+done
+
+# Copying back '$good' from the binary cache. This should fail as it is
+# corrupted
+if nix copy --from "$BINARY_CACHE" "$good"; then
+    fail "Importing a path with a wrong CA field should fail"
+fi