-rw-r--r--  .github/workflows/test.yml                    |   3
-rw-r--r--  boehmgc-coroutine-sp-fallback.diff            |  42
-rw-r--r--  configure.ac                                  |   2
-rw-r--r--  flake.lock                                    |   6
-rw-r--r--  flake.nix                                     | 237
-rwxr-xr-x  maintainers/upload-release.pl                 |   4
-rwxr-xr-x  scripts/install.in                            |  10
-rw-r--r--  src/libcmd/command.cc                         |   2
-rw-r--r--  src/libexpr/attr-path.cc                      |   6
-rw-r--r--  src/libexpr/eval.cc                           |  32
-rw-r--r--  src/libexpr/flake/flake.cc                    |   7
-rw-r--r--  src/libmain/progress-bar.cc                   |   2
-rw-r--r--  src/libstore/build/derivation-goal.cc         | 127
-rw-r--r--  src/libstore/build/local-derivation-goal.cc   |  22
-rw-r--r--  src/libstore/build/local-derivation-goal.hh   |   8
-rw-r--r--  src/libstore/machines.cc                      |  17
-rw-r--r--  src/libutil/logging.cc                        |   2
-rw-r--r--  src/libutil/url.cc                            |   2
-rw-r--r--  src/libutil/util.cc                           |   6
-rw-r--r--  src/libutil/util.hh                           |   3
-rw-r--r--  src/nix/develop.cc                            |   8
-rw-r--r--  src/nix/profile-remove.md                     |   1
-rw-r--r--  src/nix/repl.cc                               |  28
-rw-r--r--  tests/build.sh                                |   6
-rw-r--r--  tests/ca/duplicate-realisation-in-closure.sh  |   4
-rwxr-xr-x  tests/ca/post-hook.sh                         |  11
-rwxr-xr-x  tests/ca/recursive.sh                         |  11
-rw-r--r--  tests/config.nix.in                           |   4
-rw-r--r--  tests/gc.sh                                   |   2
-rw-r--r--  tests/local.mk                                |   2
-rw-r--r--  tests/post-hook.sh                            |   2
-rw-r--r--  tests/recursive.sh                            |  17
-rw-r--r--  tests/shell.nix                               |   2
33 files changed, 448 insertions, 190 deletions
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 17a79dc97..b2b1f07fb 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -20,8 +20,7 @@ jobs:
name: '${{ env.CACHIX_NAME }}'
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
- #- run: nix flake check
- - run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
+ - run: nix-build -A checks.$(nix-instantiate --eval -E '(builtins.currentSystem)')
check_cachix:
name: Cachix secret present for installer tests
runs-on: ubuntu-latest
diff --git a/boehmgc-coroutine-sp-fallback.diff b/boehmgc-coroutine-sp-fallback.diff
new file mode 100644
index 000000000..fa8dd0325
--- /dev/null
+++ b/boehmgc-coroutine-sp-fallback.diff
@@ -0,0 +1,42 @@
+diff --git a/pthread_stop_world.c b/pthread_stop_world.c
+index 1cee6a0b..46c3acd9 100644
+--- a/pthread_stop_world.c
++++ b/pthread_stop_world.c
+@@ -674,6 +674,8 @@ GC_INNER void GC_push_all_stacks(void)
+ struct GC_traced_stack_sect_s *traced_stack_sect;
+ pthread_t self = pthread_self();
+ word total_size = 0;
++ size_t stack_limit;
++ pthread_attr_t pattr;
+
+ if (!EXPECT(GC_thr_initialized, TRUE))
+ GC_thr_init();
+@@ -723,6 +725,28 @@ GC_INNER void GC_push_all_stacks(void)
+ hi = p->altstack + p->altstack_size;
+ /* FIXME: Need to scan the normal stack too, but how ? */
+ /* FIXME: Assume stack grows down */
++ } else {
++ if (pthread_getattr_np(p->id, &pattr)) {
++ ABORT("GC_push_all_stacks: pthread_getattr_np failed!");
++ }
++ if (pthread_attr_getstacksize(&pattr, &stack_limit)) {
++ ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!");
++ }
++ // When a thread goes into a coroutine, we lose its original sp until
++ // control flow returns to the thread.
++ // While in the coroutine, the sp points outside the thread stack,
++ // so we can detect this and push the entire thread stack instead,
++ // as an approximation.
++ // We assume that the coroutine has similarly added its entire stack.
++ // This could be made accurate by cooperating with the application
++ // via new functions and/or callbacks.
++ #ifndef STACK_GROWS_UP
++ if (lo >= hi || lo < hi - stack_limit) { // sp outside stack
++ lo = hi - stack_limit;
++ }
++ #else
++ #error "STACK_GROWS_UP not supported in boost_coroutine2 (as of june 2021), so we don't support it in Nix."
++ #endif
+ }
+ GC_push_all_stack_sections(lo, hi, traced_stack_sect);
+ # ifdef STACK_GROWS_UP
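The heuristic in the patch above boils down to a bounds check: with a downward-growing stack whose top is hi and whose size is stack_limit, a saved stack pointer lo that falls outside [hi - stack_limit, hi) is assumed to belong to a coroutine, so the whole thread stack is scanned instead. A minimal standalone sketch of that check follows; the function name and the numeric addresses are illustrative, not part of the GC code.

    #include <cstdint>
    #include <iostream>

    // The patch's heuristic, on integer addresses for clarity: 'hi' is the top
    // of the thread stack, 'stackLimit' its size; the stack grows downwards.
    // If the saved stack pointer 'lo' lies outside [hi - stackLimit, hi), the
    // thread is assumed to be running a coroutine and the whole stack is pushed.
    static std::uintptr_t clampStackPointer(std::uintptr_t lo, std::uintptr_t hi,
                                            std::uintptr_t stackLimit)
    {
        if (lo >= hi || lo < hi - stackLimit)  // sp outside the thread stack
            return hi - stackLimit;            // push the entire stack instead
        return lo;                             // sp is fine: push [lo, hi)
    }

    int main()
    {
        std::uintptr_t hi = 0x7fff0000, limit = 8 * 1024 * 1024;
        std::cout << std::hex
                  << clampStackPointer(0x7ffe8000, hi, limit) << "\n"   // inside: unchanged
                  << clampStackPointer(0x10000000, hi, limit) << "\n";  // coroutine: hi - limit
    }
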
diff --git a/configure.ac b/configure.ac
index 03b123366..6e563eec3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -231,7 +231,7 @@ AC_SUBST(HAVE_SECCOMP, [$have_seccomp])
# Look for aws-cpp-sdk-s3.
AC_LANG_PUSH(C++)
AC_CHECK_HEADERS([aws/s3/S3Client.h],
- [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1],
+ [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1],
[AC_DEFINE([ENABLE_S3], [0], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=])
AC_SUBST(ENABLE_S3, [$enable_s3])
AC_LANG_POP(C++)
diff --git a/flake.lock b/flake.lock
index 8aad22957..5fc969d7b 100644
--- a/flake.lock
+++ b/flake.lock
@@ -19,11 +19,11 @@
},
"nixpkgs": {
"locked": {
- "lastModified": 1622593737,
- "narHash": "sha256-9loxFJg85AbzJrSkU4pE/divZ1+zOxDy2FSjlrufCB8=",
+ "lastModified": 1624862269,
+ "narHash": "sha256-JFcsh2+7QtfKdJFoPibLFPLgIW6Ycnv8Bts9a7RYme0=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "bb8a5e54845012ed1375ffd5f317d2fdf434b20e",
+ "rev": "f77036342e2b690c61c97202bf48f2ce13acc022",
"type": "github"
},
"original": {
diff --git a/flake.nix b/flake.nix
index 466145366..5430d3904 100644
--- a/flake.nix
+++ b/flake.nix
@@ -20,6 +20,8 @@
linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ];
+ crossSystems = [ "armv6l-linux" "armv7l-linux" ];
+
forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system);
# Memoize nixpkgs for different platforms for efficiency.
@@ -79,7 +81,7 @@
buildPackages.mercurial
buildPackages.jq
]
- ++ lib.optionals stdenv.isLinux [(pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)];
+ ++ lib.optionals stdenv.hostPlatform.isLinux [(buildPackages.util-linuxMinimal or buildPackages.utillinuxMinimal)];
buildDeps =
[ curl
@@ -93,7 +95,7 @@
]
++ lib.optionals stdenv.isLinux [libseccomp]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
- ++ lib.optional stdenv.isx86_64 libcpuid;
+ ++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid;
awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin)
(aws-sdk-cpp.override {
@@ -102,7 +104,13 @@
});
propagatedDeps =
- [ (boehmgc.override { enableLargeConfig = true; })
+ [ ((boehmgc.override {
+ enableLargeConfig = true;
+ }).overrideAttrs(o: {
+ patches = (o.patches or []) ++ [
+ ./boehmgc-coroutine-sp-fallback.diff
+ ];
+ }))
];
perlDeps =
@@ -133,10 +141,11 @@
substitute ${./scripts/install.in} $out/install \
${pkgs.lib.concatMapStrings
- (system:
- '' \
- --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
- --replace '@tarballPath_${system}@' $(tarballPath ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
+ (system: let
+ tarball = if builtins.elem system crossSystems then self.hydraJobs.binaryTarballCross.x86_64-linux.${system} else self.hydraJobs.binaryTarball.${system};
+ in '' \
+ --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \
+ --replace '@tarballPath_${system}@' $(tarballPath ${tarball}/*.tar.xz) \
''
)
systems
@@ -174,6 +183,77 @@
};
+ binaryTarball = buildPackages: nix: pkgs: let
+ inherit (pkgs) cacert;
+ installerClosureInfo = buildPackages.closureInfo { rootPaths = [ nix cacert ]; };
+ in
+
+ buildPackages.runCommand "nix-binary-tarball-${version}"
+ { #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck;
+ meta.description = "Distribution-independent Nix bootstrap binaries for ${pkgs.system}";
+ }
+ ''
+ cp ${installerClosureInfo}/registration $TMPDIR/reginfo
+ cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
+ substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
+ --subst-var-by nix ${nix} \
+ --subst-var-by cacert ${cacert}
+
+ substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
+ --subst-var-by nix ${nix} \
+ --subst-var-by cacert ${cacert}
+ substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \
+ --subst-var-by nix ${nix} \
+ --subst-var-by cacert ${cacert}
+ substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \
+ --subst-var-by nix ${nix} \
+ --subst-var-by cacert ${cacert}
+
+ if type -p shellcheck; then
+ # SC1090: Don't worry about not being able to find
+ # $nix/etc/profile.d/nix.sh
+ shellcheck --exclude SC1090 $TMPDIR/install
+ shellcheck $TMPDIR/create-darwin-volume.sh
+ shellcheck $TMPDIR/install-darwin-multi-user.sh
+ shellcheck $TMPDIR/install-systemd-multi-user.sh
+
+ # SC1091: Don't panic about not being able to source
+ # /etc/profile
+ # SC2002: Ignore "useless cat" "error", when loading
+ # .reginfo, as the cat is a much cleaner
+ # implementation, even though it is "useless"
+ # SC2116: Allow ROOT_HOME=$(echo ~root) for resolving
+ # root's home directory
+ shellcheck --external-sources \
+ --exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user
+ fi
+
+ chmod +x $TMPDIR/install
+ chmod +x $TMPDIR/create-darwin-volume.sh
+ chmod +x $TMPDIR/install-darwin-multi-user.sh
+ chmod +x $TMPDIR/install-systemd-multi-user.sh
+ chmod +x $TMPDIR/install-multi-user
+ dir=nix-${version}-${pkgs.system}
+ fn=$out/$dir.tar.xz
+ mkdir -p $out/nix-support
+ echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
+ tar cvfJ $fn \
+ --owner=0 --group=0 --mode=u+rw,uga+r \
+ --absolute-names \
+ --hard-dereference \
+ --transform "s,$TMPDIR/install,$dir/install," \
+ --transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
+ --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
+ --transform "s,$NIX_STORE,$dir/store,S" \
+ $TMPDIR/install \
+ $TMPDIR/create-darwin-volume.sh \
+ $TMPDIR/install-darwin-multi-user.sh \
+ $TMPDIR/install-systemd-multi-user.sh \
+ $TMPDIR/install-multi-user \
+ $TMPDIR/reginfo \
+ $(cat ${installerClosureInfo}/store-paths)
+ '';
+
in {
# A Nixpkgs overlay that overrides the 'nix' and
@@ -285,7 +365,7 @@
outputs = [ "out" "bin" "dev" ];
- nativeBuildInputs = [ which ];
+ nativeBuildInputs = [ buildPackages.which ];
configurePhase = ''
${if (stdenv.isDarwin && stdenv.isAarch64) then "echo \"HAVE_SANDBOX_INIT=false\" > configure.local" else ""}
@@ -304,92 +384,33 @@
buildStatic = nixpkgs.lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static);
+ buildCross = nixpkgs.lib.genAttrs crossSystems (crossSystem:
+ nixpkgs.lib.genAttrs ["x86_64-linux"] (system: self.packages.${system}."nix-${crossSystem}"));
+
# Perl bindings for various platforms.
perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings);
# Binary tarball for various platforms, containing a Nix store
# with the closure of 'nix' package, and the second half of
# the installation script.
- binaryTarball = nixpkgs.lib.genAttrs systems (system:
-
- with nixpkgsFor.${system};
-
- let
- installerClosureInfo = closureInfo { rootPaths = [ nix cacert ]; };
- in
-
- runCommand "nix-binary-tarball-${version}"
- { #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck;
- meta.description = "Distribution-independent Nix bootstrap binaries for ${system}";
- }
- ''
- cp ${installerClosureInfo}/registration $TMPDIR/reginfo
- cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
- substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
- --subst-var-by nix ${nix} \
- --subst-var-by cacert ${cacert}
-
- substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
- --subst-var-by nix ${nix} \
- --subst-var-by cacert ${cacert}
- substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \
- --subst-var-by nix ${nix} \
- --subst-var-by cacert ${cacert}
- substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \
- --subst-var-by nix ${nix} \
- --subst-var-by cacert ${cacert}
-
- if type -p shellcheck; then
- # SC1090: Don't worry about not being able to find
- # $nix/etc/profile.d/nix.sh
- shellcheck --exclude SC1090 $TMPDIR/install
- shellcheck $TMPDIR/create-darwin-volume.sh
- shellcheck $TMPDIR/install-darwin-multi-user.sh
- shellcheck $TMPDIR/install-systemd-multi-user.sh
-
- # SC1091: Don't panic about not being able to source
- # /etc/profile
- # SC2002: Ignore "useless cat" "error", when loading
- # .reginfo, as the cat is a much cleaner
- # implementation, even though it is "useless"
- # SC2116: Allow ROOT_HOME=$(echo ~root) for resolving
- # root's home directory
- shellcheck --external-sources \
- --exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user
- fi
-
- chmod +x $TMPDIR/install
- chmod +x $TMPDIR/create-darwin-volume.sh
- chmod +x $TMPDIR/install-darwin-multi-user.sh
- chmod +x $TMPDIR/install-systemd-multi-user.sh
- chmod +x $TMPDIR/install-multi-user
- dir=nix-${version}-${system}
- fn=$out/$dir.tar.xz
- mkdir -p $out/nix-support
- echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
- tar cvfJ $fn \
- --owner=0 --group=0 --mode=u+rw,uga+r \
- --absolute-names \
- --hard-dereference \
- --transform "s,$TMPDIR/install,$dir/install," \
- --transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
- --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
- --transform "s,$NIX_STORE,$dir/store,S" \
- $TMPDIR/install \
- $TMPDIR/create-darwin-volume.sh \
- $TMPDIR/install-darwin-multi-user.sh \
- $TMPDIR/install-systemd-multi-user.sh \
- $TMPDIR/install-multi-user \
- $TMPDIR/reginfo \
- $(cat ${installerClosureInfo}/store-paths)
- '');
+ binaryTarball = nixpkgs.lib.genAttrs systems (system: binaryTarball nixpkgsFor.${system} nixpkgsFor.${system}.nix nixpkgsFor.${system});
+
+ binaryTarballCross = nixpkgs.lib.genAttrs ["x86_64-linux"] (system: builtins.listToAttrs (map (crossSystem: {
+ name = crossSystem;
+ value = let
+ nixpkgsCross = import nixpkgs {
+ inherit system crossSystem;
+ overlays = [ self.overlay ];
+ };
+ in binaryTarball nixpkgsFor.${system} self.packages.${system}."nix-${crossSystem}" nixpkgsCross;
+ }) crossSystems));
# The first half of the installation script. This is uploaded
# to https://nixos.org/nix/install. It downloads the binary
# tarball for the user's system and calls the second half of the
# installation script.
- installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" ];
- installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" ];
+ installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "armv6l-linux" "armv7l-linux" ];
+ installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" "armv6l-linux" "armv7l-linux"];
# Line coverage analysis.
coverage =
@@ -483,11 +504,14 @@
# `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work
# againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable;
} "touch $out";
- });
+ } // (if system == "x86_64-linux" then (builtins.listToAttrs (map (crossSystem: {
+ name = "binaryTarball-${crossSystem}";
+ value = self.hydraJobs.binaryTarballCross.${system}.${crossSystem};
+ }) crossSystems)) else {}));
packages = forAllSystems (system: {
inherit (nixpkgsFor.${system}) nix;
- } // nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) {
+ } // (nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) {
nix-static = let
nixpkgs = nixpkgsFor.${system}.pkgsStatic;
in with commonDeps nixpkgs; nixpkgs.stdenv.mkDerivation {
@@ -525,8 +549,49 @@
stripAllList = ["bin"];
strictDeps = true;
+
+ hardeningDisable = [ "pie" ];
+ };
+ } // builtins.listToAttrs (map (crossSystem: {
+ name = "nix-${crossSystem}";
+ value = let
+ nixpkgsCross = import nixpkgs {
+ inherit system crossSystem;
+ overlays = [ self.overlay ];
+ };
+ in with commonDeps nixpkgsCross; nixpkgsCross.stdenv.mkDerivation {
+ name = "nix-${version}";
+
+ src = self;
+
+ VERSION_SUFFIX = versionSuffix;
+
+ outputs = [ "out" "dev" "doc" ];
+
+ nativeBuildInputs = nativeBuildDeps;
+ buildInputs = buildDeps ++ propagatedDeps;
+
+ configureFlags = [ "--sysconfdir=/etc" "--disable-doc-gen" ];
+
+ enableParallelBuilding = true;
+
+ makeFlags = "profiledir=$(out)/etc/profile.d";
+
+ doCheck = true;
+
+ installFlags = "sysconfdir=$(out)/etc";
+
+ postInstall = ''
+ mkdir -p $doc/nix-support
+ echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
+ mkdir -p $out/nix-support
+ echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products
+ '';
+
+ doInstallCheck = true;
+ installCheckFlags = "sysconfdir=$(out)/etc";
};
- });
+ }) crossSystems)));
defaultPackage = forAllSystems (system: self.packages.${system}.nix);
diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl
index c2933300f..3a8d27499 100755
--- a/maintainers/upload-release.pl
+++ b/maintainers/upload-release.pl
@@ -110,6 +110,9 @@ downloadFile("binaryTarball.i686-linux", "1");
downloadFile("binaryTarball.x86_64-linux", "1");
downloadFile("binaryTarball.aarch64-linux", "1");
downloadFile("binaryTarball.x86_64-darwin", "1");
+downloadFile("binaryTarball.aarch64-darwin", "1");
+downloadFile("binaryTarballCross.x86_64-linux.armv6l-linux", "1");
+downloadFile("binaryTarballCross.x86_64-linux.armv7l-linux", "1");
downloadFile("installerScript", "1");
for my $fn (glob "$tmpDir/*") {
@@ -153,6 +156,7 @@ write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
" i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
" aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" .
" x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
+ " aarch64-darwin = \"" . getStorePath("build.aarch64-darwin") . "\";\n" .
"}\n");
system("cd $nixpkgsDir && git commit -a -m 'nix-fallback-paths.nix: Update to $version'") == 0 or die;
diff --git a/scripts/install.in b/scripts/install.in
index 39016d161..e801d4268 100755
--- a/scripts/install.in
+++ b/scripts/install.in
@@ -40,6 +40,16 @@ case "$(uname -s).$(uname -m)" in
path=@tarballPath_aarch64-linux@
system=aarch64-linux
;;
+ Linux.armv6l_linux)
+ hash=@tarballHash_armv6l-linux@
+ path=@tarballPath_armv6l-linux@
+ system=armv6l-linux
+ ;;
+ Linux.armv7l_linux)
+ hash=@tarballHash_armv7l-linux@
+ path=@tarballPath_armv7l-linux@
+ system=armv7l-linux
+ ;;
Darwin.x86_64)
hash=@tarballHash_x86_64-darwin@
path=@tarballPath_x86_64-darwin@
diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc
index 569c4b9e4..6777b23be 100644
--- a/src/libcmd/command.cc
+++ b/src/libcmd/command.cc
@@ -188,7 +188,7 @@ void MixProfile::updateProfile(const BuiltPaths & buildables)
}
if (result.size() != 1)
- throw Error("'--profile' requires that the arguments produce a single store path, but there are %d", result.size());
+ throw UsageError("'--profile' requires that the arguments produce a single store path, but there are %d", result.size());
updateProfile(result[0]);
}
diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc
index 9dd557205..867eb13a5 100644
--- a/src/libexpr/attr-path.cc
+++ b/src/libexpr/attr-path.cc
@@ -19,7 +19,7 @@ static Strings parseAttrPath(std::string_view s)
++i;
while (1) {
if (i == s.end())
- throw Error("missing closing quote in selection path '%1%'", s);
+ throw ParseError("missing closing quote in selection path '%1%'", s);
if (*i == '"') break;
cur.push_back(*i++);
}
@@ -116,14 +116,14 @@ Pos findDerivationFilename(EvalState & state, Value & v, std::string what)
auto colon = pos.rfind(':');
if (colon == std::string::npos)
- throw Error("cannot parse meta.position attribute '%s'", pos);
+ throw ParseError("cannot parse meta.position attribute '%s'", pos);
std::string filename(pos, 0, colon);
unsigned int lineno;
try {
lineno = std::stoi(std::string(pos, colon + 1));
} catch (std::invalid_argument & e) {
- throw Error("cannot parse line number '%s'", pos);
+ throw ParseError("cannot parse line number '%s'", pos);
}
Symbol file = state.symbols.create(filename);
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index ef9f8efca..c3206a577 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -64,7 +64,11 @@ static char * dupStringWithLen(const char * s, size_t size)
RootValue allocRootValue(Value * v)
{
+#if HAVE_BOEHMGC
return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v);
+#else
+ return std::make_shared<Value *>(v);
+#endif
}
@@ -233,22 +237,34 @@ static void * oomHandler(size_t requested)
}
class BoehmGCStackAllocator : public StackAllocator {
- boost::coroutines2::protected_fixedsize_stack stack {
- // We allocate 8 MB, the default max stack size on NixOS.
- // A smaller stack might be quicker to allocate but reduces the stack
- // depth available for source filter expressions etc.
- std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))
+ boost::coroutines2::protected_fixedsize_stack stack {
+ // We allocate 8 MB, the default max stack size on NixOS.
+ // A smaller stack might be quicker to allocate but reduces the stack
+ // depth available for source filter expressions etc.
+ std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))
};
+ // This is specific to boost::coroutines2::protected_fixedsize_stack.
+ // The stack protection page is included in sctx.size, so we have to
+ // subtract one page size from the stack size.
+ std::size_t pfss_usable_stack_size(boost::context::stack_context &sctx) {
+ return sctx.size - boost::context::stack_traits::page_size();
+ }
+
public:
boost::context::stack_context allocate() override {
auto sctx = stack.allocate();
- GC_add_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp);
+
+ // Stacks generally start at a high address and grow to lower addresses.
+ // Architectures that do the opposite are rare; in fact so rare that
+ // boost_routine does not implement it.
+ // So we subtract the stack size.
+ GC_add_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
return sctx;
}
void deallocate(boost::context::stack_context sctx) override {
- GC_remove_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp);
+ GC_remove_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
stack.deallocate(sctx);
}
@@ -908,7 +924,7 @@ void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial)
// computation.
if (mustBeTrivial &&
!(dynamic_cast<ExprAttrs *>(e)))
- throw Error("file '%s' must be an attribute set", path);
+ throw EvalError("file '%s' must be an attribute set", path);
eval(e, v);
} catch (Error & e) {
addErrorTrace(e, "while evaluating the file '%1%':", path2);
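The allocator change above rests on two facts spelled out in its comments: protected_fixedsize_stack counts the guard page in sctx.size, and sctx.sp is the high end of a downward-growing stack, so the range handed to the GC is [sp - (size - page_size), sp). A minimal sketch of that address arithmetic, using a stand-in struct and placeholder numbers rather than the boost or Boehm APIs:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <utility>

    struct StackContext { std::uintptr_t sp; std::size_t size; };  // stand-in for boost's stack_context

    // Usable range for GC scanning: size includes the protection page, so
    // subtract one page; sp is the high end, so the low end is sp minus the
    // usable size.
    static std::pair<std::uintptr_t, std::uintptr_t>
    gcRootRange(const StackContext & sctx, std::size_t pageSize)
    {
        std::size_t usable = sctx.size - pageSize;
        return { sctx.sp - usable, sctx.sp };   // [low, high) as passed to GC_add_roots
    }

    int main()
    {
        StackContext sctx { 0x7f0000800000, 8 * 1024 * 1024 };  // 8 MiB including guard page
        auto [lo, hi] = gcRootRange(sctx, 4096);
        std::cout << std::hex << lo << " .. " << hi << "\n";
    }
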
diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc
index 2e94490d4..8e6f06949 100644
--- a/src/libexpr/flake/flake.cc
+++ b/src/libexpr/flake/flake.cc
@@ -359,7 +359,12 @@ LockedFlake lockFlake(
ancestors? */
auto i = overrides.find(inputPath);
bool hasOverride = i != overrides.end();
- if (hasOverride) overridesUsed.insert(inputPath);
+ if (hasOverride) {
+ overridesUsed.insert(inputPath);
+ // Respect the “flakeness” of the input even if we
+ // override it
+ i->second.isFlake = input2.isFlake;
+ }
auto & input = hasOverride ? i->second : input2;
/* Resolve 'follows' later (since it may refer to an input
diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc
index 15354549a..b2a6e2a82 100644
--- a/src/libmain/progress-bar.cc
+++ b/src/libmain/progress-bar.cc
@@ -484,7 +484,7 @@ Logger * makeProgressBar(bool printBuildLogs)
{
return new ProgressBar(
printBuildLogs,
- isatty(STDERR_FILENO) && getEnv("TERM").value_or("dumb") != "dumb"
+ shouldANSI()
);
}
diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc
index 8c9ef0101..73d1ed7cc 100644
--- a/src/libstore/build/derivation-goal.cc
+++ b/src/libstore/build/derivation-goal.cc
@@ -739,6 +739,63 @@ void DerivationGoal::cleanupPostOutputsRegisteredModeNonCheck()
{
}
+void runPostBuildHook(
+ Store & store,
+ Logger & logger,
+ const StorePath & drvPath,
+ StorePathSet outputPaths
+)
+{
+ auto hook = settings.postBuildHook;
+ if (hook == "")
+ return;
+
+ Activity act(logger, lvlInfo, actPostBuildHook,
+ fmt("running post-build-hook '%s'", settings.postBuildHook),
+ Logger::Fields{store.printStorePath(drvPath)});
+ PushActivity pact(act.id);
+ std::map<std::string, std::string> hookEnvironment = getEnv();
+
+ hookEnvironment.emplace("DRV_PATH", store.printStorePath(drvPath));
+ hookEnvironment.emplace("OUT_PATHS", chomp(concatStringsSep(" ", store.printStorePathSet(outputPaths))));
+
+ RunOptions opts(settings.postBuildHook, {});
+ opts.environment = hookEnvironment;
+
+ struct LogSink : Sink {
+ Activity & act;
+ std::string currentLine;
+
+ LogSink(Activity & act) : act(act) { }
+
+ void operator() (std::string_view data) override {
+ for (auto c : data) {
+ if (c == '\n') {
+ flushLine();
+ } else {
+ currentLine += c;
+ }
+ }
+ }
+
+ void flushLine() {
+ act.result(resPostBuildLogLine, currentLine);
+ currentLine.clear();
+ }
+
+ ~LogSink() {
+ if (currentLine != "") {
+ currentLine += '\n';
+ flushLine();
+ }
+ }
+ };
+ LogSink sink(act);
+
+ opts.standardOut = &sink;
+ opts.mergeStderrToStdout = true;
+ runProgram2(opts);
+}
void DerivationGoal::buildDone()
{
@@ -804,57 +861,15 @@ void DerivationGoal::buildDone()
being valid. */
registerOutputs();
- if (settings.postBuildHook != "") {
- Activity act(*logger, lvlInfo, actPostBuildHook,
- fmt("running post-build-hook '%s'", settings.postBuildHook),
- Logger::Fields{worker.store.printStorePath(drvPath)});
- PushActivity pact(act.id);
- StorePathSet outputPaths;
- for (auto i : drv->outputs) {
- outputPaths.insert(finalOutputs.at(i.first));
- }
- std::map<std::string, std::string> hookEnvironment = getEnv();
-
- hookEnvironment.emplace("DRV_PATH", worker.store.printStorePath(drvPath));
- hookEnvironment.emplace("OUT_PATHS", chomp(concatStringsSep(" ", worker.store.printStorePathSet(outputPaths))));
-
- RunOptions opts(settings.postBuildHook, {});
- opts.environment = hookEnvironment;
-
- struct LogSink : Sink {
- Activity & act;
- std::string currentLine;
-
- LogSink(Activity & act) : act(act) { }
-
- void operator() (std::string_view data) override {
- for (auto c : data) {
- if (c == '\n') {
- flushLine();
- } else {
- currentLine += c;
- }
- }
- }
-
- void flushLine() {
- act.result(resPostBuildLogLine, currentLine);
- currentLine.clear();
- }
-
- ~LogSink() {
- if (currentLine != "") {
- currentLine += '\n';
- flushLine();
- }
- }
- };
- LogSink sink(act);
-
- opts.standardOut = &sink;
- opts.mergeStderrToStdout = true;
- runProgram2(opts);
- }
+ StorePathSet outputPaths;
+ for (auto & [_, path] : finalOutputs)
+ outputPaths.insert(path);
+ runPostBuildHook(
+ worker.store,
+ *logger,
+ drvPath,
+ outputPaths
+ );
if (buildMode == bmCheck) {
cleanupPostOutputsRegisteredModeCheck();
@@ -910,6 +925,8 @@ void DerivationGoal::resolvedFinished() {
auto resolvedHashes = staticOutputHashes(worker.store, *resolvedDrv);
+ StorePathSet outputPaths;
+
// `wantedOutputs` might be empty, which means “all the outputs”
auto realWantedOutputs = wantedOutputs;
if (realWantedOutputs.empty())
@@ -930,6 +947,7 @@ void DerivationGoal::resolvedFinished() {
newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath);
signRealisation(newRealisation);
worker.store.registerDrvOutput(newRealisation);
+ outputPaths.insert(realisation->outPath);
} else {
// If we don't have a realisation, then it must mean that something
// failed when building the resolved drv
@@ -937,6 +955,13 @@ void DerivationGoal::resolvedFinished() {
}
}
+ runPostBuildHook(
+ worker.store,
+ *logger,
+ drvPath,
+ outputPaths
+ );
+
// This is potentially a bit fishy in terms of error reporting. Not sure
// how to do it in a cleaner way
amDone(nrFailed == 0 ? ecSuccess : ecFailed, ex);
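The extracted runPostBuildHook hands the hook two environment variables, DRV_PATH and OUT_PATHS, and forwards the hook's output line by line; the LogSink flushes a trailing partial line in its destructor so nothing is lost. A standalone sketch of that chunk-to-line splitting, with a plain std::function standing in for the Sink/Activity machinery and chunk boundaries chosen arbitrarily:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <string_view>

    // Splits a stream of arbitrarily sized chunks into lines, emitting each
    // complete line immediately and flushing any trailing partial line at the
    // end, mirroring the LogSink used by runPostBuildHook.
    struct LineSink
    {
        std::function<void(const std::string &)> emit;
        std::string currentLine;

        void operator()(std::string_view data)
        {
            for (char c : data) {
                if (c == '\n') flushLine();
                else currentLine += c;
            }
        }

        void flushLine() { emit(currentLine); currentLine.clear(); }

        ~LineSink() { if (!currentLine.empty()) flushLine(); }
    };

    int main()
    {
        LineSink sink { [](const std::string & line) { std::cout << "log: " << line << "\n"; } };
        sink("first li");                // chunks may end mid-line...
        sink("ne\nsecond line\n");
        sink("trailing, no newline");    // ...and the destructor flushes what is left
    }
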
diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc
index 279139020..8320dd1c4 100644
--- a/src/libstore/build/local-derivation-goal.cc
+++ b/src/libstore/build/local-derivation-goal.cc
@@ -1333,13 +1333,18 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
std::optional<const Realisation> queryRealisation(const DrvOutput & id) override
// XXX: This should probably be allowed if the realisation corresponds to
// an allowed derivation
- { throw Error("queryRealisation"); }
+ {
+ if (!goal.isAllowed(id))
+ throw InvalidPath("cannot query an unknown output id '%s' in recursive Nix", id.to_string());
+ return next->queryRealisation(id);
+ }
void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode) override
{
if (buildMode != bmNormal) throw Error("unsupported build mode");
StorePathSet newPaths;
+ std::set<Realisation> newRealisations;
for (auto & req : paths) {
if (!goal.isAllowed(req))
@@ -1352,16 +1357,28 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
auto p = std::get_if<DerivedPath::Built>(&path);
if (!p) continue;
auto & bfd = *p;
+ auto drv = readDerivation(bfd.drvPath);
+ auto drvHashes = staticOutputHashes(*this, drv);
auto outputs = next->queryDerivationOutputMap(bfd.drvPath);
for (auto & [outputName, outputPath] : outputs)
- if (wantOutput(outputName, bfd.outputs))
+ if (wantOutput(outputName, bfd.outputs)) {
newPaths.insert(outputPath);
+ if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+ auto thisRealisation = next->queryRealisation(
+ DrvOutput{drvHashes.at(outputName), outputName}
+ );
+ assert(thisRealisation);
+ newRealisations.insert(*thisRealisation);
+ }
+ }
}
StorePathSet closure;
next->computeFSClosure(newPaths, closure);
for (auto & path : closure)
goal.addDependency(path);
+ for (auto & real : Realisation::closure(*next, newRealisations))
+ goal.addedDrvOutputs.insert(real.id);
}
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
@@ -2464,6 +2481,7 @@ void LocalDerivationGoal::registerOutputs()
floating CA derivations and hash-mismatching fixed-output
derivations. */
PathLocks dynamicOutputLock;
+ dynamicOutputLock.setDeletion(true);
auto optFixedPath = output.path(worker.store, drv->name, outputName);
if (!optFixedPath ||
worker.store.printStorePath(*optFixedPath) != finalDestPath)
diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh
index d30be2351..088a57209 100644
--- a/src/libstore/build/local-derivation-goal.hh
+++ b/src/libstore/build/local-derivation-goal.hh
@@ -108,6 +108,9 @@ struct LocalDerivationGoal : public DerivationGoal
/* Paths that were added via recursive Nix calls. */
StorePathSet addedPaths;
+ /* Realisations that were added via recursive Nix calls. */
+ std::set<DrvOutput> addedDrvOutputs;
+
/* Recursive Nix calls are only allowed to build or realize paths
in the original input closure or added via a recursive Nix call
(so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
@@ -116,6 +119,11 @@ struct LocalDerivationGoal : public DerivationGoal
{
return inputPaths.count(path) || addedPaths.count(path);
}
+ bool isAllowed(const DrvOutput & id)
+ {
+ return addedDrvOutputs.count(id);
+ }
+
bool isAllowed(const DerivedPath & req);
friend struct RestrictedStore;
diff --git a/src/libstore/machines.cc b/src/libstore/machines.cc
index b42e5e434..9843ccf04 100644
--- a/src/libstore/machines.cc
+++ b/src/libstore/machines.cc
@@ -16,13 +16,18 @@ Machine::Machine(decltype(storeUri) storeUri,
decltype(mandatoryFeatures) mandatoryFeatures,
decltype(sshPublicHostKey) sshPublicHostKey) :
storeUri(
- // Backwards compatibility: if the URI is a hostname,
- // prepend ssh://.
+ // Backwards compatibility: if the URI is schemeless, is not a path,
+ // and is not one of the special store connection words, prepend
+ // ssh://.
storeUri.find("://") != std::string::npos
- || hasPrefix(storeUri, "local")
- || hasPrefix(storeUri, "remote")
- || hasPrefix(storeUri, "auto")
- || hasPrefix(storeUri, "/")
+ || storeUri.find("/") != std::string::npos
+ || storeUri == "auto"
+ || storeUri == "daemon"
+ || storeUri == "local"
+ || hasPrefix(storeUri, "auto?")
+ || hasPrefix(storeUri, "daemon?")
+ || hasPrefix(storeUri, "local?")
+ || hasPrefix(storeUri, "?")
? storeUri
: "ssh://" + storeUri),
systemTypes(systemTypes),
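The widened predicate keeps backwards compatibility for bare hostnames while leaving every explicit store URI untouched: anything with a scheme, a slash, or one of the literal store words (auto, daemon, local, optionally followed by ?params) passes through, and only bare hostnames get ssh:// prepended. A small sketch of the predicate with sample inputs; the host names are illustrative, not part of the patch:

    #include <iostream>
    #include <string>

    static bool hasPrefix(const std::string & s, const std::string & prefix)
    {
        return s.compare(0, prefix.size(), prefix) == 0;
    }

    // Mirrors the condition in Machine::Machine: URIs that already name a
    // store are kept as-is; bare hostnames get "ssh://" prepended.
    static std::string normalizeStoreUri(const std::string & uri)
    {
        bool alreadyStoreUri =
            uri.find("://") != std::string::npos
            || uri.find("/") != std::string::npos
            || uri == "auto" || uri == "daemon" || uri == "local"
            || hasPrefix(uri, "auto?") || hasPrefix(uri, "daemon?")
            || hasPrefix(uri, "local?") || hasPrefix(uri, "?");
        return alreadyStoreUri ? uri : "ssh://" + uri;
    }

    int main()
    {
        for (std::string uri : { "builder.example.org",        // -> ssh://builder.example.org
                                 "ssh-ng://builder",           // unchanged: has a scheme
                                 "daemon", "local?root=/tmp",  // unchanged: store words
                                 "/nix/other-store" })         // unchanged: contains '/'
            std::cout << uri << " -> " << normalizeStoreUri(uri) << "\n";
    }
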
diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc
index d2e801175..6b9b850ca 100644
--- a/src/libutil/logging.cc
+++ b/src/libutil/logging.cc
@@ -46,7 +46,7 @@ public:
: printBuildLogs(printBuildLogs)
{
systemd = getEnv("IN_SYSTEMD") == "1";
- tty = isatty(STDERR_FILENO);
+ tty = shouldANSI();
}
bool isVerbose() override {
diff --git a/src/libutil/url.cc b/src/libutil/url.cc
index c1bab866c..f6232d255 100644
--- a/src/libutil/url.cc
+++ b/src/libutil/url.cc
@@ -32,7 +32,7 @@ ParsedURL parseURL(const std::string & url)
auto isFile = scheme.find("file") != std::string::npos;
if (authority && *authority != "" && isFile)
- throw Error("file:// URL '%s' has unexpected authority '%s'",
+ throw BadURL("file:// URL '%s' has unexpected authority '%s'",
url, *authority);
if (isFile && path.empty())
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 7e57fd7ca..ee9f17228 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -1372,6 +1372,12 @@ void ignoreException()
}
}
+bool shouldANSI()
+{
+ return isatty(STDERR_FILENO)
+ && getEnv("TERM").value_or("dumb") != "dumb"
+ && !getEnv("NO_COLOR").has_value();
+}
std::string filterANSIEscapes(const std::string & s, bool filterAll, unsigned int width)
{
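shouldANSI centralizes the "is colour appropriate" decision: stderr must be a terminal, TERM must not be dumb, and NO_COLOR must be unset. A minimal usage sketch with the same three checks spelled out via raw POSIX/stdlib calls instead of Nix's getEnv helper:

    #include <cstdlib>
    #include <iostream>
    #include <string>
    #include <unistd.h>

    // Same policy as shouldANSI(): colour only when stderr is a TTY, TERM is
    // not "dumb", and the NO_COLOR convention is not in effect.
    static bool colourAllowed()
    {
        const char * term = std::getenv("TERM");
        return isatty(STDERR_FILENO)
            && std::string(term ? term : "dumb") != "dumb"
            && std::getenv("NO_COLOR") == nullptr;
    }

    int main()
    {
        if (colourAllowed())
            std::cerr << "\033[31merror:\033[0m something went wrong\n";
        else
            std::cerr << "error: something went wrong\n";
    }
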
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index f84d0fb31..a8dd4bd47 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -482,6 +482,9 @@ constexpr char treeLast[] = "└───";
constexpr char treeLine[] = "│ ";
constexpr char treeNull[] = " ";
+/* Determine whether ANSI escape sequences are appropriate for the
+ present output. */
+bool shouldANSI();
/* Truncate a string to 'width' printable characters. If 'filterAll'
is true, all ANSI escape sequences are filtered out. Otherwise,
diff --git a/src/nix/develop.cc b/src/nix/develop.cc
index d77ff52d7..699ec0b99 100644
--- a/src/nix/develop.cc
+++ b/src/nix/develop.cc
@@ -54,7 +54,7 @@ BuildEnvironment readEnvironment(const Path & path)
R"re((?:[a-zA-Z_][a-zA-Z0-9_]*))re";
static std::string simpleStringRegex =
- R"re((?:[a-zA-Z0-9_/:\.\-\+=]*))re";
+ R"re((?:[a-zA-Z0-9_/:\.\-\+=@%]*))re";
static std::string dquotedStringRegex =
R"re((?:\$?"(?:[^"\\]|\\[$`"\\\n])*"))re";
@@ -443,13 +443,17 @@ struct CmdDevelop : Common, MixEnvironment
try {
auto state = getEvalState();
+ auto nixpkgsLockFlags = lockFlags;
+ nixpkgsLockFlags.inputOverrides = {};
+ nixpkgsLockFlags.inputUpdates = {};
+
auto bashInstallable = std::make_shared<InstallableFlake>(
this,
state,
installable->nixpkgsFlakeRef(),
Strings{"bashInteractive"},
Strings{"legacyPackages." + settings.thisSystem.get() + "."},
- lockFlags);
+ nixpkgsLockFlags);
shell = state->store->printStorePath(
toStorePath(state->store, Realise::Outputs, OperateOn::Output, bashInstallable)) + "/bin/bash";
diff --git a/src/nix/profile-remove.md b/src/nix/profile-remove.md
index dcf825da9..ba85441d8 100644
--- a/src/nix/profile-remove.md
+++ b/src/nix/profile-remove.md
@@ -15,6 +15,7 @@ R""(
```
* Remove all packages:
+
```console
# nix profile remove '.*'
```
diff --git a/src/nix/repl.cc b/src/nix/repl.cc
index eed79c332..0275feae7 100644
--- a/src/nix/repl.cc
+++ b/src/nix/repl.cc
@@ -104,6 +104,26 @@ NixRepl::~NixRepl()
write_history(historyFile.c_str());
}
+string runNix(Path program, const Strings & args,
+ const std::optional<std::string> & input = {})
+{
+ auto experimentalFeatures = concatStringsSep(" ", settings.experimentalFeatures.get());
+ auto nixConf = getEnv("NIX_CONFIG").value_or("");
+ nixConf.append("\nexperimental-features = " + experimentalFeatures);
+ auto subprocessEnv = getEnv();
+ subprocessEnv["NIX_CONFIG"] = nixConf;
+ RunOptions opts(settings.nixBinDir+ "/" + program, args);
+ opts.input = input;
+ opts.environment = subprocessEnv;
+
+ auto res = runProgram(opts);
+
+ if (!statusOk(res.first))
+ throw ExecError(res.first, fmt("program '%1%' %2%", program, statusToString(res.first)));
+
+ return res.second;
+}
+
static NixRepl * curRepl; // ugly
static char * completionCallback(char * s, int *match) {
@@ -463,7 +483,7 @@ bool NixRepl::processLine(string line)
state->callFunction(f, v, result, Pos());
StorePath drvPath = getDerivationPath(result);
- runProgram(settings.nixBinDir + "/nix-shell", true, {state->store->printStorePath(drvPath)});
+ runNix("nix-shell", {state->store->printStorePath(drvPath)});
}
else if (command == ":b" || command == ":i" || command == ":s") {
@@ -477,7 +497,7 @@ bool NixRepl::processLine(string line)
but doing it in a child makes it easier to recover from
problems / SIGINT. */
try {
- runProgram(settings.nixBinDir + "/nix", true, {"build", "--no-link", drvPathRaw});
+ runNix("nix", {"build", "--no-link", drvPathRaw});
auto drv = state->store->readDerivation(drvPath);
std::cout << std::endl << "this derivation produced the following outputs:" << std::endl;
for (auto & i : drv.outputsAndOptPaths(*state->store))
@@ -485,9 +505,9 @@ bool NixRepl::processLine(string line)
} catch (ExecError &) {
}
} else if (command == ":i") {
- runProgram(settings.nixBinDir + "/nix-env", true, {"-i", drvPathRaw});
+ runNix("nix-env", {"-i", drvPathRaw});
} else {
- runProgram(settings.nixBinDir + "/nix-shell", true, {drvPathRaw});
+ runNix("nix-shell", {drvPathRaw});
}
}
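runNix forwards the parent's experimental features to the child nix process by appending an experimental-features line to whatever NIX_CONFIG the child will see, so :b and friends keep working when the REPL itself was started with extra features enabled. A sketch of that environment plumbing, with a std::map standing in for the subprocess environment and a placeholder feature list in place of settings.experimentalFeatures:

    #include <cstdlib>
    #include <iostream>
    #include <map>
    #include <string>

    // Builds the child environment the way runNix does: take the existing
    // NIX_CONFIG (if any) and append an experimental-features line so the
    // subprocess inherits the parent's feature set.
    static std::map<std::string, std::string>
    childEnvWithFeatures(const std::string & features)
    {
        const char * existing = std::getenv("NIX_CONFIG");
        std::string nixConf = existing ? existing : "";
        nixConf += "\nexperimental-features = " + features;

        std::map<std::string, std::string> env;
        env["NIX_CONFIG"] = nixConf;
        return env;
    }

    int main()
    {
        for (auto & [name, value] : childEnvWithFeatures("nix-command flakes"))
            std::cout << name << "=" << value << "\n";
    }
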
diff --git a/tests/build.sh b/tests/build.sh
index ce9d6602c..c77f620f7 100644
--- a/tests/build.sh
+++ b/tests/build.sh
@@ -1,7 +1,7 @@
source common.sh
expectedJSONRegex='\[\{"drvPath":".*multiple-outputs-a.drv","outputs":\{"first":".*multiple-outputs-a-first","second":".*multiple-outputs-a-second"}},\{"drvPath":".*multiple-outputs-b.drv","outputs":\{"out":".*multiple-outputs-b"}}]'
-nix build -f multiple-outputs.nix --json a.all b.all | jq --exit-status '
+nix build -f multiple-outputs.nix --json a.all b.all --no-link | jq --exit-status '
(.[0] |
(.drvPath | match(".*multiple-outputs-a.drv")) and
(.outputs.first | match(".*multiple-outputs-a-first")) and
@@ -10,10 +10,10 @@ nix build -f multiple-outputs.nix --json a.all b.all | jq --exit-status '
(.drvPath | match(".*multiple-outputs-b.drv")) and
(.outputs.out | match(".*multiple-outputs-b")))
'
-
testNormalization () {
clearStore
- outPath=$(nix-build ./simple.nix)
+ outPath=$(nix-build ./simple.nix --no-out-link)
test "$(stat -c %Y $outPath)" -eq 1
}
+
testNormalization
diff --git a/tests/ca/duplicate-realisation-in-closure.sh b/tests/ca/duplicate-realisation-in-closure.sh
index bfe2a4e08..ca9099641 100644
--- a/tests/ca/duplicate-realisation-in-closure.sh
+++ b/tests/ca/duplicate-realisation-in-closure.sh
@@ -17,10 +17,10 @@ sleep 2 # To make sure that `$(date)` will be different
# As we’ve cleared the cache, we’ll have to rebuild current-time. And because
# the current time isn’t the same as before, this will yield a new (different)
# realisation
-nix build -f nondeterministic.nix dep2
+nix build -f nondeterministic.nix dep2 --no-link
# Build something that depends both on dep1 and dep2.
# If everything goes right, we should rebuild dep2 rather than fetch it from
# the cache (because that would mean duplicating `current-time` in the closure),
# and have `dep1 == dep2`.
-nix build --substituters "$REMOTE_STORE" -f nondeterministic.nix toplevel --no-require-sigs
+nix build --substituters "$REMOTE_STORE" -f nondeterministic.nix toplevel --no-require-sigs --no-link
diff --git a/tests/ca/post-hook.sh b/tests/ca/post-hook.sh
new file mode 100755
index 000000000..4b8da4cd8
--- /dev/null
+++ b/tests/ca/post-hook.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+cd ..
+source ./post-hook.sh
+
+
diff --git a/tests/ca/recursive.sh b/tests/ca/recursive.sh
new file mode 100755
index 000000000..d9281d91f
--- /dev/null
+++ b/tests/ca/recursive.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+cd ..
+source ./recursive.sh
+
+
diff --git a/tests/config.nix.in b/tests/config.nix.in
index 9b00d9ddb..7facbdcbc 100644
--- a/tests/config.nix.in
+++ b/tests/config.nix.in
@@ -22,6 +22,6 @@ rec {
builder = shell;
args = ["-e" args.builder or (builtins.toFile "builder-${args.name}.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")];
PATH = path;
- } // removeAttrs args ["builder" "meta"])
+ } // caArgs // removeAttrs args ["builder" "meta"])
// { meta = args.meta or {}; };
-} // caArgs
+}
diff --git a/tests/gc.sh b/tests/gc.sh
index 8b4f8d282..cf0e2c32d 100644
--- a/tests/gc.sh
+++ b/tests/gc.sh
@@ -12,7 +12,7 @@ ln -sf $outPath "$NIX_STATE_DIR"/gcroots/foo
nix-store --gc --print-roots | grep $outPath
nix-store --gc --print-live | grep $outPath
nix-store --gc --print-dead | grep $drvPath
-if nix-store --gc --print-dead | grep $outPath; then false; fi
+if nix-store --gc --print-dead | grep -E $outPath$; then false; fi
nix-store --gc --print-dead
diff --git a/tests/local.mk b/tests/local.mk
index 82cec1df3..cbdf4efdb 100644
--- a/tests/local.mk
+++ b/tests/local.mk
@@ -39,6 +39,7 @@ nix_tests = \
search.sh \
nix-copy-ssh.sh \
post-hook.sh \
+ ca/post-hook.sh \
function-trace.sh \
recursive.sh \
describe-stores.sh \
@@ -52,6 +53,7 @@ nix_tests = \
ca/signatures.sh \
ca/nix-shell.sh \
ca/nix-run.sh \
+ ca/recursive.sh \
ca/nix-copy.sh
# parallel.sh
diff --git a/tests/post-hook.sh b/tests/post-hook.sh
index aa3e6a574..238a8f826 100644
--- a/tests/post-hook.sh
+++ b/tests/post-hook.sh
@@ -4,7 +4,7 @@ clearStore
rm -f $TEST_ROOT/result
-export REMOTE_STORE=$TEST_ROOT/remote_store
+export REMOTE_STORE=file:$TEST_ROOT/remote_store
# Build the dependencies and push them to the remote store
nix-build -o $TEST_ROOT/result dependencies.nix --post-build-hook $PWD/push-to-store.sh
diff --git a/tests/recursive.sh b/tests/recursive.sh
index a55b061b5..b6740877d 100644
--- a/tests/recursive.sh
+++ b/tests/recursive.sh
@@ -9,9 +9,9 @@ rm -f $TEST_ROOT/result
export unreachable=$(nix store add-path ./recursive.sh)
-NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr '
+NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr '
with import ./config.nix;
- mkDerivation {
+ mkDerivation rec {
name = "recursive";
dummy = builtins.toFile "dummy" "bla bla";
SHELL = shell;
@@ -19,11 +19,13 @@ NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command r
# Note: this is a string without context.
unreachable = builtins.getEnv "unreachable";
+ NIX_TESTS_CA_BY_DEFAULT = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT";
+
requiredSystemFeatures = [ "recursive-nix" ];
buildCommand = '\'\''
mkdir $out
- opts="--experimental-features nix-command"
+ opts="--experimental-features nix-command ${if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else ""}"
PATH=${builtins.getEnv "NIX_BIN_DIR"}:$PATH
@@ -46,16 +48,15 @@ NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command r
# Add it to our closure.
ln -s $foobar $out/foobar
- [[ $(nix $opts path-info --all | wc -l) -eq 3 ]]
+ [[ $(nix $opts path-info --all | wc -l) -eq 4 ]]
# Build a derivation.
nix $opts build -L --impure --expr '\''
- derivation {
+ with import ${./config.nix};
+ mkDerivation {
name = "inner1";
- builder = builtins.getEnv "SHELL";
- system = builtins.getEnv "system";
+ buildCommand = "echo $fnord blaat > $out";
fnord = builtins.toFile "fnord" "fnord";
- args = [ "-c" "echo $fnord blaat > $out" ];
}
'\''
diff --git a/tests/shell.nix b/tests/shell.nix
index 53759f99a..f174db583 100644
--- a/tests/shell.nix
+++ b/tests/shell.nix
@@ -34,6 +34,8 @@ let pkgs = rec {
name = "shellDrv";
builder = "/does/not/exist";
VAR_FROM_NIX = "bar";
+ ASCII_PERCENT = "%";
+ ASCII_AT = "@";
TEST_inNixShell = if inNixShell then "true" else "false";
inherit stdenv;
outputs = ["dev" "out"];