-rw-r--r--  .github/CODEOWNERS | 15
-rw-r--r--  .github/ISSUE_TEMPLATE/bug_report.md | 4
-rw-r--r--  .github/ISSUE_TEMPLATE/feature_request.md | 4
-rw-r--r--  .github/ISSUE_TEMPLATE/installer.md | 36
-rw-r--r--  .github/ISSUE_TEMPLATE/missing_documentation.md | 3
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE/pull_request_template.md | 4
-rw-r--r--  .github/workflows/backport.yml | 2
-rw-r--r--  .version | 2
-rw-r--r--  doc/manual/generate-builtins.nix | 32
-rw-r--r--  doc/manual/generate-manpage.nix | 1
-rw-r--r--  doc/manual/generate-options.nix | 66
-rw-r--r--  doc/manual/local.mk | 6
-rw-r--r--  doc/manual/redirects.js | 3
-rw-r--r--  doc/manual/src/SUMMARY.md.in | 1
-rw-r--r--  doc/manual/src/advanced-topics/diff-hook.md | 34
-rw-r--r--  doc/manual/src/advanced-topics/post-build-hook.md | 15
-rw-r--r--  doc/manual/src/command-ref/nix-build.md | 12
-rw-r--r--  doc/manual/src/command-ref/nix-store.md | 7
-rw-r--r--  doc/manual/src/language/index.md | 2
-rw-r--r--  doc/manual/src/release-notes/rl-2.12.md | 43
-rw-r--r--  doc/manual/src/release-notes/rl-next.md | 12
-rw-r--r--  docker.nix | 12
-rw-r--r--  flake.nix | 12
-rw-r--r--  maintainers/README.md | 79
-rw-r--r--  misc/launchd/org.nixos.nix-daemon.plist.in | 2
-rw-r--r--  misc/systemd/nix-daemon.service.in | 2
-rw-r--r--  scripts/install-multi-user.sh | 9
-rw-r--r--  src/libcmd/installables.cc | 31
-rw-r--r--  src/libcmd/installables.hh | 11
-rw-r--r--  src/libcmd/repl.cc | 1
-rw-r--r--  src/libexpr/eval.cc | 158
-rw-r--r--  src/libexpr/primops.cc | 21
-rw-r--r--  src/libexpr/value-to-json.cc | 50
-rw-r--r--  src/libexpr/value-to-json.hh | 7
-rw-r--r--  src/libexpr/value.hh | 6
-rw-r--r--  src/libmain/progress-bar.cc | 10
-rw-r--r--  src/libstore/binary-cache-store.cc | 18
-rw-r--r--  src/libstore/build-result.hh | 5
-rw-r--r--  src/libstore/build/derivation-goal.cc | 64
-rw-r--r--  src/libstore/build/derivation-goal.hh | 5
-rw-r--r--  src/libstore/build/local-derivation-goal.cc | 336
-rw-r--r--  src/libstore/build/local-derivation-goal.hh | 11
-rw-r--r--  src/libstore/daemon.cc | 1
-rw-r--r--  src/libstore/derived-path.cc | 23
-rw-r--r--  src/libstore/derived-path.hh | 3
-rw-r--r--  src/libstore/gc.cc | 3
-rw-r--r--  src/libstore/globals.cc | 4
-rw-r--r--  src/libstore/globals.hh | 92
-rw-r--r--  src/libstore/legacy-ssh-store.cc | 4
-rw-r--r--  src/libstore/local-store.cc | 27
-rw-r--r--  src/libstore/local-store.hh | 15
-rw-r--r--  src/libstore/lock.cc | 236
-rw-r--r--  src/libstore/lock.hh | 43
-rw-r--r--  src/libstore/nar-accessor.cc | 30
-rw-r--r--  src/libstore/nar-accessor.hh | 6
-rw-r--r--  src/libstore/parsed-derivations.cc | 22
-rw-r--r--  src/libstore/parsed-derivations.hh | 2
-rw-r--r--  src/libstore/remote-fs-accessor.cc | 8
-rw-r--r--  src/libstore/store-api.cc | 48
-rw-r--r--  src/libstore/store-api.hh | 4
-rw-r--r--  src/libutil/cgroup.cc | 148
-rw-r--r--  src/libutil/cgroup.hh | 29
-rw-r--r--  src/libutil/experimental-features.cc | 2
-rw-r--r--  src/libutil/experimental-features.hh | 2
-rw-r--r--  src/libutil/filesystem.cc | 9
-rw-r--r--  src/libutil/json.cc | 203
-rw-r--r--  src/libutil/json.hh | 185
-rw-r--r--  src/libutil/tests/json.cc | 193
-rw-r--r--  src/libutil/util.cc | 61
-rw-r--r--  src/libutil/util.hh | 2
-rw-r--r--  src/nix-env/nix-env.cc | 35
-rw-r--r--  src/nix-store/nix-store.cc | 11
-rw-r--r--  src/nix/app.cc | 11
-rw-r--r--  src/nix/build.cc | 42
-rw-r--r--  src/nix/eval.cc | 7
-rw-r--r--  src/nix/flake.cc | 37
-rw-r--r--  src/nix/ls.cc | 5
-rw-r--r--  src/nix/make-content-addressed.cc | 14
-rw-r--r--  src/nix/path-info.cc | 8
-rw-r--r--  src/nix/profile.cc | 4
-rw-r--r--  src/nix/search.cc | 19
-rw-r--r--  src/nix/show-derivation.cc | 67
-rw-r--r--  src/nix/why-depends.cc | 39
-rw-r--r--  tests/ca/why-depends.sh | 5
-rw-r--r--  tests/check.nix | 2
-rw-r--r--  tests/check.sh | 6
-rw-r--r--  tests/containers.nix | 68
-rw-r--r--  tests/fetchClosure.sh | 21
-rw-r--r--  tests/id-test.nix | 8
-rw-r--r--  tests/systemd-nspawn.nix | 78
90 files changed, 1638 insertions, 1408 deletions
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 000000000..d58577551
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,15 @@
+# Pull requests concerning the listed files will automatically invite the respective maintainers as reviewers.
+# This file is not used for denoting any kind of ownership, but is merely a tool for handling notifications.
+#
+# Merge permissions are required for maintaining an entry in this file.
+# For documentation on this mechanism, see https://help.github.com/articles/about-codeowners/
+
+# Default reviewers if nothing else matches
+* @edolstra @thufschmitt
+
+# This file
+.github/CODEOWNERS @edolstra
+
+# Public documentation
+/doc @fricklerhandwerk
+*.md @fricklerhandwerk
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index e6d346bc1..984f9a9ea 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -30,3 +30,7 @@ A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here.
+
+**Priorities**
+
+Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 4fe86d5ec..42c658b52 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -18,3 +18,7 @@ A clear and concise description of any alternative solutions or features you've
**Additional context**
Add any other context or screenshots about the feature request here.
+
+**Priorities**
+
+Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/ISSUE_TEMPLATE/installer.md b/.github/ISSUE_TEMPLATE/installer.md
new file mode 100644
index 000000000..3768a49c9
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/installer.md
@@ -0,0 +1,36 @@
+---
+name: Installer issue
+about: Report problems with installation
+title: ''
+labels: installer
+assignees: ''
+
+---
+
+## Platform
+
+<!-- select the platform on which you tried to install Nix -->
+
+- [ ] Linux: <!-- state your distribution, e.g. Arch Linux, Ubuntu, ... -->
+- [ ] macOS
+- [ ] WSL
+
+## Additional information
+
+<!-- state special circumstances on your system or additional steps you have taken prior to installation -->
+
+## Output
+
+<details><summary>Output</summary>
+
+```log
+
+<!-- paste console output here and remove this comment -->
+
+```
+
+</details>
+
+## Priorities
+
+Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/ISSUE_TEMPLATE/missing_documentation.md b/.github/ISSUE_TEMPLATE/missing_documentation.md
index fbabd868e..942d7a971 100644
--- a/.github/ISSUE_TEMPLATE/missing_documentation.md
+++ b/.github/ISSUE_TEMPLATE/missing_documentation.md
@@ -26,3 +26,6 @@ assignees: ''
<!-- propose a solution -->
+## Priorities
+
+Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
index 537aa0909..5311be01f 100644
--- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
+++ b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
@@ -5,3 +5,7 @@ Please include relevant [release notes](https://github.com/NixOS/nix/blob/master
**Testing**
If this issue is a regression or something that should block release, please consider including a test either in the [testsuite](https://github.com/NixOS/nix/tree/master/tests) or as a [hydraJob]( https://github.com/NixOS/nix/blob/master/flake.nix#L396) so that it can be part of the [automatic checks](https://hydra.nixos.org/jobset/nix/master).
+
+**Priorities**
+
+Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index 75be788ef..7568145b6 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -21,7 +21,7 @@ jobs:
fetch-depth: 0
- name: Create backport PRs
# should be kept in sync with `version`
- uses: zeebe-io/backport-action@v0.0.8
+ uses: zeebe-io/backport-action@v0.0.9
with:
# Config README: https://github.com/zeebe-io/backport-action#backport-action
github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.version b/.version
index 3ca2c9b2c..fb2c0766b 100644
--- a/.version
+++ b/.version
@@ -1 +1 @@
-2.12.0
\ No newline at end of file
+2.13.0
diff --git a/doc/manual/generate-builtins.nix b/doc/manual/generate-builtins.nix
index 6c8b88da2..115bb3f94 100644
--- a/doc/manual/generate-builtins.nix
+++ b/doc/manual/generate-builtins.nix
@@ -1,16 +1,20 @@
-with builtins;
-with import ./utils.nix;
+builtinsDump:
+let
+ showBuiltin = name:
+ let
+ inherit (builtinsDump.${name}) doc args;
+ in
+ ''
+ <dt id="builtins-${name}">
+ <a href="#builtins-${name}"><code>${name} ${listArgs args}</code></a>
+ </dt>
+ <dd>
-builtins:
+ ${doc}
+
+ </dd>
+ '';
+ listArgs = args: builtins.concatStringsSep " " (map (s: "<var>${s}</var>") args);
+in
+with builtins; concatStringsSep "\n" (map showBuiltin (attrNames builtinsDump))
-concatStrings (map
- (name:
- let builtin = builtins.${name}; in
- "<dt id=\"builtins-${name}\"><a href=\"#builtins-${name}\"><code>${name} "
- + concatStringsSep " " (map (s: "<var>${s}</var>") builtin.args)
- + "</code></a></dt>"
- + "<dd>\n\n"
- + builtin.doc
- + "\n\n</dd>"
- )
- (attrNames builtins))
diff --git a/doc/manual/generate-manpage.nix b/doc/manual/generate-manpage.nix
index 057719e34..8c7c4d358 100644
--- a/doc/manual/generate-manpage.nix
+++ b/doc/manual/generate-manpage.nix
@@ -99,6 +99,7 @@ let
in [ cmd ] ++ concatMap subcommand (attrNames details.commands or {});
parsedToplevel = builtins.fromJSON toplevel;
+
manpages = processCommand {
command = "nix";
details = parsedToplevel;
diff --git a/doc/manual/generate-options.nix b/doc/manual/generate-options.nix
index 680b709c8..a4ec36477 100644
--- a/doc/manual/generate-options.nix
+++ b/doc/manual/generate-options.nix
@@ -1,29 +1,41 @@
-with builtins;
-with import ./utils.nix;
+let
+ inherit (builtins) attrNames concatStringsSep isAttrs isBool;
+ inherit (import ./utils.nix) concatStrings squash splitLines;
+in
-options:
+optionsInfo:
+let
+ showOption = name:
+ let
+ inherit (optionsInfo.${name}) description documentDefault defaultValue aliases;
+ result = squash ''
+ - <span id="conf-${name}">[`${name}`](#conf-${name})</span>
-concatStrings (map
- (name:
- let option = options.${name}; in
- " - [`${name}`](#conf-${name})"
- + "<p id=\"conf-${name}\"></p>\n\n"
- + concatStrings (map (s: " ${s}\n") (splitLines option.description)) + "\n\n"
- + (if option.documentDefault
- then " **Default:** " + (
- if option.defaultValue == "" || option.defaultValue == []
- then "*empty*"
- else if isBool option.defaultValue
- then (if option.defaultValue then "`true`" else "`false`")
- else
- # n.b. a StringMap value type is specified as a string, but
- # this shows the value type. The empty stringmap is "null" in
- # JSON, but that converts to "{ }" here.
- (if isAttrs option.defaultValue then "`\"\"`"
- else "`" + toString option.defaultValue + "`")) + "\n\n"
- else " **Default:** *machine-specific*\n")
- + (if option.aliases != []
- then " **Deprecated alias:** " + (concatStringsSep ", " (map (s: "`${s}`") option.aliases)) + "\n\n"
- else "")
- )
- (attrNames options))
+ ${indent " " body}
+ '';
+ # separate body to cleanly handle indentation
+ body = ''
+ ${description}
+
+ **Default:** ${showDefault documentDefault defaultValue}
+
+ ${showAliases aliases}
+ '';
+ showDefault = documentDefault: defaultValue:
+ if documentDefault then
+ # a StringMap value type is specified as a string, but
+ # this shows the value type. The empty stringmap is `null` in
+ # JSON, but that converts to `{ }` here.
+ if defaultValue == "" || defaultValue == [] || isAttrs defaultValue
+ then "*empty*"
+ else if isBool defaultValue then
+ if defaultValue then "`true`" else "`false`"
+ else "`${toString defaultValue}`"
+ else "*machine-specific*";
+ showAliases = aliases:
+ if aliases == [] then "" else
+ "**Deprecated alias:** ${(concatStringsSep ", " (map (s: "`${s}`") aliases))}";
+ indent = prefix: s:
+ concatStringsSep "\n" (map (x: if x == "" then x else "${prefix}${x}") (splitLines s));
+ in result;
+in concatStrings (map showOption (attrNames optionsInfo))
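The rewritten generator above relies on an `indent` helper and a `splitLines` function imported from `./utils.nix`. A minimal, self-contained sketch of the same idea, approximating `splitLines` with `builtins.split` (an assumption, not the actual helper):

```nix
# Indent every non-empty line of a string by a prefix, as the
# option generator does for each option body.
let
  splitLines = s: builtins.filter builtins.isString (builtins.split "\n" s);
  indent = prefix: s:
    builtins.concatStringsSep "\n"
      (map (x: if x == "" then x else "${prefix}${x}") (splitLines s));
in
  indent "  " "first line\n\nsecond line"
# => "  first line\n\n  second line"
```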
diff --git a/doc/manual/local.mk b/doc/manual/local.mk
index 486dbd7a2..c0f69e00f 100644
--- a/doc/manual/local.mk
+++ b/doc/manual/local.mk
@@ -29,19 +29,19 @@ nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -
$(d)/%.1: $(d)/src/command-ref/%.md
@printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp
@cat $^ >> $^.tmp
- $(trace-gen) lowdown -sT man -M section=1 $^.tmp -o $@
+ $(trace-gen) lowdown -sT man --nroff-nolinks -M section=1 $^.tmp -o $@
@rm $^.tmp
$(d)/%.8: $(d)/src/command-ref/%.md
@printf "Title: %s\n\n" "$$(basename $@ .8)" > $^.tmp
@cat $^ >> $^.tmp
- $(trace-gen) lowdown -sT man -M section=8 $^.tmp -o $@
+ $(trace-gen) lowdown -sT man --nroff-nolinks -M section=8 $^.tmp -o $@
@rm $^.tmp
$(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md
@printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp
@cat $^ >> $^.tmp
- $(trace-gen) lowdown -sT man -M section=5 $^.tmp -o $@
+ $(trace-gen) lowdown -sT man --nroff-nolinks -M section=5 $^.tmp -o $@
@rm $^.tmp
$(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
diff --git a/doc/manual/redirects.js b/doc/manual/redirects.js
index 2e77edd0f..69f75d3a0 100644
--- a/doc/manual/redirects.js
+++ b/doc/manual/redirects.js
@@ -35,7 +35,6 @@ const redirects = {
"conf-build-max-jobs": "command-ref/conf-file.html#conf-build-max-jobs",
"conf-build-max-log-size": "command-ref/conf-file.html#conf-build-max-log-size",
"conf-build-max-silent-time": "command-ref/conf-file.html#conf-build-max-silent-time",
- "conf-build-repeat": "command-ref/conf-file.html#conf-build-repeat",
"conf-build-timeout": "command-ref/conf-file.html#conf-build-timeout",
"conf-build-use-chroot": "command-ref/conf-file.html#conf-build-use-chroot",
"conf-build-use-sandbox": "command-ref/conf-file.html#conf-build-use-sandbox",
@@ -47,7 +46,6 @@ const redirects = {
"conf-connect-timeout": "command-ref/conf-file.html#conf-connect-timeout",
"conf-cores": "command-ref/conf-file.html#conf-cores",
"conf-diff-hook": "command-ref/conf-file.html#conf-diff-hook",
- "conf-enforce-determinism": "command-ref/conf-file.html#conf-enforce-determinism",
"conf-env-keep-derivations": "command-ref/conf-file.html#conf-env-keep-derivations",
"conf-extra-binary-caches": "command-ref/conf-file.html#conf-extra-binary-caches",
"conf-extra-platforms": "command-ref/conf-file.html#conf-extra-platforms",
@@ -74,7 +72,6 @@ const redirects = {
"conf-plugin-files": "command-ref/conf-file.html#conf-plugin-files",
"conf-post-build-hook": "command-ref/conf-file.html#conf-post-build-hook",
"conf-pre-build-hook": "command-ref/conf-file.html#conf-pre-build-hook",
- "conf-repeat": "command-ref/conf-file.html#conf-repeat",
"conf-require-sigs": "command-ref/conf-file.html#conf-require-sigs",
"conf-restrict-eval": "command-ref/conf-file.html#conf-restrict-eval",
"conf-run-diff-hook": "command-ref/conf-file.html#conf-run-diff-hook",
diff --git a/doc/manual/src/SUMMARY.md.in b/doc/manual/src/SUMMARY.md.in
index 908e7e3d9..6a514fa2c 100644
--- a/doc/manual/src/SUMMARY.md.in
+++ b/doc/manual/src/SUMMARY.md.in
@@ -65,6 +65,7 @@
- [CLI guideline](contributing/cli-guideline.md)
- [Release Notes](release-notes/release-notes.md)
- [Release X.Y (202?-??-??)](release-notes/rl-next.md)
+ - [Release 2.12 (2022-12-06)](release-notes/rl-2.12.md)
- [Release 2.11 (2022-08-25)](release-notes/rl-2.11.md)
- [Release 2.10 (2022-07-11)](release-notes/rl-2.10.md)
- [Release 2.9 (2022-05-30)](release-notes/rl-2.9.md)
diff --git a/doc/manual/src/advanced-topics/diff-hook.md b/doc/manual/src/advanced-topics/diff-hook.md
index 161e64b2a..4a742c160 100644
--- a/doc/manual/src/advanced-topics/diff-hook.md
+++ b/doc/manual/src/advanced-topics/diff-hook.md
@@ -121,37 +121,3 @@ error:
are not valid, so checking is not possible
Run the build without `--check`, and then try with `--check` again.
-
-# Automatic and Optionally Enforced Determinism Verification
-
-Automatically verify every build at build time by executing the build
-multiple times.
-
-Setting `repeat` and `enforce-determinism` in your `nix.conf` permits
-the automated verification of every build Nix performs.
-
-The following configuration will run each build three times, and will
-require the build to be deterministic:
-
- enforce-determinism = true
- repeat = 2
-
-Setting `enforce-determinism` to false as in the following
-configuration will run the build multiple times, execute the build
-hook, but will allow the build to succeed even if it does not build
-reproducibly:
-
- enforce-determinism = false
- repeat = 1
-
-An example output of this configuration:
-
-```console
-$ nix-build ./test.nix -A unstable
-this derivation will be built:
- /nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv
-building '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' (round 1/2)...
-building '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' (round 2/2)...
-output '/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable' of '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' differs from '/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable.check' from previous round
-/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable
-```
diff --git a/doc/manual/src/advanced-topics/post-build-hook.md b/doc/manual/src/advanced-topics/post-build-hook.md
index fcb52d878..1479cc3a4 100644
--- a/doc/manual/src/advanced-topics/post-build-hook.md
+++ b/doc/manual/src/advanced-topics/post-build-hook.md
@@ -33,12 +33,17 @@ distribute the public key for verifying the authenticity of the paths.
example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM=
```
-Then, add the public key and the cache URL to your `nix.conf`'s
-`trusted-public-keys` and `substituters` options:
+Then update [`nix.conf`](../command-ref/conf-file.md) on any machine that will access the cache.
+Add the cache URL to [`substituters`](../command-ref/conf-file.md#conf-substituters) and the public key to [`trusted-public-keys`](../command-ref/conf-file.md#conf-trusted-public-keys):
substituters = https://cache.nixos.org/ s3://example-nix-cache
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM=
+Machines that build for the cache must sign derivations using the private key.
+On those machines, add the path to the key file to the [`secret-key-files`](../command-ref/conf-file.md#conf-secret-key-files) field in their [`nix.conf`](../command-ref/conf-file.md):
+
+ secret-key-files = /etc/nix/key.private
+
We will restart the Nix daemon in a later step.
# Implementing the build hook
@@ -52,14 +57,12 @@ set -eu
set -f # disable globbing
export IFS=' '
-echo "Signing paths" $OUT_PATHS
-nix store sign --key-file /etc/nix/key.private $OUT_PATHS
echo "Uploading paths" $OUT_PATHS
-exec nix copy --to 's3://example-nix-cache' $OUT_PATHS
+exec nix copy --to "s3://example-nix-cache" $OUT_PATHS
```
> **Note**
->
+>
> The `$OUT_PATHS` variable is a space-separated list of Nix store
> paths. In this case, we expect and want the shell to perform word
> splitting to make each output path its own argument to `nix
diff --git a/doc/manual/src/command-ref/nix-build.md b/doc/manual/src/command-ref/nix-build.md
index 49c6f3f55..3a47feaae 100644
--- a/doc/manual/src/command-ref/nix-build.md
+++ b/doc/manual/src/command-ref/nix-build.md
@@ -53,16 +53,18 @@ All options not listed here are passed to `nix-store
--realise`, except for `--arg` and `--attr` / `-A` which are passed to
`nix-instantiate`.
- - [`--no-out-link`]{#opt-no-out-link}\
+ - <span id="opt-no-out-link">[`--no-out-link`](#opt-no-out-link)</span>
+
Do not create a symlink to the output path. Note that as a result
the output does not become a root of the garbage collector, and so
- might be deleted by `nix-store
- --gc`.
+ might be deleted by `nix-store --gc`.
+
+ - <span id="opt-dry-run">[`--dry-run`](#opt-dry-run)</span>
- - [`--dry-run`]{#opt-dry-run}\
Show what store paths would be built or downloaded.
- - [`--out-link`]{#opt-out-link} / `-o` *outlink*\
+ - <span id="opt-out-link">[`--out-link`](#opt-out-link)</span> / `-o` *outlink*
+
Change the name of the symlink to the output path created from
`result` to *outlink*.
diff --git a/doc/manual/src/command-ref/nix-store.md b/doc/manual/src/command-ref/nix-store.md
index 1251888e9..b712a7463 100644
--- a/doc/manual/src/command-ref/nix-store.md
+++ b/doc/manual/src/command-ref/nix-store.md
@@ -22,7 +22,8 @@ This section lists the options that are common to all operations. These
options are allowed for every subcommand, though they may not always
have an effect.
- - [`--add-root`]{#opt-add-root} *path*\
+ - <span id="opt-add-root">[`--add-root`](#opt-add-root)</span> *path*
+
Causes the result of a realisation (`--realise` and
`--force-realise`) to be registered as a root of the garbage
collector. *path* will be created as a symlink to the resulting
@@ -104,10 +105,6 @@ The following flags are available:
previous build, the new output path is left in
`/nix/store/name.check.`
- See also the `build-repeat` configuration option, which repeats a
- derivation a number of times and prevents its outputs from being
- registered as “valid” in the Nix store unless they are identical.
-
Special exit codes:
- `100`\
diff --git a/doc/manual/src/language/index.md b/doc/manual/src/language/index.md
index f9e9b9781..db34fde75 100644
--- a/doc/manual/src/language/index.md
+++ b/doc/manual/src/language/index.md
@@ -93,7 +93,7 @@ This is an incomplete overview of language features, by example.
`"hello ${ { a = "world" }.a }"`
- `"1 2 ${3}"`
+ `"1 2 ${toString 3}"`
`"${pkgs.bash}/bin/sh"`
diff --git a/doc/manual/src/release-notes/rl-2.12.md b/doc/manual/src/release-notes/rl-2.12.md
new file mode 100644
index 000000000..82de22cb4
--- /dev/null
+++ b/doc/manual/src/release-notes/rl-2.12.md
@@ -0,0 +1,43 @@
+# Release 2.12 (2022-12-06)
+
+* On Linux, Nix can now run builds in a user namespace where they run
+ as root (UID 0) and have 65,536 UIDs available.
+ <!-- FIXME: move this to its own section about system features -->
+ This is primarily useful for running containers such as `systemd-nspawn`
+ inside a Nix build. For an example, see [`tests/systemd-nspawn.nix`][nspawn].
+
+ [nspawn]: https://github.com/NixOS/nix/blob/67bcb99700a0da1395fa063d7c6586740b304598/tests/systemd-nspawn.nix
+
+ A build can enable this by setting the derivation attribute:
+
+ ```
+ requiredSystemFeatures = [ "uid-range" ];
+ ```
+
+ The `uid-range` [system feature] requires the [`auto-allocate-uids`]
+ setting to be enabled.
+
+ [system feature]: ../command-ref/conf-file.md#conf-system-features
+
+* Nix can now automatically pick UIDs for builds, removing the need to
+ create `nixbld*` user accounts. See [`auto-allocate-uids`].
+
+ [`auto-allocate-uids`]: ../command-ref/conf-file.md#conf-auto-allocate-uids
+
+* On Linux, Nix has experimental support for running builds inside a
+ cgroup. See
+ [`use-cgroups`](../command-ref/conf-file.md#conf-use-cgroups).
+
+* `<nix/fetchurl.nix>` now accepts an additional argument `impure` which
+ defaults to `false`. If it is set to `true`, the `hash` and `sha256`
+ arguments will be ignored and the resulting derivation will have
+ `__impure` set to `true`, making it an impure derivation.
+
+* If `builtins.readFile` is called on a file with context, then only
+ the parts of the context that appear in the content of the file are
+ retained. This avoids a lot of spurious errors where strings end up
+ having a context just because they are read from a store path
+ ([#7260](https://github.com/NixOS/nix/pull/7260)).
+
+* `nix build --json` now prints some statistics about top-level
+ derivations, such as CPU statistics when cgroups are enabled.
diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md
index 2069e4578..c1f4de76f 100644
--- a/doc/manual/src/release-notes/rl-next.md
+++ b/doc/manual/src/release-notes/rl-next.md
@@ -1,12 +1,4 @@
# Release X.Y (202?-??-??)
-* `<nix/fetchurl.nix>` now accepts an additional argument `impure` which
- defaults to `false`. If it is set to `true`, the `hash` and `sha256`
- arguments will be ignored and the resulting derivation will have
- `__impure` set to `true`, making it an impure derivation.
-
-* If `builtins.readFile` is called on a file with context, then only the parts
- of that context that appear in the content of the file are retained.
- This avoids a lot of spurious errors where some benign strings end-up having
- a context just because they are read from a store path
- ([#7260](https://github.com/NixOS/nix/pull/7260)).
+* The `repeat` and `enforce-determinism` options have been removed
+ since they had been broken under many circumstances for a long time.
diff --git a/docker.nix b/docker.nix
index bb2b4e7ff..203a06b53 100644
--- a/docker.nix
+++ b/docker.nix
@@ -36,6 +36,17 @@ let
shell = "${pkgs.bashInteractive}/bin/bash";
home = "/root";
gid = 0;
+ groups = [ "root" ];
+ description = "System administrator";
+ };
+
+ nobody = {
+ uid = 65534;
+ shell = "${pkgs.shadow}/bin/nologin";
+ home = "/var/empty";
+ gid = 65534;
+ groups = [ "nobody" ];
+ description = "Unprivileged account (don't use!)";
};
} // lib.listToAttrs (
@@ -57,6 +68,7 @@ let
groups = {
root.gid = 0;
nixbld.gid = 30000;
+ nobody.gid = 65534;
};
userToPasswd = (
diff --git a/flake.nix b/flake.nix
index cc2a48d9c..5127ee2a3 100644
--- a/flake.nix
+++ b/flake.nix
@@ -9,14 +9,14 @@
let
- version = builtins.readFile ./.version + versionSuffix;
+ officialRelease = false;
+
+ version = nixpkgs.lib.fileContents ./.version + versionSuffix;
versionSuffix =
if officialRelease
then ""
else "pre${builtins.substring 0 8 (self.lastModifiedDate or self.lastModified or "19700101")}_${self.shortRev or "dirty"}";
- officialRelease = false;
-
linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ];
linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ];
@@ -506,6 +506,12 @@
overlay = self.overlays.default;
});
+ tests.containers = (import ./tests/containers.nix rec {
+ system = "x86_64-linux";
+ inherit nixpkgs;
+ overlay = self.overlays.default;
+ });
+
tests.setuid = nixpkgs.lib.genAttrs
["i686-linux" "x86_64-linux"]
(system:
diff --git a/maintainers/README.md b/maintainers/README.md
new file mode 100644
index 000000000..60768db0a
--- /dev/null
+++ b/maintainers/README.md
@@ -0,0 +1,79 @@
+# Nix maintainers team
+
+## Motivation
+
+The goal of the team is to help other people to contribute to Nix.
+
+## Members
+
+- Eelco Dolstra (@edolstra) – Team lead
+- Théophane Hufschmitt (@thufschmitt)
+- Valentin Gagarin (@fricklerhandwerk)
+- Thomas Bereknyei (@tomberek)
+- Robert Hensing (@roberth)
+
+## Meeting protocol
+
+The team meets twice a week:
+
+- Discussion meeting: [Fridays 13:00-14:00 CET](https://calendar.google.com/calendar/event?eid=MHNtOGVuNWtrZXNpZHR2bW1sM3QyN2ZjaGNfMjAyMjExMjVUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn)
+
+ 1. Triage issues and pull requests from the _No Status_ column (30 min)
+ 2. Discuss issues and pull requests from the _To discuss_ column (30 min)
+
+- Work meeting: [Mondays 13:00-15:00 CET](https://calendar.google.com/calendar/event?eid=NTM1MG1wNGJnOGpmOTZhYms3bTB1bnY5cWxfMjAyMjExMjFUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn)
+
+ 1. Code review on pull requests from _In review_.
+ 2. Other chores and tasks.
+
+Meeting notes are collected on a [collaborative scratchpad](https://pad.lassul.us/Cv7FpYx-Ri-4VjUykQOLAw), and published on Discourse under the [Nix category](https://discourse.nixos.org/c/dev/nix/50).
+
+## Project board protocol
+
+The team uses a [GitHub project board](https://github.com/orgs/NixOS/projects/19/views/1) for tracking its work.
+
+Issues on the board progress through the following states:
+
+- No Status
+
+ Team members can add pull requests or issues to discuss or review together.
+
+ During the discussion meeting, the team triages new items.
+ If there is disagreement on the general idea behind an issue or pull request, it is moved to _To discuss_, otherwise to _In review_.
+
+- To discuss
+
+ Pull requests and issues that are important and controversial are discussed by the team during discussion meetings.
+
+ This may be where the merit of the change itself or the implementation strategy is contested by a team member.
+
+- In review
+
+ Pull requests in this column are reviewed together during work meetings.
+ This is both for spreading implementation knowledge and for establishing common values in code reviews.
+
+ When the overall direction is agreed upon, even when further changes are required, the pull request is assigned to one team member.
+
+- Assigned for merging
+
+ One team member is assigned to each of these pull requests.
+ They will communicate with the authors, and make the final approval once all remaining issues are addressed.
+
+ If more substantive issues arise, the assignee can move the pull request back to _To discuss_ to involve the team again.
+
+The process is illustrated in the following diagram:
+
+```mermaid
+flowchart TD
+ discuss[To discuss]
+
+ review[To review]
+
+ New --> |Disagreement on idea| discuss
+ New & discuss --> |Consensus on idea| review
+
+ review --> |Consensus on implementation| Assigned
+
+ Assigned --> |Implementation issues arise| review
+ Assigned --> |Remaining issues fixed| Merged
+```
diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in
index da1970f69..5fa489b20 100644
--- a/misc/launchd/org.nixos.nix-daemon.plist.in
+++ b/misc/launchd/org.nixos.nix-daemon.plist.in
@@ -28,7 +28,7 @@
<key>SoftResourceLimits</key>
<dict>
<key>NumberOfFiles</key>
- <integer>4096</integer>
+ <integer>1048576</integer>
</dict>
</dict>
</plist>
diff --git a/misc/systemd/nix-daemon.service.in b/misc/systemd/nix-daemon.service.in
index e3ac42beb..f46413630 100644
--- a/misc/systemd/nix-daemon.service.in
+++ b/misc/systemd/nix-daemon.service.in
@@ -9,7 +9,7 @@ ConditionPathIsReadWrite=@localstatedir@/nix/daemon-socket
[Service]
ExecStart=@@bindir@/nix-daemon nix-daemon --daemon
KillMode=process
-LimitNOFILE=4096
+LimitNOFILE=1048576
[Install]
WantedBy=multi-user.target
diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh
index 96c0f302b..ec82e0560 100644
--- a/scripts/install-multi-user.sh
+++ b/scripts/install-multi-user.sh
@@ -97,13 +97,10 @@ is_os_darwin() {
}
contact_us() {
- echo "You can open an issue at https://github.com/nixos/nix/issues"
+ echo "You can open an issue at"
+ echo "https://github.com/NixOS/nix/issues/new?labels=installer&template=installer.md"
echo ""
- echo "Or feel free to contact the team:"
- echo " - Matrix: #nix:nixos.org"
- echo " - IRC: in #nixos on irc.libera.chat"
- echo " - twitter: @nixos_org"
- echo " - forum: https://discourse.nixos.org"
+ echo "Or get in touch with the community: https://nixos.org/community"
}
get_help() {
echo "We'd love to help if you need it."
diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc
index f63b9eeae..dbe4a449d 100644
--- a/src/libcmd/installables.cc
+++ b/src/libcmd/installables.cc
@@ -844,20 +844,20 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
return installables.front();
}
-BuiltPaths Installable::build(
+std::vector<BuiltPathWithResult> Installable::build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode)
{
- BuiltPaths res;
- for (auto & [_, builtPath] : build2(evalStore, store, mode, installables, bMode))
- res.push_back(builtPath);
+ std::vector<BuiltPathWithResult> res;
+ for (auto & [_, builtPathWithResult] : build2(evalStore, store, mode, installables, bMode))
+ res.push_back(builtPathWithResult);
return res;
}
-std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::build2(
+std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> Installable::build2(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
@@ -877,7 +877,7 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
}
}
- std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> res;
+ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> res;
switch (mode) {
@@ -918,10 +918,10 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
output, *drvOutput->second);
}
}
- res.push_back({installable, BuiltPath::Built { bfd.drvPath, outputs }});
+ res.push_back({installable, {.path = BuiltPath::Built { bfd.drvPath, outputs }}});
},
[&](const DerivedPath::Opaque & bo) {
- res.push_back({installable, BuiltPath::Opaque { bo.path }});
+ res.push_back({installable, {.path = BuiltPath::Opaque { bo.path }}});
},
}, path.raw());
}
@@ -931,7 +931,7 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
case Realise::Outputs: {
if (settings.printMissing)
- printMissing(store, pathsToBuild, lvlInfo);
+ printMissing(store, pathsToBuild, lvlInfo);
for (auto & buildResult : store->buildPathsWithResults(pathsToBuild, bMode, evalStore)) {
if (!buildResult.success())
@@ -943,10 +943,10 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
std::map<std::string, StorePath> outputs;
for (auto & path : buildResult.builtOutputs)
outputs.emplace(path.first.outputName, path.second.outPath);
- res.push_back({installable, BuiltPath::Built { bfd.drvPath, outputs }});
+ res.push_back({installable, {.path = BuiltPath::Built { bfd.drvPath, outputs }, .result = buildResult}});
},
[&](const DerivedPath::Opaque & bo) {
- res.push_back({installable, BuiltPath::Opaque { bo.path }});
+ res.push_back({installable, {.path = BuiltPath::Opaque { bo.path }, .result = buildResult}});
},
}, buildResult.path.raw());
}
@@ -969,9 +969,12 @@ BuiltPaths Installable::toBuiltPaths(
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables)
{
- if (operateOn == OperateOn::Output)
- return Installable::build(evalStore, store, mode, installables);
- else {
+ if (operateOn == OperateOn::Output) {
+ BuiltPaths res;
+ for (auto & p : Installable::build(evalStore, store, mode, installables))
+ res.push_back(p.path);
+ return res;
+ } else {
if (mode == Realise::Nothing)
settings.readOnlyMode = true;
diff --git a/src/libcmd/installables.hh b/src/libcmd/installables.hh
index 948f78919..02ea351d3 100644
--- a/src/libcmd/installables.hh
+++ b/src/libcmd/installables.hh
@@ -7,6 +7,7 @@
#include "eval.hh"
#include "store-api.hh"
#include "flake/flake.hh"
+#include "build-result.hh"
#include <optional>
@@ -51,6 +52,12 @@ enum class OperateOn {
Derivation
};
+struct BuiltPathWithResult
+{
+ BuiltPath path;
+ std::optional<BuildResult> result;
+};
+
struct Installable
{
virtual ~Installable() { }
@@ -91,14 +98,14 @@ struct Installable
return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}});
}
- static BuiltPaths build(
+ static std::vector<BuiltPathWithResult> build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode = bmNormal);
- static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> build2(
+ static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> build2(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc
index bb254ff8d..557952277 100644
--- a/src/libcmd/repl.cc
+++ b/src/libcmd/repl.cc
@@ -270,6 +270,7 @@ void NixRepl::mainLoop()
// ctrl-D should exit the debugger.
state->debugStop = false;
state->debugQuit = true;
+ logger->cout("");
break;
}
try {
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 563f24e48..76a10b9f8 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -7,7 +7,6 @@
#include "globals.hh"
#include "eval-inline.hh"
#include "filetransfer.hh"
-#include "json.hh"
#include "function-trace.hh"
#include <algorithm>
@@ -21,6 +20,7 @@
#include <functional>
#include <sys/resource.h>
+#include <nlohmann/json.hpp>
#if HAVE_BOEHMGC
@@ -35,6 +35,8 @@
#endif
+using json = nlohmann::json;
+
namespace nix {
static char * allocString(size_t size)
@@ -69,15 +71,11 @@ static char * dupString(const char * s)
// empty string.
static const char * makeImmutableStringWithLen(const char * s, size_t size)
{
- char * t;
if (size == 0)
return "";
-#if HAVE_BOEHMGC
- t = GC_STRNDUP(s, size);
-#else
- t = strndup(s, size);
-#endif
- if (!t) throw std::bad_alloc();
+ auto t = allocString(size + 1);
+ memcpy(t, s, size);
+ t[size] = 0;
return t;
}
@@ -1648,7 +1646,7 @@ void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value &
auto dts = debugRepl
? makeDebugTraceStacker(
*this, *lambda.body, env2, positions[lambda.pos],
- "while evaluating %s",
+ "while calling %s",
lambda.name
? concatStrings("'", symbols[lambda.name], "'")
: "anonymous lambda")
@@ -1657,7 +1655,7 @@ void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value &
lambda.body->eval(*this, env2, vCur);
} catch (Error & e) {
if (loggerSettings.showTrace.get()) {
- addErrorTrace(e, lambda.pos, "while evaluating %s",
+ addErrorTrace(e, lambda.pos, "while calling %s",
(lambda.name
? concatStrings("'", symbols[lambda.name], "'")
: "anonymous lambda"));
@@ -2441,97 +2439,97 @@ void EvalState::printStats()
std::fstream fs;
if (outPath != "-")
fs.open(outPath, std::fstream::out);
- JSONObject topObj(outPath == "-" ? std::cerr : fs, true);
- topObj.attr("cpuTime",cpuTime);
- {
- auto envs = topObj.object("envs");
- envs.attr("number", nrEnvs);
- envs.attr("elements", nrValuesInEnvs);
- envs.attr("bytes", bEnvs);
- }
- {
- auto lists = topObj.object("list");
- lists.attr("elements", nrListElems);
- lists.attr("bytes", bLists);
- lists.attr("concats", nrListConcats);
- }
- {
- auto values = topObj.object("values");
- values.attr("number", nrValues);
- values.attr("bytes", bValues);
- }
- {
- auto syms = topObj.object("symbols");
- syms.attr("number", symbols.size());
- syms.attr("bytes", symbols.totalSize());
- }
- {
- auto sets = topObj.object("sets");
- sets.attr("number", nrAttrsets);
- sets.attr("bytes", bAttrsets);
- sets.attr("elements", nrAttrsInAttrsets);
- }
- {
- auto sizes = topObj.object("sizes");
- sizes.attr("Env", sizeof(Env));
- sizes.attr("Value", sizeof(Value));
- sizes.attr("Bindings", sizeof(Bindings));
- sizes.attr("Attr", sizeof(Attr));
- }
- topObj.attr("nrOpUpdates", nrOpUpdates);
- topObj.attr("nrOpUpdateValuesCopied", nrOpUpdateValuesCopied);
- topObj.attr("nrThunks", nrThunks);
- topObj.attr("nrAvoided", nrAvoided);
- topObj.attr("nrLookups", nrLookups);
- topObj.attr("nrPrimOpCalls", nrPrimOpCalls);
- topObj.attr("nrFunctionCalls", nrFunctionCalls);
+ json topObj = json::object();
+ topObj["cpuTime"] = cpuTime;
+ topObj["envs"] = {
+ {"number", nrEnvs},
+ {"elements", nrValuesInEnvs},
+ {"bytes", bEnvs},
+ };
+ topObj["list"] = {
+ {"elements", nrListElems},
+ {"bytes", bLists},
+ {"concats", nrListConcats},
+ };
+ topObj["values"] = {
+ {"number", nrValues},
+ {"bytes", bValues},
+ };
+ topObj["symbols"] = {
+ {"number", symbols.size()},
+ {"bytes", symbols.totalSize()},
+ };
+ topObj["sets"] = {
+ {"number", nrAttrsets},
+ {"bytes", bAttrsets},
+ {"elements", nrAttrsInAttrsets},
+ };
+ topObj["sizes"] = {
+ {"Env", sizeof(Env)},
+ {"Value", sizeof(Value)},
+ {"Bindings", sizeof(Bindings)},
+ {"Attr", sizeof(Attr)},
+ };
+ topObj["nrOpUpdates"] = nrOpUpdates;
+ topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied;
+ topObj["nrThunks"] = nrThunks;
+ topObj["nrAvoided"] = nrAvoided;
+ topObj["nrLookups"] = nrLookups;
+ topObj["nrPrimOpCalls"] = nrPrimOpCalls;
+ topObj["nrFunctionCalls"] = nrFunctionCalls;
#if HAVE_BOEHMGC
- {
- auto gc = topObj.object("gc");
- gc.attr("heapSize", heapSize);
- gc.attr("totalBytes", totalBytes);
- }
+ topObj["gc"] = {
+ {"heapSize", heapSize},
+ {"totalBytes", totalBytes},
+ };
#endif
if (countCalls) {
+ topObj["primops"] = primOpCalls;
{
- auto obj = topObj.object("primops");
- for (auto & i : primOpCalls)
- obj.attr(i.first, i.second);
- }
- {
- auto list = topObj.list("functions");
+ auto& list = topObj["functions"];
+ list = json::array();
for (auto & [fun, count] : functionCalls) {
- auto obj = list.object();
+ json obj = json::object();
if (fun->name)
- obj.attr("name", (std::string_view) symbols[fun->name]);
+ obj["name"] = (std::string_view) symbols[fun->name];
else
- obj.attr("name", nullptr);
+ obj["name"] = nullptr;
if (auto pos = positions[fun->pos]) {
- obj.attr("file", (std::string_view) pos.file);
- obj.attr("line", pos.line);
- obj.attr("column", pos.column);
+ obj["file"] = (std::string_view) pos.file;
+ obj["line"] = pos.line;
+ obj["column"] = pos.column;
}
- obj.attr("count", count);
+ obj["count"] = count;
+ list.push_back(obj);
}
}
{
- auto list = topObj.list("attributes");
+ auto list = topObj["attributes"];
+ list = json::array();
for (auto & i : attrSelects) {
- auto obj = list.object();
+ json obj = json::object();
if (auto pos = positions[i.first]) {
- obj.attr("file", (const std::string &) pos.file);
- obj.attr("line", pos.line);
- obj.attr("column", pos.column);
+ obj["file"] = (const std::string &) pos.file;
+ obj["line"] = pos.line;
+ obj["column"] = pos.column;
}
- obj.attr("count", i.second);
+ obj["count"] = i.second;
+ list.push_back(obj);
}
}
}
if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") {
- auto list = topObj.list("symbols");
- symbols.dump([&](const std::string & s) { list.elem(s); });
+ // XXX: overrides earlier assignment
+ topObj["symbols"] = json::array();
+ auto &list = topObj["symbols"];
+ symbols.dump([&](const std::string & s) { list.emplace_back(s); });
+ }
+ if (outPath == "-") {
+ std::cerr << topObj.dump(2) << std::endl;
+ } else {
+ fs << topObj.dump(2) << std::endl;
}
}
}
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index 22f6ad3cc..8a4c19f7c 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -8,12 +8,12 @@
#include "references.hh"
#include "store-api.hh"
#include "util.hh"
-#include "json.hh"
#include "value-to-json.hh"
#include "value-to-xml.hh"
#include "primops.hh"
#include <boost/container/small_vector.hpp>
+#include <nlohmann/json.hpp>
#include <sys/types.h>
#include <sys/stat.h>
@@ -1011,6 +1011,7 @@ static void prim_second(EvalState & state, const PosIdx pos, Value * * args, Val
derivation. */
static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
+ using nlohmann::json;
state.forceAttrs(*args[0], pos);
/* Figure out the name first (for stack backtraces). */
@@ -1032,11 +1033,10 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
}
/* Check whether attributes should be passed as a JSON file. */
- std::ostringstream jsonBuf;
- std::unique_ptr<JSONObject> jsonObject;
+ std::optional<json> jsonObject;
attr = args[0]->attrs->find(state.sStructuredAttrs);
if (attr != args[0]->attrs->end() && state.forceBool(*attr->value, pos))
- jsonObject = std::make_unique<JSONObject>(jsonBuf);
+ jsonObject = json::object();
/* Check whether null attributes should be ignored. */
bool ignoreNulls = false;
@@ -1138,8 +1138,7 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
if (i->name == state.sStructuredAttrs) continue;
- auto placeholder(jsonObject->placeholder(key));
- printValueAsJSON(state, true, *i->value, pos, placeholder, context);
+ (*jsonObject)[key] = printValueAsJSON(state, true, *i->value, pos, context);
if (i->name == state.sBuilder)
drv.builder = state.forceString(*i->value, context, posDrvName);
@@ -1183,8 +1182,8 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
}
if (jsonObject) {
+ drv.env.emplace("__json", jsonObject->dump());
jsonObject.reset();
- drv.env.emplace("__json", jsonBuf.str());
}
/* Everything in the context of the strings in the derivation
@@ -2421,12 +2420,18 @@ static RegisterPrimOp primop_listToAttrs({
Construct a set from a list specifying the names and values of each
attribute. Each element of the list should be a set consisting of a
string-valued attribute `name` specifying the name of the attribute,
- and an attribute `value` specifying its value. Example:
+ and an attribute `value` specifying its value.
+
+ In case of duplicate occurrences of the same name, the first
+ takes precedence.
+
+ Example:
```nix
builtins.listToAttrs
[ { name = "foo"; value = 123; }
{ name = "bar"; value = 456; }
+ { name = "bar"; value = 420; }
]
```
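For clarity, the duplicate-name example added to the `listToAttrs` documentation above evaluates as follows; since the first occurrence of a name takes precedence, the second `bar` binding is dropped:

```nix
builtins.listToAttrs [
  { name = "foo"; value = 123; }
  { name = "bar"; value = 456; }
  { name = "bar"; value = 420; }
]
# => { bar = 456; foo = 123; }
```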
diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc
index 4d63d8b49..5dc453b2e 100644
--- a/src/libexpr/value-to-json.cc
+++ b/src/libexpr/value-to-json.cc
@@ -1,84 +1,82 @@
#include "value-to-json.hh"
-#include "json.hh"
#include "eval-inline.hh"
#include "util.hh"
#include <cstdlib>
#include <iomanip>
+#include <nlohmann/json.hpp>
namespace nix {
-
-void printValueAsJSON(EvalState & state, bool strict,
- Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore)
+using json = nlohmann::json;
+json printValueAsJSON(EvalState & state, bool strict,
+ Value & v, const PosIdx pos, PathSet & context, bool copyToStore)
{
checkInterrupt();
if (strict) state.forceValue(v, pos);
+ json out;
+
switch (v.type()) {
case nInt:
- out.write(v.integer);
+ out = v.integer;
break;
case nBool:
- out.write(v.boolean);
+ out = v.boolean;
break;
case nString:
copyContext(v, context);
- out.write(v.string.s);
+ out = v.string.s;
break;
case nPath:
if (copyToStore)
- out.write(state.copyPathToStore(context, v.path));
+ out = state.copyPathToStore(context, v.path);
else
- out.write(v.path);
+ out = v.path;
break;
case nNull:
- out.write(nullptr);
break;
case nAttrs: {
auto maybeString = state.tryAttrsToString(pos, v, context, false, false);
if (maybeString) {
- out.write(*maybeString);
+ out = *maybeString;
break;
}
auto i = v.attrs->find(state.sOutPath);
if (i == v.attrs->end()) {
- auto obj(out.object());
+ out = json::object();
StringSet names;
for (auto & j : *v.attrs)
names.emplace(state.symbols[j.name]);
for (auto & j : names) {
Attr & a(*v.attrs->find(state.symbols.create(j)));
- auto placeholder(obj.placeholder(j));
- printValueAsJSON(state, strict, *a.value, a.pos, placeholder, context, copyToStore);
+ out[j] = printValueAsJSON(state, strict, *a.value, a.pos, context, copyToStore);
}
} else
- printValueAsJSON(state, strict, *i->value, i->pos, out, context, copyToStore);
+ return printValueAsJSON(state, strict, *i->value, i->pos, context, copyToStore);
break;
}
case nList: {
- auto list(out.list());
- for (auto elem : v.listItems()) {
- auto placeholder(list.placeholder());
- printValueAsJSON(state, strict, *elem, pos, placeholder, context, copyToStore);
- }
+ out = json::array();
+ for (auto elem : v.listItems())
+ out.push_back(printValueAsJSON(state, strict, *elem, pos, context, copyToStore));
break;
}
case nExternal:
- v.external->printValueAsJSON(state, strict, out, context, copyToStore);
+ return v.external->printValueAsJSON(state, strict, context, copyToStore);
break;
case nFloat:
- out.write(v.fpoint);
+ out = v.fpoint;
break;
case nThunk:
@@ -91,17 +89,17 @@ void printValueAsJSON(EvalState & state, bool strict,
state.debugThrowLastTrace(e);
throw e;
}
+ return out;
}
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore)
{
- JSONPlaceholder out(str);
- printValueAsJSON(state, strict, v, pos, out, context, copyToStore);
+ str << printValueAsJSON(state, strict, v, pos, context, copyToStore);
}
-void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
- JSONPlaceholder & out, PathSet & context, bool copyToStore) const
+json ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
+ PathSet & context, bool copyToStore) const
{
state.debugThrowLastTrace(TypeError("cannot convert %1% to JSON", showType()));
}
diff --git a/src/libexpr/value-to-json.hh b/src/libexpr/value-to-json.hh
index 7ddc8a5b1..22f26b790 100644
--- a/src/libexpr/value-to-json.hh
+++ b/src/libexpr/value-to-json.hh
@@ -5,13 +5,12 @@
#include <string>
#include <map>
+#include <nlohmann/json_fwd.hpp>
namespace nix {
-class JSONPlaceholder;
-
-void printValueAsJSON(EvalState & state, bool strict,
- Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore = true);
+nlohmann::json printValueAsJSON(EvalState & state, bool strict,
+ Value & v, const PosIdx pos, PathSet & context, bool copyToStore = true);
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore = true);
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
index 590ba7783..5adac72f8 100644
--- a/src/libexpr/value.hh
+++ b/src/libexpr/value.hh
@@ -7,6 +7,7 @@
#if HAVE_BOEHMGC
#include <gc/gc_allocator.h>
#endif
+#include <nlohmann/json_fwd.hpp>
namespace nix {
@@ -62,7 +63,6 @@ class StorePath;
class Store;
class EvalState;
class XMLWriter;
-class JSONPlaceholder;
typedef int64_t NixInt;
@@ -98,8 +98,8 @@ class ExternalValueBase
virtual bool operator ==(const ExternalValueBase & b) const;
/* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */
- virtual void printValueAsJSON(EvalState & state, bool strict,
- JSONPlaceholder & out, PathSet & context, bool copyToStore = true) const;
+ virtual nlohmann::json printValueAsJSON(EvalState & state, bool strict,
+ PathSet & context, bool copyToStore = true) const;
/* Print the value as XML. Defaults to unevaluated */
virtual void printValueAsXML(EvalState & state, bool strict, bool location,
diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc
index 961f4e18a..d160a83e9 100644
--- a/src/libmain/progress-bar.cc
+++ b/src/libmain/progress-bar.cc
@@ -180,10 +180,12 @@ public:
auto machineName = getS(fields, 1);
if (machineName != "")
i->s += fmt(" on " ANSI_BOLD "%s" ANSI_NORMAL, machineName);
- auto curRound = getI(fields, 2);
- auto nrRounds = getI(fields, 3);
- if (nrRounds != 1)
- i->s += fmt(" (round %d/%d)", curRound, nrRounds);
+
+ // Used to be curRound and nrRounds, but the
+ // implementation was broken for a long time.
+ if (getI(fields, 2) != 1 || getI(fields, 3) != 1) {
+ throw Error("log message indicated repeating builds, but this is not currently implemented");
+ }
i->name = DrvName(name).name;
}
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index a26770c79..12d0c32fb 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -9,7 +9,6 @@
#include "remote-fs-accessor.hh"
#include "nar-info-disk-cache.hh"
#include "nar-accessor.hh"
-#include "json.hh"
#include "thread-pool.hh"
#include "callback.hh"
@@ -194,19 +193,12 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
/* Optionally write a JSON file containing a listing of the
contents of the NAR. */
if (writeNARListing) {
- std::ostringstream jsonOut;
-
- {
- JSONObject jsonRoot(jsonOut);
- jsonRoot.attr("version", 1);
-
- {
- auto res = jsonRoot.placeholder("root");
- listNar(res, ref<FSAccessor>(narAccessor), "", true);
- }
- }
+ nlohmann::json j = {
+ {"version", 1},
+ {"root", listNar(ref<FSAccessor>(narAccessor), "", true)},
+ };
- upsertFile(std::string(info.path.hashPart()) + ".ls", jsonOut.str(), "application/json");
+ upsertFile(std::string(info.path.hashPart()) + ".ls", j.dump(), "application/json");
}
/* Optionally maintain an index of DWARF debug info files
diff --git a/src/libstore/build-result.hh b/src/libstore/build-result.hh
index 24fb1f763..a5749cf33 100644
--- a/src/libstore/build-result.hh
+++ b/src/libstore/build-result.hh
@@ -5,7 +5,7 @@
#include <string>
#include <chrono>
-
+#include <optional>
namespace nix {
@@ -78,6 +78,9 @@ struct BuildResult
was repeated). */
time_t startTime = 0, stopTime = 0;
+ /* User and system CPU time the build took. */
+ std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
+
bool success()
{
return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;
diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc
index 00e375fe9..87ec081cb 100644
--- a/src/libstore/build/derivation-goal.cc
+++ b/src/libstore/build/derivation-goal.cc
@@ -7,7 +7,6 @@
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
-#include "json.hh"
#include "compression.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
@@ -502,6 +501,14 @@ void DerivationGoal::inputsRealised()
now-known results of dependencies. If so, we become a
stub goal aliasing that resolved derivation goal. */
std::optional attempt = fullDrv.tryResolve(worker.store, inputDrvOutputs);
+ if (!attempt) {
+ /* TODO (impure derivations-induced tech debt) (see below):
+ The above attempt should have found it, but because we manage
+ inputDrvOutputs statefully, sometimes it gets out of sync with
+ the real source of truth (store). So we query the store
+ directly if there's a problem. */
+ attempt = fullDrv.tryResolve(worker.store);
+ }
assert(attempt);
Derivation drvResolved { *std::move(attempt) };
@@ -564,10 +571,6 @@ void DerivationGoal::inputsRealised()
/* What type of derivation are we building? */
derivationType = drv->type();
- /* Don't repeat fixed-output derivations since they're already
- verified by their output hash.*/
- nrRounds = derivationType.isFixed() ? 1 : settings.buildRepeat + 1;
-
/* Okay, try to build. Note that here we don't wait for a build
slot to become available, since we don't need one if there is a
build hook. */
@@ -582,12 +585,11 @@ void DerivationGoal::started()
auto msg = fmt(
buildMode == bmRepair ? "repairing outputs of '%s'" :
buildMode == bmCheck ? "checking outputs of '%s'" :
- nrRounds > 1 ? "building '%s' (round %d/%d)" :
- "building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
+ "building '%s'", worker.store.printStorePath(drvPath));
fmt("building '%s'", worker.store.printStorePath(drvPath));
if (hook) msg += fmt(" on '%s'", machineName);
act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg,
- Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds});
+ Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", 1, 1});
mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds);
worker.updateProgress();
}
@@ -887,6 +889,14 @@ void DerivationGoal::buildDone()
cleanupPostChildKill();
+ if (buildResult.cpuUser && buildResult.cpuSystem) {
+ debug("builder for '%s' terminated with status %d, user CPU %.3fs, system CPU %.3fs",
+ worker.store.printStorePath(drvPath),
+ status,
+ ((double) buildResult.cpuUser->count()) / 1000000,
+ ((double) buildResult.cpuSystem->count()) / 1000000);
+ }
+
bool diskFull = false;
try {
@@ -933,14 +943,6 @@ void DerivationGoal::buildDone()
cleanupPostOutputsRegisteredModeNonCheck();
- /* Repeat the build if necessary. */
- if (curRound++ < nrRounds) {
- outputLocks.unlock();
- state = &DerivationGoal::tryToBuild;
- worker.wakeUp(shared_from_this());
- return;
- }
-
/* It is now safe to delete the lock files, since all future
lockers will see that the output paths are valid; they will
not create new lock files with the same names as the old
@@ -1001,22 +1003,34 @@ void DerivationGoal::resolvedFinished()
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,resolve)",
worker.store.printStorePath(drvPath), wantedOutput);
- auto realisation = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
- if (!realisation)
- throw Error(
- "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
- worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
+
+ auto realisation = [&]{
+ auto take1 = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
+ if (take1) return *take1;
+
+ /* The above `get` should work. But stateful tracking of
+ outputs in resolvedResult can get out of sync with the
+ store, which is our actual source of truth. For now we just
+ check the store directly if it fails. */
+ auto take2 = worker.evalStore.queryRealisation(DrvOutput { *resolvedHash, wantedOutput });
+ if (take2) return *take2;
+
+ throw Error(
+ "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
+ worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
+ }();
+
if (drv->type().isPure()) {
- auto newRealisation = *realisation;
+ auto newRealisation = realisation;
newRealisation.id = DrvOutput { initialOutput->outputHash, wantedOutput };
newRealisation.signatures.clear();
if (!drv->type().isFixed())
- newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath);
+ newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation.outPath);
signRealisation(newRealisation);
worker.store.registerDrvOutput(newRealisation);
}
- outputPaths.insert(realisation->outPath);
- builtOutputs.emplace(realisation->id, *realisation);
+ outputPaths.insert(realisation.outPath);
+ builtOutputs.emplace(realisation.id, realisation);
}
runPostBuildHook(
diff --git a/src/libstore/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh
index 2d8bfd592..d33e04cbc 100644
--- a/src/libstore/build/derivation-goal.hh
+++ b/src/libstore/build/derivation-goal.hh
@@ -115,11 +115,6 @@ struct DerivationGoal : public Goal
BuildMode buildMode;
- /* The current round, if we're building multiple times. */
- size_t curRound = 1;
-
- size_t nrRounds;
-
std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;
std::unique_ptr<Activity> act;
diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc
index 5cea3b590..6fe3bc49c 100644
--- a/src/libstore/build/local-derivation-goal.cc
+++ b/src/libstore/build/local-derivation-goal.cc
@@ -8,13 +8,13 @@
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
-#include "json.hh"
#include "compression.hh"
#include "daemon.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "json-utils.hh"
+#include "cgroup.hh"
#include <regex>
#include <queue>
@@ -56,6 +56,7 @@
#include <pwd.h>
#include <grp.h>
+#include <iostream>
namespace nix {
@@ -129,26 +130,44 @@ void LocalDerivationGoal::killChild()
if (pid != -1) {
worker.childTerminated(this);
- if (buildUser) {
- /* If we're using a build user, then there is a tricky
- race condition: if we kill the build user before the
- child has done its setuid() to the build user uid, then
- it won't be killed, and we'll potentially lock up in
- pid.wait(). So also send a conventional kill to the
- child. */
- ::kill(-pid, SIGKILL); /* ignore the result */
- buildUser->kill();
- pid.wait();
- } else
- pid.kill();
+ /* If we're using a build user, then there is a tricky race
+ condition: if we kill the build user before the child has
+ done its setuid() to the build user uid, then it won't be
+ killed, and we'll potentially lock up in pid.wait(). So
+ also send a conventional kill to the child. */
+ ::kill(-pid, SIGKILL); /* ignore the result */
- assert(pid == -1);
+ killSandbox(true);
+
+ pid.wait();
}
DerivationGoal::killChild();
}
+void LocalDerivationGoal::killSandbox(bool getStats)
+{
+ if (cgroup) {
+ #if __linux__
+ auto stats = destroyCgroup(*cgroup);
+ if (getStats) {
+ buildResult.cpuUser = stats.cpuUser;
+ buildResult.cpuSystem = stats.cpuSystem;
+ }
+ #else
+ abort();
+ #endif
+ }
+
+ else if (buildUser) {
+ auto uid = buildUser->getUID();
+ assert(uid != 0);
+ killUser(uid);
+ }
+}
+
+
void LocalDerivationGoal::tryLocalBuild() {
unsigned int curBuilds = worker.getNrLocalBuilds();
if (curBuilds >= settings.maxBuildJobs) {
@@ -158,28 +177,46 @@ void LocalDerivationGoal::tryLocalBuild() {
return;
}
- /* If `build-users-group' is not empty, then we have to build as
- one of the members of that group. */
- if (settings.buildUsersGroup != "" && getuid() == 0) {
-#if defined(__linux__) || defined(__APPLE__)
- if (!buildUser) buildUser = std::make_unique<UserLock>();
+ /* Are we doing a chroot build? */
+ {
+ auto noChroot = parsedDrv->getBoolAttr("__noChroot");
+ if (settings.sandboxMode == smEnabled) {
+ if (noChroot)
+ throw Error("derivation '%s' has '__noChroot' set, "
+ "but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
+#if __APPLE__
+ if (additionalSandboxProfile != "")
+ throw Error("derivation '%s' specifies a sandbox profile, "
+ "but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
+#endif
+ useChroot = true;
+ }
+ else if (settings.sandboxMode == smDisabled)
+ useChroot = false;
+ else if (settings.sandboxMode == smRelaxed)
+ useChroot = derivationType.isSandboxed() && !noChroot;
+ }
- if (buildUser->findFreeUser()) {
- /* Make sure that no other processes are executing under this
- uid. */
- buildUser->kill();
- } else {
+ auto & localStore = getLocalStore();
+ if (localStore.storeDir != localStore.realStoreDir.get()) {
+ #if __linux__
+ useChroot = true;
+ #else
+ throw Error("building using a diverted store is not supported on this platform");
+ #endif
+ }
+
+ if (useBuildUsers()) {
+ if (!buildUser)
+ buildUser = acquireUserLock(parsedDrv->useUidRange() ? 65536 : 1, useChroot);
+
+ if (!buildUser) {
if (!actLock)
actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
fmt("waiting for UID to build '%s'", yellowtxt(worker.store.printStorePath(drvPath))));
worker.waitForAWhile(shared_from_this());
return;
}
-#else
- /* Don't know how to block the creation of setuid/setgid
- binaries on this platform. */
- throw Error("build users are not supported on this platform for security reasons");
-#endif
}
actLock.reset();
@@ -270,7 +307,7 @@ void LocalDerivationGoal::cleanupPostChildKill()
malicious user from leaving behind a process that keeps files
open and modifies them after they have been chown'ed to
root. */
- if (buildUser) buildUser->kill();
+ killSandbox(true);
/* Terminate the recursive Nix daemon. */
stopDaemon();
@@ -363,6 +400,64 @@ static void linkOrCopy(const Path & from, const Path & to)
void LocalDerivationGoal::startBuilder()
{
+ if ((buildUser && buildUser->getUIDCount() != 1)
+ #if __linux__
+ || settings.useCgroups
+ #endif
+ )
+ {
+ #if __linux__
+ settings.requireExperimentalFeature(Xp::Cgroups);
+
+ auto cgroupFS = getCgroupFS();
+ if (!cgroupFS)
+ throw Error("cannot determine the cgroups file system");
+
+ auto ourCgroups = getCgroups("/proc/self/cgroup");
+ auto ourCgroup = ourCgroups[""];
+ if (ourCgroup == "")
+ throw Error("cannot determine cgroup name from /proc/self/cgroup");
+
+ auto ourCgroupPath = canonPath(*cgroupFS + "/" + ourCgroup);
+
+ if (!pathExists(ourCgroupPath))
+ throw Error("expected cgroup directory '%s'", ourCgroupPath);
+
+ static std::atomic<unsigned int> counter{0};
+
+ cgroup = buildUser
+ ? fmt("%s/nix-build-uid-%d", ourCgroupPath, buildUser->getUID())
+ : fmt("%s/nix-build-pid-%d-%d", ourCgroupPath, getpid(), counter++);
+
+ debug("using cgroup '%s'", *cgroup);
+
+ /* When using a build user, record the cgroup we used for that
+ user so that if we got interrupted previously, we can kill
+ any left-over cgroup first. */
+ if (buildUser) {
+ auto cgroupsDir = settings.nixStateDir + "/cgroups";
+ createDirs(cgroupsDir);
+
+ auto cgroupFile = fmt("%s/%d", cgroupsDir, buildUser->getUID());
+
+ if (pathExists(cgroupFile)) {
+ auto prevCgroup = readFile(cgroupFile);
+ destroyCgroup(prevCgroup);
+ }
+
+ writeFile(cgroupFile, *cgroup);
+ }
+
+ #else
+ throw Error("cgroups are not supported on this platform");
+ #endif
+ }
+
+ /* Make sure that no other processes are executing under the
+ sandbox uids. This must be done before any chownToBuilder()
+ calls. */
+ killSandbox(false);
+
/* Right platform? */
if (!parsedDrv->canBuildLocally(worker.store))
throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
@@ -376,35 +471,6 @@ void LocalDerivationGoal::startBuilder()
additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
#endif
- /* Are we doing a chroot build? */
- {
- auto noChroot = parsedDrv->getBoolAttr("__noChroot");
- if (settings.sandboxMode == smEnabled) {
- if (noChroot)
- throw Error("derivation '%s' has '__noChroot' set, "
- "but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
-#if __APPLE__
- if (additionalSandboxProfile != "")
- throw Error("derivation '%s' specifies a sandbox profile, "
- "but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
-#endif
- useChroot = true;
- }
- else if (settings.sandboxMode == smDisabled)
- useChroot = false;
- else if (settings.sandboxMode == smRelaxed)
- useChroot = derivationType.isSandboxed() && !noChroot;
- }
-
- auto & localStore = getLocalStore();
- if (localStore.storeDir != localStore.realStoreDir.get()) {
- #if __linux__
- useChroot = true;
- #else
- throw Error("building using a diverted store is not supported on this platform");
- #endif
- }
-
/* Create a temporary directory where the build will take
place. */
tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700);
@@ -580,10 +646,11 @@ void LocalDerivationGoal::startBuilder()
printMsg(lvlChatty, format("setting up chroot environment in '%1%'") % chrootRootDir);
- if (mkdir(chrootRootDir.c_str(), 0750) == -1)
+ // FIXME: make this 0700
+ if (mkdir(chrootRootDir.c_str(), buildUser && buildUser->getUIDCount() != 1 ? 0755 : 0750) == -1)
throw SysError("cannot create '%1%'", chrootRootDir);
- if (buildUser && chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1)
+ if (buildUser && chown(chrootRootDir.c_str(), buildUser->getUIDCount() != 1 ? buildUser->getUID() : 0, buildUser->getGID()) == -1)
throw SysError("cannot change ownership of '%1%'", chrootRootDir);
/* Create a writable /tmp in the chroot. Many builders need
@@ -597,6 +664,10 @@ void LocalDerivationGoal::startBuilder()
nobody account. The latter is kind of a hack to support
Samba-in-QEMU. */
createDirs(chrootRootDir + "/etc");
+ chownToBuilder(chrootRootDir + "/etc");
+
+ if (parsedDrv->useUidRange() && (!buildUser || buildUser->getUIDCount() < 65536))
+ throw Error("feature 'uid-range' requires the setting '%s' to be enabled", settings.autoAllocateUids.name);
/* Declare the build user's group so that programs get a consistent
view of the system (e.g., "id -gn"). */
@@ -647,12 +718,28 @@ void LocalDerivationGoal::startBuilder()
dirsInChroot.erase(worker.store.printStorePath(*i.second.second));
}
-#elif __APPLE__
- /* We don't really have any parent prep work to do (yet?)
- All work happens in the child, instead. */
+ if (cgroup) {
+ if (mkdir(cgroup->c_str(), 0755) != 0)
+ throw SysError("creating cgroup '%s'", *cgroup);
+ chownToBuilder(*cgroup);
+ chownToBuilder(*cgroup + "/cgroup.procs");
+ chownToBuilder(*cgroup + "/cgroup.threads");
+ //chownToBuilder(*cgroup + "/cgroup.subtree_control");
+ }
+
#else
- throw Error("sandboxing builds is not supported on this platform");
+ if (parsedDrv->useUidRange())
+ throw Error("feature 'uid-range' is not supported on this platform");
+ #if __APPLE__
+ /* We don't really have any parent prep work to do (yet?)
+ All work happens in the child, instead. */
+ #else
+ throw Error("sandboxing builds is not supported on this platform");
+ #endif
#endif
+ } else {
+ if (parsedDrv->useUidRange())
+ throw Error("feature 'uid-range' is only supported in sandboxed builds");
}
if (needsHashRewrite() && pathExists(homeDir))
@@ -913,14 +1000,16 @@ void LocalDerivationGoal::startBuilder()
the calling user (if build users are disabled). */
uid_t hostUid = buildUser ? buildUser->getUID() : getuid();
uid_t hostGid = buildUser ? buildUser->getGID() : getgid();
+ uid_t nrIds = buildUser ? buildUser->getUIDCount() : 1;
writeFile("/proc/" + std::to_string(pid) + "/uid_map",
- fmt("%d %d 1", sandboxUid(), hostUid));
+ fmt("%d %d %d", sandboxUid(), hostUid, nrIds));
- writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
+ if (!buildUser || buildUser->getUIDCount() == 1)
+ writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
writeFile("/proc/" + std::to_string(pid) + "/gid_map",
- fmt("%d %d 1", sandboxGid(), hostGid));
+ fmt("%d %d %d", sandboxGid(), hostGid, nrIds));
} else {
debug("note: not using a user namespace");
if (!buildUser)
@@ -947,6 +1036,10 @@ void LocalDerivationGoal::startBuilder()
throw SysError("getting sandbox user namespace");
}
+ /* Move the child into its own cgroup. */
+ if (cgroup)
+ writeFile(*cgroup + "/cgroup.procs", fmt("%d", (pid_t) pid));
+
/* Signal the builder that we've updated its user namespace. */
writeFull(userNamespaceSync.writeSide.get(), "1");
@@ -1552,6 +1645,22 @@ void setupSeccomp()
seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0)
printError("unable to add ARM seccomp architecture; this may result in spurious build failures if running 32-bit ARM processes");
+ if (nativeSystem == "mips64-linux" &&
+ seccomp_arch_add(ctx, SCMP_ARCH_MIPS) != 0)
+ printError("unable to add mips seccomp architecture");
+
+ if (nativeSystem == "mips64-linux" &&
+ seccomp_arch_add(ctx, SCMP_ARCH_MIPS64N32) != 0)
+ printError("unable to add mips64-*abin32 seccomp architecture");
+
+ if (nativeSystem == "mips64el-linux" &&
+ seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL) != 0)
+ printError("unable to add mipsel seccomp architecture");
+
+ if (nativeSystem == "mips64el-linux" &&
+ seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL64N32) != 0)
+ printError("unable to add mips64el-*abin32 seccomp architecture");
+
/* Prevent builders from creating setuid/setgid binaries. */
for (int perm : { S_ISUID, S_ISGID }) {
if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1,
@@ -1763,6 +1872,13 @@ void LocalDerivationGoal::runChild()
if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1)
throw SysError("mounting /proc");
+ /* Mount sysfs on /sys. */
+ if (buildUser && buildUser->getUIDCount() != 1) {
+ createDirs(chrootRootDir + "/sys");
+ if (mount("none", (chrootRootDir + "/sys").c_str(), "sysfs", 0, 0) == -1)
+ throw SysError("mounting /sys");
+ }
+
/* Mount a new tmpfs on /dev/shm to ensure that whatever
the builder puts in /dev/shm is cleaned up automatically. */
if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0,
@@ -1805,6 +1921,12 @@ void LocalDerivationGoal::runChild()
if (unshare(CLONE_NEWNS) == -1)
throw SysError("unsharing mount namespace");
+ /* Unshare the cgroup namespace. This means
+ /proc/self/cgroup will show the child's cgroup as '/'
+ rather than whatever it is in the parent. */
+ if (cgroup && unshare(CLONE_NEWCGROUP) == -1)
+ throw SysError("unsharing cgroup namespace");
+
/* Do the chroot(). */
if (chdir(chrootRootDir.c_str()) == -1)
throw SysError("cannot change directory to '%1%'", chrootRootDir);
@@ -1890,9 +2012,8 @@ void LocalDerivationGoal::runChild()
if (setUser && buildUser) {
/* Preserve supplementary groups of the build user, to allow
admins to specify groups such as "kvm". */
- if (!buildUser->getSupplementaryGIDs().empty() &&
- setgroups(buildUser->getSupplementaryGIDs().size(),
- buildUser->getSupplementaryGIDs().data()) == -1)
+ auto gids = buildUser->getSupplementaryGIDs();
+ if (setgroups(gids.size(), gids.data()) == -1)
throw SysError("cannot set supplementary groups of build user");
if (setgid(buildUser->getGID()) == -1 ||
@@ -2139,7 +2260,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
InodesSeen inodesSeen;
Path checkSuffix = ".check";
- bool keepPreviousRound = settings.keepFailed || settings.runDiffHook;
std::exception_ptr delayedException;
@@ -2221,7 +2341,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Canonicalise first. This ensures that the path we're
rewriting doesn't contain a hard link to /etc/shadow or
something like that. */
- canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, inodesSeen);
+ canonicalisePathMetaData(
+ actualPath,
+ buildUser ? std::optional(buildUser->getUIDRange()) : std::nullopt,
+ inodesSeen);
debug("scanning for references for output '%s' in temp location '%s'", outputName, actualPath);
@@ -2314,6 +2437,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
sink.s = rewriteStrings(sink.s, outputRewrites);
StringSource source(sink.s);
restorePath(actualPath, source);
+
+ /* FIXME: set proper permissions in restorePath() so
+ we don't have to do another traversal. */
+ canonicalisePathMetaData(actualPath, {}, inodesSeen);
}
};
@@ -2476,7 +2603,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
- canonicalisePathMetaData(actualPath, -1, inodesSeen);
+ canonicalisePathMetaData(actualPath, {}, inodesSeen);
/* Calculate where we'll move the output files. In the checking case we
will leave them where they are, for now, rather than move to
@@ -2560,10 +2687,8 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
debug("unreferenced input: '%1%'", worker.store.printStorePath(i));
}
- if (curRound == nrRounds) {
- localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
- worker.markContentsGood(newInfo.path);
- }
+ localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
+ worker.markContentsGood(newInfo.path);
newInfo.deriver = drvPath;
newInfo.ultimate = true;
@@ -2592,61 +2717,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Apply output checks. */
checkOutputs(infos);
- /* Compare the result with the previous round, and report which
- path is different, if any.*/
- if (curRound > 1 && prevInfos != infos) {
- assert(prevInfos.size() == infos.size());
- for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
- if (!(*i == *j)) {
- buildResult.isNonDeterministic = true;
- Path prev = worker.store.printStorePath(i->second.path) + checkSuffix;
- bool prevExists = keepPreviousRound && pathExists(prev);
- hintformat hint = prevExists
- ? hintfmt("output '%s' of '%s' differs from '%s' from previous round",
- worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath), prev)
- : hintfmt("output '%s' of '%s' differs from previous round",
- worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath));
-
- handleDiffHook(
- buildUser ? buildUser->getUID() : getuid(),
- buildUser ? buildUser->getGID() : getgid(),
- prev, worker.store.printStorePath(i->second.path),
- worker.store.printStorePath(drvPath), tmpDir);
-
- if (settings.enforceDeterminism)
- throw NotDeterministic(hint);
-
- printError(hint);
-
- curRound = nrRounds; // we know enough, bail out early
- }
- }
-
- /* If this is the first round of several, then move the output out of the way. */
- if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && keepPreviousRound) {
- for (auto & [_, outputStorePath] : finalOutputs) {
- auto path = worker.store.printStorePath(outputStorePath);
- Path prev = path + checkSuffix;
- deletePath(prev);
- Path dst = path + checkSuffix;
- renameFile(path, dst);
- }
- }
-
- if (curRound < nrRounds) {
- prevInfos = std::move(infos);
- return {};
- }
-
- /* Remove the .check directories if we're done. FIXME: keep them
- if the result was not determistic? */
- if (curRound == nrRounds) {
- for (auto & [_, outputStorePath] : finalOutputs) {
- Path prev = worker.store.printStorePath(outputStorePath) + checkSuffix;
- deletePath(prev);
- }
- }
-
/* Register each output path as valid, and register the sets of
paths referenced by each of them. If there are cycles in the
outputs, this will fail. */
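
Editor's note: the uid_map/gid_map hunk above now maps a whole range of host UIDs into the sandbox's user namespace when the build user owns more than one UID. A minimal standalone sketch of what those writes amount to; the helper is illustrative, and the concrete host UID is the documented Linux default start-id (872415232 = 0x34000000), used here only as an example:

```
// Illustrative sketch only: the string written to /proc/<pid>/uid_map by the
// hunk above, for a uid-range build user. sandboxUid() is 0 in that case.
#include <cstdio>

int main()
{
    unsigned sandboxUid = 0;          // root inside the user namespace
    unsigned hostUid    = 872415232;  // first UID of the allocated range (example value)
    unsigned nrIds      = 65536;      // buildUser->getUIDCount() for uid-range builds

    char line[64];
    std::snprintf(line, sizeof line, "%u %u %u", sandboxUid, hostUid, nrIds);
    std::printf("/proc/<pid>/uid_map would contain: %s\n", line);
    return 0;
}
```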
diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh
index d456e9cae..34c4e9187 100644
--- a/src/libstore/build/local-derivation-goal.hh
+++ b/src/libstore/build/local-derivation-goal.hh
@@ -15,6 +15,9 @@ struct LocalDerivationGoal : public DerivationGoal
/* The process ID of the builder. */
Pid pid;
+ /* The cgroup of the builder, if any. */
+ std::optional<Path> cgroup;
+
/* The temporary directory. */
Path tmpDir;
@@ -92,8 +95,8 @@ struct LocalDerivationGoal : public DerivationGoal
result. */
std::map<Path, ValidPathInfo> prevInfos;
- uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
- gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); }
+ uid_t sandboxUid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 1000 : 0) : buildUser->getUID(); }
+ gid_t sandboxGid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 100 : 0) : buildUser->getGID(); }
const static Path homeDir;
@@ -197,6 +200,10 @@ struct LocalDerivationGoal : public DerivationGoal
/* Forcibly kill the child process, if any. */
void killChild() override;
+ /* Kill any processes running under the build user UID or in the
+ cgroup of the build. */
+ void killSandbox(bool getStats);
+
/* Create alternative path calculated from but distinct from the
input, so we can avoid overwriting outputs (or other store paths)
that already exist. */
diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc
index 48dd5c247..12596ba49 100644
--- a/src/libstore/daemon.cc
+++ b/src/libstore/daemon.cc
@@ -238,7 +238,6 @@ struct ClientSettings
}
else if (trusted
|| name == settings.buildTimeout.name
- || name == settings.buildRepeat.name
|| name == settings.maxSilentTime.name
|| name == settings.pollInterval.name
|| name == "connect-timeout"
diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc
index 44587ae78..88b59f615 100644
--- a/src/libstore/derived-path.cc
+++ b/src/libstore/derived-path.cc
@@ -53,28 +53,13 @@ StorePathSet BuiltPath::outPaths() const
);
}
-template<typename T>
-nlohmann::json stuffToJSON(const std::vector<T> & ts, ref<Store> store) {
- auto res = nlohmann::json::array();
- for (const T & t : ts) {
- std::visit([&res, store](const auto & t) {
- res.push_back(t.toJSON(store));
- }, t.raw());
- }
- return res;
-}
-
-nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store)
-{ return stuffToJSON<BuiltPath>(buildables, store); }
-nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
-{ return stuffToJSON<DerivedPath>(paths, store); }
-
-
-std::string DerivedPath::Opaque::to_string(const Store & store) const {
+std::string DerivedPath::Opaque::to_string(const Store & store) const
+{
return store.printStorePath(path);
}
-std::string DerivedPath::Built::to_string(const Store & store) const {
+std::string DerivedPath::Built::to_string(const Store & store) const
+{
return store.printStorePath(drvPath)
+ "!"
+ (outputs.empty() ? std::string { "*" } : concatStringsSep(",", outputs));
diff --git a/src/libstore/derived-path.hh b/src/libstore/derived-path.hh
index 24a0ae773..878696136 100644
--- a/src/libstore/derived-path.hh
+++ b/src/libstore/derived-path.hh
@@ -125,7 +125,4 @@ struct BuiltPath : _BuiltPathRaw {
typedef std::vector<DerivedPath> DerivedPaths;
typedef std::vector<BuiltPath> BuiltPaths;
-nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store);
-nlohmann::json derivedPathsToJSON(const DerivedPaths & , ref<Store> store);
-
}
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index 9ef8972f3..5d91829f1 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -147,7 +147,7 @@ void LocalStore::addTempRoot(const StorePath & path)
} catch (SysError & e) {
/* The garbage collector may have exited, so we need to
restart. */
- if (e.errNo == EPIPE) {
+ if (e.errNo == EPIPE || e.errNo == ECONNRESET) {
debug("GC socket disconnected");
state->fdRootsSocket.close();
goto restart;
@@ -506,6 +506,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
Finally cleanup([&]() {
debug("GC roots server shutting down");
+ fdServer.close();
while (true) {
auto item = remove_begin(*connections.lock());
if (!item) break;
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index ff658c428..b7f55cae7 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -131,6 +131,10 @@ StringSet Settings::getDefaultSystemFeatures()
StringSet features{"nixos-test", "benchmark", "big-parallel"};
#if __linux__
+ features.insert("uid-range");
+ #endif
+
+ #if __linux__
if (access("/dev/kvm", R_OK | W_OK) == 0)
features.insert("kvm");
#endif
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 3dcf3d479..54a5d0fc7 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -46,6 +46,14 @@ struct PluginFilesSetting : public BaseSetting<Paths>
void set(const std::string & str, bool append = false) override;
};
+const uint32_t maxIdsPerBuild =
+ #if __linux__
+ 1 << 16
+ #else
+ 1
+ #endif
+ ;
+
class Settings : public Config {
unsigned int getDefaultCores();
@@ -275,6 +283,64 @@ public:
multi-user settings with untrusted users.
)"};
+ Setting<bool> autoAllocateUids{this, false, "auto-allocate-uids",
+ R"(
+ Whether to select UIDs for builds automatically, instead of using the
+ users in `build-users-group`.
+
+ UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS.
+
+ > **Warning**
+ > This is an experimental feature.
+
+ To enable it, add the following to [`nix.conf`](#):
+
+ ```
+ extra-experimental-features = auto-allocate-uids
+ auto-allocate-uids = true
+ ```
+ )"};
+
+ Setting<uint32_t> startId{this,
+ #if __linux__
+ 0x34000000,
+ #else
+ 56930,
+ #endif
+ "start-id",
+ "The first UID and GID to use for dynamic ID allocation."};
+
+ Setting<uint32_t> uidCount{this,
+ #if __linux__
+ maxIdsPerBuild * 128,
+ #else
+ 128,
+ #endif
+ "id-count",
+ "The number of UIDs/GIDs to use for dynamic ID allocation."};
+
+ #if __linux__
+ Setting<bool> useCgroups{
+ this, false, "use-cgroups",
+ R"(
+ Whether to execute builds inside cgroups.
+ This is only supported on Linux.
+
+ Cgroups are required and enabled automatically for derivations
+ that require the `uid-range` system feature.
+
+ > **Warning**
+ > This is an experimental feature.
+
+ To enable it, add the following to [`nix.conf`](#):
+
+ ```
+ extra-experimental-features = cgroups
+ use-cgroups = true
+ ```
+ )"};
+ #endif
+
Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
"Whether to impersonate a Linux 2.6 machine on newer kernels.",
{"build-impersonate-linux-26"}};
@@ -307,11 +373,6 @@ public:
)",
{"build-max-log-size"}};
- /* When buildRepeat > 0 and verboseBuild == true, whether to print
- repeated builds (i.e. builds other than the first one) to
- stderr. Hack to prevent Hydra logs from being polluted. */
- bool printRepeatedBuilds = true;
-
Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
"How often (in seconds) to poll for locks."};
@@ -435,19 +496,6 @@ public:
Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
"Whether to disable sandboxing when the kernel doesn't allow it."};
- Setting<size_t> buildRepeat{
- this, 0, "repeat",
- R"(
- How many times to repeat builds to check whether they are
- deterministic. The default value is 0. If the value is non-zero,
- every build is repeated the specified number of times. If the
- contents of any of the runs differs from the previous ones and
- `enforce-determinism` is true, the build is rejected and the
- resulting store paths are not registered as “valid” in Nix’s
- database.
- )",
- {"build-repeat"}};
-
#if __linux__
Setting<std::string> sandboxShmSize{
this, "50%", "sandbox-dev-shm-size",
@@ -511,10 +559,6 @@ public:
configuration file, and cannot be passed at the command line.
)"};
- Setting<bool> enforceDeterminism{
- this, true, "enforce-determinism",
- "Whether to fail if repeated builds produce different output. See `repeat`."};
-
Setting<Strings> trustedPublicKeys{
this,
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
@@ -563,10 +607,10 @@ public:
cache) must have a signature by a trusted key. A trusted key is one
listed in `trusted-public-keys`, or a public key counterpart to a
private key stored in a file listed in `secret-key-files`.
-
+
Set to `false` to disable signature checking and trust all
non-content-addressed paths unconditionally.
-
+
(Content-addressed paths are inherently trustworthy and thus
unaffected by this configuration option.)
)"};
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index dd34b19c6..4d398b21d 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -255,8 +255,8 @@ private:
<< settings.maxLogSize;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3)
conn.to
- << settings.buildRepeat
- << settings.enforceDeterminism;
+ << 0 // buildRepeat hasn't worked for ages anyway
+ << 0;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 7) {
conn.to << ((int) settings.keepFailed);
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index d374d4558..b67668e52 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -583,7 +583,10 @@ void canonicaliseTimestampAndPermissions(const Path & path)
}
-static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
+static void canonicalisePathMetaData_(
+ const Path & path,
+ std::optional<std::pair<uid_t, uid_t>> uidRange,
+ InodesSeen & inodesSeen)
{
checkInterrupt();
@@ -630,7 +633,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
However, ignore files that we chown'ed ourselves previously to
ensure that we don't fail on hard links within the same build
(i.e. "touch $out/foo; ln $out/foo $out/bar"). */
- if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
+ if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) {
if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino)))
throw BuildError("invalid ownership on file '%1%'", path);
mode_t mode = st.st_mode & ~S_IFMT;
@@ -663,14 +666,17 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
if (S_ISDIR(st.st_mode)) {
DirEntries entries = readDirectory(path);
for (auto & i : entries)
- canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen);
+ canonicalisePathMetaData_(path + "/" + i.name, uidRange, inodesSeen);
}
}
-void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
+void canonicalisePathMetaData(
+ const Path & path,
+ std::optional<std::pair<uid_t, uid_t>> uidRange,
+ InodesSeen & inodesSeen)
{
- canonicalisePathMetaData_(path, fromUid, inodesSeen);
+ canonicalisePathMetaData_(path, uidRange, inodesSeen);
/* On platforms that don't have lchown(), the top-level path can't
be a symlink, since we can't change its ownership. */
@@ -683,10 +689,11 @@ void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & ino
}
-void canonicalisePathMetaData(const Path & path, uid_t fromUid)
+void canonicalisePathMetaData(const Path & path,
+ std::optional<std::pair<uid_t, uid_t>> uidRange)
{
InodesSeen inodesSeen;
- canonicalisePathMetaData(path, fromUid, inodesSeen);
+ canonicalisePathMetaData(path, uidRange, inodesSeen);
}
@@ -1331,7 +1338,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
autoGC();
- canonicalisePathMetaData(realPath, -1);
+ canonicalisePathMetaData(realPath, {});
optimisePath(realPath, repair); // FIXME: combine with hashPath()
@@ -1444,7 +1451,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
narHash = narSink.finish();
}
- canonicalisePathMetaData(realPath, -1); // FIXME: merge into restorePath
+ canonicalisePathMetaData(realPath, {}); // FIXME: merge into restorePath
optimisePath(realPath, repair);
@@ -1486,7 +1493,7 @@ StorePath LocalStore::addTextToStore(
writeFile(realPath, s);
- canonicalisePathMetaData(realPath, -1);
+ canonicalisePathMetaData(realPath, {});
StringSink sink;
dumpString(s, sink);
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index bd0ce1fe6..4579c2f62 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -310,9 +310,18 @@ typedef std::set<Inode> InodesSeen;
- the permissions are set to 444 or 555 (i.e., read-only with or
without execute permission; setuid bits etc. are cleared)
- the owner and group are set to the Nix user and group, if we're
- running as root. */
-void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
-void canonicalisePathMetaData(const Path & path, uid_t fromUid);
+ running as root.
+ If uidRange is not empty, this function will throw an error if it
+ encounters files owned by a user outside of the closed interval
+ [uidRange->first, uidRange->second].
+*/
+void canonicalisePathMetaData(
+ const Path & path,
+ std::optional<std::pair<uid_t, uid_t>> uidRange,
+ InodesSeen & inodesSeen);
+void canonicalisePathMetaData(
+ const Path & path,
+ std::optional<std::pair<uid_t, uid_t>> uidRange);
void canonicaliseTimestampAndPermissions(const Path & path);
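
Editor's note: a minimal usage sketch for the new overloads declared above (function name and includes are illustrative): a single-UID build user passes a degenerate range, while `{}`/`std::nullopt` skips the ownership check entirely, matching the old `-1` behaviour.

```
// Sketch only; relies on the declarations in local-store.hh above.
#include "local-store.hh"

#include <optional>
#include <utility>

void canonicaliseExample(const nix::Path & outputPath, uid_t buildUid)
{
    // Equivalent of the old canonicalisePathMetaData(path, buildUid):
    nix::canonicalisePathMetaData(outputPath, std::pair<uid_t, uid_t>{buildUid, buildUid});

    // Equivalent of the old canonicalisePathMetaData(path, -1): no ownership check.
    nix::canonicalisePathMetaData(outputPath, std::nullopt);
}
```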
diff --git a/src/libstore/lock.cc b/src/libstore/lock.cc
index fa718f55d..2858137d6 100644
--- a/src/libstore/lock.cc
+++ b/src/libstore/lock.cc
@@ -2,105 +2,197 @@
#include "globals.hh"
#include "pathlocks.hh"
-#include <grp.h>
#include <pwd.h>
-
-#include <fcntl.h>
-#include <unistd.h>
+#include <grp.h>
namespace nix {
-UserLock::UserLock()
+struct SimpleUserLock : UserLock
{
- assert(settings.buildUsersGroup != "");
- createDirs(settings.nixStateDir + "/userpool");
-}
+ AutoCloseFD fdUserLock;
+ uid_t uid;
+ gid_t gid;
+ std::vector<gid_t> supplementaryGIDs;
+
+ uid_t getUID() override { assert(uid); return uid; }
+ uid_t getUIDCount() override { return 1; }
+ gid_t getGID() override { assert(gid); return gid; }
+
+ std::vector<gid_t> getSupplementaryGIDs() override { return supplementaryGIDs; }
+
+ static std::unique_ptr<UserLock> acquire()
+ {
+ assert(settings.buildUsersGroup != "");
+ createDirs(settings.nixStateDir + "/userpool");
+
+ /* Get the members of the build-users-group. */
+ struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
+ if (!gr)
+ throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
+
+ /* Copy the result of getgrnam. */
+ Strings users;
+ for (char * * p = gr->gr_mem; *p; ++p) {
+ debug("found build user '%s'", *p);
+ users.push_back(*p);
+ }
-bool UserLock::findFreeUser() {
- if (enabled()) return true;
-
- /* Get the members of the build-users-group. */
- struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
- if (!gr)
- throw Error("the group '%1%' specified in 'build-users-group' does not exist",
- settings.buildUsersGroup);
- gid = gr->gr_gid;
-
- /* Copy the result of getgrnam. */
- Strings users;
- for (char * * p = gr->gr_mem; *p; ++p) {
- debug("found build user '%1%'", *p);
- users.push_back(*p);
+ if (users.empty())
+ throw Error("the build users group '%s' has no members", settings.buildUsersGroup);
+
+ /* Find a user account that isn't currently in use for another
+ build. */
+ for (auto & i : users) {
+ debug("trying user '%s'", i);
+
+ struct passwd * pw = getpwnam(i.c_str());
+ if (!pw)
+ throw Error("the user '%s' in the group '%s' does not exist", i, settings.buildUsersGroup);
+
+ auto fnUserLock = fmt("%s/userpool/%s", settings.nixStateDir,pw->pw_uid);
+
+ AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
+ if (!fd)
+ throw SysError("opening user lock '%s'", fnUserLock);
+
+ if (lockFile(fd.get(), ltWrite, false)) {
+ auto lock = std::make_unique<SimpleUserLock>();
+
+ lock->fdUserLock = std::move(fd);
+ lock->uid = pw->pw_uid;
+ lock->gid = gr->gr_gid;
+
+ /* Sanity check... */
+ if (lock->uid == getuid() || lock->uid == geteuid())
+ throw Error("the Nix user should not be a member of '%s'", settings.buildUsersGroup);
+
+ #if __linux__
+ /* Get the list of supplementary groups of this build
+ user. This is usually either empty or contains a
+ group such as "kvm". */
+ int ngroups = 32; // arbitrary initial guess
+ std::vector<gid_t> gids;
+ gids.resize(ngroups);
+
+ int err = getgrouplist(
+ pw->pw_name, pw->pw_gid,
+ gids.data(),
+ &ngroups);
+
+ /* Our initial size of 32 wasn't sufficient, the
+ correct size has been stored in ngroups, so we try
+ again. */
+ if (err == -1) {
+ gids.resize(ngroups);
+ err = getgrouplist(
+ pw->pw_name, pw->pw_gid,
+ gids.data(),
+ &ngroups);
+ }
+
+ // If it failed once more, then something must be broken.
+ if (err == -1)
+ throw Error("failed to get list of supplementary groups for '%s'", pw->pw_name);
+
+ // Finally, keep the retrieved GIDs, skipping the build user's primary group.
+ for (auto i = 0; i < ngroups; i++)
+ if (gids[i] != lock->gid)
+ lock->supplementaryGIDs.push_back(gids[i]);
+ #endif
+
+ return lock;
+ }
+ }
+
+ return nullptr;
}
+};
- if (users.empty())
- throw Error("the build users group '%1%' has no members",
- settings.buildUsersGroup);
+struct AutoUserLock : UserLock
+{
+ AutoCloseFD fdUserLock;
+ uid_t firstUid = 0;
+ gid_t firstGid = 0;
+ uid_t nrIds = 1;
- /* Find a user account that isn't currently in use for another
- build. */
- for (auto & i : users) {
- debug("trying user '%1%'", i);
+ uid_t getUID() override { assert(firstUid); return firstUid; }
- struct passwd * pw = getpwnam(i.c_str());
- if (!pw)
- throw Error("the user '%1%' in the group '%2%' does not exist",
- i, settings.buildUsersGroup);
+ gid_t getUIDCount() override { return nrIds; }
+ gid_t getGID() override { assert(firstGid); return firstGid; }
- fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
+ std::vector<gid_t> getSupplementaryGIDs() override { return {}; }
- AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
- if (!fd)
- throw SysError("opening user lock '%1%'", fnUserLock);
+ static std::unique_ptr<UserLock> acquire(uid_t nrIds, bool useChroot)
+ {
+ settings.requireExperimentalFeature(Xp::AutoAllocateUids);
+ assert(settings.startId > 0);
+ assert(settings.uidCount % maxIdsPerBuild == 0);
+ assert((uint64_t) settings.startId + (uint64_t) settings.uidCount <= std::numeric_limits<uid_t>::max());
+ assert(nrIds <= maxIdsPerBuild);
- if (lockFile(fd.get(), ltWrite, false)) {
- fdUserLock = std::move(fd);
- user = i;
- uid = pw->pw_uid;
+ createDirs(settings.nixStateDir + "/userpool2");
- /* Sanity check... */
- if (uid == getuid() || uid == geteuid())
- throw Error("the Nix user should not be a member of '%1%'",
- settings.buildUsersGroup);
+ size_t nrSlots = settings.uidCount / maxIdsPerBuild;
-#if __linux__
- /* Get the list of supplementary groups of this build user. This
- is usually either empty or contains a group such as "kvm". */
- int ngroups = 32; // arbitrary initial guess
- supplementaryGIDs.resize(ngroups);
+ for (size_t i = 0; i < nrSlots; i++) {
+ debug("trying user slot '%d'", i);
- int err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
- &ngroups);
+ createDirs(settings.nixStateDir + "/userpool2");
- // Our initial size of 32 wasn't sufficient, the correct size has
- // been stored in ngroups, so we try again.
- if (err == -1) {
- supplementaryGIDs.resize(ngroups);
- err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
- &ngroups);
- }
+ auto fnUserLock = fmt("%s/userpool2/slot-%d", settings.nixStateDir, i);
+
+ AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
+ if (!fd)
+ throw SysError("opening user lock '%s'", fnUserLock);
- // If it failed once more, then something must be broken.
- if (err == -1)
- throw Error("failed to get list of supplementary groups for '%1%'",
- pw->pw_name);
+ if (lockFile(fd.get(), ltWrite, false)) {
- // Finally, trim back the GID list to its real size
- supplementaryGIDs.resize(ngroups);
-#endif
+ auto firstUid = settings.startId + i * maxIdsPerBuild;
- isEnabled = true;
- return true;
+ auto pw = getpwuid(firstUid);
+ if (pw)
+ throw Error("auto-allocated UID %d clashes with existing user account '%s'", firstUid, pw->pw_name);
+
+ auto lock = std::make_unique<AutoUserLock>();
+ lock->fdUserLock = std::move(fd);
+ lock->firstUid = firstUid;
+ if (useChroot)
+ lock->firstGid = firstUid;
+ else {
+ struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
+ if (!gr)
+ throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
+ lock->firstGid = gr->gr_gid;
+ }
+ lock->nrIds = nrIds;
+ return lock;
+ }
}
+
+ return nullptr;
}
+};
- return false;
+std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot)
+{
+ if (settings.autoAllocateUids)
+ return AutoUserLock::acquire(nrIds, useChroot);
+ else
+ return SimpleUserLock::acquire();
}
-void UserLock::kill()
+bool useBuildUsers()
{
- killUser(uid);
+ #if __linux__
+ static bool b = (settings.buildUsersGroup != "" || settings.startId.get() != 0) && getuid() == 0;
+ return b;
+ #elif __APPLE__
+ static bool b = settings.buildUsersGroup != "" && getuid() == 0;
+ return b;
+ #else
+ return false;
+ #endif
}
}
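
Editor's note: the `getgrouplist` retry in `SimpleUserLock::acquire` above follows the usual glibc protocol: call with a guessed buffer size, and if the call returns -1 the required size has been written back into `ngroups`, so resize and call again. A standalone sketch of just that pattern (Linux prototype, where the buffer is `gid_t *`; the helper name is illustrative):

```
#include <grp.h>
#include <stdexcept>
#include <string>
#include <vector>

// Returns the groups of `user` (its primary group `gid` included).
std::vector<gid_t> supplementaryGroups(const std::string & user, gid_t gid)
{
    int ngroups = 32;                 // arbitrary initial guess
    std::vector<gid_t> gids(ngroups);

    if (getgrouplist(user.c_str(), gid, gids.data(), &ngroups) == -1) {
        gids.resize(ngroups);         // libc reported the real size in ngroups
        if (getgrouplist(user.c_str(), gid, gids.data(), &ngroups) == -1)
            throw std::runtime_error("getgrouplist failed for " + user);
    }
    gids.resize(ngroups);             // trim to the number actually filled in
    return gids;
}
```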
diff --git a/src/libstore/lock.hh b/src/libstore/lock.hh
index 3d29a7b5b..49ad86de7 100644
--- a/src/libstore/lock.hh
+++ b/src/libstore/lock.hh
@@ -1,37 +1,38 @@
#pragma once
-#include "sync.hh"
#include "types.hh"
-#include "util.hh"
+
+#include <optional>
+
+#include <sys/types.h>
namespace nix {
-class UserLock
+struct UserLock
{
-private:
- Path fnUserLock;
- AutoCloseFD fdUserLock;
+ virtual ~UserLock() { }
- bool isEnabled = false;
- std::string user;
- uid_t uid = 0;
- gid_t gid = 0;
- std::vector<gid_t> supplementaryGIDs;
+ /* Get the first and last UID. */
+ std::pair<uid_t, uid_t> getUIDRange()
+ {
+ auto first = getUID();
+ return {first, first + getUIDCount() - 1};
+ }
-public:
- UserLock();
+ /* Get the first UID. */
+ virtual uid_t getUID() = 0;
- void kill();
+ virtual uid_t getUIDCount() = 0;
- std::string getUser() { return user; }
- uid_t getUID() { assert(uid); return uid; }
- uid_t getGID() { assert(gid); return gid; }
- std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }
+ virtual gid_t getGID() = 0;
- bool findFreeUser();
+ virtual std::vector<gid_t> getSupplementaryGIDs() = 0;
+};
- bool enabled() { return isEnabled; }
+/* Acquire a user lock for a UID range of size `nrIds`. Note that this
+ may return nullptr if no user is available. */
+std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot);
-};
+bool useBuildUsers();
}
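
Editor's note: a minimal sketch of how a caller uses the new interface, mirroring the `tryLocalBuild` change earlier in this patch: request one UID (or a 65536-UID range for `uid-range` builds) and treat a null result as "all users/slots busy, retry later". Function name is illustrative.

```
// Sketch only; relies on lock.hh above.
#include "lock.hh"

#include <cstdio>
#include <memory>

void tryAcquire(bool wantsUidRange, bool useChroot)
{
    auto lock = nix::acquireUserLock(wantsUidRange ? 65536 : 1, useChroot);
    if (!lock) {
        // No free build user or UID slot; the caller is expected to wait and retry.
        return;
    }

    auto [first, last] = lock->getUIDRange();
    std::printf("building as UID %u (range %u..%u), GID %u\n",
        (unsigned) lock->getUID(), (unsigned) first, (unsigned) last,
        (unsigned) lock->getGID());
    // The user-pool lock file is released when the unique_ptr is destroyed.
}
```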
diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc
index 398147fc3..9a0003588 100644
--- a/src/libstore/nar-accessor.cc
+++ b/src/libstore/nar-accessor.cc
@@ -1,6 +1,5 @@
#include "nar-accessor.hh"
#include "archive.hh"
-#include "json.hh"
#include <map>
#include <stack>
@@ -243,42 +242,43 @@ ref<FSAccessor> makeLazyNarAccessor(const std::string & listing,
return make_ref<NarAccessor>(listing, getNarBytes);
}
-void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
- const Path & path, bool recurse)
+using nlohmann::json;
+json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse)
{
auto st = accessor->stat(path);
- auto obj = res.object();
+ json obj = json::object();
switch (st.type) {
case FSAccessor::Type::tRegular:
- obj.attr("type", "regular");
- obj.attr("size", st.fileSize);
+ obj["type"] = "regular";
+ obj["size"] = st.fileSize;
if (st.isExecutable)
- obj.attr("executable", true);
+ obj["executable"] = true;
if (st.narOffset)
- obj.attr("narOffset", st.narOffset);
+ obj["narOffset"] = st.narOffset;
break;
case FSAccessor::Type::tDirectory:
- obj.attr("type", "directory");
+ obj["type"] = "directory";
{
- auto res2 = obj.object("entries");
+ obj["entries"] = json::object();
+ json &res2 = obj["entries"];
for (auto & name : accessor->readDirectory(path)) {
if (recurse) {
- auto res3 = res2.placeholder(name);
- listNar(res3, accessor, path + "/" + name, true);
+ res2[name] = listNar(accessor, path + "/" + name, true);
} else
- res2.object(name);
+ res2[name] = json::object();
}
}
break;
case FSAccessor::Type::tSymlink:
- obj.attr("type", "symlink");
- obj.attr("target", accessor->readLink(path));
+ obj["type"] = "symlink";
+ obj["target"] = accessor->readLink(path);
break;
default:
throw Error("path '%s' does not exist in NAR", path);
}
+ return obj;
}
}
diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh
index c2241a04c..7d998ae0b 100644
--- a/src/libstore/nar-accessor.hh
+++ b/src/libstore/nar-accessor.hh
@@ -2,6 +2,7 @@
#include <functional>
+#include <nlohmann/json_fwd.hpp>
#include "fs-accessor.hh"
namespace nix {
@@ -24,11 +25,8 @@ ref<FSAccessor> makeLazyNarAccessor(
const std::string & listing,
GetNarBytes getNarBytes);
-class JSONPlaceholder;
-
/* Write a JSON representation of the contents of a NAR (except file
contents). */
-void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
- const Path & path, bool recurse);
+nlohmann::json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse);
}
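
Editor's note: a minimal sketch of the new `listNar` signature, assuming an `FSAccessor` for a store path is already at hand (how it is obtained is outside this patch; the helper name is illustrative):

```
// Sketch only; relies on nar-accessor.hh above.
#include "nar-accessor.hh"

#include <nlohmann/json.hpp>
#include <iostream>

void printListing(nix::ref<nix::FSAccessor> accessor)
{
    // Recursive listing of the whole NAR as JSON,
    // e.g. {"type":"directory","entries":{...}}.
    nlohmann::json ls = nix::listNar(accessor, "", true);
    std::cout << ls.dump(2) << std::endl;
}
```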
diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc
index f2288a04e..cc4a94fab 100644
--- a/src/libstore/parsed-derivations.cc
+++ b/src/libstore/parsed-derivations.cc
@@ -2,7 +2,6 @@
#include <nlohmann/json.hpp>
#include <regex>
-#include "json.hh"
namespace nix {
@@ -90,6 +89,7 @@ std::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name
StringSet ParsedDerivation::getRequiredSystemFeatures() const
{
+ // FIXME: cache this?
StringSet res;
for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
res.insert(i);
@@ -125,6 +125,11 @@ bool ParsedDerivation::substitutesAllowed() const
return getBoolAttr("allowSubstitutes", true);
}
+bool ParsedDerivation::useUidRange() const
+{
+ return getRequiredSystemFeatures().count("uid-range");
+}
+
static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths)
@@ -144,16 +149,11 @@ std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & s
auto e = json.find("exportReferencesGraph");
if (e != json.end() && e->is_object()) {
for (auto i = e->begin(); i != e->end(); ++i) {
- std::ostringstream str;
- {
- JSONPlaceholder jsonRoot(str, true);
- StorePathSet storePaths;
- for (auto & p : *i)
- storePaths.insert(store.parseStorePath(p.get<std::string>()));
- store.pathInfoToJSON(jsonRoot,
- store.exportReferences(storePaths, inputPaths), false, true);
- }
- json[i.key()] = nlohmann::json::parse(str.str()); // urgh
+ StorePathSet storePaths;
+ for (auto & p : *i)
+ storePaths.insert(store.parseStorePath(p.get<std::string>()));
+ json[i.key()] = store.pathInfoToJSON(
+ store.exportReferences(storePaths, inputPaths), false, true);
}
}
diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh
index 95bec21e8..bfb3857c0 100644
--- a/src/libstore/parsed-derivations.hh
+++ b/src/libstore/parsed-derivations.hh
@@ -38,6 +38,8 @@ public:
bool substitutesAllowed() const;
+ bool useUidRange() const;
+
std::optional<nlohmann::json> prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths);
};
diff --git a/src/libstore/remote-fs-accessor.cc b/src/libstore/remote-fs-accessor.cc
index 0ce335646..fcfb527f5 100644
--- a/src/libstore/remote-fs-accessor.cc
+++ b/src/libstore/remote-fs-accessor.cc
@@ -1,6 +1,6 @@
+#include <nlohmann/json.hpp>
#include "remote-fs-accessor.hh"
#include "nar-accessor.hh"
-#include "json.hh"
#include <sys/types.h>
#include <sys/stat.h>
@@ -38,10 +38,8 @@ ref<FSAccessor> RemoteFSAccessor::addToCache(std::string_view hashPart, std::str
if (cacheDir != "") {
try {
- std::ostringstream str;
- JSONPlaceholder jsonRoot(str);
- listNar(jsonRoot, narAccessor, "", true);
- writeFile(makeCacheFile(hashPart, "ls"), str.str());
+ nlohmann::json j = listNar(narAccessor, "", true);
+ writeFile(makeCacheFile(hashPart, "ls"), j.dump());
} catch (...) {
ignoreException();
}
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 06a9758fc..8811ab578 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -6,14 +6,16 @@
#include "util.hh"
#include "nar-info-disk-cache.hh"
#include "thread-pool.hh"
-#include "json.hh"
#include "url.hh"
#include "archive.hh"
#include "callback.hh"
#include "remote-store.hh"
+#include <nlohmann/json.hpp>
#include <regex>
+using json = nlohmann::json;
+
namespace nix {
@@ -838,56 +840,53 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor
return paths;
}
-
-void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
+json Store::pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase,
AllowInvalidFlag allowInvalid)
{
- auto jsonList = jsonOut.list();
+ json::array_t jsonList = json::array();
for (auto & storePath : storePaths) {
- auto jsonPath = jsonList.object();
+ auto& jsonPath = jsonList.emplace_back(json::object());
try {
auto info = queryPathInfo(storePath);
- jsonPath.attr("path", printStorePath(info->path));
- jsonPath
- .attr("narHash", info->narHash.to_string(hashBase, true))
- .attr("narSize", info->narSize);
+ jsonPath["path"] = printStorePath(info->path);
+ jsonPath["narHash"] = info->narHash.to_string(hashBase, true);
+ jsonPath["narSize"] = info->narSize;
{
- auto jsonRefs = jsonPath.list("references");
+ auto& jsonRefs = (jsonPath["references"] = json::array());
for (auto & ref : info->references)
- jsonRefs.elem(printStorePath(ref));
+ jsonRefs.emplace_back(printStorePath(ref));
}
if (info->ca)
- jsonPath.attr("ca", renderContentAddress(info->ca));
+ jsonPath["ca"] = renderContentAddress(info->ca);
std::pair<uint64_t, uint64_t> closureSizes;
if (showClosureSize) {
closureSizes = getClosureSize(info->path);
- jsonPath.attr("closureSize", closureSizes.first);
+ jsonPath["closureSize"] = closureSizes.first;
}
if (includeImpureInfo) {
if (info->deriver)
- jsonPath.attr("deriver", printStorePath(*info->deriver));
+ jsonPath["deriver"] = printStorePath(*info->deriver);
if (info->registrationTime)
- jsonPath.attr("registrationTime", info->registrationTime);
+ jsonPath["registrationTime"] = info->registrationTime;
if (info->ultimate)
- jsonPath.attr("ultimate", info->ultimate);
+ jsonPath["ultimate"] = info->ultimate;
if (!info->sigs.empty()) {
- auto jsonSigs = jsonPath.list("signatures");
for (auto & sig : info->sigs)
- jsonSigs.elem(sig);
+ jsonPath["signatures"].push_back(sig);
}
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
@@ -895,21 +894,22 @@ void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & store
if (narInfo) {
if (!narInfo->url.empty())
- jsonPath.attr("url", narInfo->url);
+ jsonPath["url"] = narInfo->url;
if (narInfo->fileHash)
- jsonPath.attr("downloadHash", narInfo->fileHash->to_string(hashBase, true));
+ jsonPath["downloadHash"] = narInfo->fileHash->to_string(hashBase, true);
if (narInfo->fileSize)
- jsonPath.attr("downloadSize", narInfo->fileSize);
+ jsonPath["downloadSize"] = narInfo->fileSize;
if (showClosureSize)
- jsonPath.attr("closureDownloadSize", closureSizes.second);
+ jsonPath["closureDownloadSize"] = closureSizes.second;
}
}
} catch (InvalidPath &) {
- jsonPath.attr("path", printStorePath(storePath));
- jsonPath.attr("valid", false);
+ jsonPath["path"] = printStorePath(storePath);
+ jsonPath["valid"] = false;
}
}
+ return jsonList;
}
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index c8a667c6d..151ec10d6 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -14,6 +14,7 @@
#include "path-info.hh"
#include "repair-flag.hh"
+#include <nlohmann/json_fwd.hpp>
#include <atomic>
#include <limits>
#include <map>
@@ -68,7 +69,6 @@ struct Derivation;
class FSAccessor;
class NarInfoDiskCache;
class Store;
-class JSONPlaceholder;
enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true };
@@ -512,7 +512,7 @@ public:
variable elements such as the registration time are
included. If ‘showClosureSize’ is true, the closure size of
each path is included. */
- void pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
+ nlohmann::json pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase = Base32,
AllowInvalidFlag allowInvalid = DisallowInvalid);
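
Editor's note: with the `JSONPlaceholder` plumbing gone, callers receive an `nlohmann::json` array directly. A minimal sketch, assuming an open `Store` and a valid `StorePath` are already available (the helper name is illustrative):

```
// Sketch only; relies on store-api.hh above.
#include "store-api.hh"

#include <nlohmann/json.hpp>
#include <iostream>

void dumpPathInfo(nix::Store & store, const nix::StorePath & path)
{
    nlohmann::json j = store.pathInfoToJSON(
        {path},
        /* includeImpureInfo */ true,
        /* showClosureSize */ false);
    // One JSON object per requested path, e.g. {"path": "...", "narHash": "...", ...}.
    std::cout << j.dump(2) << std::endl;
}
```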
diff --git a/src/libutil/cgroup.cc b/src/libutil/cgroup.cc
new file mode 100644
index 000000000..a008481ca
--- /dev/null
+++ b/src/libutil/cgroup.cc
@@ -0,0 +1,148 @@
+#if __linux__
+
+#include "cgroup.hh"
+#include "util.hh"
+#include "finally.hh"
+
+#include <chrono>
+#include <cmath>
+#include <regex>
+#include <unordered_set>
+#include <thread>
+
+#include <dirent.h>
+#include <mntent.h>
+
+namespace nix {
+
+std::optional<Path> getCgroupFS()
+{
+ static auto res = [&]() -> std::optional<Path> {
+ auto fp = fopen("/proc/mounts", "r");
+ if (!fp) return std::nullopt;
+ Finally delFP = [&]() { fclose(fp); };
+ while (auto ent = getmntent(fp))
+ if (std::string_view(ent->mnt_type) == "cgroup2")
+ return ent->mnt_dir;
+
+ return std::nullopt;
+ }();
+ return res;
+}
+
+// FIXME: obsolete, check for cgroup2
+std::map<std::string, std::string> getCgroups(const Path & cgroupFile)
+{
+ std::map<std::string, std::string> cgroups;
+
+ for (auto & line : tokenizeString<std::vector<std::string>>(readFile(cgroupFile), "\n")) {
+ static std::regex regex("([0-9]+):([^:]*):(.*)");
+ std::smatch match;
+ if (!std::regex_match(line, match, regex))
+ throw Error("invalid line '%s' in '%s'", line, cgroupFile);
+
+ std::string name = hasPrefix(std::string(match[2]), "name=") ? std::string(match[2], 5) : match[2];
+ cgroups.insert_or_assign(name, match[3]);
+ }
+
+ return cgroups;
+}
+
+static CgroupStats destroyCgroup(const Path & cgroup, bool returnStats)
+{
+ if (!pathExists(cgroup)) return {};
+
+ auto procsFile = cgroup + "/cgroup.procs";
+
+ if (!pathExists(procsFile))
+ throw Error("'%s' is not a cgroup", cgroup);
+
+ /* Use the fast way to kill every process in a cgroup, if
+ available. */
+ auto killFile = cgroup + "/cgroup.kill";
+ if (pathExists(killFile))
+ writeFile(killFile, "1");
+
+ /* Otherwise, manually kill every process in the subcgroups and
+ this cgroup. */
+ for (auto & entry : readDirectory(cgroup)) {
+ if (entry.type != DT_DIR) continue;
+ destroyCgroup(cgroup + "/" + entry.name, false);
+ }
+
+ int round = 1;
+
+ std::unordered_set<pid_t> pidsShown;
+
+ while (true) {
+ auto pids = tokenizeString<std::vector<std::string>>(readFile(procsFile));
+
+ if (pids.empty()) break;
+
+ if (round > 20)
+ throw Error("cannot kill cgroup '%s'", cgroup);
+
+ for (auto & pid_s : pids) {
+ pid_t pid;
+ if (auto o = string2Int<pid_t>(pid_s))
+ pid = *o;
+ else
+ throw Error("invalid pid '%s'", pid);
+ if (pidsShown.insert(pid).second) {
+ try {
+ auto cmdline = readFile(fmt("/proc/%d/cmdline", pid));
+ using namespace std::string_literals;
+ warn("killing stray builder process %d (%s)...",
+ pid, trim(replaceStrings(cmdline, "\0"s, " ")));
+ } catch (SysError &) {
+ }
+ }
+ // FIXME: pid wraparound
+ if (kill(pid, SIGKILL) == -1 && errno != ESRCH)
+ throw SysError("killing member %d of cgroup '%s'", pid, cgroup);
+ }
+
+ auto sleep = std::chrono::milliseconds((int) std::pow(2.0, std::min(round, 10)));
+ if (sleep.count() > 100)
+ printError("waiting for %d ms for cgroup '%s' to become empty", sleep.count(), cgroup);
+ std::this_thread::sleep_for(sleep);
+ round++;
+ }
+
+ CgroupStats stats;
+
+ if (returnStats) {
+ auto cpustatPath = cgroup + "/cpu.stat";
+
+ if (pathExists(cpustatPath)) {
+ for (auto & line : tokenizeString<std::vector<std::string>>(readFile(cpustatPath), "\n")) {
+ std::string_view userPrefix = "user_usec ";
+ if (hasPrefix(line, userPrefix)) {
+ auto n = string2Int<uint64_t>(line.substr(userPrefix.size()));
+ if (n) stats.cpuUser = std::chrono::microseconds(*n);
+ }
+
+ std::string_view systemPrefix = "system_usec ";
+ if (hasPrefix(line, systemPrefix)) {
+ auto n = string2Int<uint64_t>(line.substr(systemPrefix.size()));
+ if (n) stats.cpuSystem = std::chrono::microseconds(*n);
+ }
+ }
+ }
+
+ }
+
+ if (rmdir(cgroup.c_str()) == -1)
+ throw SysError("deleting cgroup '%s'", cgroup);
+
+ return stats;
+}
+
+CgroupStats destroyCgroup(const Path & cgroup)
+{
+ return destroyCgroup(cgroup, true);
+}
+
+}
+
+#endif
diff --git a/src/libutil/cgroup.hh b/src/libutil/cgroup.hh
new file mode 100644
index 000000000..d08c8ad29
--- /dev/null
+++ b/src/libutil/cgroup.hh
@@ -0,0 +1,29 @@
+#pragma once
+
+#if __linux__
+
+#include <chrono>
+#include <optional>
+
+#include "types.hh"
+
+namespace nix {
+
+std::optional<Path> getCgroupFS();
+
+std::map<std::string, std::string> getCgroups(const Path & cgroupFile);
+
+struct CgroupStats
+{
+ std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
+};
+
+/* Destroy the cgroup denoted by 'path'. The postcondition is that
+ 'path' does not exist, and thus any processes in the cgroup have
+ been killed. Also return statistics from the cgroup just before
+ destruction. */
+CgroupStats destroyCgroup(const Path & cgroup);
+
+}
+
+#endif
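A minimal usage sketch (not part of the patch) for the API declared above, assuming Linux with the cgroup v2 filesystem mounted; the child cgroup name "nix-build-example" is hypothetical, everything else comes from cgroup.hh:

    #include "cgroup.hh"

    #include <iostream>

    using namespace nix;

    int main()
    {
        // Locate the cgroup2 mount point, if any.
        auto fs = getCgroupFS();
        if (!fs) return 1;

        // Map controller name -> cgroup path for the current process.
        auto cgroups = getCgroups("/proc/self/cgroup");
        auto ourCgroup = cgroups[""]; // the unified (v2) hierarchy uses an empty controller name

        // Destroy a hypothetical child cgroup and report its CPU usage.
        auto stats = destroyCgroup(*fs + "/" + ourCgroup + "/nix-build-example");
        if (stats.cpuUser)
            std::cout << "user CPU: " << stats.cpuUser->count() << " microseconds\n";
        if (stats.cpuSystem)
            std::cout << "system CPU: " << stats.cpuSystem->count() << " microseconds\n";
    }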
diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc
index fa79cca6b..e0902971e 100644
--- a/src/libutil/experimental-features.cc
+++ b/src/libutil/experimental-features.cc
@@ -14,6 +14,8 @@ std::map<ExperimentalFeature, std::string> stringifiedXpFeatures = {
{ Xp::NoUrlLiterals, "no-url-literals" },
{ Xp::FetchClosure, "fetch-closure" },
{ Xp::ReplFlake, "repl-flake" },
+ { Xp::AutoAllocateUids, "auto-allocate-uids" },
+ { Xp::Cgroups, "cgroups" },
};
const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::string_view & name)
diff --git a/src/libutil/experimental-features.hh b/src/libutil/experimental-features.hh
index d09ab025c..af775feb0 100644
--- a/src/libutil/experimental-features.hh
+++ b/src/libutil/experimental-features.hh
@@ -23,6 +23,8 @@ enum struct ExperimentalFeature
NoUrlLiterals,
FetchClosure,
ReplFlake,
+ AutoAllocateUids,
+ Cgroups,
};
/**
diff --git a/src/libutil/filesystem.cc b/src/libutil/filesystem.cc
index 403389e60..3a732cff8 100644
--- a/src/libutil/filesystem.cc
+++ b/src/libutil/filesystem.cc
@@ -1,5 +1,6 @@
#include <sys/time.h>
#include <filesystem>
+#include <atomic>
#include "finally.hh"
#include "util.hh"
@@ -10,7 +11,7 @@ namespace fs = std::filesystem;
namespace nix {
static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
- int & counter)
+ std::atomic<unsigned int> & counter)
{
tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true);
if (includePid)
@@ -22,9 +23,9 @@ static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
Path createTempDir(const Path & tmpRoot, const Path & prefix,
bool includePid, bool useGlobalCounter, mode_t mode)
{
- static int globalCounter = 0;
- int localCounter = 0;
- int & counter(useGlobalCounter ? globalCounter : localCounter);
+ static std::atomic<unsigned int> globalCounter = 0;
+ std::atomic<unsigned int> localCounter = 0;
+ auto & counter(useGlobalCounter ? globalCounter : localCounter);
while (1) {
checkInterrupt();
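The temp-dir counter above becomes std::atomic because createTempDir can be called from several threads that share the global counter; a plain int increment there would be a data race. A standalone illustration of the atomic-counter idiom (not Nix code):

    // Why the shared counter needs std::atomic: concurrent increments stay correct.
    #include <atomic>
    #include <iostream>
    #include <thread>
    #include <vector>

    static std::atomic<unsigned int> counter{0};

    int main()
    {
        std::vector<std::thread> threads;
        for (int i = 0; i < 8; ++i)
            threads.emplace_back([] {
                for (int j = 0; j < 100000; ++j)
                    counter++;   // atomic increment; a plain 'int' here would be a data race
            });
        for (auto & t : threads) t.join();
        std::cout << counter << "\n";  // always 800000 with std::atomic
    }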
diff --git a/src/libutil/json.cc b/src/libutil/json.cc
deleted file mode 100644
index 2f9e97ff5..000000000
--- a/src/libutil/json.cc
+++ /dev/null
@@ -1,203 +0,0 @@
-#include "json.hh"
-
-#include <iomanip>
-#include <cstdint>
-#include <cstring>
-
-namespace nix {
-
-template<>
-void toJSON<std::string_view>(std::ostream & str, const std::string_view & s)
-{
- constexpr size_t BUF_SIZE = 4096;
- char buf[BUF_SIZE + 7]; // BUF_SIZE + largest single sequence of puts
- size_t bufPos = 0;
-
- const auto flush = [&] {
- str.write(buf, bufPos);
- bufPos = 0;
- };
- const auto put = [&] (char c) {
- buf[bufPos++] = c;
- };
-
- put('"');
- for (auto i = s.begin(); i != s.end(); i++) {
- if (bufPos >= BUF_SIZE) flush();
- if (*i == '\"' || *i == '\\') { put('\\'); put(*i); }
- else if (*i == '\n') { put('\\'); put('n'); }
- else if (*i == '\r') { put('\\'); put('r'); }
- else if (*i == '\t') { put('\\'); put('t'); }
- else if (*i >= 0 && *i < 32) {
- const char hex[17] = "0123456789abcdef";
- put('\\');
- put('u');
- put(hex[(uint16_t(*i) >> 12) & 0xf]);
- put(hex[(uint16_t(*i) >> 8) & 0xf]);
- put(hex[(uint16_t(*i) >> 4) & 0xf]);
- put(hex[(uint16_t(*i) >> 0) & 0xf]);
- }
- else put(*i);
- }
- put('"');
- flush();
-}
-
-void toJSON(std::ostream & str, const char * s)
-{
- if (!s) str << "null"; else toJSON(str, std::string_view(s));
-}
-
-template<> void toJSON<int>(std::ostream & str, const int & n) { str << n; }
-template<> void toJSON<unsigned int>(std::ostream & str, const unsigned int & n) { str << n; }
-template<> void toJSON<long>(std::ostream & str, const long & n) { str << n; }
-template<> void toJSON<unsigned long>(std::ostream & str, const unsigned long & n) { str << n; }
-template<> void toJSON<long long>(std::ostream & str, const long long & n) { str << n; }
-template<> void toJSON<unsigned long long>(std::ostream & str, const unsigned long long & n) { str << n; }
-template<> void toJSON<float>(std::ostream & str, const float & n) { str << n; }
-template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
-template<> void toJSON<std::string>(std::ostream & str, const std::string & s) { toJSON(str, (std::string_view) s); }
-
-template<> void toJSON<bool>(std::ostream & str, const bool & b)
-{
- str << (b ? "true" : "false");
-}
-
-template<> void toJSON<std::nullptr_t>(std::ostream & str, const std::nullptr_t & b)
-{
- str << "null";
-}
-
-JSONWriter::JSONWriter(std::ostream & str, bool indent)
- : state(new JSONState(str, indent))
-{
- state->stack++;
-}
-
-JSONWriter::JSONWriter(JSONState * state)
- : state(state)
-{
- state->stack++;
-}
-
-JSONWriter::~JSONWriter()
-{
- if (state) {
- assertActive();
- state->stack--;
- if (state->stack == 0) delete state;
- }
-}
-
-void JSONWriter::comma()
-{
- assertActive();
- if (first) {
- first = false;
- } else {
- state->str << ',';
- }
- if (state->indent) indent();
-}
-
-void JSONWriter::indent()
-{
- state->str << '\n' << std::string(state->depth * 2, ' ');
-}
-
-void JSONList::open()
-{
- state->depth++;
- state->str << '[';
-}
-
-JSONList::~JSONList()
-{
- state->depth--;
- if (state->indent && !first) indent();
- state->str << "]";
-}
-
-JSONList JSONList::list()
-{
- comma();
- return JSONList(state);
-}
-
-JSONObject JSONList::object()
-{
- comma();
- return JSONObject(state);
-}
-
-JSONPlaceholder JSONList::placeholder()
-{
- comma();
- return JSONPlaceholder(state);
-}
-
-void JSONObject::open()
-{
- state->depth++;
- state->str << '{';
-}
-
-JSONObject::~JSONObject()
-{
- if (state) {
- state->depth--;
- if (state->indent && !first) indent();
- state->str << "}";
- }
-}
-
-void JSONObject::attr(std::string_view s)
-{
- comma();
- toJSON(state->str, s);
- state->str << ':';
- if (state->indent) state->str << ' ';
-}
-
-JSONList JSONObject::list(std::string_view name)
-{
- attr(name);
- return JSONList(state);
-}
-
-JSONObject JSONObject::object(std::string_view name)
-{
- attr(name);
- return JSONObject(state);
-}
-
-JSONPlaceholder JSONObject::placeholder(std::string_view name)
-{
- attr(name);
- return JSONPlaceholder(state);
-}
-
-JSONList JSONPlaceholder::list()
-{
- assertValid();
- first = false;
- return JSONList(state);
-}
-
-JSONObject JSONPlaceholder::object()
-{
- assertValid();
- first = false;
- return JSONObject(state);
-}
-
-JSONPlaceholder::~JSONPlaceholder()
-{
- if (first) {
- assert(std::uncaught_exceptions());
- if (state->stack != 0)
- write(nullptr);
- }
-}
-
-}
diff --git a/src/libutil/json.hh b/src/libutil/json.hh
deleted file mode 100644
index 3790b1a2e..000000000
--- a/src/libutil/json.hh
+++ /dev/null
@@ -1,185 +0,0 @@
-#pragma once
-
-#include <iostream>
-#include <vector>
-#include <cassert>
-
-namespace nix {
-
-void toJSON(std::ostream & str, const char * s);
-
-template<typename T>
-void toJSON(std::ostream & str, const T & n);
-
-class JSONWriter
-{
-protected:
-
- struct JSONState
- {
- std::ostream & str;
- bool indent;
- size_t depth = 0;
- size_t stack = 0;
- JSONState(std::ostream & str, bool indent) : str(str), indent(indent) { }
- ~JSONState()
- {
- assert(stack == 0);
- }
- };
-
- JSONState * state;
-
- bool first = true;
-
- JSONWriter(std::ostream & str, bool indent);
-
- JSONWriter(JSONState * state);
-
- ~JSONWriter();
-
- void assertActive()
- {
- assert(state->stack != 0);
- }
-
- void comma();
-
- void indent();
-};
-
-class JSONObject;
-class JSONPlaceholder;
-
-class JSONList : JSONWriter
-{
-private:
-
- friend class JSONObject;
- friend class JSONPlaceholder;
-
- void open();
-
- JSONList(JSONState * state)
- : JSONWriter(state)
- {
- open();
- }
-
-public:
-
- JSONList(std::ostream & str, bool indent = false)
- : JSONWriter(str, indent)
- {
- open();
- }
-
- ~JSONList();
-
- template<typename T>
- JSONList & elem(const T & v)
- {
- comma();
- toJSON(state->str, v);
- return *this;
- }
-
- JSONList list();
-
- JSONObject object();
-
- JSONPlaceholder placeholder();
-};
-
-class JSONObject : JSONWriter
-{
-private:
-
- friend class JSONList;
- friend class JSONPlaceholder;
-
- void open();
-
- JSONObject(JSONState * state)
- : JSONWriter(state)
- {
- open();
- }
-
- void attr(std::string_view s);
-
-public:
-
- JSONObject(std::ostream & str, bool indent = false)
- : JSONWriter(str, indent)
- {
- open();
- }
-
- JSONObject(const JSONObject & obj) = delete;
-
- JSONObject(JSONObject && obj)
- : JSONWriter(obj.state)
- {
- obj.state = 0;
- }
-
- ~JSONObject();
-
- template<typename T>
- JSONObject & attr(std::string_view name, const T & v)
- {
- attr(name);
- toJSON(state->str, v);
- return *this;
- }
-
- JSONList list(std::string_view name);
-
- JSONObject object(std::string_view name);
-
- JSONPlaceholder placeholder(std::string_view name);
-};
-
-class JSONPlaceholder : JSONWriter
-{
-
-private:
-
- friend class JSONList;
- friend class JSONObject;
-
- JSONPlaceholder(JSONState * state)
- : JSONWriter(state)
- {
- }
-
- void assertValid()
- {
- assertActive();
- assert(first);
- }
-
-public:
-
- JSONPlaceholder(std::ostream & str, bool indent = false)
- : JSONWriter(str, indent)
- {
- }
-
- ~JSONPlaceholder();
-
- template<typename T>
- void write(const T & v)
- {
- assertValid();
- first = false;
- toJSON(state->str, v);
- }
-
- JSONList list();
-
- JSONObject object();
-};
-
-}
diff --git a/src/libutil/tests/json.cc b/src/libutil/tests/json.cc
deleted file mode 100644
index 156286999..000000000
--- a/src/libutil/tests/json.cc
+++ /dev/null
@@ -1,193 +0,0 @@
-#include "json.hh"
-#include <gtest/gtest.h>
-#include <sstream>
-
-namespace nix {
-
- /* ----------------------------------------------------------------------------
- * toJSON
- * --------------------------------------------------------------------------*/
-
- TEST(toJSON, quotesCharPtr) {
- const char* input = "test";
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "\"test\"");
- }
-
- TEST(toJSON, quotesStdString) {
- std::string input = "test";
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "\"test\"");
- }
-
- TEST(toJSON, convertsNullptrtoNull) {
- auto input = nullptr;
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "null");
- }
-
- TEST(toJSON, convertsNullToNull) {
- const char* input = 0;
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "null");
- }
-
-
- TEST(toJSON, convertsFloat) {
- auto input = 1.024f;
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "1.024");
- }
-
- TEST(toJSON, convertsDouble) {
- const double input = 1.024;
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "1.024");
- }
-
- TEST(toJSON, convertsBool) {
- auto input = false;
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "false");
- }
-
- TEST(toJSON, quotesTab) {
- std::stringstream out;
- toJSON(out, "\t");
-
- ASSERT_EQ(out.str(), "\"\\t\"");
- }
-
- TEST(toJSON, quotesNewline) {
- std::stringstream out;
- toJSON(out, "\n");
-
- ASSERT_EQ(out.str(), "\"\\n\"");
- }
-
- TEST(toJSON, quotesCreturn) {
- std::stringstream out;
- toJSON(out, "\r");
-
- ASSERT_EQ(out.str(), "\"\\r\"");
- }
-
- TEST(toJSON, quotesCreturnNewLine) {
- std::stringstream out;
- toJSON(out, "\r\n");
-
- ASSERT_EQ(out.str(), "\"\\r\\n\"");
- }
-
- TEST(toJSON, quotesDoublequotes) {
- std::stringstream out;
- toJSON(out, "\"");
-
- ASSERT_EQ(out.str(), "\"\\\"\"");
- }
-
- TEST(toJSON, substringEscape) {
- std::stringstream out;
- std::string_view s = "foo\t";
- toJSON(out, s.substr(3));
-
- ASSERT_EQ(out.str(), "\"\\t\"");
- }
-
- /* ----------------------------------------------------------------------------
- * JSONObject
- * --------------------------------------------------------------------------*/
-
- TEST(JSONObject, emptyObject) {
- std::stringstream out;
- {
- JSONObject t(out);
- }
- ASSERT_EQ(out.str(), "{}");
- }
-
- TEST(JSONObject, objectWithList) {
- std::stringstream out;
- {
- JSONObject t(out);
- auto l = t.list("list");
- l.elem("element");
- }
- ASSERT_EQ(out.str(), R"#({"list":["element"]})#");
- }
-
- TEST(JSONObject, objectWithListIndent) {
- std::stringstream out;
- {
- JSONObject t(out, true);
- auto l = t.list("list");
- l.elem("element");
- }
- ASSERT_EQ(out.str(),
-R"#({
- "list": [
- "element"
- ]
-})#");
- }
-
- TEST(JSONObject, objectWithPlaceholderAndList) {
- std::stringstream out;
- {
- JSONObject t(out);
- auto l = t.placeholder("list");
- l.list().elem("element");
- }
-
- ASSERT_EQ(out.str(), R"#({"list":["element"]})#");
- }
-
- TEST(JSONObject, objectWithPlaceholderAndObject) {
- std::stringstream out;
- {
- JSONObject t(out);
- auto l = t.placeholder("object");
- l.object().attr("key", "value");
- }
-
- ASSERT_EQ(out.str(), R"#({"object":{"key":"value"}})#");
- }
-
- /* ----------------------------------------------------------------------------
- * JSONList
- * --------------------------------------------------------------------------*/
-
- TEST(JSONList, empty) {
- std::stringstream out;
- {
- JSONList l(out);
- }
- ASSERT_EQ(out.str(), R"#([])#");
- }
-
- TEST(JSONList, withElements) {
- std::stringstream out;
- {
- JSONList l(out);
- l.elem("one");
- l.object();
- l.placeholder().write("three");
- }
- ASSERT_EQ(out.str(), R"#(["one",{},"three"])#");
- }
-}
-
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 623b74bdd..4f2caaa40 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -2,6 +2,7 @@
#include "sync.hh"
#include "finally.hh"
#include "serialise.hh"
+#include "cgroup.hh"
#include <array>
#include <cctype>
@@ -36,7 +37,6 @@
#include <sys/prctl.h>
#include <sys/resource.h>
-#include <mntent.h>
#include <cmath>
#endif
@@ -727,45 +727,22 @@ unsigned int getMaxCPU()
{
#if __linux__
try {
- FILE *fp = fopen("/proc/mounts", "r");
- if (!fp)
- return 0;
-
- Strings cgPathParts;
-
- struct mntent *ent;
- while ((ent = getmntent(fp))) {
- std::string mountType, mountPath;
-
- mountType = ent->mnt_type;
- mountPath = ent->mnt_dir;
-
- if (mountType == "cgroup2") {
- cgPathParts.push_back(mountPath);
- break;
- }
- }
-
- fclose(fp);
-
- if (cgPathParts.size() > 0 && pathExists("/proc/self/cgroup")) {
- std::string currentCgroup = readFile("/proc/self/cgroup");
- Strings cgValues = tokenizeString<Strings>(currentCgroup, ":");
- cgPathParts.push_back(trim(cgValues.back(), "\n"));
- cgPathParts.push_back("cpu.max");
- std::string fullCgPath = canonPath(concatStringsSep("/", cgPathParts));
-
- if (pathExists(fullCgPath)) {
- std::string cpuMax = readFile(fullCgPath);
- std::vector<std::string> cpuMaxParts = tokenizeString<std::vector<std::string>>(cpuMax, " ");
- std::string quota = cpuMaxParts[0];
- std::string period = trim(cpuMaxParts[1], "\n");
-
- if (quota != "max")
- return std::ceil(std::stoi(quota) / std::stof(period));
- }
- }
- } catch (Error &) { ignoreException(); }
+ auto cgroupFS = getCgroupFS();
+ if (!cgroupFS) return 0;
+
+ auto cgroups = getCgroups("/proc/self/cgroup");
+ auto cgroup = cgroups[""];
+ if (cgroup == "") return 0;
+
+ auto cpuFile = *cgroupFS + "/" + cgroup + "/cpu.max";
+
+ auto cpuMax = readFile(cpuFile);
+ auto cpuMaxParts = tokenizeString<std::vector<std::string>>(cpuMax, " \n");
+ auto quota = cpuMaxParts[0];
+ auto period = cpuMaxParts[1];
+ if (quota != "max")
+ return std::ceil(std::stoi(quota) / std::stof(period));
+ } catch (Error &) { ignoreException(lvlDebug); }
#endif
return 0;
@@ -1427,7 +1404,7 @@ std::string shellEscape(const std::string_view s)
}
-void ignoreException()
+void ignoreException(Verbosity lvl)
{
/* Make sure no exceptions leave this function.
printError() also throws when remote is closed. */
@@ -1435,7 +1412,7 @@ void ignoreException()
try {
throw;
} catch (std::exception & e) {
- printError("error (ignored): %1%", e.what());
+ printMsg(lvl, "error (ignored): %1%", e.what());
}
} catch (...) { }
}
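getMaxCPU() now derives the CPU limit from the cgroup's cpu.max file, which holds "<quota> <period>" (or "max <period>" when unlimited), and computes ceil(quota / period). A standalone sketch of that parsing, using a hypothetical path; the real code goes through readFile/tokenizeString from libutil:

    // Sketch of the cpu.max parsing that getMaxCPU() now relies on.
    #include <cmath>
    #include <fstream>
    #include <iostream>
    #include <string>

    static unsigned int cpusFromCpuMax(const std::string & path)
    {
        std::ifstream in(path);
        std::string quota, period;
        if (!(in >> quota >> period)) return 0;
        if (quota == "max") return 0;   // no quota -> treat as "unknown"
        return std::ceil(std::stod(quota) / std::stod(period));
    }

    int main()
    {
        // Hypothetical path; in the patch it is "<cgroupFS>/<our cgroup>/cpu.max".
        std::cout << cpusFromCpuMax("/sys/fs/cgroup/cpu.max") << "\n";
    }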
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index e5c678682..94d8cc555 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -528,7 +528,7 @@ std::string shellEscape(const std::string_view s);
/* Exception handling in destructors: print an error message, then
ignore the exception. */
-void ignoreException();
+void ignoreException(Verbosity lvl = lvlError);
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index fdd66220a..776c5f6db 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -12,7 +12,6 @@
#include "local-fs-store.hh"
#include "user-env.hh"
#include "util.hh"
-#include "json.hh"
#include "value-to-json.hh"
#include "xml-writer.hh"
#include "legacy.hh"
@@ -26,6 +25,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
+#include <nlohmann/json.hpp>
using namespace nix;
using std::cout;
@@ -911,43 +911,47 @@ static VersionDiff compareVersionAgainstSet(
static void queryJSON(Globals & globals, std::vector<DrvInfo> & elems, bool printOutPath, bool printMeta)
{
- JSONObject topObj(cout, true);
+ using nlohmann::json;
+ json topObj = json::object();
for (auto & i : elems) {
try {
if (i.hasFailed()) continue;
- JSONObject pkgObj = topObj.object(i.attrPath);
auto drvName = DrvName(i.queryName());
- pkgObj.attr("name", drvName.fullName);
- pkgObj.attr("pname", drvName.name);
- pkgObj.attr("version", drvName.version);
- pkgObj.attr("system", i.querySystem());
- pkgObj.attr("outputName", i.queryOutputName());
+ json &pkgObj = topObj[i.attrPath];
+ pkgObj = {
+ {"name", drvName.fullName},
+ {"pname", drvName.name},
+ {"version", drvName.version},
+ {"system", i.querySystem()},
+ {"outputName", i.queryOutputName()},
+ };
{
DrvInfo::Outputs outputs = i.queryOutputs(printOutPath);
- JSONObject outputObj = pkgObj.object("outputs");
+ json &outputObj = pkgObj["outputs"];
+ outputObj = json::object();
for (auto & j : outputs) {
if (j.second)
- outputObj.attr(j.first, globals.state->store->printStorePath(*j.second));
+ outputObj[j.first] = globals.state->store->printStorePath(*j.second);
else
- outputObj.attr(j.first, nullptr);
+ outputObj[j.first] = nullptr;
}
}
if (printMeta) {
- JSONObject metaObj = pkgObj.object("meta");
+ json &metaObj = pkgObj["meta"];
+ metaObj = json::object();
StringSet metaNames = i.queryMetaNames();
for (auto & j : metaNames) {
Value * v = i.queryMeta(j);
if (!v) {
printError("derivation '%s' has invalid meta attribute '%s'", i.queryName(), j);
- metaObj.attr(j, nullptr);
+ metaObj[j] = nullptr;
} else {
- auto placeholder = metaObj.placeholder(j);
PathSet context;
- printValueAsJSON(*globals.state, true, *v, noPos, placeholder, context);
+ metaObj[j] = printValueAsJSON(*globals.state, true, *v, noPos, context);
}
}
}
@@ -958,6 +962,7 @@ static void queryJSON(Globals & globals, std::vector<DrvInfo> & elems, bool prin
throw;
}
}
+ std::cout << topObj.dump(2);
}
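This is the pattern used throughout the patch: instead of streaming output through JSONObject/JSONList, the code builds an nlohmann::json value in memory and serializes it once with dump(). A minimal sketch of that pattern (the package name and store path are illustrative, not actual nix-env output):

    #include <nlohmann/json.hpp>
    #include <iostream>

    int main()
    {
        using nlohmann::json;

        json top = json::object();

        json & pkg = top["hello"];      // creates a null slot, then assign an object
        pkg = {
            {"pname", "hello"},
            {"version", "2.12"},
        };
        pkg["outputs"]["out"] = "/nix/store/...-hello-2.12";  // nested objects created on demand

        // One serialization at the end instead of incremental streaming.
        std::cout << top.dump(2) << std::endl;
    }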
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index 23f2ad3cf..b854ef1e7 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -516,7 +516,7 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise)
if (!store->isValidPath(info->path) || reregister) {
/* !!! races */
if (canonicalise)
- canonicalisePathMetaData(store->printStorePath(info->path), -1);
+ canonicalisePathMetaData(store->printStorePath(info->path), {});
if (!hashGiven) {
HashResult hash = hashPath(htSHA256, store->printStorePath(info->path));
info->narHash = hash.first;
@@ -808,14 +808,17 @@ static void opServe(Strings opFlags, Strings opArgs)
if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
settings.maxLogSize = readNum<unsigned long>(in);
if (GET_PROTOCOL_MINOR(clientVersion) >= 3) {
- settings.buildRepeat = readInt(in);
- settings.enforceDeterminism = readInt(in);
+ if (readInt(in) != 0) {
+ throw Error("client requested repeating builds, but this is not currently implemented");
+ }
+ if (readInt(in) != 0) {
+ throw Error("client requested enforcing determinism, but this is not currently implemented");
+ }
settings.runDiffHook = true;
}
if (GET_PROTOCOL_MINOR(clientVersion) >= 7) {
settings.keepFailed = (bool) readInt(in);
}
- settings.printRepeatedBuilds = false;
};
while (true) {
diff --git a/src/nix/app.cc b/src/nix/app.cc
index 48de8fb82..5658f2a52 100644
--- a/src/nix/app.cc
+++ b/src/nix/app.cc
@@ -37,11 +37,13 @@ struct InstallableDerivedPath : Installable
* Return the rewrites that are needed to resolve a string whose context is
* included in `dependencies`.
*/
-StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies)
+StringPairs resolveRewrites(
+ Store & store,
+ const std::vector<BuiltPathWithResult> & dependencies)
{
StringPairs res;
for (auto & dep : dependencies)
- if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep))
+ if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep.path))
for (auto & [ outputName, outputPath ] : drvDep->outputs)
res.emplace(
downstreamPlaceholder(store, drvDep->drvPath, outputName),
@@ -53,7 +55,10 @@ StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies)
/**
* Resolve the given string assuming the given context.
*/
-std::string resolveString(Store & store, const std::string & toResolve, const BuiltPaths dependencies)
+std::string resolveString(
+ Store & store,
+ const std::string & toResolve,
+ const std::vector<BuiltPathWithResult> & dependencies)
{
auto rewrites = resolveRewrites(store, dependencies);
return rewriteStrings(toResolve, rewrites);
diff --git a/src/nix/build.cc b/src/nix/build.cc
index 9c648d28e..94b169167 100644
--- a/src/nix/build.cc
+++ b/src/nix/build.cc
@@ -10,6 +10,37 @@
using namespace nix;
+nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
+{
+ auto res = nlohmann::json::array();
+ for (auto & t : paths) {
+ std::visit([&res, store](const auto & t) {
+ res.push_back(t.toJSON(store));
+ }, t.raw());
+ }
+ return res;
+}
+
+nlohmann::json builtPathsWithResultToJSON(const std::vector<BuiltPathWithResult> & buildables, ref<Store> store)
+{
+ auto res = nlohmann::json::array();
+ for (auto & b : buildables) {
+ std::visit([&](const auto & t) {
+ auto j = t.toJSON(store);
+ if (b.result) {
+ j["startTime"] = b.result->startTime;
+ j["stopTime"] = b.result->stopTime;
+ if (b.result->cpuUser)
+ j["cpuUser"] = ((double) b.result->cpuUser->count()) / 1000000;
+ if (b.result->cpuSystem)
+ j["cpuSystem"] = ((double) b.result->cpuSystem->count()) / 1000000;
+ }
+ res.push_back(j);
+ }, b.path.raw());
+ }
+ return res;
+}
+
struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
{
Path outLink = "result";
@@ -78,7 +109,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
Realise::Outputs,
installables, buildMode);
- if (json) logger->cout("%s", derivedPathsWithHintsToJSON(buildables, store).dump());
+ if (json) logger->cout("%s", builtPathsWithResultToJSON(buildables, store).dump());
if (outLink != "")
if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>())
@@ -98,7 +129,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
store2->addPermRoot(output.second, absPath(symlink));
}
},
- }, buildable.raw());
+ }, buildable.path.raw());
}
if (printOutputPaths) {
@@ -113,11 +144,14 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
std::cout << store->printStorePath(output.second) << std::endl;
}
},
- }, buildable.raw());
+ }, buildable.path.raw());
}
}
- updateProfile(buildables);
+ BuiltPaths buildables2;
+ for (auto & b : buildables)
+ buildables2.push_back(b.path);
+ updateProfile(buildables2);
}
};
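With --json, nix build now attaches per-path build results: startTime/stopTime straight from the BuildResult, and cpuUser/cpuSystem converted from microseconds to fractional seconds. A small sketch of that conversion with illustrative values (the field names come from builtPathsWithResultToJSON above):

    #include <chrono>
    #include <optional>
    #include <nlohmann/json.hpp>
    #include <iostream>

    int main()
    {
        std::optional<std::chrono::microseconds> cpuUser = std::chrono::microseconds(1234567);

        nlohmann::json j;
        j["startTime"] = 1668000000;   // illustrative timestamps
        j["stopTime"]  = 1668000042;
        if (cpuUser)
            j["cpuUser"] = ((double) cpuUser->count()) / 1000000;  // microseconds -> seconds

        std::cout << j.dump() << std::endl;  // {"cpuUser":1.234567,"startTime":...,"stopTime":...}
    }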
diff --git a/src/nix/eval.cc b/src/nix/eval.cc
index ddd2790c6..ba82b5772 100644
--- a/src/nix/eval.cc
+++ b/src/nix/eval.cc
@@ -4,10 +4,11 @@
#include "store-api.hh"
#include "eval.hh"
#include "eval-inline.hh"
-#include "json.hh"
#include "value-to-json.hh"
#include "progress-bar.hh"
+#include <nlohmann/json.hpp>
+
using namespace nix;
struct CmdEval : MixJSON, InstallableCommand
@@ -115,9 +116,7 @@ struct CmdEval : MixJSON, InstallableCommand
}
else if (json) {
- JSONPlaceholder jsonOut(std::cout);
- printValueAsJSON(*state, true, *v, pos, jsonOut, context, false);
- std::cout << std::endl;
+ std::cout << printValueAsJSON(*state, true, *v, pos, context, false).dump() << std::endl;
}
else {
diff --git a/src/nix/flake.cc b/src/nix/flake.cc
index 3967f1102..336f6723a 100644
--- a/src/nix/flake.cc
+++ b/src/nix/flake.cc
@@ -11,7 +11,6 @@
#include "attr-path.hh"
#include "fetchers.hh"
#include "registry.hh"
-#include "json.hh"
#include "eval-cache.hh"
#include "markdown.hh"
@@ -21,6 +20,7 @@
using namespace nix;
using namespace nix::flake;
+using json = nlohmann::json;
class FlakeCommand : virtual Args, public MixFlakeOptions
{
@@ -917,35 +917,44 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun
{
auto flake = lockFlake();
- auto jsonRoot = json ? std::optional<JSONObject>(std::cout) : std::nullopt;
-
StorePathSet sources;
sources.insert(flake.flake.sourceInfo->storePath);
- if (jsonRoot)
- jsonRoot->attr("path", store->printStorePath(flake.flake.sourceInfo->storePath));
// FIXME: use graph output, handle cycles.
- std::function<void(const Node & node, std::optional<JSONObject> & jsonObj)> traverse;
- traverse = [&](const Node & node, std::optional<JSONObject> & jsonObj)
+ std::function<nlohmann::json(const Node & node)> traverse;
+ traverse = [&](const Node & node)
{
- auto jsonObj2 = jsonObj ? jsonObj->object("inputs") : std::optional<JSONObject>();
+ nlohmann::json jsonObj2 = json ? json::object() : nlohmann::json(nullptr);
for (auto & [inputName, input] : node.inputs) {
if (auto inputNode = std::get_if<0>(&input)) {
- auto jsonObj3 = jsonObj2 ? jsonObj2->object(inputName) : std::optional<JSONObject>();
auto storePath =
dryRun
? (*inputNode)->lockedRef.input.computeStorePath(*store)
: (*inputNode)->lockedRef.input.fetch(store).first.storePath;
- if (jsonObj3)
- jsonObj3->attr("path", store->printStorePath(storePath));
- sources.insert(std::move(storePath));
- traverse(**inputNode, jsonObj3);
+ if (json) {
+ auto& jsonObj3 = jsonObj2[inputName];
+ jsonObj3["path"] = store->printStorePath(storePath);
+ sources.insert(std::move(storePath));
+ jsonObj3["inputs"] = traverse(**inputNode);
+ } else {
+ sources.insert(std::move(storePath));
+ traverse(**inputNode);
+ }
}
}
+ return jsonObj2;
};
- traverse(*flake.lockFile.root, jsonRoot);
+ if (json) {
+ nlohmann::json jsonRoot = {
+ {"path", store->printStorePath(flake.flake.sourceInfo->storePath)},
+ {"inputs", traverse(*flake.lockFile.root)},
+ };
+ std::cout << jsonRoot.dump() << std::endl;
+ } else {
+ traverse(*flake.lockFile.root);
+ }
if (!dryRun && !dstUri.empty()) {
ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
diff --git a/src/nix/ls.cc b/src/nix/ls.cc
index 07554994b..e964b01b3 100644
--- a/src/nix/ls.cc
+++ b/src/nix/ls.cc
@@ -3,7 +3,7 @@
#include "fs-accessor.hh"
#include "nar-accessor.hh"
#include "common-args.hh"
-#include "json.hh"
+#include <nlohmann/json.hpp>
using namespace nix;
@@ -91,10 +91,9 @@ struct MixLs : virtual Args, MixJSON
if (path == "/") path = "";
if (json) {
- JSONPlaceholder jsonRoot(std::cout);
if (showDirectory)
throw UsageError("'--directory' is useless with '--json'");
- listNar(jsonRoot, accessor, path, recursive);
+ std::cout << listNar(accessor, path, recursive);
} else
listText(accessor);
}
diff --git a/src/nix/make-content-addressed.cc b/src/nix/make-content-addressed.cc
index 34860c38f..d86b90fc7 100644
--- a/src/nix/make-content-addressed.cc
+++ b/src/nix/make-content-addressed.cc
@@ -2,10 +2,13 @@
#include "store-api.hh"
#include "make-content-addressed.hh"
#include "common-args.hh"
-#include "json.hh"
+
+#include <nlohmann/json.hpp>
using namespace nix;
+using nlohmann::json;
+
struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand, MixJSON
{
CmdMakeContentAddressed()
@@ -25,6 +28,7 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand,
;
}
+ using StorePathsCommand::run;
void run(ref<Store> srcStore, StorePaths && storePaths) override
{
auto dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
@@ -33,13 +37,15 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand,
StorePathSet(storePaths.begin(), storePaths.end()));
if (json) {
- JSONObject jsonRoot(std::cout);
- JSONObject jsonRewrites(jsonRoot.object("rewrites"));
+ auto jsonRewrites = json::object();
for (auto & path : storePaths) {
auto i = remappings.find(path);
assert(i != remappings.end());
- jsonRewrites.attr(srcStore->printStorePath(path), srcStore->printStorePath(i->second));
+ jsonRewrites[srcStore->printStorePath(path)] = srcStore->printStorePath(i->second);
}
+ auto json = json::object();
+ json["rewrites"] = jsonRewrites;
+ std::cout << json.dump();
} else {
for (auto & path : storePaths) {
auto i = remappings.find(path);
diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc
index d690fe594..613c5b191 100644
--- a/src/nix/path-info.cc
+++ b/src/nix/path-info.cc
@@ -1,12 +1,13 @@
#include "command.hh"
#include "shared.hh"
#include "store-api.hh"
-#include "json.hh"
#include "common-args.hh"
#include <algorithm>
#include <array>
+#include <nlohmann/json.hpp>
+
using namespace nix;
struct CmdPathInfo : StorePathsCommand, MixJSON
@@ -86,11 +87,10 @@ struct CmdPathInfo : StorePathsCommand, MixJSON
pathLen = std::max(pathLen, store->printStorePath(storePath).size());
if (json) {
- JSONPlaceholder jsonRoot(std::cout);
- store->pathInfoToJSON(jsonRoot,
+ std::cout << store->pathInfoToJSON(
// FIXME: preserve order?
StorePathSet(storePaths.begin(), storePaths.end()),
- true, showClosureSize, SRI, AllowInvalid);
+ true, showClosureSize, SRI, AllowInvalid).dump();
}
else {
diff --git a/src/nix/profile.cc b/src/nix/profile.cc
index 3814e7d5a..11910523d 100644
--- a/src/nix/profile.cc
+++ b/src/nix/profile.cc
@@ -253,11 +253,11 @@ struct ProfileManifest
static std::map<Installable *, BuiltPaths>
builtPathsPerInstallable(
- const std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> & builtPaths)
+ const std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> & builtPaths)
{
std::map<Installable *, BuiltPaths> res;
for (auto & [installable, builtPath] : builtPaths)
- res[installable.get()].push_back(builtPath);
+ res[installable.get()].push_back(builtPath.path);
return res;
}
diff --git a/src/nix/search.cc b/src/nix/search.cc
index bdd45cbed..d2a31607d 100644
--- a/src/nix/search.cc
+++ b/src/nix/search.cc
@@ -5,7 +5,6 @@
#include "names.hh"
#include "get-drvs.hh"
#include "common-args.hh"
-#include "json.hh"
#include "shared.hh"
#include "eval-cache.hh"
#include "attr-path.hh"
@@ -13,8 +12,10 @@
#include <regex>
#include <fstream>
+#include <nlohmann/json.hpp>
using namespace nix;
+using json = nlohmann::json;
std::string wrap(std::string prefix, std::string s)
{
@@ -84,7 +85,8 @@ struct CmdSearch : InstallableCommand, MixJSON
auto state = getEvalState();
- auto jsonOut = json ? std::make_unique<JSONObject>(std::cout) : nullptr;
+ std::optional<nlohmann::json> jsonOut;
+ if (json) jsonOut = json::object();
uint64_t results = 0;
@@ -151,10 +153,11 @@ struct CmdSearch : InstallableCommand, MixJSON
{
results++;
if (json) {
- auto jsonElem = jsonOut->object(attrPath2);
- jsonElem.attr("pname", name.name);
- jsonElem.attr("version", name.version);
- jsonElem.attr("description", description);
+ (*jsonOut)[attrPath2] = {
+ {"pname", name.name},
+ {"version", name.version},
+ {"description", description},
+ };
} else {
auto name2 = hiliteMatches(name.name, nameMatches, ANSI_GREEN, "\e[0;2m");
if (results > 1) logger->cout("");
@@ -193,6 +196,10 @@ struct CmdSearch : InstallableCommand, MixJSON
for (auto & cursor : installable->getCursors(*state))
visit(*cursor, cursor->getAttrPath(), true);
+ if (json) {
+ std::cout << jsonOut->dump() << std::endl;
+ }
+
if (!json && !results)
throw Error("no results for the given search term(s)!");
}
diff --git a/src/nix/show-derivation.cc b/src/nix/show-derivation.cc
index fb46b4dbf..af2e676a4 100644
--- a/src/nix/show-derivation.cc
+++ b/src/nix/show-derivation.cc
@@ -5,10 +5,11 @@
#include "common-args.hh"
#include "store-api.hh"
#include "archive.hh"
-#include "json.hh"
#include "derivations.hh"
+#include <nlohmann/json.hpp>
using namespace nix;
+using json = nlohmann::json;
struct CmdShowDerivation : InstallablesCommand
{
@@ -48,77 +49,63 @@ struct CmdShowDerivation : InstallablesCommand
drvPaths = std::move(closure);
}
- {
-
- JSONObject jsonRoot(std::cout, true);
+ json jsonRoot = json::object();
for (auto & drvPath : drvPaths) {
if (!drvPath.isDerivation()) continue;
- auto drvObj(jsonRoot.object(store->printStorePath(drvPath)));
+ json& drvObj = jsonRoot[store->printStorePath(drvPath)];
auto drv = store->readDerivation(drvPath);
{
- auto outputsObj(drvObj.object("outputs"));
+ json& outputsObj = drvObj["outputs"];
+ outputsObj = json::object();
for (auto & [_outputName, output] : drv.outputs) {
auto & outputName = _outputName; // work around clang bug
- auto outputObj { outputsObj.object(outputName) };
+ auto& outputObj = outputsObj[outputName];
+ outputObj = json::object();
std::visit(overloaded {
[&](const DerivationOutput::InputAddressed & doi) {
- outputObj.attr("path", store->printStorePath(doi.path));
+ outputObj["path"] = store->printStorePath(doi.path);
},
[&](const DerivationOutput::CAFixed & dof) {
- outputObj.attr("path", store->printStorePath(dof.path(*store, drv.name, outputName)));
- outputObj.attr("hashAlgo", dof.hash.printMethodAlgo());
- outputObj.attr("hash", dof.hash.hash.to_string(Base16, false));
+ outputObj["path"] = store->printStorePath(dof.path(*store, drv.name, outputName));
+ outputObj["hashAlgo"] = dof.hash.printMethodAlgo();
+ outputObj["hash"] = dof.hash.hash.to_string(Base16, false);
},
[&](const DerivationOutput::CAFloating & dof) {
- outputObj.attr("hashAlgo", makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
+ outputObj["hashAlgo"] = makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType);
},
[&](const DerivationOutput::Deferred &) {},
[&](const DerivationOutput::Impure & doi) {
- outputObj.attr("hashAlgo", makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType));
- outputObj.attr("impure", true);
+ outputObj["hashAlgo"] = makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType);
+ outputObj["impure"] = true;
},
}, output.raw());
}
}
{
- auto inputsList(drvObj.list("inputSrcs"));
+ auto& inputsList = drvObj["inputSrcs"];
+ inputsList = json::array();
for (auto & input : drv.inputSrcs)
- inputsList.elem(store->printStorePath(input));
- }
-
- {
- auto inputDrvsObj(drvObj.object("inputDrvs"));
- for (auto & input : drv.inputDrvs) {
- auto inputList(inputDrvsObj.list(store->printStorePath(input.first)));
- for (auto & outputId : input.second)
- inputList.elem(outputId);
- }
+ inputsList.emplace_back(store->printStorePath(input));
}
- drvObj.attr("system", drv.platform);
- drvObj.attr("builder", drv.builder);
-
{
- auto argsList(drvObj.list("args"));
- for (auto & arg : drv.args)
- argsList.elem(arg);
+ auto& inputDrvsObj = drvObj["inputDrvs"];
+ inputDrvsObj = json::object();
+ for (auto & input : drv.inputDrvs)
+ inputDrvsObj[store->printStorePath(input.first)] = input.second;
}
- {
- auto envObj(drvObj.object("env"));
- for (auto & var : drv.env)
- envObj.attr(var.first, var.second);
- }
+ drvObj["system"] = drv.platform;
+ drvObj["builder"] = drv.builder;
+ drvObj["args"] = drv.args;
+ drvObj["env"] = drv.env;
}
-
- }
-
- std::cout << "\n";
+ std::cout << jsonRoot.dump(2) << std::endl;
}
};
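show-derivation (and why-depends below) dispatch on sum types with std::visit plus the 'overloaded' helper. A self-contained sketch of that idiom; Nix defines 'overloaded' in libutil, and the two output types here are simplified stand-ins, not the real DerivationOutput variants:

    #include <iostream>
    #include <string>
    #include <variant>

    // Same helper Nix gets from libutil, redefined to keep the sketch self-contained.
    template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
    template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

    struct InputAddressed { std::string path; };
    struct CAFloating     { std::string hashAlgo; };

    using Output = std::variant<InputAddressed, CAFloating>;

    int main()
    {
        Output o = InputAddressed{"/nix/store/example"};

        std::visit(overloaded {
            [](const InputAddressed & ia) { std::cout << "path: " << ia.path << "\n"; },
            [](const CAFloating & ca)     { std::cout << "hashAlgo: " << ca.hashAlgo << "\n"; },
        }, o);
    }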
diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc
index 1d9ab28ba..723017497 100644
--- a/src/nix/why-depends.cc
+++ b/src/nix/why-depends.cc
@@ -83,20 +83,47 @@ struct CmdWhyDepends : SourceExprCommand
{
auto package = parseInstallable(store, _package);
auto packagePath = Installable::toStorePath(getEvalStore(), store, Realise::Outputs, operateOn, package);
+
+ /* We don't need to build `dependency`. We try to get the store
+ * path if it's already known, and if not, then it's not a dependency.
+ *
+ * Why? If `package` does depend on `dependency`, then getting the
+ * store path of `package` above necessitated having the store path
+ * of `dependency`. The contrapositive is, if the store path of
+ * `dependency` is not already known at this point (i.e. it's a CA
+ * derivation which hasn't been built), then `package` did not need it
+ * to build.
+ */
auto dependency = parseInstallable(store, _dependency);
- auto dependencyPath = Installable::toStorePath(getEvalStore(), store, Realise::Derivation, operateOn, dependency);
- auto dependencyPathHash = dependencyPath.hashPart();
+ auto derivedDependency = dependency->toDerivedPath();
+ auto optDependencyPath = std::visit(overloaded {
+ [](const DerivedPath::Opaque & nodrv) -> std::optional<StorePath> {
+ return { nodrv.path };
+ },
+ [&](const DerivedPath::Built & hasdrv) -> std::optional<StorePath> {
+ if (hasdrv.outputs.size() != 1) {
+ throw Error("argument '%s' should evaluate to one store path", dependency->what());
+ }
+ auto outputMap = store->queryPartialDerivationOutputMap(hasdrv.drvPath);
+ auto maybePath = outputMap.find(*hasdrv.outputs.begin());
+ if (maybePath == outputMap.end()) {
+ throw Error("unexpected end of iterator");
+ }
+ return maybePath->second;
+ },
+ }, derivedDependency.raw());
StorePathSet closure;
store->computeFSClosure({packagePath}, closure, false, false);
- if (!closure.count(dependencyPath)) {
- printError("'%s' does not depend on '%s'",
- store->printStorePath(packagePath),
- store->printStorePath(dependencyPath));
+ if (!optDependencyPath.has_value() || !closure.count(*optDependencyPath)) {
+ printError("'%s' does not depend on '%s'", package->what(), dependency->what());
return;
}
+ auto dependencyPath = *optDependencyPath;
+ auto dependencyPathHash = dependencyPath.hashPart();
+
stopProgressBar(); // FIXME
auto accessor = store->getFSAccessor();
diff --git a/tests/ca/why-depends.sh b/tests/ca/why-depends.sh
new file mode 100644
index 000000000..0c079f63b
--- /dev/null
+++ b/tests/ca/why-depends.sh
@@ -0,0 +1,5 @@
+source common.sh
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+
+cd .. && source why-depends.sh
diff --git a/tests/check.nix b/tests/check.nix
index ed91ff845..ddab8eea9 100644
--- a/tests/check.nix
+++ b/tests/check.nix
@@ -44,7 +44,7 @@ with import ./config.nix;
};
hashmismatch = import <nix/fetchurl.nix> {
- url = "file://" + builtins.getEnv "TMPDIR" + "/dummy";
+ url = "file://" + builtins.getEnv "TEST_ROOT" + "/dummy";
sha256 = "0mdqa9w1p6cmli6976v4wi0sw9r4p5prkj7lzfd1877wk11c9c73";
};
diff --git a/tests/check.sh b/tests/check.sh
index 495202781..fbd784fc5 100644
--- a/tests/check.sh
+++ b/tests/check.sh
@@ -91,13 +91,13 @@ nix-build check.nix -A fetchurl --no-out-link --check
nix-build check.nix -A fetchurl --no-out-link --repair
[[ $(cat $path) != foo ]]
-echo 'Hello World' > $TMPDIR/dummy
+echo 'Hello World' > $TEST_ROOT/dummy
nix-build check.nix -A hashmismatch --no-out-link || status=$?
[ "$status" = "102" ]
-echo -n > $TMPDIR/dummy
+echo -n > $TEST_ROOT/dummy
nix-build check.nix -A hashmismatch --no-out-link
-echo 'Hello World' > $TMPDIR/dummy
+echo 'Hello World' > $TEST_ROOT/dummy
nix-build check.nix -A hashmismatch --no-out-link --check || status=$?
[ "$status" = "102" ]
diff --git a/tests/containers.nix b/tests/containers.nix
new file mode 100644
index 000000000..59e953c3b
--- /dev/null
+++ b/tests/containers.nix
@@ -0,0 +1,68 @@
+# Test whether we can run a NixOS container inside a Nix build using systemd-nspawn.
+{ nixpkgs, system, overlay }:
+
+with import (nixpkgs + "/nixos/lib/testing-python.nix") {
+ inherit system;
+ extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
+};
+
+makeTest ({
+ name = "containers";
+
+ nodes =
+ {
+ host =
+ { config, lib, pkgs, nodes, ... }:
+ { virtualisation.writableStore = true;
+ virtualisation.diskSize = 2048;
+ virtualisation.additionalPaths =
+ [ pkgs.stdenv
+ (import ./systemd-nspawn.nix { inherit nixpkgs; }).toplevel
+ ];
+ virtualisation.memorySize = 4096;
+ nix.binaryCaches = lib.mkForce [ ];
+ nix.extraOptions =
+ ''
+ extra-experimental-features = nix-command auto-allocate-uids cgroups
+ extra-system-features = uid-range
+ '';
+ nix.nixPath = [ "nixpkgs=${nixpkgs}" ];
+ };
+ };
+
+ testScript = { nodes }: ''
+ start_all()
+
+ host.succeed("nix --version >&2")
+
+ # Test that 'id' gives the expected result in various configurations.
+
+ # Existing UIDs, sandbox.
+ host.succeed("nix build --no-auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-1")
+ host.succeed("[[ $(cat ./result) = 'uid=1000(nixbld) gid=100(nixbld) groups=100(nixbld)' ]]")
+
+ # Existing UIDs, no sandbox.
+ host.succeed("nix build --no-auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-2")
+ host.succeed("[[ $(cat ./result) = 'uid=30001(nixbld1) gid=30000(nixbld) groups=30000(nixbld)' ]]")
+
+ # Auto-allocated UIDs, sandbox.
+ host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-3")
+ host.succeed("[[ $(cat ./result) = 'uid=1000(nixbld) gid=100(nixbld) groups=100(nixbld)' ]]")
+
+ # Auto-allocated UIDs, no sandbox.
+ host.succeed("nix build --auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-4")
+ host.succeed("[[ $(cat ./result) = 'uid=872415232 gid=30000(nixbld) groups=30000(nixbld)' ]]")
+
+ # Auto-allocated UIDs, UID range, sandbox.
+ host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-5 --arg uidRange true")
+ host.succeed("[[ $(cat ./result) = 'uid=0(root) gid=0(root) groups=0(root)' ]]")
+
+ # Auto-allocated UIDs, UID range, no sandbox.
+ host.fail("nix build --auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-6 --arg uidRange true")
+
+ # Run systemd-nspawn in a Nix build.
+ host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./systemd-nspawn.nix} --argstr nixpkgs ${nixpkgs}")
+ host.succeed("[[ $(cat ./result/msg) = 'Hello World' ]]")
+ '';
+
+})
diff --git a/tests/fetchClosure.sh b/tests/fetchClosure.sh
index 44050c878..d88c55c3c 100644
--- a/tests/fetchClosure.sh
+++ b/tests/fetchClosure.sh
@@ -1,7 +1,6 @@
source common.sh
enableFeatures "fetch-closure"
-needLocalStore "'--no-require-sigs' can’t be used with the daemon"
clearStore
clearCacheCache
@@ -28,15 +27,19 @@ clearStore
[ ! -e $nonCaPath ]
[ -e $caPath ]
-# In impure mode, we can use non-CA paths.
-[[ $(nix eval --raw --no-require-sigs --impure --expr "
- builtins.fetchClosure {
- fromStore = \"file://$cacheDir\";
- fromPath = $nonCaPath;
- }
-") = $nonCaPath ]]
+if [[ "$NIX_REMOTE" != "daemon" ]]; then
+
+ # In impure mode, we can use non-CA paths.
+ [[ $(nix eval --raw --no-require-sigs --impure --expr "
+ builtins.fetchClosure {
+ fromStore = \"file://$cacheDir\";
+ fromPath = $nonCaPath;
+ }
+ ") = $nonCaPath ]]
+
+ [ -e $nonCaPath ]
-[ -e $nonCaPath ]
+fi
# 'toPath' set to empty string should fail but print the expected path.
nix eval -v --json --expr "
diff --git a/tests/id-test.nix b/tests/id-test.nix
new file mode 100644
index 000000000..8eb9d38f9
--- /dev/null
+++ b/tests/id-test.nix
@@ -0,0 +1,8 @@
+{ name, uidRange ? false }:
+
+with import <nixpkgs> {};
+
+runCommand name
+ { requiredSystemFeatures = if uidRange then ["uid-range"] else [];
+ }
+ "id; id > $out"
diff --git a/tests/systemd-nspawn.nix b/tests/systemd-nspawn.nix
new file mode 100644
index 000000000..424436b3f
--- /dev/null
+++ b/tests/systemd-nspawn.nix
@@ -0,0 +1,78 @@
+{ nixpkgs }:
+
+let
+
+ machine = { config, pkgs, ... }:
+ {
+ system.stateVersion = "22.05";
+ boot.isContainer = true;
+ systemd.services.console-getty.enable = false;
+ networking.dhcpcd.enable = false;
+
+ services.httpd = {
+ enable = true;
+ adminAddr = "nixos@example.org";
+ };
+
+ systemd.services.test = {
+ wantedBy = [ "multi-user.target" ];
+ after = [ "httpd.service" ];
+ script = ''
+ source /.env
+ echo "Hello World" > $out/msg
+ ls -lR /dev > $out/dev
+ ${pkgs.curl}/bin/curl -sS --fail http://localhost/ > $out/page.html
+ '';
+ unitConfig = {
+ FailureAction = "exit-force";
+ FailureActionExitStatus = 42;
+ SuccessAction = "exit-force";
+ };
+ };
+ };
+
+ cfg = (import (nixpkgs + "/nixos/lib/eval-config.nix") {
+ modules = [ machine ];
+ system = "x86_64-linux";
+ });
+
+ config = cfg.config;
+
+in
+
+with cfg._module.args.pkgs;
+
+runCommand "test"
+ { buildInputs = [ config.system.path ];
+ requiredSystemFeatures = [ "uid-range" ];
+ toplevel = config.system.build.toplevel;
+ }
+ ''
+ root=$(pwd)/root
+ mkdir -p $root $root/etc
+
+ export > $root/.env
+
+ # Make /run a tmpfs to shut up a systemd warning.
+ mkdir /run
+ mount -t tmpfs none /run
+ chmod 0700 /run
+
+ mount -t cgroup2 none /sys/fs/cgroup
+
+ mkdir -p $out
+
+ touch /etc/os-release
+ echo a5ea3f98dedc0278b6f3cc8c37eeaeac > /etc/machine-id
+
+ SYSTEMD_NSPAWN_UNIFIED_HIERARCHY=1 \
+ ${config.systemd.package}/bin/systemd-nspawn \
+ --keep-unit \
+ -M ${config.networking.hostName} -D "$root" \
+ --register=no \
+ --resolv-conf=off \
+ --bind-ro=/nix/store \
+ --bind=$out \
+ --private-network \
+ $toplevel/init
+ ''