-rw-r--r--.github/PULL_REQUEST_TEMPLATE.md5
-rw-r--r--.github/PULL_REQUEST_TEMPLATE/pull_request_template.md11
-rw-r--r--.github/labeler.yml23
-rw-r--r--.github/workflows/ci.yml12
-rw-r--r--.github/workflows/labels.yml24
-rw-r--r--.gitignore6
-rw-r--r--.version2
-rw-r--r--CONTRIBUTING.md62
-rw-r--r--Makefile3
-rw-r--r--Makefile.config.in2
-rw-r--r--boehmgc-coroutine-sp-fallback.diff22
-rw-r--r--configure.ac24
-rw-r--r--default.nix13
-rw-r--r--doc/internal-api/.gitignore3
-rw-r--r--doc/internal-api/doxygen.cfg.in63
-rw-r--r--doc/internal-api/local.mk19
-rw-r--r--doc/manual/generate-builtins.nix12
-rw-r--r--doc/manual/generate-manpage.nix75
-rw-r--r--doc/manual/generate-options.nix41
-rw-r--r--doc/manual/generate-xp-features-shortlist.nix9
-rw-r--r--doc/manual/generate-xp-features.nix11
-rw-r--r--doc/manual/local.mk143
-rw-r--r--doc/manual/redirects.js3
-rw-r--r--doc/manual/src/SUMMARY.md.in43
-rw-r--r--doc/manual/src/advanced-topics/diff-hook.md10
-rw-r--r--doc/manual/src/advanced-topics/post-build-hook.md2
-rw-r--r--doc/manual/src/command-ref/env-common.md68
-rw-r--r--doc/manual/src/command-ref/experimental-commands.md2
-rw-r--r--doc/manual/src/command-ref/files/channels.md26
-rw-r--r--doc/manual/src/command-ref/files/default-nix-expression.md52
-rw-r--r--doc/manual/src/command-ref/files/manifest.json.md45
-rw-r--r--doc/manual/src/command-ref/files/manifest.nix.md128
-rw-r--r--doc/manual/src/command-ref/files/profiles.md74
-rw-r--r--doc/manual/src/command-ref/nix-build.md21
-rw-r--r--doc/manual/src/command-ref/nix-channel.md46
-rw-r--r--doc/manual/src/command-ref/nix-collect-garbage.md6
-rw-r--r--doc/manual/src/command-ref/nix-copy-closure.md6
-rw-r--r--doc/manual/src/command-ref/nix-env.md862
-rw-r--r--doc/manual/src/command-ref/nix-env/delete-generations.md46
-rw-r--r--doc/manual/src/command-ref/nix-env/env-common.md6
-rw-r--r--doc/manual/src/command-ref/nix-env/install.md187
-rw-r--r--doc/manual/src/command-ref/nix-env/list-generations.md33
-rw-r--r--doc/manual/src/command-ref/nix-env/opt-common.md35
-rw-r--r--doc/manual/src/command-ref/nix-env/query.md215
-rw-r--r--doc/manual/src/command-ref/nix-env/rollback.md34
-rw-r--r--doc/manual/src/command-ref/nix-env/set-flag.md82
-rw-r--r--doc/manual/src/command-ref/nix-env/set.md30
-rw-r--r--doc/manual/src/command-ref/nix-env/switch-generation.md33
-rw-r--r--doc/manual/src/command-ref/nix-env/switch-profile.md26
-rw-r--r--doc/manual/src/command-ref/nix-env/uninstall.md28
-rw-r--r--doc/manual/src/command-ref/nix-env/upgrade.md141
-rw-r--r--doc/manual/src/command-ref/nix-hash.md46
-rw-r--r--doc/manual/src/command-ref/nix-instantiate.md20
-rw-r--r--doc/manual/src/command-ref/nix-shell.md31
-rw-r--r--doc/manual/src/command-ref/nix-store.md867
-rw-r--r--doc/manual/src/command-ref/nix-store/add-fixed.md35
-rw-r--r--doc/manual/src/command-ref/nix-store/add.md25
-rw-r--r--doc/manual/src/command-ref/nix-store/delete.md33
-rw-r--r--doc/manual/src/command-ref/nix-store/dump-db.md26
-rw-r--r--doc/manual/src/command-ref/nix-store/dump.md40
-rw-r--r--doc/manual/src/command-ref/nix-store/export.md41
-rw-r--r--doc/manual/src/command-ref/nix-store/gc.md72
-rw-r--r--doc/manual/src/command-ref/nix-store/generate-binary-cache-key.md29
-rw-r--r--doc/manual/src/command-ref/nix-store/import.md21
-rw-r--r--doc/manual/src/command-ref/nix-store/load-db.md18
-rw-r--r--doc/manual/src/command-ref/nix-store/opt-common.md36
-rw-r--r--doc/manual/src/command-ref/nix-store/optimise.md40
-rw-r--r--doc/manual/src/command-ref/nix-store/print-env.md31
-rw-r--r--doc/manual/src/command-ref/nix-store/query.md220
-rw-r--r--doc/manual/src/command-ref/nix-store/read-log.md38
-rw-r--r--doc/manual/src/command-ref/nix-store/realise.md118
-rw-r--r--doc/manual/src/command-ref/nix-store/repair-path.md35
-rw-r--r--doc/manual/src/command-ref/nix-store/restore.md18
-rw-r--r--doc/manual/src/command-ref/nix-store/serve.md38
-rw-r--r--doc/manual/src/command-ref/nix-store/verify-path.md29
-rw-r--r--doc/manual/src/command-ref/nix-store/verify.md36
-rw-r--r--doc/manual/src/command-ref/opt-common.md61
-rw-r--r--doc/manual/src/contributing/experimental-features.md95
-rw-r--r--doc/manual/src/contributing/hacking.md38
-rw-r--r--doc/manual/src/glossary.md67
-rw-r--r--doc/manual/src/installation/env-variables.md11
-rw-r--r--doc/manual/src/installation/installing-binary.md167
-rw-r--r--doc/manual/src/installation/uninstall.md148
-rw-r--r--doc/manual/src/installation/upgrading.md6
-rw-r--r--doc/manual/src/introduction.md6
-rw-r--r--doc/manual/src/language/advanced-attributes.md29
-rw-r--r--doc/manual/src/language/builtin-constants.md31
-rw-r--r--doc/manual/src/language/builtins-prefix.md20
-rw-r--r--doc/manual/src/language/operators.md6
-rw-r--r--doc/manual/src/language/values.md10
-rw-r--r--doc/manual/src/package-management/basic-package-mgmt.md24
-rw-r--r--doc/manual/src/package-management/binary-cache-substituter.md4
-rw-r--r--doc/manual/src/package-management/channels.md2
-rw-r--r--doc/manual/src/package-management/copy-closure.md4
-rw-r--r--doc/manual/src/package-management/profiles.md6
-rw-r--r--doc/manual/src/package-management/s3-substituter.md34
-rw-r--r--doc/manual/src/package-management/ssh-substituter.md4
-rw-r--r--doc/manual/src/quick-start.md12
-rw-r--r--doc/manual/src/release-notes/rl-2.15.md58
-rw-r--r--doc/manual/src/release-notes/rl-next.md24
-rw-r--r--doc/manual/utils.nix65
-rw-r--r--docker.nix20
-rw-r--r--flake.lock17
-rw-r--r--flake.nix78
-rw-r--r--local.mk4
-rw-r--r--maintainers/README.md112
-rw-r--r--misc/launchd/org.nixos.nix-daemon.plist.in2
-rw-r--r--mk/cxx-big-literal.mk5
-rwxr-xr-xmk/debug-test.sh2
-rw-r--r--mk/lib.mk1
-rw-r--r--mk/libraries.mk2
-rw-r--r--mk/patterns.mk4
-rwxr-xr-xmk/run-test.sh17
-rw-r--r--perl/lib/Nix/Store.xs10
-rwxr-xr-xscripts/install-systemd-multi-user.sh2
-rw-r--r--src/build-remote/build-remote.cc68
-rw-r--r--src/libcmd/command-installable-value.cc11
-rw-r--r--src/libcmd/command-installable-value.hh23
-rw-r--r--src/libcmd/command.cc8
-rw-r--r--src/libcmd/command.hh62
-rw-r--r--src/libcmd/common-eval-args.cc18
-rw-r--r--src/libcmd/common-eval-args.hh7
-rw-r--r--src/libcmd/editor-for.cc7
-rw-r--r--src/libcmd/editor-for.hh10
-rw-r--r--src/libcmd/installable-attr-path.cc14
-rw-r--r--src/libcmd/installable-attr-path.hh3
-rw-r--r--src/libcmd/installable-derived-path.cc5
-rw-r--r--src/libcmd/installable-derived-path.hh1
-rw-r--r--src/libcmd/installable-flake.cc52
-rw-r--r--src/libcmd/installable-flake.hh46
-rw-r--r--src/libcmd/installable-value.cc66
-rw-r--r--src/libcmd/installable-value.hh107
-rw-r--r--src/libcmd/installables.cc132
-rw-r--r--src/libcmd/installables.hh173
-rw-r--r--src/libcmd/legacy.hh1
-rw-r--r--src/libcmd/markdown.hh3
-rw-r--r--src/libcmd/repl.cc44
-rw-r--r--src/libcmd/repl.hh1
-rw-r--r--src/libexpr/attr-path.cc26
-rw-r--r--src/libexpr/attr-path.hh7
-rw-r--r--src/libexpr/attr-set.hh27
-rw-r--r--src/libexpr/eval-cache.cc21
-rw-r--r--src/libexpr/eval-cache.hh11
-rw-r--r--src/libexpr/eval-inline.hh5
-rw-r--r--src/libexpr/eval.cc330
-rw-r--r--src/libexpr/eval.hh364
-rw-r--r--src/libexpr/flake/config.cc2
-rw-r--r--src/libexpr/flake/flake.cc48
-rw-r--r--src/libexpr/flake/flake.hh114
-rw-r--r--src/libexpr/flake/flakeref.hh15
-rw-r--r--src/libexpr/flake/lockfile.cc5
-rw-r--r--src/libexpr/flake/lockfile.hh24
-rw-r--r--src/libexpr/function-trace.hh1
-rw-r--r--src/libexpr/get-drvs.cc6
-rw-r--r--src/libexpr/get-drvs.hh23
-rw-r--r--src/libexpr/json-to-value.hh1
-rw-r--r--src/libexpr/local.mk2
-rw-r--r--src/libexpr/nixexpr.cc48
-rw-r--r--src/libexpr/nixexpr.hh19
-rw-r--r--src/libexpr/parser.y60
-rw-r--r--src/libexpr/paths.cc10
-rw-r--r--src/libexpr/primops.cc346
-rw-r--r--src/libexpr/primops.hh17
-rw-r--r--src/libexpr/primops/context.cc54
-rw-r--r--src/libexpr/primops/fetchClosure.cc7
-rw-r--r--src/libexpr/primops/fetchMercurial.cc5
-rw-r--r--src/libexpr/primops/fetchTree.cc82
-rw-r--r--src/libexpr/print.cc94
-rw-r--r--src/libexpr/print.hh54
-rw-r--r--src/libexpr/symbol-table.hh27
-rw-r--r--src/libexpr/tests/derived-path.cc65
-rw-r--r--src/libexpr/tests/json.cc2
-rw-r--r--src/libexpr/tests/libexpr.hh5
-rw-r--r--src/libexpr/tests/local.mk2
-rw-r--r--src/libexpr/tests/primops.cc4
-rw-r--r--src/libexpr/tests/value/context.cc67
-rw-r--r--src/libexpr/tests/value/context.hh1
-rw-r--r--src/libexpr/value-to-json.cc11
-rw-r--r--src/libexpr/value-to-json.hh5
-rw-r--r--src/libexpr/value-to-xml.cc24
-rw-r--r--src/libexpr/value-to-xml.hh3
-rw-r--r--src/libexpr/value.hh136
-rw-r--r--src/libexpr/value/context.cc17
-rw-r--r--src/libexpr/value/context.hh62
-rw-r--r--src/libfetchers/attrs.hh1
-rw-r--r--src/libfetchers/cache.hh1
-rw-r--r--src/libfetchers/fetch-settings.hh13
-rw-r--r--src/libfetchers/fetchers.cc8
-rw-r--r--src/libfetchers/fetchers.hh42
-rw-r--r--src/libfetchers/git.cc11
-rw-r--r--src/libfetchers/github.cc2
-rw-r--r--src/libfetchers/input-accessor.cc100
-rw-r--r--src/libfetchers/input-accessor.hh167
-rw-r--r--src/libfetchers/registry.hh1
-rw-r--r--src/libfetchers/tarball.cc16
-rw-r--r--src/libmain/common-args.hh19
-rw-r--r--src/libmain/loggers.hh1
-rw-r--r--src/libmain/progress-bar.cc21
-rw-r--r--src/libmain/progress-bar.hh1
-rw-r--r--src/libmain/shared.cc101
-rw-r--r--src/libmain/shared.hh61
-rw-r--r--src/libstore/binary-cache-store.cc48
-rw-r--r--src/libstore/binary-cache-store.hh53
-rw-r--r--src/libstore/build-result.hh70
-rw-r--r--src/libstore/build/derivation-goal.cc154
-rw-r--r--src/libstore/build/derivation-goal.hh235
-rw-r--r--src/libstore/build/drv-output-substitution-goal.cc23
-rw-r--r--src/libstore/build/drv-output-substitution-goal.hh49
-rw-r--r--src/libstore/build/entry-points.cc49
-rw-r--r--src/libstore/build/goal.cc27
-rw-r--r--src/libstore/build/goal.hh102
-rw-r--r--src/libstore/build/hook-instance.cc5
-rw-r--r--src/libstore/build/hook-instance.hh17
-rw-r--r--src/libstore/build/local-derivation-goal.cc223
-rw-r--r--src/libstore/build/local-derivation-goal.hh225
-rw-r--r--src/libstore/build/personality.hh1
-rw-r--r--src/libstore/build/substitution-goal.cc13
-rw-r--r--src/libstore/build/substitution-goal.hh65
-rw-r--r--src/libstore/build/worker.cc40
-rw-r--r--src/libstore/build/worker.hh204
-rw-r--r--src/libstore/builtins.hh1
-rw-r--r--src/libstore/builtins/buildenv.hh1
-rw-r--r--src/libstore/content-address.cc240
-rw-r--r--src/libstore/content-address.hh316
-rw-r--r--src/libstore/crypto.hh17
-rw-r--r--src/libstore/daemon.cc86
-rw-r--r--src/libstore/daemon.hh2
-rw-r--r--src/libstore/derivations.cc257
-rw-r--r--src/libstore/derivations.hh407
-rw-r--r--src/libstore/derived-path.cc40
-rw-r--r--src/libstore/derived-path.hh29
-rw-r--r--src/libstore/downstream-placeholder.cc39
-rw-r--r--src/libstore/downstream-placeholder.hh75
-rw-r--r--src/libstore/dummy-store.cc18
-rw-r--r--src/libstore/dummy-store.md13
-rw-r--r--src/libstore/export-import.cc8
-rw-r--r--src/libstore/filetransfer.cc11
-rw-r--r--src/libstore/filetransfer.hh40
-rw-r--r--src/libstore/fs-accessor.hh18
-rw-r--r--src/libstore/gc-store.hh86
-rw-r--r--src/libstore/gc.cc53
-rw-r--r--src/libstore/globals.cc134
-rw-r--r--src/libstore/globals.hh142
-rw-r--r--src/libstore/http-binary-cache-store.cc21
-rw-r--r--src/libstore/http-binary-cache-store.md8
-rw-r--r--src/libstore/legacy-ssh-store.cc83
-rw-r--r--src/libstore/legacy-ssh-store.md8
-rw-r--r--src/libstore/local-binary-cache-store.cc11
-rw-r--r--src/libstore/local-binary-cache-store.md16
-rw-r--r--src/libstore/local-fs-store.hh21
-rw-r--r--src/libstore/local-store.cc180
-rw-r--r--src/libstore/local-store.hh165
-rw-r--r--src/libstore/local-store.md39
-rw-r--r--src/libstore/local.mk6
-rw-r--r--src/libstore/lock.cc2
-rw-r--r--src/libstore/lock.hh15
-rw-r--r--src/libstore/log-store.hh7
-rw-r--r--src/libstore/machines.hh1
-rw-r--r--src/libstore/make-content-addressed.cc33
-rw-r--r--src/libstore/make-content-addressed.hh1
-rw-r--r--src/libstore/misc.cc21
-rw-r--r--src/libstore/names.hh1
-rw-r--r--src/libstore/nar-accessor.cc1
-rw-r--r--src/libstore/nar-accessor.hh23
-rw-r--r--src/libstore/nar-info-disk-cache.cc2
-rw-r--r--src/libstore/nar-info-disk-cache.hh7
-rw-r--r--src/libstore/nar-info.cc34
-rw-r--r--src/libstore/nar-info.hh4
-rw-r--r--src/libstore/optimise-store.cc17
-rw-r--r--src/libstore/outputs-spec.hh34
-rw-r--r--src/libstore/parsed-derivations.hh1
-rw-r--r--src/libstore/path-info.cc72
-rw-r--r--src/libstore/path-info.hh94
-rw-r--r--src/libstore/path-regex.hh1
-rw-r--r--src/libstore/path-with-outputs.hh24
-rw-r--r--src/libstore/path.cc4
-rw-r--r--src/libstore/path.hh22
-rw-r--r--src/libstore/pathlocks.cc8
-rw-r--r--src/libstore/pathlocks.hh13
-rw-r--r--src/libstore/profiles.cc34
-rw-r--r--src/libstore/profiles.hh72
-rw-r--r--src/libstore/realisation.cc13
-rw-r--r--src/libstore/realisation.hh43
-rw-r--r--src/libstore/references.cc3
-rw-r--r--src/libstore/references.hh1
-rw-r--r--src/libstore/remote-fs-accessor.hh1
-rw-r--r--src/libstore/remote-store.cc263
-rw-r--r--src/libstore/remote-store.hh42
-rw-r--r--src/libstore/repair-flag.hh1
-rw-r--r--src/libstore/s3-binary-cache-store.cc93
-rw-r--r--src/libstore/s3-binary-cache-store.hh1
-rw-r--r--src/libstore/s3-binary-cache-store.md8
-rw-r--r--src/libstore/s3.hh1
-rw-r--r--src/libstore/serve-protocol.hh1
-rw-r--r--src/libstore/sqlite.cc9
-rw-r--r--src/libstore/sqlite.hh49
-rw-r--r--src/libstore/ssh-store-config.hh29
-rw-r--r--src/libstore/ssh-store.cc21
-rw-r--r--src/libstore/ssh-store.md8
-rw-r--r--src/libstore/ssh.cc51
-rw-r--r--src/libstore/ssh.hh2
-rw-r--r--src/libstore/store-api.cc178
-rw-r--r--src/libstore/store-api.hh748
-rw-r--r--src/libstore/store-cast.hh8
-rw-r--r--src/libstore/tests/derivation.cc123
-rw-r--r--src/libstore/tests/derived-path.cc14
-rw-r--r--src/libstore/tests/derived-path.hh1
-rw-r--r--src/libstore/tests/downstream-placeholder.cc33
-rw-r--r--src/libstore/tests/libstore.hh3
-rw-r--r--src/libstore/tests/outputs-spec.cc6
-rw-r--r--src/libstore/tests/outputs-spec.hh1
-rw-r--r--src/libstore/tests/path.hh1
-rw-r--r--src/libstore/uds-remote-store.cc6
-rw-r--r--src/libstore/uds-remote-store.hh8
-rw-r--r--src/libstore/uds-remote-store.md9
-rw-r--r--src/libstore/worker-protocol.cc192
-rw-r--r--src/libstore/worker-protocol.hh160
-rw-r--r--src/libutil/abstract-setting-to-json.hh1
-rw-r--r--src/libutil/ansicolor.hh6
-rw-r--r--src/libutil/archive.cc4
-rw-r--r--src/libutil/archive.hh110
-rw-r--r--src/libutil/args.cc38
-rw-r--r--src/libutil/args.hh73
-rw-r--r--src/libutil/callback.hh9
-rw-r--r--src/libutil/canon-path.cc31
-rw-r--r--src/libutil/canon-path.hh103
-rw-r--r--src/libutil/cgroup.hh11
-rw-r--r--src/libutil/chunked-vector.hh21
-rw-r--r--src/libutil/closure.hh3
-rw-r--r--src/libutil/comparator.hh14
-rw-r--r--src/libutil/compression.cc6
-rw-r--r--src/libutil/compression.hh3
-rw-r--r--src/libutil/compute-levels.hh3
-rw-r--r--src/libutil/config-impl.hh71
-rw-r--r--src/libutil/config.cc149
-rw-r--r--src/libutil/config.hh157
-rw-r--r--src/libutil/error.cc6
-rw-r--r--src/libutil/error.hh49
-rw-r--r--src/libutil/experimental-features.cc254
-rw-r--r--src/libutil/experimental-features.hh42
-rw-r--r--src/libutil/filesystem.cc4
-rw-r--r--src/libutil/finally.hh5
-rw-r--r--src/libutil/fmt.hh37
-rw-r--r--src/libutil/git.hh33
-rw-r--r--src/libutil/hash.cc17
-rw-r--r--src/libutil/hash.hh111
-rw-r--r--src/libutil/hilite.hh13
-rw-r--r--src/libutil/json-impls.hh1
-rw-r--r--src/libutil/json-utils.hh1
-rw-r--r--src/libutil/logging.cc13
-rw-r--r--src/libutil/logging.hh39
-rw-r--r--src/libutil/lru-cache.hh23
-rw-r--r--src/libutil/monitor-fd.hh1
-rw-r--r--src/libutil/namespaces.hh1
-rw-r--r--src/libutil/pool.hh45
-rw-r--r--src/libutil/ref.hh7
-rw-r--r--src/libutil/regex-combinators.hh1
-rw-r--r--src/libutil/serialise.cc49
-rw-r--r--src/libutil/serialise.hh160
-rw-r--r--src/libutil/split.hh11
-rw-r--r--src/libutil/suggestions.hh8
-rw-r--r--src/libutil/sync.hh33
-rw-r--r--src/libutil/tarfile.cc4
-rw-r--r--src/libutil/tarfile.hh6
-rw-r--r--src/libutil/tests/canon-path.cc73
-rw-r--r--src/libutil/tests/config.cc50
-rw-r--r--src/libutil/tests/hash.hh1
-rw-r--r--src/libutil/thread-pool.hh44
-rw-r--r--src/libutil/topo-sort.hh1
-rw-r--r--src/libutil/types.hh47
-rw-r--r--src/libutil/url-parts.hh16
-rw-r--r--src/libutil/url.hh6
-rw-r--r--src/libutil/util.cc84
-rw-r--r--src/libutil/util.hh517
-rw-r--r--src/libutil/xml-writer.hh1
-rw-r--r--src/nix-build/nix-build.cc21
-rwxr-xr-xsrc/nix-channel/nix-channel.cc3
-rw-r--r--src/nix-collect-garbage/nix-collect-garbage.cc10
-rwxr-xr-xsrc/nix-copy-closure/nix-copy-closure.cc2
-rw-r--r--src/nix-env/nix-env.cc152
-rw-r--r--src/nix-env/user-env.cc16
-rw-r--r--src/nix-env/user-env.hh1
-rw-r--r--src/nix-instantiate/nix-instantiate.cc17
-rw-r--r--src/nix-store/dotgraph.hh1
-rw-r--r--src/nix-store/graphml.cc2
-rw-r--r--src/nix-store/graphml.hh1
-rw-r--r--src/nix-store/nix-store.cc139
-rw-r--r--src/nix/add-to-store.cc14
-rw-r--r--src/nix/app.cc10
-rw-r--r--src/nix/build.cc54
-rw-r--r--src/nix/build.md4
-rw-r--r--src/nix/bundle.cc9
-rw-r--r--src/nix/bundle.md2
-rw-r--r--src/nix/copy.cc2
-rw-r--r--src/nix/copy.md2
-rw-r--r--src/nix/daemon.cc264
-rw-r--r--src/nix/derivation-add.cc45
-rw-r--r--src/nix/derivation-add.md18
-rw-r--r--src/nix/derivation-show.cc (renamed from src/nix/show-derivation.cc)8
-rw-r--r--src/nix/derivation-show.md (renamed from src/nix/show-derivation.md)18
-rw-r--r--src/nix/derivation.cc25
-rw-r--r--src/nix/describe-stores.cc44
-rw-r--r--src/nix/develop.cc31
-rw-r--r--src/nix/develop.md4
-rw-r--r--src/nix/doctor.cc23
-rw-r--r--src/nix/edit.cc6
-rw-r--r--src/nix/eval.cc12
-rw-r--r--src/nix/eval.md4
-rw-r--r--src/nix/flake-check.md2
-rw-r--r--src/nix/flake.cc185
-rw-r--r--src/nix/flake.md57
-rw-r--r--src/nix/fmt.cc6
-rw-r--r--src/nix/hash.cc39
-rw-r--r--src/nix/help-stores.md46
-rw-r--r--src/nix/local.mk6
-rw-r--r--src/nix/log.cc2
-rw-r--r--src/nix/log.md3
-rw-r--r--src/nix/main.cc107
-rw-r--r--src/nix/make-content-addressed.cc1
-rw-r--r--src/nix/make-content-addressed.md4
-rw-r--r--src/nix/nar-ls.md4
-rw-r--r--src/nix/nar.cc1
-rw-r--r--src/nix/nix.md189
-rw-r--r--src/nix/path-info.md10
-rw-r--r--src/nix/ping-store.cc5
-rw-r--r--src/nix/prefetch.cc21
-rw-r--r--src/nix/print-dev-env.md2
-rw-r--r--src/nix/profile-install.md2
-rw-r--r--src/nix/profile.cc127
-rw-r--r--src/nix/profile.md103
-rw-r--r--src/nix/realisation.cc3
-rw-r--r--src/nix/registry.cc3
-rw-r--r--src/nix/repl.cc55
-rw-r--r--src/nix/run.cc8
-rw-r--r--src/nix/run.hh1
-rw-r--r--src/nix/run.md2
-rw-r--r--src/nix/search.cc6
-rw-r--r--src/nix/search.md10
-rw-r--r--src/nix/shell.md10
-rw-r--r--src/nix/sigs.cc3
-rw-r--r--src/nix/store-copy-log.cc4
-rw-r--r--src/nix/store-delete.cc2
-rw-r--r--src/nix/store-delete.md2
-rw-r--r--src/nix/store-dump-path.md2
-rw-r--r--src/nix/store-ls.md4
-rw-r--r--src/nix/store-repair.cc2
-rw-r--r--src/nix/store-repair.md2
-rw-r--r--src/nix/store.cc1
-rw-r--r--src/nix/upgrade-nix.cc10
-rw-r--r--src/nix/upgrade-nix.md2
-rw-r--r--src/nix/verify.md4
-rw-r--r--src/resolve-system-dependencies/resolve-system-dependencies.cc8
-rw-r--r--tests/binary-cache.sh24
-rw-r--r--tests/build-delete.sh2
-rw-r--r--tests/build-dry.sh2
-rw-r--r--tests/build-remote-trustless-after.sh2
-rw-r--r--tests/build-remote-trustless-should-fail-0.sh29
-rw-r--r--tests/build-remote-trustless-should-pass-0.sh9
-rw-r--r--tests/build-remote-trustless-should-pass-1.sh9
-rw-r--r--tests/build-remote-trustless-should-pass-2.sh13
-rw-r--r--tests/build-remote-trustless-should-pass-3.sh14
-rw-r--r--tests/build-remote-trustless.sh14
-rw-r--r--tests/build-remote.sh35
-rw-r--r--tests/build.sh26
-rw-r--r--tests/ca/build.sh4
-rw-r--r--tests/ca/derivation-json.sh29
-rw-r--r--tests/check-refs.sh10
-rw-r--r--tests/check-reqs.sh4
-rw-r--r--tests/check.sh2
-rw-r--r--tests/common.sh2
-rw-r--r--tests/common/vars-and-functions.sh.in87
-rw-r--r--tests/compute-levels.sh2
-rw-r--r--tests/db-migration.sh10
-rw-r--r--tests/dependencies.sh6
-rw-r--r--tests/derivation-json.sh12
-rw-r--r--tests/describe-stores.sh8
-rw-r--r--tests/dyn-drv/common.sh8
l---------tests/dyn-drv/config.nix.in1
-rw-r--r--tests/dyn-drv/recursive-mod-json.nix33
-rw-r--r--tests/dyn-drv/recursive-mod-json.sh25
-rw-r--r--tests/dyn-drv/text-hashed-output.nix29
-rw-r--r--tests/dyn-drv/text-hashed-output.sh26
-rw-r--r--tests/eval-store.sh2
-rw-r--r--tests/eval.sh6
-rw-r--r--tests/experimental-features.sh86
-rw-r--r--tests/export-graph.sh2
-rw-r--r--tests/fetchClosure.sh4
-rw-r--r--tests/fetchGit.sh5
-rw-r--r--tests/fetchGitRefs.sh7
-rw-r--r--tests/fetchGitSubmodules.sh5
-rw-r--r--tests/fetchMercurial.sh5
-rw-r--r--tests/fetchTree-file.sh4
-rw-r--r--tests/fetchurl.sh2
-rw-r--r--tests/flakes/build-paths.sh34
-rw-r--r--tests/flakes/check.sh8
-rw-r--r--tests/flakes/common.sh9
-rw-r--r--tests/flakes/flake-in-submodule.sh52
-rw-r--r--tests/flakes/flakes.sh39
-rw-r--r--tests/flakes/follow-paths.sh2
-rw-r--r--tests/flakes/mercurial.sh5
-rw-r--r--tests/flakes/show.sh21
-rw-r--r--tests/fmt.sh2
-rwxr-xr-xtests/function-trace.sh10
-rw-r--r--tests/gc-runtime.sh2
-rw-r--r--tests/gc.sh28
-rw-r--r--tests/hash.sh32
-rw-r--r--tests/impure-derivations.sh15
-rwxr-xr-xtests/init.sh2
-rwxr-xr-xtests/install-darwin.sh2
-rw-r--r--tests/installer/default.nix26
-rw-r--r--tests/lang.sh17
-rw-r--r--tests/legacy-ssh-store.sh4
-rw-r--r--tests/linux-sandbox-cert-test.nix29
-rw-r--r--tests/linux-sandbox.sh32
-rw-r--r--tests/local-store.sh3
-rw-r--r--tests/local.mk28
-rw-r--r--tests/misc.sh12
-rw-r--r--tests/multiple-outputs.sh8
-rw-r--r--tests/nar-access.sh4
-rw-r--r--tests/nix-channel.sh14
-rwxr-xr-xtests/nix-daemon-untrusting.sh3
-rw-r--r--tests/nix-profile.sh14
-rw-r--r--tests/nix-shell.sh55
-rw-r--r--tests/nixos/nix-copy.nix95
-rw-r--r--tests/plugins.sh5
-rw-r--r--tests/plugins/local.mk2
-rw-r--r--tests/post-hook.sh5
-rw-r--r--tests/pure-eval.sh2
-rwxr-xr-xtests/push-to-store-old.sh6
-rwxr-xr-xtests/push-to-store.sh6
-rw-r--r--tests/recursive.nix56
-rw-r--r--tests/recursive.sh66
-rw-r--r--tests/remote-store.sh11
-rw-r--r--tests/repl.sh20
-rw-r--r--tests/restricted.sh2
-rw-r--r--tests/search.sh6
-rw-r--r--tests/shell.sh2
-rw-r--r--tests/tarball.sh4
-rw-r--r--tests/test-infra.sh85
-rw-r--r--tests/timeout.sh7
-rw-r--r--tests/user-envs-migration.sh2
-rw-r--r--tests/user-envs.sh50
-rw-r--r--tests/why-depends.sh10
543 files changed, 15502 insertions, 7300 deletions
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index db69e51db..4488c7b7d 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -23,6 +23,11 @@ Maintainers: tick if completed or explain if not relevant
- unit tests - `src/*/tests`
- integration tests - `tests/nixos/*`
- [ ] documentation in the manual
+ - [ ] documentation in the internal API docs
- [ ] code and comments are self-explanatory
- [ ] commit message explains why the change was made
- [ ] new feature or incompatible change: updated release notes
+
+# Priorities
+
+Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
deleted file mode 100644
index 5311be01f..000000000
--- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
+++ /dev/null
@@ -1,11 +0,0 @@
-**Release Notes**
-Please include relevant [release notes](https://github.com/NixOS/nix/blob/master/doc/manual/src/release-notes/rl-next.md) as needed.
-
-
-**Testing**
-
-If this issue is a regression or something that should block release, please consider including a test either in the [testsuite](https://github.com/NixOS/nix/tree/master/tests) or as a [hydraJob]( https://github.com/NixOS/nix/blob/master/flake.nix#L396) so that it can be part of the [automatic checks](https://hydra.nixos.org/jobset/nix/master).
-
-**Priorities**
-
-Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/labeler.yml b/.github/labeler.yml
new file mode 100644
index 000000000..fce0d3aeb
--- /dev/null
+++ b/.github/labeler.yml
@@ -0,0 +1,23 @@
+"documentation":
+ - doc/manual/*
+ - src/nix/**/*.md
+
+"store":
+ - src/libstore/store-api.*
+ - src/libstore/*-store.*
+
+"fetching":
+ - src/libfetchers/**/*
+
+"repl":
+ - src/libcmd/repl.*
+ - src/nix/repl.*
+
+"new-cli":
+ - src/nix/**/*
+
+"tests":
+ # Unit tests
+ - src/*/tests/**/*
+ # Functional and integration tests
+ - tests/**/*
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index fe0976228..c06c77043 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -19,9 +19,7 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- - uses: cachix/install-nix-action@v19
- with:
- install_url: https://releases.nixos.org/nix/nix-2.13.3/install
+ - uses: cachix/install-nix-action@v20
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v12
if: needs.check_secrets.outputs.cachix == 'true'
@@ -60,7 +58,7 @@ jobs:
with:
fetch-depth: 0
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- - uses: cachix/install-nix-action@v19
+ - uses: cachix/install-nix-action@v20
with:
install_url: https://releases.nixos.org/nix/nix-2.13.3/install
- uses: cachix/cachix-action@v12
@@ -81,7 +79,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- - uses: cachix/install-nix-action@v19
+ - uses: cachix/install-nix-action@v20
with:
install_url: '${{needs.installer.outputs.installerURL}}'
install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"
@@ -93,6 +91,8 @@ jobs:
- run: exec sh -c "nix-instantiate -E 'builtins.currentTime' --eval"
- run: exec zsh -c "nix-instantiate -E 'builtins.currentTime' --eval"
- run: exec fish -c "nix-instantiate -E 'builtins.currentTime' --eval"
+ - run: exec bash -c "nix-channel --add https://releases.nixos.org/nixos/unstable/nixos-23.05pre466020.60c1d71f2ba nixpkgs"
+ - run: exec bash -c "nix-channel --update && nix-env -iA nixpkgs.hello && hello"
docker_push_image:
needs: [check_secrets, tests]
@@ -106,7 +106,7 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- - uses: cachix/install-nix-action@v19
+ - uses: cachix/install-nix-action@v20
with:
install_url: https://releases.nixos.org/nix/nix-2.13.3/install
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml
new file mode 100644
index 000000000..5f949ddc5
--- /dev/null
+++ b/.github/workflows/labels.yml
@@ -0,0 +1,24 @@
+name: "Label PR"
+
+on:
+ pull_request_target:
+ types: [edited, opened, synchronize, reopened]
+
+# WARNING:
+# When extending this action, be aware that $GITHUB_TOKEN allows some write
+# access to the GitHub API. This means that it should not evaluate user input in
+# a way that allows code injection.
+
+permissions:
+ contents: read
+ pull-requests: write
+
+jobs:
+ labels:
+ runs-on: ubuntu-latest
+ if: github.repository_owner == 'NixOS'
+ steps:
+ - uses: actions/labeler@v4
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ sync-labels: true
diff --git a/.gitignore b/.gitignore
index e326966d6..7ae1071d0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,9 +19,12 @@ perl/Makefile.config
/doc/manual/nix.json
/doc/manual/conf-file.json
/doc/manual/builtins.json
+/doc/manual/xp-features.json
/doc/manual/src/SUMMARY.md
/doc/manual/src/command-ref/new-cli
/doc/manual/src/command-ref/conf-file.md
+/doc/manual/src/command-ref/experimental-features-shortlist.md
+/doc/manual/src/contributing/experimental-feature-descriptions.md
/doc/manual/src/language/builtins.md
# /scripts/
@@ -48,6 +51,8 @@ perl/Makefile.config
/src/nix/nix
+/src/nix/doc
+
# /src/nix-env/
/src/nix-env/nix-env
@@ -82,6 +87,7 @@ perl/Makefile.config
/tests/shell.drv
/tests/config.nix
/tests/ca/config.nix
+/tests/dyn-drv/config.nix
/tests/repl-result-out
# /tests/lang/
diff --git a/.version b/.version
index c910885a0..752490696 100644
--- a/.version
+++ b/.version
@@ -1 +1 @@
-2.15.0 \ No newline at end of file
+2.16.0
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000..57a949906
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,62 @@
+# Contributing to Nix
+
+Welcome and thank you for your interest in contributing to Nix!
+We appreciate your support.
+
+Reading and following these guidelines will help us make the contribution process easy and effective for everyone involved.
+
+
+## Report a bug
+
+1. Check on the [GitHub issue tracker](https://github.com/NixOS/nix/issues) whether your bug has already been reported.
+
+2. If you were not able to find the bug or feature, [open a new issue](https://github.com/NixOS/nix/issues/new/choose).
+
+3. The issue templates will guide you in specifying your issue.
+ The more complete the information you provide, the more likely it can be found by others and the more useful it is in the future.
+ Make sure reported bugs can be reproduced easily.
+
+4. Once submitted, do not expect issues to be picked up or solved right away.
+   The only way to ensure this is to [work on the issue yourself](#making-changes-to-nix).
+
+## Report a security vulnerability
+
+Check out the [security policy](https://github.com/NixOS/nix/security/policy).
+
+## Making changes to Nix
+
+1. Check for [pull requests](https://github.com/NixOS/nix/pulls) that might already cover the contribution you are about to make.
+   There are many open pull requests that might already do what you intend to work on.
+ You can use [labels](https://github.com/NixOS/nix/labels) to filter for relevant topics.
+
+2. Search for related issues that cover what you're going to work on. It could help to mention there that you will work on the issue.
+ Pull requests addressing issues labeled ["idea approved"](https://github.com/NixOS/nix/labels/idea%20approved) are especially welcomed by maintainers and will receive prioritised review.
+
+3. Check the [Nix reference manual](https://nixos.org/manual/nix/unstable/contributing/hacking.html) for information on building Nix and running its tests.
+
+ For contributions to the command line interface, please check the [CLI guidelines](https://nixos.org/manual/nix/unstable/contributing/cli-guideline.html).
+
+4. Make your changes!
+
+5. [Create a pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) for your changes.
+ * [Mark the pull request as draft](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-stage-of-a-pull-request) if you're not done with the changes.
+ * Make sure to have [a clean history of commits on your branch by using rebase](https://www.digitalocean.com/community/tutorials/how-to-rebase-and-update-a-pull-request).
+ * Link related issues in your pull request to inform interested parties and future contributors about your change.
+     If your pull request closes one or multiple issues, note that in the description using `Closes: #<number>`, so that they are closed automatically when your change is merged.
+
+6. Do not expect your pull request to be reviewed immediately.
+ Nix maintainers follow a [structured process for reviews and design decisions](https://github.com/NixOS/nix/tree/master/maintainers#project-board-protocol), which may or may not prioritise your work.
+
+7. If you need additional feedback or help getting your pull request into shape, ask other contributors using [@mentions](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax#mentioning-people-and-teams).
+
+## Making changes to the Nix manual
+
+The Nix reference manual is hosted on https://nixos.org/manual/nix.
+The underlying source files are located in [`doc/manual/src`](./doc/manual/src).
+For small changes, you can [use GitHub to edit these files](https://docs.github.com/en/repositories/working-with-files/managing-files/editing-files).
+For larger changes, see the [Nix reference manual](https://nixos.org/manual/nix/unstable/contributing/hacking.html).
+
+## Getting help
+
+Whenever you're stuck or do not know how to proceed, you can always ask for help.
+The appropriate channels to do so can be found on the [NixOS Community](https://nixos.org/community/) page.
diff --git a/Makefile b/Makefile
index 31e4961bc..d6b49473a 100644
--- a/Makefile
+++ b/Makefile
@@ -16,7 +16,8 @@ makefiles = \
misc/systemd/local.mk \
misc/launchd/local.mk \
misc/upstart/local.mk \
- doc/manual/local.mk
+ doc/manual/local.mk \
+ doc/internal-api/local.mk
-include Makefile.config
diff --git a/Makefile.config.in b/Makefile.config.in
index a6c84f2ad..707cfe0e3 100644
--- a/Makefile.config.in
+++ b/Makefile.config.in
@@ -22,6 +22,7 @@ LOWDOWN_LIBS = @LOWDOWN_LIBS@
OPENSSL_LIBS = @OPENSSL_LIBS@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_VERSION = @PACKAGE_VERSION@
+RAPIDCHECK_HEADERS = @RAPIDCHECK_HEADERS@
SHELL = @bash@
SODIUM_LIBS = @SODIUM_LIBS@
SQLITE3_LIBS = @SQLITE3_LIBS@
@@ -46,3 +47,4 @@ storedir = @storedir@
sysconfdir = @sysconfdir@
system = @system@
tests = @tests@
+internal_api_docs = @internal_api_docs@
diff --git a/boehmgc-coroutine-sp-fallback.diff b/boehmgc-coroutine-sp-fallback.diff
index 2826486fb..5066d8278 100644
--- a/boehmgc-coroutine-sp-fallback.diff
+++ b/boehmgc-coroutine-sp-fallback.diff
@@ -1,8 +1,8 @@
diff --git a/darwin_stop_world.c b/darwin_stop_world.c
-index 3dbaa3fb..36a1d1f7 100644
+index 0468aaec..b348d869 100644
--- a/darwin_stop_world.c
+++ b/darwin_stop_world.c
-@@ -352,6 +352,7 @@ GC_INNER void GC_push_all_stacks(void)
+@@ -356,6 +356,7 @@ GC_INNER void GC_push_all_stacks(void)
int nthreads = 0;
word total_size = 0;
mach_msg_type_number_t listcount = (mach_msg_type_number_t)THREAD_TABLE_SZ;
@@ -10,7 +10,7 @@ index 3dbaa3fb..36a1d1f7 100644
if (!EXPECT(GC_thr_initialized, TRUE))
GC_thr_init();
-@@ -407,6 +408,19 @@ GC_INNER void GC_push_all_stacks(void)
+@@ -411,6 +412,19 @@ GC_INNER void GC_push_all_stacks(void)
GC_push_all_stack_sections(lo, hi, p->traced_stack_sect);
}
if (altstack_lo) {
@@ -30,6 +30,22 @@ index 3dbaa3fb..36a1d1f7 100644
total_size += altstack_hi - altstack_lo;
GC_push_all_stack(altstack_lo, altstack_hi);
}
+diff --git a/include/gc.h b/include/gc.h
+index edab6c22..f2c61282 100644
+--- a/include/gc.h
++++ b/include/gc.h
+@@ -2172,6 +2172,11 @@ GC_API void GC_CALL GC_win32_free_heap(void);
+ (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_ignore_off_page)
+ #endif /* _AMIGA && !GC_AMIGA_MAKINGLIB */
+
++#if !__APPLE__
++/* Patch doesn't work on apple */
++#define NIX_BOEHM_PATCH_VERSION 1
++#endif
++
+ #ifdef __cplusplus
+ } /* extern "C" */
+ #endif
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index b5d71e62..aed7b0bf 100644
--- a/pthread_stop_world.c
diff --git a/configure.ac b/configure.ac
index 36e119bed..bb3f92e4d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -152,6 +152,11 @@ AC_ARG_ENABLE(tests, AS_HELP_STRING([--disable-tests],[Do not build the tests]),
tests=$enableval, tests=yes)
AC_SUBST(tests)
+# Building without API docs is the default as Nix's C++ interfaces are internal and unstable.
+AC_ARG_ENABLE(internal_api_docs, AS_HELP_STRING([--enable-internal-api-docs],[Build API docs for Nix's internal unstable C++ interfaces]),
+ internal_api_docs=$enableval, internal_api_docs=no)
+AC_SUBST(internal_api_docs)
+
# LTO is currently broken with clang for unknown reasons; ld segfaults in the llvm plugin
AC_ARG_ENABLE(lto, AS_HELP_STRING([--enable-lto],[Enable LTO (only supported with GCC) [default=no]]),
lto=$enableval, lto=no)
@@ -179,7 +184,7 @@ fi
# Look for OpenSSL, a required dependency. FIXME: this is only (maybe)
# used by S3BinaryCacheStore.
-PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"])
+PKG_CHECK_MODULES([OPENSSL], [libcrypto >= 1.1.1], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"])
# Look for libarchive.
@@ -284,11 +289,24 @@ PKG_CHECK_MODULES([GTEST], [gtest_main])
# Look for rapidcheck.
+AC_ARG_VAR([RAPIDCHECK_HEADERS], [include path of gtest headers shipped by RAPIDCHECK])
# No pkg-config yet, https://github.com/emil-e/rapidcheck/issues/302
AC_LANG_PUSH(C++)
+AC_SUBST(RAPIDCHECK_HEADERS)
+[CXXFLAGS="-I $RAPIDCHECK_HEADERS $CXXFLAGS"]
+[LIBS="-lrapidcheck -lgtest $LIBS"]
AC_CHECK_HEADERS([rapidcheck/gtest.h], [], [], [#include <gtest/gtest.h>])
-dnl No good for C++ libs with mangled symbols
-dnl AC_CHECK_LIB([rapidcheck], [])
+dnl AC_CHECK_LIB doesn't work for C++ libs with mangled symbols
+AC_LINK_IFELSE([
+ AC_LANG_PROGRAM([[
+ #include <gtest/gtest.h>
+ #include <rapidcheck/gtest.h>
+ ]], [[
+ return RUN_ALL_TESTS();
+ ]])
+ ],
+ [],
+ [AC_MSG_ERROR([librapidcheck is not found.])])
AC_LANG_POP(C++)
fi
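
Because AC_CHECK_LIB cannot probe C++ libraries with mangled symbols, configure now compiles and links a small gtest/rapidcheck program instead, and the new RAPIDCHECK_HEADERS precious variable must be supplied by whatever builds Nix. A minimal, hypothetical sketch of passing it in from a Nix expression — `rapidcheckHeaders` is an assumed binding for wherever your rapidcheck package installs the directory containing rapidcheck/gtest.h, not something defined in this diff:

    # Hypothetical sketch only: supplying RAPIDCHECK_HEADERS to ./configure.
    # `rapidcheckHeaders` is assumed to point at the directory holding rapidcheck/gtest.h.
    stdenv.mkDerivation {
      pname = "nix";
      version = "2.16.0";
      src = ./.;
      configureFlags = [ "RAPIDCHECK_HEADERS=${rapidcheckHeaders}" ];
    }
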
diff --git a/default.nix b/default.nix
index 00ec5b617..2cccff28d 100644
--- a/default.nix
+++ b/default.nix
@@ -1,3 +1,10 @@
-(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") {
- src = ./.;
-}).defaultNix
+(import
+ (
+ let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
+ fetchTarball {
+ url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
+ sha256 = lock.nodes.flake-compat.locked.narHash;
+ }
+ )
+ { src = ./.; }
+).defaultNix
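
The rewritten default.nix pins flake-compat to the revision and NAR hash recorded in flake.lock instead of fetching the master tarball, so the non-flake entry points (`nix-build`, `nix-shell`) use the same flake-compat as the flake itself. A rough sketch of the lock-file fields it reads — the values below are placeholders, not the contents of this repository's flake.lock:

    # `builtins.fromJSON (builtins.readFile ./flake.lock)` yields roughly:
    {
      nodes = {
        flake-compat = {
          locked = {
            rev = "0123456789abcdef0123456789abcdef01234567";                # placeholder commit
            narHash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=";  # placeholder hash
          };
        };
        # root, nixpkgs and other nodes omitted
      };
    }

Passing the recorded narHash as `sha256` to fetchTarball also makes the download a fixed-output fetch, so it works under pure evaluation.
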
diff --git a/doc/internal-api/.gitignore b/doc/internal-api/.gitignore
new file mode 100644
index 000000000..dab28b6b0
--- /dev/null
+++ b/doc/internal-api/.gitignore
@@ -0,0 +1,3 @@
+/doxygen.cfg
+/html
+/latex
diff --git a/doc/internal-api/doxygen.cfg.in b/doc/internal-api/doxygen.cfg.in
new file mode 100644
index 000000000..8f526536d
--- /dev/null
+++ b/doc/internal-api/doxygen.cfg.in
@@ -0,0 +1,63 @@
+# Doxyfile 1.9.5
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = "Nix"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER = @PACKAGE_VERSION@
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF = "Nix, the purely functional package manager; unstable internal interfaces"
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = NO
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
+# Note: If this tag is empty the current directory is searched.
+
+# FIXME Make this list more maintainable somehow. We could maybe generate this
+# in the Makefile, but we would need to change how `.in` files are preprocessed
+# so they can expand variables other than configure variables.
+
+INPUT = \
+ src/libcmd \
+ src/libexpr \
+ src/libexpr/flake \
+ src/libexpr/tests \
+ src/libexpr/tests/value \
+ src/libexpr/value \
+ src/libfetchers \
+ src/libmain \
+ src/libstore \
+ src/libstore/build \
+ src/libstore/builtins \
+ src/libstore/tests \
+ src/libutil \
+ src/libutil/tests \
+ src/nix \
+ src/nix-env \
+ src/nix-store
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor. Note that the INCLUDE_PATH is not recursive, so the setting of
+# RECURSIVE has no effect here.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH = @RAPIDCHECK_HEADERS@
diff --git a/doc/internal-api/local.mk b/doc/internal-api/local.mk
new file mode 100644
index 000000000..890f341b7
--- /dev/null
+++ b/doc/internal-api/local.mk
@@ -0,0 +1,19 @@
+.PHONY: internal-api-html
+
+ifeq ($(internal_api_docs), yes)
+
+$(docdir)/internal-api/html/index.html $(docdir)/internal-api/latex: $(d)/doxygen.cfg
+ mkdir -p $(docdir)/internal-api
+ { cat $< ; echo "OUTPUT_DIRECTORY=$(docdir)/internal-api" ; } | doxygen -
+
+# Generate the HTML API docs for Nix's unstable internal interfaces.
+internal-api-html: $(docdir)/internal-api/html/index.html
+
+else
+
+# Make a nicer error message
+internal-api-html:
+ @echo "Internal API docs are disabled. Configure with '--enable-internal-api-docs', or avoid calling 'make internal-api-html'."
+ @exit 1
+
+endif
diff --git a/doc/manual/generate-builtins.nix b/doc/manual/generate-builtins.nix
index 115bb3f94..71f96153f 100644
--- a/doc/manual/generate-builtins.nix
+++ b/doc/manual/generate-builtins.nix
@@ -1,8 +1,12 @@
-builtinsDump:
+let
+ inherit (builtins) concatStringsSep attrNames;
+in
+
+builtinsInfo:
let
showBuiltin = name:
let
- inherit (builtinsDump.${name}) doc args;
+ inherit (builtinsInfo.${name}) doc args;
in
''
<dt id="builtins-${name}">
@@ -14,7 +18,7 @@ let
</dd>
'';
- listArgs = args: builtins.concatStringsSep " " (map (s: "<var>${s}</var>") args);
+ listArgs = args: concatStringsSep " " (map (s: "<var>${s}</var>") args);
in
-with builtins; concatStringsSep "\n" (map showBuiltin (attrNames builtinsDump))
+concatStringsSep "\n" (map showBuiltin (attrNames builtinsInfo))
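
generate-builtins.nix consumes builtins.json (see the local.mk rules below), an attrset mapping each builtin's name to its argument names and doc string, and renders one `<dt>`/`<dd>` pair per builtin. A hedged sketch of the expected input shape — one entry shown, doc text abridged:

    # Assumed shape of `builtinsInfo`:
    {
      substring = {
        args = [ "start" "len" "s" ];
        doc = "Return the substring of *s* starting at character position *start* ...";
      };
    }
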
diff --git a/doc/manual/generate-manpage.nix b/doc/manual/generate-manpage.nix
index 8c7c4d358..fb34898f3 100644
--- a/doc/manual/generate-manpage.nix
+++ b/doc/manual/generate-manpage.nix
@@ -1,15 +1,24 @@
-{ toplevel }:
+let
+ inherit (builtins)
+ attrNames attrValues fromJSON listToAttrs mapAttrs
+ concatStringsSep concatMap length lessThan replaceStrings sort;
+ inherit (import ./utils.nix) concatStrings optionalString filterAttrs trim squash unique showSettings;
+in
-with builtins;
-with import ./utils.nix;
+commandDump:
let
+ commandInfo = fromJSON commandDump;
+
showCommand = { command, details, filename, toplevel }:
let
+
result = ''
> **Warning** \
- > This program is **experimental** and its interface is subject to change.
+ > This program is
+ > [**experimental**](@docroot@/contributing/experimental-features.md#xp-feature-nix-command)
+ > and its interface is subject to change.
# Name
@@ -25,59 +34,75 @@ let
${maybeOptions}
'';
+
showSynopsis = command: args:
let
- showArgument = arg: "*${arg.label}*" + (if arg ? arity then "" else "...");
+ showArgument = arg: "*${arg.label}*" + optionalString (! arg ? arity) "...";
arguments = concatStringsSep " " (map showArgument args);
in ''
`${command}` [*option*...] ${arguments}
'';
- maybeSubcommands = if details ? commands && details.commands != {}
- then ''
+
+ maybeSubcommands = optionalString (details ? commands && details.commands != {})
+ ''
where *subcommand* is one of the following:
${subcommands}
- ''
- else "";
+ '';
+
subcommands = if length categories > 1
then listCategories
else listSubcommands details.commands;
+
categories = sort (x: y: x.id < y.id) (unique (map (cmd: cmd.category) (attrValues details.commands)));
+
listCategories = concatStrings (map showCategory categories);
+
showCategory = cat: ''
**${toString cat.description}:**
${listSubcommands (filterAttrs (n: v: v.category == cat) details.commands)}
'';
+
listSubcommands = cmds: concatStrings (attrValues (mapAttrs showSubcommand cmds));
+
showSubcommand = name: subcmd: ''
* [`${command} ${name}`](./${appendName filename name}.md) - ${subcmd.description}
'';
- maybeDocumentation = if details ? doc then details.doc else "";
- maybeOptions = if details.flags == {} then "" else ''
+
+ maybeDocumentation = optionalString
+ (details ? doc)
+ (replaceStrings ["@stores@"] [storeDocs] details.doc);
+
+ maybeOptions = optionalString (details.flags != {}) ''
# Options
${showOptions details.flags toplevel.flags}
'';
+
showOptions = options: commonOptions:
let
allOptions = options // commonOptions;
showCategory = cat: ''
- ${if cat != "" then "**${cat}:**" else ""}
+ ${optionalString (cat != "") "**${cat}:**"}
${listOptions (filterAttrs (n: v: v.category == cat) allOptions)}
'';
listOptions = opts: concatStringsSep "\n" (attrValues (mapAttrs showOption opts));
showOption = name: option:
let
- shortName = if option ? shortName then "/ `-${option.shortName}`" else "";
- labels = if option ? labels then (concatStringsSep " " (map (s: "*${s}*") option.labels)) else "";
+ shortName = optionalString
+ (option ? shortName)
+ ("/ `-${option.shortName}`");
+ labels = optionalString
+ (option ? labels)
+ (concatStringsSep " " (map (s: "*${s}*") option.labels));
in trim ''
- `--${name}` ${shortName} ${labels}
${option.description}
'';
- categories = sort builtins.lessThan (unique (map (cmd: cmd.category) (attrValues allOptions)));
+ categories = sort lessThan (unique (map (cmd: cmd.category) (attrValues allOptions)));
in concatStrings (map showCategory categories);
in squash result;
@@ -98,13 +123,11 @@ let
};
in [ cmd ] ++ concatMap subcommand (attrNames details.commands or {});
- parsedToplevel = builtins.fromJSON toplevel;
-
manpages = processCommand {
command = "nix";
- details = parsedToplevel;
+ details = commandInfo.args;
filename = "nix";
- toplevel = parsedToplevel;
+ toplevel = commandInfo.args;
};
tableOfContents = let
@@ -112,4 +135,18 @@ let
" - [${page.command}](command-ref/new-cli/${page.name})";
in concatStringsSep "\n" (map showEntry manpages) + "\n";
+ storeDocs =
+ let
+ showStore = name: { settings, doc }:
+ ''
+ ## ${name}
+
+ ${doc}
+
+ **Settings**:
+
+ ${showSettings { useAnchors = false; } settings}
+ '';
+ in concatStrings (attrValues (mapAttrs showStore commandInfo.stores));
+
in (listToAttrs manpages) // { "SUMMARY.md" = tableOfContents; }
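
generate-manpage.nix is now a function over the raw `nix __dump-cli` output (see the nix.json rule in doc/manual/local.mk below) rather than a `{ toplevel }` attrset, and it expects both the command tree and the store-type documentation in a single JSON document. A rough sketch of the structure it accesses — keys and values abridged and illustrative, not a verbatim dump:

    # Assumed shape of `commandInfo = fromJSON commandDump`:
    {
      args = {                         # the top-level `nix` command
        flags = { };                   # common options, rendered by showOptions
        commands = {
          build = {
            description = "build a derivation or fetch a store path";
            category = { id = 1; description = "Main commands"; };
            flags = { };
          };
        };
      };
      stores = {
        "Local Store" = { doc = "..."; settings = { }; };
      };
    }
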
diff --git a/doc/manual/generate-options.nix b/doc/manual/generate-options.nix
deleted file mode 100644
index a4ec36477..000000000
--- a/doc/manual/generate-options.nix
+++ /dev/null
@@ -1,41 +0,0 @@
-let
- inherit (builtins) attrNames concatStringsSep isAttrs isBool;
- inherit (import ./utils.nix) concatStrings squash splitLines;
-in
-
-optionsInfo:
-let
- showOption = name:
- let
- inherit (optionsInfo.${name}) description documentDefault defaultValue aliases;
- result = squash ''
- - <span id="conf-${name}">[`${name}`](#conf-${name})</span>
-
- ${indent " " body}
- '';
- # separate body to cleanly handle indentation
- body = ''
- ${description}
-
- **Default:** ${showDefault documentDefault defaultValue}
-
- ${showAliases aliases}
- '';
- showDefault = documentDefault: defaultValue:
- if documentDefault then
- # a StringMap value type is specified as a string, but
- # this shows the value type. The empty stringmap is `null` in
- # JSON, but that converts to `{ }` here.
- if defaultValue == "" || defaultValue == [] || isAttrs defaultValue
- then "*empty*"
- else if isBool defaultValue then
- if defaultValue then "`true`" else "`false`"
- else "`${toString defaultValue}`"
- else "*machine-specific*";
- showAliases = aliases:
- if aliases == [] then "" else
- "**Deprecated alias:** ${(concatStringsSep ", " (map (s: "`${s}`") aliases))}";
- indent = prefix: s:
- concatStringsSep "\n" (map (x: if x == "" then x else "${prefix}${x}") (splitLines s));
- in result;
-in concatStrings (map showOption (attrNames optionsInfo))
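
The rendering logic removed here does not go away: per the local.mk changes below, setting documentation is now produced by `showSettings` from doc/manual/utils.nix, shared between the nix.conf documentation and the per-store settings emitted by generate-manpage.nix. The call as it appears in the new conf-file.md rule, shown here for context (conf-file.json is the dumped settings, as before):

    # Invocation used for nix.conf documentation (anchors enabled):
    (import doc/manual/utils.nix).showSettings
      { useAnchors = true; }
      (builtins.fromJSON (builtins.readFile ./conf-file.json))
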
diff --git a/doc/manual/generate-xp-features-shortlist.nix b/doc/manual/generate-xp-features-shortlist.nix
new file mode 100644
index 000000000..30e211c96
--- /dev/null
+++ b/doc/manual/generate-xp-features-shortlist.nix
@@ -0,0 +1,9 @@
+with builtins;
+with import ./utils.nix;
+
+let
+ showExperimentalFeature = name: doc:
+ ''
+ - [`${name}`](@docroot@/contributing/experimental-features.md#xp-feature-${name})
+ '';
+in xps: indent " " (concatStrings (attrValues (mapAttrs showExperimentalFeature xps)))
diff --git a/doc/manual/generate-xp-features.nix b/doc/manual/generate-xp-features.nix
new file mode 100644
index 000000000..adb94355c
--- /dev/null
+++ b/doc/manual/generate-xp-features.nix
@@ -0,0 +1,11 @@
+with builtins;
+with import ./utils.nix;
+
+let
+ showExperimentalFeature = name: doc:
+ squash ''
+ ## [`${name}`]{#xp-feature-${name}}
+
+ ${doc}
+ '';
+in xps: (concatStringsSep "\n" (attrValues (mapAttrs showExperimentalFeature xps)))
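
The two experimental-feature generators above take the same input: an attrset mapping feature names to their Markdown descriptions, dumped to xp-features.json by `nix __dump-xp-features` (see the local.mk rule below). The shortlist variant emits indented links for the nix.conf docs, while this one emits a full `##` section per feature. A minimal, illustrative argument — the feature names are real, the descriptions abridged:

    # Assumed shape of the `xps` argument:
    {
      flakes = "Enable flakes ...";
      nix-command = "Enable the new `nix` subcommands ...";
    }
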
diff --git a/doc/manual/local.mk b/doc/manual/local.mk
index f43510b6d..b4b7283ef 100644
--- a/doc/manual/local.mk
+++ b/doc/manual/local.mk
@@ -1,17 +1,25 @@
ifeq ($(doc_generate),yes)
MANUAL_SRCS := \
- $(call rwildcard, $(d)/src, *.md) \
- $(call rwildcard, $(d)/src, */*.md)
+ $(call rwildcard, $(d)/src, *.md) \
+ $(call rwildcard, $(d)/src, */*.md)
-# Generate man pages.
man-pages := $(foreach n, \
- nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 \
- nix-collect-garbage.1 \
- nix-prefetch-url.1 nix-channel.1 \
- nix-hash.1 nix-copy-closure.1 \
- nix.conf.5 nix-daemon.8, \
- $(d)/$(n))
+ nix-env.1 nix-store.1 \
+ nix-build.1 nix-shell.1 nix-instantiate.1 \
+ nix-collect-garbage.1 \
+ nix-prefetch-url.1 nix-channel.1 \
+ nix-hash.1 nix-copy-closure.1 \
+ nix.conf.5 nix-daemon.8 \
+ nix-profiles.5 \
+, $(d)/$(n))
+
+# man pages for subcommands
+# convert from `$(d)/src/command-ref/nix-{1}/{2}.md` to `$(d)/nix-{1}-{2}.1`
+# FIXME: unify with how nix3-cli man pages are generated
+man-pages += $(foreach subcommand, \
+ $(filter-out %opt-common.md %env-common.md, $(wildcard $(d)/src/command-ref/nix-*/*.md)), \
+ $(d)/$(subst /,-,$(subst $(d)/src/command-ref/,,$(subst .md,.1,$(subcommand)))))
clean-files += $(d)/*.1 $(d)/*.5 $(d)/*.8
@@ -26,9 +34,42 @@ dummy-env = env -i \
nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -I nix/corepkgs=corepkgs --store dummy:// --impure --raw
+# re-implement mdBook's include directive to make it usable for terminal output and for proper @docroot@ substitution
+define process-includes
+ while read -r line; do \
+ set -euo pipefail; \
+ filename="$$(dirname $(1))/$$(sed 's/{{#include \(.*\)}}/\1/'<<< $$line)"; \
+ test -f "$$filename" || ( echo "#include-d file '$$filename' does not exist." >&2; exit 1; ); \
+ matchline="$$(sed 's|/|\\/|g' <<< $$line)"; \
+ sed -i "/$$matchline/r $$filename" $(2); \
+ sed -i "s/$$matchline//" $(2); \
+ done < <(grep '{{#include' $(1))
+endef
+
+$(d)/nix-env-%.1: $(d)/src/command-ref/nix-env/%.md
+ @printf "Title: %s\n\n" "$(subst nix-env-,nix-env --,$$(basename "$@" .1))" > $^.tmp
+ $(render-subcommand)
+
+$(d)/nix-store-%.1: $(d)/src/command-ref/nix-store/%.md
+ @printf -- 'Title: %s\n\n' "$(subst nix-store-,nix-store --,$$(basename "$@" .1))" > $^.tmp
+ $(render-subcommand)
+
+# FIXME: there surely is some more deduplication to be achieved here with even darker Make magic
+define render-subcommand
+ @cat $^ >> $^.tmp
+ @$(call process-includes,$^,$^.tmp)
+ $(trace-gen) lowdown -sT man --nroff-nolinks -M section=1 $^.tmp -o $@
+ @# fix up `lowdown`'s automatic escaping of `--`
+ @# https://github.com/kristapsdz/lowdown/blob/edca6ce6d5336efb147321a43c47a698de41bb7c/entity.c#L202
+ @sed -i 's/\e\[u2013\]/--/' $@
+ @rm $^.tmp
+endef
+
+
$(d)/%.1: $(d)/src/command-ref/%.md
@printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp
@cat $^ >> $^.tmp
+ @$(call process-includes,$^,$^.tmp)
$(trace-gen) lowdown -sT man --nroff-nolinks -M section=1 $^.tmp -o $@
@rm $^.tmp
@@ -41,40 +82,55 @@ $(d)/%.8: $(d)/src/command-ref/%.md
$(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md
@printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp
@cat $^ >> $^.tmp
+ @$(call process-includes,$^,$^.tmp)
$(trace-gen) lowdown -sT man --nroff-nolinks -M section=5 $^.tmp -o $@
@rm $^.tmp
-$(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
- $(trace-gen) cat doc/manual/src/SUMMARY.md.in | while IFS= read line; do if [[ $$line = @manpages@ ]]; then cat doc/manual/src/command-ref/new-cli/SUMMARY.md; else echo "$$line"; fi; done > $@.tmp
- @mv $@.tmp $@
+$(d)/nix-profiles.5: $(d)/src/command-ref/files/profiles.md
+ @printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp
+ @cat $^ >> $^.tmp
+ $(trace-gen) lowdown -sT man --nroff-nolinks -M section=5 $^.tmp -o $@
+ @rm $^.tmp
+
+$(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli $(d)/src/contributing/experimental-feature-descriptions.md
+ @cp $< $@
+ @$(call process-includes,$@,$@)
-$(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
- @rm -rf $@
- $(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-manpage.nix { toplevel = builtins.readFile $<; }'
- @# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
- $(trace-gen) sed -i $@.tmp/*.md -e 's^@docroot@^../..^g'
+$(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/utils.nix $(d)/generate-manpage.nix $(bindir)/nix
+ @rm -rf $@ $@.tmp
+ $(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-manpage.nix (builtins.readFile $<)'
@mv $@.tmp $@
-$(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/generate-options.nix $(d)/src/command-ref/conf-file-prefix.md $(bindir)/nix
+$(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/utils.nix $(d)/src/command-ref/conf-file-prefix.md $(d)/src/command-ref/experimental-features-shortlist.md $(bindir)/nix
@cat doc/manual/src/command-ref/conf-file-prefix.md > $@.tmp
- @# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
- $(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-options.nix (builtins.fromJSON (builtins.readFile $<))' \
- | sed -e 's^@docroot@^..^g'>> $@.tmp
+ $(trace-gen) $(nix-eval) --expr '(import doc/manual/utils.nix).showSettings { useAnchors = true; } (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp;
@mv $@.tmp $@
$(d)/nix.json: $(bindir)/nix
- $(trace-gen) $(dummy-env) $(bindir)/nix __dump-args > $@.tmp
+ $(trace-gen) $(dummy-env) $(bindir)/nix __dump-cli > $@.tmp
@mv $@.tmp $@
$(d)/conf-file.json: $(bindir)/nix
$(trace-gen) $(dummy-env) $(bindir)/nix show-config --json --experimental-features nix-command > $@.tmp
@mv $@.tmp $@
+$(d)/src/contributing/experimental-feature-descriptions.md: $(d)/xp-features.json $(d)/utils.nix $(d)/generate-xp-features.nix $(bindir)/nix
+ @rm -rf $@ $@.tmp
+ $(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-xp-features.nix (builtins.fromJSON (builtins.readFile $<))'
+ @mv $@.tmp $@
+
+$(d)/src/command-ref/experimental-features-shortlist.md: $(d)/xp-features.json $(d)/utils.nix $(d)/generate-xp-features-shortlist.nix $(bindir)/nix
+ @rm -rf $@ $@.tmp
+ $(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-xp-features-shortlist.nix (builtins.fromJSON (builtins.readFile $<))'
+ @mv $@.tmp $@
+
+$(d)/xp-features.json: $(bindir)/nix
+ $(trace-gen) $(dummy-env) NIX_PATH=nix/corepkgs=corepkgs $(bindir)/nix __dump-xp-features > $@.tmp
+ @mv $@.tmp $@
+
$(d)/src/language/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/language/builtins-prefix.md $(bindir)/nix
@cat doc/manual/src/language/builtins-prefix.md > $@.tmp
- @# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable
- $(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' \
- | sed -e 's^@docroot@^..^g' >> $@.tmp
+ $(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp;
@cat doc/manual/src/language/builtins-suffix.md >> $@.tmp
@mv $@.tmp $@
@@ -83,7 +139,8 @@ $(d)/builtins.json: $(bindir)/nix
@mv $@.tmp $@
# Generate the HTML manual.
-html: $(docdir)/manual/index.html
+.PHONY: manual-html
+manual-html: $(docdir)/manual/index.html
install: $(docdir)/manual/index.html
# Generate 'nix' manpages.
@@ -91,6 +148,8 @@ install: $(mandir)/man1/nix3-manpages
man: doc/manual/generated/man1/nix3-manpages
all: doc/manual/generated/man1/nix3-manpages
+# FIXME: unify with how the other man pages are generated.
+# this one works differently and does not use any of the amenities provided by `/mk/lib.mk`.
$(mandir)/man1/nix3-manpages: doc/manual/generated/man1/nix3-manpages
@mkdir -p $(DESTDIR)$$(dirname $@)
$(trace-install) install -m 0644 $$(dirname $<)/* $(DESTDIR)$$(dirname $@)
@@ -98,21 +157,31 @@ $(mandir)/man1/nix3-manpages: doc/manual/generated/man1/nix3-manpages
doc/manual/generated/man1/nix3-manpages: $(d)/src/command-ref/new-cli
@mkdir -p $(DESTDIR)$$(dirname $@)
$(trace-gen) for i in doc/manual/src/command-ref/new-cli/*.md; do \
- name=$$(basename $$i .md); \
- tmpFile=$$(mktemp); \
- if [[ $$name = SUMMARY ]]; then continue; fi; \
- printf "Title: %s\n\n" "$$name" > $$tmpFile; \
- cat $$i >> $$tmpFile; \
- lowdown -sT man --nroff-nolinks -M section=1 $$tmpFile -o $(DESTDIR)$$(dirname $@)/$$name.1; \
- rm $$tmpFile; \
+ name=$$(basename $$i .md); \
+ tmpFile=$$(mktemp); \
+ if [[ $$name = SUMMARY ]]; then continue; fi; \
+ printf "Title: %s\n\n" "$$name" > $$tmpFile; \
+ cat $$i >> $$tmpFile; \
+ lowdown -sT man --nroff-nolinks -M section=1 $$tmpFile -o $(DESTDIR)$$(dirname $@)/$$name.1; \
+ rm $$tmpFile; \
done
@touch $@
-$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/anchors.jq $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/language/builtins.md
+$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/anchors.jq $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/contributing/experimental-feature-descriptions.md $(d)/src/command-ref/conf-file.md $(d)/src/language/builtins.md
$(trace-gen) \
- set -euo pipefail; \
- RUST_LOG=warn mdbook build doc/manual -d $(DESTDIR)$(docdir)/manual.tmp 2>&1 \
- | { grep -Fv "because fragment resolution isn't implemented" || :; }
+ tmp="$$(mktemp -d)"; \
+ cp -r doc/manual "$$tmp"; \
+ find "$$tmp" -name '*.md' | while read -r file; do \
+ $(call process-includes,$$file,$$file); \
+ done; \
+ find "$$tmp" -name '*.md' | while read -r file; do \
+ docroot="$$(realpath --relative-to="$$(dirname "$$file")" $$tmp/manual/src)"; \
+ sed -i "s,@docroot@,$$docroot,g" "$$file"; \
+ done; \
+ set -euo pipefail; \
+ RUST_LOG=warn mdbook build "$$tmp/manual" -d $(DESTDIR)$(docdir)/manual.tmp 2>&1 \
+ | { grep -Fv "because fragment resolution isn't implemented" || :; }; \
+ rm -rf "$$tmp/manual"
@rm -rf $(DESTDIR)$(docdir)/manual
@mv $(DESTDIR)$(docdir)/manual.tmp/html $(DESTDIR)$(docdir)/manual
@rm -rf $(DESTDIR)$(docdir)/manual.tmp
diff --git a/doc/manual/redirects.js b/doc/manual/redirects.js
index 69f75d3a0..5cd6fdea2 100644
--- a/doc/manual/redirects.js
+++ b/doc/manual/redirects.js
@@ -338,6 +338,9 @@ const redirects = {
"strings": "#string",
"lists": "#list",
"attribute-sets": "#attribute-set"
+ },
+ "installation/installing-binary.html": {
+ "uninstalling": "uninstall.html"
}
};
diff --git a/doc/manual/src/SUMMARY.md.in b/doc/manual/src/SUMMARY.md.in
index 964091285..606aecd8f 100644
--- a/doc/manual/src/SUMMARY.md.in
+++ b/doc/manual/src/SUMMARY.md.in
@@ -15,6 +15,7 @@
- [Multi-User Mode](installation/multi-user.md)
- [Environment Variables](installation/env-variables.md)
- [Upgrading Nix](installation/upgrading.md)
+ - [Uninstalling Nix](installation/uninstall.md)
- [Package Management](package-management/package-management.md)
- [Basic Package Management](package-management/basic-package-mgmt.md)
- [Profiles](package-management/profiles.md)
@@ -44,10 +45,41 @@
- [Common Options](command-ref/opt-common.md)
- [Common Environment Variables](command-ref/env-common.md)
- [Main Commands](command-ref/main-commands.md)
- - [nix-env](command-ref/nix-env.md)
- [nix-build](command-ref/nix-build.md)
- [nix-shell](command-ref/nix-shell.md)
- [nix-store](command-ref/nix-store.md)
+ - [nix-store --add-fixed](command-ref/nix-store/add-fixed.md)
+ - [nix-store --add](command-ref/nix-store/add.md)
+ - [nix-store --delete](command-ref/nix-store/delete.md)
+ - [nix-store --dump-db](command-ref/nix-store/dump-db.md)
+ - [nix-store --dump](command-ref/nix-store/dump.md)
+ - [nix-store --export](command-ref/nix-store/export.md)
+ - [nix-store --gc](command-ref/nix-store/gc.md)
+ - [nix-store --generate-binary-cache-key](command-ref/nix-store/generate-binary-cache-key.md)
+ - [nix-store --import](command-ref/nix-store/import.md)
+ - [nix-store --load-db](command-ref/nix-store/load-db.md)
+ - [nix-store --optimise](command-ref/nix-store/optimise.md)
+ - [nix-store --print-env](command-ref/nix-store/print-env.md)
+ - [nix-store --query](command-ref/nix-store/query.md)
+ - [nix-store --read-log](command-ref/nix-store/read-log.md)
+ - [nix-store --realise](command-ref/nix-store/realise.md)
+ - [nix-store --repair-path](command-ref/nix-store/repair-path.md)
+ - [nix-store --restore](command-ref/nix-store/restore.md)
+ - [nix-store --serve](command-ref/nix-store/serve.md)
+ - [nix-store --verify-path](command-ref/nix-store/verify-path.md)
+ - [nix-store --verify](command-ref/nix-store/verify.md)
+ - [nix-env](command-ref/nix-env.md)
+ - [nix-env --delete-generations](command-ref/nix-env/delete-generations.md)
+ - [nix-env --install](command-ref/nix-env/install.md)
+ - [nix-env --list-generations](command-ref/nix-env/list-generations.md)
+ - [nix-env --query](command-ref/nix-env/query.md)
+ - [nix-env --rollback](command-ref/nix-env/rollback.md)
+ - [nix-env --set-flag](command-ref/nix-env/set-flag.md)
+ - [nix-env --set](command-ref/nix-env/set.md)
+ - [nix-env --switch-generation](command-ref/nix-env/switch-generation.md)
+ - [nix-env --switch-profile](command-ref/nix-env/switch-profile.md)
+ - [nix-env --uninstall](command-ref/nix-env/uninstall.md)
+ - [nix-env --upgrade](command-ref/nix-env/upgrade.md)
- [Utilities](command-ref/utilities.md)
- [nix-channel](command-ref/nix-channel.md)
- [nix-collect-garbage](command-ref/nix-collect-garbage.md)
@@ -57,16 +89,23 @@
- [nix-instantiate](command-ref/nix-instantiate.md)
- [nix-prefetch-url](command-ref/nix-prefetch-url.md)
- [Experimental Commands](command-ref/experimental-commands.md)
-@manpages@
+{{#include ./command-ref/new-cli/SUMMARY.md}}
- [Files](command-ref/files.md)
- [nix.conf](command-ref/conf-file.md)
+ - [Profiles](command-ref/files/profiles.md)
+ - [manifest.nix](command-ref/files/manifest.nix.md)
+ - [manifest.json](command-ref/files/manifest.json.md)
+ - [Channels](command-ref/files/channels.md)
+ - [Default Nix expression](command-ref/files/default-nix-expression.md)
- [Architecture](architecture/architecture.md)
- [Glossary](glossary.md)
- [Contributing](contributing/contributing.md)
- [Hacking](contributing/hacking.md)
+ - [Experimental Features](contributing/experimental-features.md)
- [CLI guideline](contributing/cli-guideline.md)
- [Release Notes](release-notes/release-notes.md)
- [Release X.Y (202?-??-??)](release-notes/rl-next.md)
+ - [Release 2.15 (2023-04-11)](release-notes/rl-2.15.md)
- [Release 2.14 (2023-02-28)](release-notes/rl-2.14.md)
- [Release 2.13 (2023-01-17)](release-notes/rl-2.13.md)
- [Release 2.12 (2022-12-06)](release-notes/rl-2.12.md)
diff --git a/doc/manual/src/advanced-topics/diff-hook.md b/doc/manual/src/advanced-topics/diff-hook.md
index 4a742c160..207aad3b8 100644
--- a/doc/manual/src/advanced-topics/diff-hook.md
+++ b/doc/manual/src/advanced-topics/diff-hook.md
@@ -48,13 +48,13 @@ If the build passes and is deterministic, Nix will exit with a status
code of 0:
```console
-$ nix-build ./deterministic.nix -A stable
+$ nix-build ./deterministic.nix --attr stable
this derivation will be built:
/nix/store/z98fasz2jqy9gs0xbvdj939p27jwda38-stable.drv
building '/nix/store/z98fasz2jqy9gs0xbvdj939p27jwda38-stable.drv'...
/nix/store/yyxlzw3vqaas7wfp04g0b1xg51f2czgq-stable
-$ nix-build ./deterministic.nix -A stable --check
+$ nix-build ./deterministic.nix --attr stable --check
checking outputs of '/nix/store/z98fasz2jqy9gs0xbvdj939p27jwda38-stable.drv'...
/nix/store/yyxlzw3vqaas7wfp04g0b1xg51f2czgq-stable
```
@@ -63,13 +63,13 @@ If the build is not deterministic, Nix will exit with a status code of
1:
```console
-$ nix-build ./deterministic.nix -A unstable
+$ nix-build ./deterministic.nix --attr unstable
this derivation will be built:
/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv
building '/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv'...
/nix/store/krpqk0l9ib0ibi1d2w52z293zw455cap-unstable
-$ nix-build ./deterministic.nix -A unstable --check
+$ nix-build ./deterministic.nix --attr unstable --check
checking outputs of '/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv'...
error: derivation '/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv' may
not be deterministic: output '/nix/store/krpqk0l9ib0ibi1d2w52z293zw455cap-unstable' differs
@@ -89,7 +89,7 @@ Using `--check` with `--keep-failed` will cause Nix to keep the second
build's output in a special, `.check` path:
```console
-$ nix-build ./deterministic.nix -A unstable --check --keep-failed
+$ nix-build ./deterministic.nix --attr unstable --check --keep-failed
checking outputs of '/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv'...
note: keeping build directory '/tmp/nix-build-unstable.drv-0'
error: derivation '/nix/store/cgl13lbj1w368r5z8gywipl1ifli7dhk-unstable.drv' may
diff --git a/doc/manual/src/advanced-topics/post-build-hook.md b/doc/manual/src/advanced-topics/post-build-hook.md
index 1479cc3a4..a251dec48 100644
--- a/doc/manual/src/advanced-topics/post-build-hook.md
+++ b/doc/manual/src/advanced-topics/post-build-hook.md
@@ -90,7 +90,7 @@ Then, restart the `nix-daemon`.
Build any derivation, for example:
```console
-$ nix-build -E '(import <nixpkgs> {}).writeText "example" (builtins.toString builtins.currentTime)'
+$ nix-build --expr '(import <nixpkgs> {}).writeText "example" (builtins.toString builtins.currentTime)'
this derivation will be built:
/nix/store/s4pnfbkalzy5qz57qs6yybna8wylkig6-example.drv
building '/nix/store/s4pnfbkalzy5qz57qs6yybna8wylkig6-example.drv'...
diff --git a/doc/manual/src/command-ref/env-common.md b/doc/manual/src/command-ref/env-common.md
index c5d38db47..b4a9bb2a9 100644
--- a/doc/manual/src/command-ref/env-common.md
+++ b/doc/manual/src/command-ref/env-common.md
@@ -2,18 +2,29 @@
Most Nix commands interpret the following environment variables:
- - [`IN_NIX_SHELL`]{#env-IN_NIX_SHELL}\
+ - <span id="env-IN_NIX_SHELL">[`IN_NIX_SHELL`](#env-IN_NIX_SHELL)</span>\
Indicator that tells if the current environment was set up by
`nix-shell`. It can have the values `pure` or `impure`.
- - [`NIX_PATH`]{#env-NIX_PATH}\
+ - <span id="env-NIX_PATH">[`NIX_PATH`](#env-NIX_PATH)</span>\
A colon-separated list of directories used to look up the location of Nix
- expressions using [paths](../language/values.md#type-path)
+ expressions using [paths](@docroot@/language/values.md#type-path)
enclosed in angle brackets (i.e., `<path>`),
e.g. `/home/eelco/Dev:/etc/nixos`. It can be extended using the
- [`-I` option](./opt-common.md#opt-I).
+ [`-I` option](@docroot@/command-ref/opt-common.md#opt-I).
- - [`NIX_IGNORE_SYMLINK_STORE`]{#env-NIX_IGNORE_SYMLINK_STORE}\
+ If `NIX_PATH` is not set at all, Nix will fall back to the following list in [impure](@docroot@/command-ref/conf-file.md#conf-pure-eval) and [unrestricted](@docroot@/command-ref/conf-file.md#conf-restrict-eval) evaluation mode:
+
+ 1. `$HOME/.nix-defexpr/channels`
+ 2. `nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixpkgs`
+ 3. `/nix/var/nix/profiles/per-user/root/channels`
+
+ If `NIX_PATH` is set to an empty string, resolving search paths will always fail.
+ For example, attempting to use `<nixpkgs>` will produce:
+
+ error: file 'nixpkgs' was not found in the Nix search path
+
+ - <span id="env-NIX_IGNORE_SYMLINK_STORE">[`NIX_IGNORE_SYMLINK_STORE`](#env-NIX_IGNORE_SYMLINK_STORE)</span>\
Normally, the Nix store directory (typically `/nix/store`) is not
allowed to contain any symlink components. This is to prevent
“impure” builds. Builders sometimes “canonicalise” paths by
@@ -35,72 +46,79 @@ Most Nix commands interpret the following environment variables:
Consult the mount(8) manual page for details.
- - [`NIX_STORE_DIR`]{#env-NIX_STORE_DIR}\
+ - <span id="env-NIX_STORE_DIR">[`NIX_STORE_DIR`](#env-NIX_STORE_DIR)</span>\
Overrides the location of the Nix store (default `prefix/store`).
- - [`NIX_DATA_DIR`]{#env-NIX_DATA_DIR}\
+ - <span id="env-NIX_DATA_DIR">[`NIX_DATA_DIR`](#env-NIX_DATA_DIR)</span>\
Overrides the location of the Nix static data directory (default
`prefix/share`).
- - [`NIX_LOG_DIR`]{#env-NIX_LOG_DIR}\
+ - <span id="env-NIX_LOG_DIR">[`NIX_LOG_DIR`](#env-NIX_LOG_DIR)</span>\
Overrides the location of the Nix log directory (default
`prefix/var/log/nix`).
- - [`NIX_STATE_DIR`]{#env-NIX_STATE_DIR}\
+ - <span id="env-NIX_STATE_DIR">[`NIX_STATE_DIR`](#env-NIX_STATE_DIR)</span>\
Overrides the location of the Nix state directory (default
`prefix/var/nix`).
- - [`NIX_CONF_DIR`]{#env-NIX_CONF_DIR}\
+ - <span id="env-NIX_CONF_DIR">[`NIX_CONF_DIR`](#env-NIX_CONF_DIR)</span>\
Overrides the location of the system Nix configuration directory
(default `prefix/etc/nix`).
- - [`NIX_CONFIG`]{#env-NIX_CONFIG}\
+ - <span id="env-NIX_CONFIG">[`NIX_CONFIG`](#env-NIX_CONFIG)</span>\
Applies settings from Nix configuration from the environment.
The content is treated as if it was read from a Nix configuration file.
Settings are separated by the newline character.
- - [`NIX_USER_CONF_FILES`]{#env-NIX_USER_CONF_FILES}\
- Overrides the location of the user Nix configuration files to load
- from (defaults to the XDG spec locations). The variable is treated
- as a list separated by the `:` token.
+ - <span id="env-NIX_USER_CONF_FILES">[`NIX_USER_CONF_FILES`](#env-NIX_USER_CONF_FILES)</span>\
+ Overrides the location of the Nix user configuration files to load from.
- - [`TMPDIR`]{#env-TMPDIR}\
+   The defaults are the locations according to the [XDG Base Directory Specification].
+ See the [XDG Base Directories](#xdg-base-directories) sub-section for details.
+
+ The variable is treated as a list separated by the `:` token.
+
+ - <span id="env-TMPDIR">[`TMPDIR`](#env-TMPDIR)</span>\
Use the specified directory to store temporary files. In particular,
this includes temporary build directories; these can take up
substantial amounts of disk space. The default is `/tmp`.
- - [`NIX_REMOTE`]{#env-NIX_REMOTE}\
+ - <span id="env-NIX_REMOTE">[`NIX_REMOTE`](#env-NIX_REMOTE)</span>\
This variable should be set to `daemon` if you want to use the Nix
daemon to execute Nix operations. This is necessary in [multi-user
- Nix installations](../installation/multi-user.md). If the Nix
+ Nix installations](@docroot@/installation/multi-user.md). If the Nix
daemon's Unix socket is at some non-standard path, this variable
should be set to `unix://path/to/socket`. Otherwise, it should be
left unset.
- - [`NIX_SHOW_STATS`]{#env-NIX_SHOW_STATS}\
+ - <span id="env-NIX_SHOW_STATS">[`NIX_SHOW_STATS`](#env-NIX_SHOW_STATS)</span>\
If set to `1`, Nix will print some evaluation statistics, such as
the number of values allocated.
- - [`NIX_COUNT_CALLS`]{#env-NIX_COUNT_CALLS}\
+ - <span id="env-NIX_COUNT_CALLS">[`NIX_COUNT_CALLS`](#env-NIX_COUNT_CALLS)</span>\
If set to `1`, Nix will print how often functions were called during
Nix expression evaluation. This is useful for profiling your Nix
expressions.
- - [`GC_INITIAL_HEAP_SIZE`]{#env-GC_INITIAL_HEAP_SIZE}\
+ - <span id="env-GC_INITIAL_HEAP_SIZE">[`GC_INITIAL_HEAP_SIZE`](#env-GC_INITIAL_HEAP_SIZE)</span>\
If Nix has been configured to use the Boehm garbage collector, this
variable sets the initial size of the heap in bytes. It defaults to
384 MiB. Setting it to a low value reduces memory consumption, but
will increase runtime due to the overhead of garbage collection.
-## XDG Base Directory
+## XDG Base Directories
+
+Nix follows the [XDG Base Directory Specification].
+
+For backwards compatibility, Nix commands will follow the standard only when [`use-xdg-base-directories`] is enabled.
+[New Nix commands](@docroot@/command-ref/new-cli/nix.md) (experimental) conform to the standard by default.
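+
+For example, to opt the classic commands into the XDG layout, the setting can be enabled in `nix.conf` (a minimal sketch of the relevant entry):
+
+```
+use-xdg-base-directories = true
+```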
-New Nix commands conform to the [XDG Base Directory Specification], and use the following environment variables to determine locations of various state and configuration files:
+The following environment variables are used to determine locations of various state and configuration files:
- [`XDG_CONFIG_HOME`]{#env-XDG_CONFIG_HOME} (default `~/.config`)
- [`XDG_STATE_HOME`]{#env-XDG_STATE_HOME} (default `~/.local/state`)
- [`XDG_CACHE_HOME`]{#env-XDG_CACHE_HOME} (default `~/.cache`)
-Classic Nix commands can also be made to follow this standard using the [`use-xdg-base-directories`] configuration option.
[XDG Base Directory Specification]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
-[`use-xdg-base-directories`]: ../command-ref/conf-file.md#conf-use-xdg-base-directories
\ No newline at end of file
+[`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories
diff --git a/doc/manual/src/command-ref/experimental-commands.md b/doc/manual/src/command-ref/experimental-commands.md
index cfa6f8b73..286ddc6d6 100644
--- a/doc/manual/src/command-ref/experimental-commands.md
+++ b/doc/manual/src/command-ref/experimental-commands.md
@@ -1,6 +1,6 @@
# Experimental Commands
-This section lists experimental commands.
+This section lists [experimental commands](@docroot@/contributing/experimental-features.md#xp-feature-nix-command).
> **Warning**
>
diff --git a/doc/manual/src/command-ref/files/channels.md b/doc/manual/src/command-ref/files/channels.md
new file mode 100644
index 000000000..7b1f27128
--- /dev/null
+++ b/doc/manual/src/command-ref/files/channels.md
@@ -0,0 +1,26 @@
+## Channels
+
+A directory containing symlinks to Nix channels, managed by [`nix-channel`]:
+
+- `$XDG_STATE_HOME/nix/profiles/channels` for regular users
+- `$NIX_STATE_DIR/profiles/per-user/root/channels` for `root`
+
+[`nix-channel`] uses a [profile](@docroot@/command-ref/files/profiles.md) to store channels.
+This profile contains symlinks to the contents of those channels.
+
+## Subscribed channels
+
+The list of subscribed channels is stored in
+
+- `~/.nix-channels`
+- `$XDG_STATE_HOME/nix/channels` if [`use-xdg-base-directories`] is set to `true`
+
+in the following format:
+
+```
+<url> <name>
+...
+```
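+
+For example, after subscribing to the unstable Nixpkgs channel with `nix-channel --add https://nixos.org/channels/nixpkgs-unstable`, the file would contain a line like the following (the derived name is illustrative):
+
+```
+https://nixos.org/channels/nixpkgs-unstable nixpkgs
+```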
+
+[`nix-channel`]: @docroot@/command-ref/nix-channel.md
+[`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories
diff --git a/doc/manual/src/command-ref/files/default-nix-expression.md b/doc/manual/src/command-ref/files/default-nix-expression.md
new file mode 100644
index 000000000..620f7035c
--- /dev/null
+++ b/doc/manual/src/command-ref/files/default-nix-expression.md
@@ -0,0 +1,52 @@
+## Default Nix expression
+
+The source for the default [Nix expressions](@docroot@/language/index.md) used by [`nix-env`]:
+
+- `~/.nix-defexpr`
+- `$XDG_STATE_HOME/nix/defexpr` if [`use-xdg-base-directories`] is set to `true`.
+
+It is loaded as follows:
+
+- If the default expression is a file, it is loaded as a Nix expression.
+- If the default expression is a directory containing a `default.nix` file, that `default.nix` file is loaded as a Nix expression.
+- If the default expression is a directory without a `default.nix` file, then its contents (both files and subdirectories) are loaded as Nix expressions.
+ The expressions are combined into a single attribute set, each expression under an attribute with the same name as the original file or subdirectory.
+ Subdirectories without a `default.nix` file are traversed recursively in search of more Nix expressions, but the names of these intermediate directories are not added to the attribute paths of the default Nix expression.
+
+Then, the resulting expression is interpreted like this:
+
+- If the expression is an attribute set, it is used as the default Nix expression.
+- If the expression is a function, an empty set is passed as argument and the return value is used as the default Nix expression.
+
+
+For example, if the default expression contains two files, `foo.nix` and `bar.nix`, then the default Nix expression will be equivalent to
+
+```nix
+{
+ foo = import ~/.nix-defexpr/foo.nix;
+ bar = import ~/.nix-defexpr/bar.nix;
+}
+```
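+
+Similarly, if the default expression is a single file containing a function, that function is called with an empty attribute set and its result is used. A minimal hypothetical sketch of such a file:
+
+```nix
+# Called with an empty attribute set; the returned attribute set
+# becomes the default Nix expression.
+{ ... }:
+{
+  hello = (import <nixpkgs> { }).hello;
+}
+```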
+
+The file [`manifest.nix`](@docroot@/command-ref/files/manifest.nix.md) is always ignored.
+
+The command [`nix-channel`] places a symlink to the user's current [channels profile](@docroot@/command-ref/files/channels.md) in this directory.
+This makes all subscribed channels available as attributes in the default expression.
+
+## User channel link
+
+A symlink that ensures that [`nix-env`] can find your channels:
+
+- `~/.nix-defexpr/channels`
+- `$XDG_STATE_HOME/nix/defexpr/channels` if [`use-xdg-base-directories`] is set to `true`.
+
+This symlink points to:
+
+- `$XDG_STATE_HOME/nix/profiles/channels` for regular users
+- `$NIX_STATE_DIR/profiles/per-user/root/channels` for `root`
+
+In a multi-user installation, you may also have `~/.nix-defexpr/channels_root`, which links to the channels of the root user.
+
+[`nix-env`]: @docroot@/command-ref/nix-env.md
+[`nix-channel`]: @docroot@/command-ref/nix-channel.md
+[`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories
diff --git a/doc/manual/src/command-ref/files/manifest.json.md b/doc/manual/src/command-ref/files/manifest.json.md
new file mode 100644
index 000000000..bcfe7373d
--- /dev/null
+++ b/doc/manual/src/command-ref/files/manifest.json.md
@@ -0,0 +1,45 @@
+## `manifest.json`
+
+The manifest file records the provenance of the packages that are installed in a [profile](./profiles.md) managed by [`nix profile`](@docroot@/command-ref/new-cli/nix3-profile.md) (experimental).
+
+Here is an example of what the file might look like after installing `zoom-us` from Nixpkgs:
+
+```json
+{
+ "version": 1,
+ "elements": [
+ {
+ "active": true,
+ "attrPath": "legacyPackages.x86_64-linux.zoom-us",
+ "originalUrl": "flake:nixpkgs",
+ "storePaths": [
+ "/nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927"
+ ],
+ "uri": "github:NixOS/nixpkgs/13d0c311e3ae923a00f734b43fd1d35b47d8943a"
+ },
+ …
+ ]
+}
+```
+
+Each object in the array `elements` denotes an installed package and
+has the following fields:
+
+* `originalUrl`: The [flake reference](@docroot@/command-ref/new-cli/nix3-flake.md) specified by
+ the user at the time of installation (e.g. `nixpkgs`). This is also
+ the flake reference that will be used by `nix profile upgrade`.
+
+* `uri`: The locked flake reference to which `originalUrl` resolved.
+
+* `attrPath`: The flake output attribute that provided this
+ package. Note that this is not necessarily the attribute that the
+ user specified, but the one resulting from applying the default
+ attribute paths and prefixes; for instance, `hello` might resolve to
+ `packages.x86_64-linux.hello` and the empty string to
+ `packages.x86_64-linux.default`.
+
+* `storePaths`: The paths in the Nix store containing the package.
+
+* `active`: Whether the profile contains symlinks to the files of this
+ package. If set to false, the package is kept in the Nix store, but
+ is not "visible" in the profile's symlink tree.
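+
+Since the manifest is plain JSON, it can be inspected with standard tools. For example, a hypothetical `jq` invocation that lists the attribute paths of the active elements (assuming `jq` is installed and the profile link is at `~/.nix-profile`):
+
+```console
+$ jq '.elements[] | select(.active) | .attrPath' ~/.nix-profile/manifest.json
+```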
diff --git a/doc/manual/src/command-ref/files/manifest.nix.md b/doc/manual/src/command-ref/files/manifest.nix.md
new file mode 100644
index 000000000..d7d1b605b
--- /dev/null
+++ b/doc/manual/src/command-ref/files/manifest.nix.md
@@ -0,0 +1,128 @@
+## `manifest.nix`
+
+The manifest file records the provenance of the packages that are installed in a [profile](./profiles.md) managed by [`nix-env`](@docroot@/command-ref/nix-env.md).
+
+Here is an example of what this file might look like after installing `hello` from Nixpkgs:
+
+```nix
+[{
+ meta = {
+ available = true;
+ broken = false;
+ changelog =
+ "https://git.savannah.gnu.org/cgit/hello.git/plain/NEWS?h=v2.12.1";
+ description = "A program that produces a familiar, friendly greeting";
+ homepage = "https://www.gnu.org/software/hello/manual/";
+ insecure = false;
+ license = {
+ deprecated = false;
+ free = true;
+ fullName = "GNU General Public License v3.0 or later";
+ redistributable = true;
+ shortName = "gpl3Plus";
+ spdxId = "GPL-3.0-or-later";
+ url = "https://spdx.org/licenses/GPL-3.0-or-later.html";
+ };
+ longDescription = ''
+ GNU Hello is a program that prints "Hello, world!" when you run it.
+ It is fully customizable.
+ '';
+ maintainers = [{
+ email = "edolstra+nixpkgs@gmail.com";
+ github = "edolstra";
+ githubId = 1148549;
+ name = "Eelco Dolstra";
+ }];
+ name = "hello-2.12.1";
+ outputsToInstall = [ "out" ];
+ platforms = [
+ "i686-cygwin"
+ "x86_64-cygwin"
+ "x86_64-darwin"
+ "i686-darwin"
+ "aarch64-darwin"
+ "armv7a-darwin"
+ "i686-freebsd13"
+ "x86_64-freebsd13"
+ "aarch64-genode"
+ "i686-genode"
+ "x86_64-genode"
+ "x86_64-solaris"
+ "js-ghcjs"
+ "aarch64-linux"
+ "armv5tel-linux"
+ "armv6l-linux"
+ "armv7a-linux"
+ "armv7l-linux"
+ "i686-linux"
+ "m68k-linux"
+ "microblaze-linux"
+ "microblazeel-linux"
+ "mipsel-linux"
+ "mips64el-linux"
+ "powerpc64-linux"
+ "powerpc64le-linux"
+ "riscv32-linux"
+ "riscv64-linux"
+ "s390-linux"
+ "s390x-linux"
+ "x86_64-linux"
+ "mmix-mmixware"
+ "aarch64-netbsd"
+ "armv6l-netbsd"
+ "armv7a-netbsd"
+ "armv7l-netbsd"
+ "i686-netbsd"
+ "m68k-netbsd"
+ "mipsel-netbsd"
+ "powerpc-netbsd"
+ "riscv32-netbsd"
+ "riscv64-netbsd"
+ "x86_64-netbsd"
+ "aarch64_be-none"
+ "aarch64-none"
+ "arm-none"
+ "armv6l-none"
+ "avr-none"
+ "i686-none"
+ "microblaze-none"
+ "microblazeel-none"
+ "msp430-none"
+ "or1k-none"
+ "m68k-none"
+ "powerpc-none"
+ "powerpcle-none"
+ "riscv32-none"
+ "riscv64-none"
+ "rx-none"
+ "s390-none"
+ "s390x-none"
+ "vc4-none"
+ "x86_64-none"
+ "i686-openbsd"
+ "x86_64-openbsd"
+ "x86_64-redox"
+ "wasm64-wasi"
+ "wasm32-wasi"
+ "x86_64-windows"
+ "i686-windows"
+ ];
+ position =
+ "/nix/store/7niq32w715567hbph0q13m5lqna64c1s-nixos-unstable.tar.gz/nixos-unstable.tar.gz/pkgs/applications/misc/hello/default.nix:34";
+ unfree = false;
+ unsupported = false;
+ };
+ name = "hello-2.12.1";
+ out = {
+ outPath = "/nix/store/260q5867crm1xjs4khgqpl6vr9kywql1-hello-2.12.1";
+ };
+ outPath = "/nix/store/260q5867crm1xjs4khgqpl6vr9kywql1-hello-2.12.1";
+ outputs = [ "out" ];
+ system = "x86_64-linux";
+ type = "derivation";
+}]
+```
+
+Each element in this list corresponds to an installed package.
+It incorporates some attributes of the original derivation, including `meta`, `name`, `out`, `outPath`, `outputs`, `system`.
+This information is used by Nix for querying and updating the package.
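+
+Since `manifest.nix` is an ordinary Nix expression, it can also be evaluated directly. As a rough sketch (assuming the profile link at `~/.nix-profile`), the names of the installed packages could be listed with:
+
+```console
+$ nix-instantiate --eval --strict --expr 'map (p: p.name) (import ~/.nix-profile/manifest.nix)'
+```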
diff --git a/doc/manual/src/command-ref/files/profiles.md b/doc/manual/src/command-ref/files/profiles.md
new file mode 100644
index 000000000..b5c737880
--- /dev/null
+++ b/doc/manual/src/command-ref/files/profiles.md
@@ -0,0 +1,74 @@
+## Profiles
+
+A directory that contains links to profiles managed by [`nix-env`] and [`nix profile`]:
+
+- `$XDG_STATE_HOME/nix/profiles` for regular users
+- `$NIX_STATE_DIR/profiles/per-user/root` if the user is `root`
+
+A profile is a directory of symlinks to files in the Nix store.
+
+### Filesystem layout
+
+Profiles are versioned as follows. When using a profile named *path*, *path* is a symlink to *path*`-`*N*`-link`, where *N* is the version of the profile.
+In turn, *path*`-`*N*`-link` is a symlink to a path in the Nix store.
+For example:
+
+```console
+$ ls -l ~alice/.local/state/nix/profiles/profile*
+lrwxrwxrwx 1 alice users 14 Nov 25 14:35 /home/alice/.local/state/nix/profiles/profile -> profile-7-link
+lrwxrwxrwx 1 alice users 51 Oct 28 16:18 /home/alice/.local/state/nix/profiles/profile-5-link -> /nix/store/q69xad13ghpf7ir87h0b2gd28lafjj1j-profile
+lrwxrwxrwx 1 alice users 51 Oct 29 13:20 /home/alice/.local/state/nix/profiles/profile-6-link -> /nix/store/6bvhpysd7vwz7k3b0pndn7ifi5xr32dg-profile
+lrwxrwxrwx 1 alice users 51 Nov 25 14:35 /home/alice/.local/state/nix/profiles/profile-7-link -> /nix/store/mp0x6xnsg0b8qhswy6riqvimai4gm677-profile
+```
+
+Each of these symlinks is a root for the Nix garbage collector.
+
+The store path corresponding to each version of the profile contains a tree of symlinks to the files of the installed packages, e.g.
+
+```console
+$ ll -R ~eelco/.local/state/nix/profiles/profile-7-link/
+/home/eelco/.local/state/nix/profiles/profile-7-link/:
+total 20
+dr-xr-xr-x 2 root root 4096 Jan 1 1970 bin
+-r--r--r-- 2 root root 1402 Jan 1 1970 manifest.nix
+dr-xr-xr-x 4 root root 4096 Jan 1 1970 share
+
+/home/eelco/.local/state/nix/profiles/profile-7-link/bin:
+total 20
+lrwxrwxrwx 5 root root 79 Jan 1 1970 chromium -> /nix/store/ijm5k0zqisvkdwjkc77mb9qzb35xfi4m-chromium-86.0.4240.111/bin/chromium
+lrwxrwxrwx 7 root root 87 Jan 1 1970 spotify -> /nix/store/w9182874m1bl56smps3m5zjj36jhp3rn-spotify-1.1.26.501.gbe11e53b-15/bin/spotify
+lrwxrwxrwx 3 root root 79 Jan 1 1970 zoom-us -> /nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927/bin/zoom-us
+
+/home/eelco/.local/state/nix/profiles/profile-7-link/share/applications:
+total 12
+lrwxrwxrwx 4 root root 120 Jan 1 1970 chromium-browser.desktop -> /nix/store/4cf803y4vzfm3gyk3vzhzb2327v0kl8a-chromium-unwrapped-86.0.4240.111/share/applications/chromium-browser.desktop
+lrwxrwxrwx 7 root root 110 Jan 1 1970 spotify.desktop -> /nix/store/w9182874m1bl56smps3m5zjj36jhp3rn-spotify-1.1.26.501.gbe11e53b-15/share/applications/spotify.desktop
+lrwxrwxrwx 3 root root 107 Jan 1 1970 us.zoom.Zoom.desktop -> /nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927/share/applications/us.zoom.Zoom.desktop
+
+…
+```
+
+Each profile version contains a manifest file:
+- [`manifest.nix`](@docroot@/command-ref/files/manifest.nix.md) used by [`nix-env`](@docroot@/command-ref/nix-env.md).
+- [`manifest.json`](@docroot@/command-ref/files/manifest.json.md) used by [`nix profile`](@docroot@/command-ref/new-cli/nix3-profile.md) (experimental).
+
+## User profile link
+
+A symbolic link to the user's current profile:
+
+- `~/.nix-profile`
+- `$XDG_STATE_HOME/nix/profile` if [`use-xdg-base-directories`] is set to `true`.
+
+By default, this symlink points to:
+
+- `$XDG_STATE_HOME/nix/profiles/profile` for regular users
+- `$NIX_STATE_DIR/profiles/per-user/root/profile` for `root`
+
+The `PATH` environment variable should include the `/bin` subdirectory of the profile link (e.g. `~/.nix-profile/bin`) for the user environment to be visible to the user.
+The [installer](@docroot@/installation/installing-binary.md) sets this up by default, unless you enable [`use-xdg-base-directories`].
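+
+For example, a login shell could be pointed at the profile link with something along these lines (a sketch for a Bourne-style shell; the installer normally arranges this for you):
+
+```console
+$ export PATH="$HOME/.nix-profile/bin:$PATH"
+```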
+
+[`nix-env`]: @docroot@/command-ref/nix-env.md
+[`nix profile`]: @docroot@/command-ref/new-cli/nix3-profile.md
+[`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories
diff --git a/doc/manual/src/command-ref/nix-build.md b/doc/manual/src/command-ref/nix-build.md
index 937b046b8..f70bbd7f2 100644
--- a/doc/manual/src/command-ref/nix-build.md
+++ b/doc/manual/src/command-ref/nix-build.md
@@ -38,7 +38,7 @@ directory containing at least a file named `default.nix`.
`nix-build` is essentially a wrapper around
[`nix-instantiate`](nix-instantiate.md) (to translate a high-level Nix
expression to a low-level [store derivation]) and [`nix-store
---realise`](nix-store.md#operation---realise) (to build the store
+--realise`](@docroot@/command-ref/nix-store/realise.md) (to build the store
derivation).
[store derivation]: ../glossary.md#gloss-store-derivation
@@ -51,9 +51,8 @@ derivation).
# Options
-All options not listed here are passed to `nix-store
---realise`, except for `--arg` and `--attr` / `-A` which are passed to
-`nix-instantiate`.
+All options not listed here are passed to `nix-store --realise`,
+except for `--arg` and `--attr` / `-A` which are passed to `nix-instantiate`.
- <span id="opt-no-out-link">[`--no-out-link`](#opt-no-out-link)</span>
@@ -70,12 +69,14 @@ All options not listed here are passed to `nix-store
Change the name of the symlink to the output path created from
`result` to *outlink*.
-The following common options are supported:
+{{#include ./opt-common.md}}
+
+{{#include ./env-common.md}}
# Examples
```console
-$ nix-build '<nixpkgs>' -A firefox
+$ nix-build '<nixpkgs>' --attr firefox
store derivation is /nix/store/qybprl8sz2lc...-firefox-1.5.0.7.drv
/nix/store/d18hyl92g30l...-firefox-1.5.0.7
@@ -90,7 +91,7 @@ If a derivation has multiple outputs, `nix-build` will build the default
(first) output. You can also build all outputs:
```console
-$ nix-build '<nixpkgs>' -A openssl.all
+$ nix-build '<nixpkgs>' --attr openssl.all
```
This will create a symlink for each output named `result-outputname`.
@@ -100,7 +101,7 @@ outputs `out`, `bin` and `man`, `nix-build` will create symlinks
specific output:
```console
-$ nix-build '<nixpkgs>' -A openssl.man
+$ nix-build '<nixpkgs>' --attr openssl.man
```
This will create a symlink `result-man`.
@@ -108,7 +109,7 @@ This will create a symlink `result-man`.
Build a Nix expression given on the command line:
```console
-$ nix-build -E 'with import <nixpkgs> { }; runCommand "foo" { } "echo bar > $out"'
+$ nix-build --expr 'with import <nixpkgs> { }; runCommand "foo" { } "echo bar > $out"'
$ cat ./result
bar
```
@@ -117,5 +118,5 @@ Build the GNU Hello package from the latest revision of the master
branch of Nixpkgs:
```console
-$ nix-build https://github.com/NixOS/nixpkgs/archive/master.tar.gz -A hello
+$ nix-build https://github.com/NixOS/nixpkgs/archive/master.tar.gz --attr hello
```
diff --git a/doc/manual/src/command-ref/nix-channel.md b/doc/manual/src/command-ref/nix-channel.md
index 24353525f..a210583ae 100644
--- a/doc/manual/src/command-ref/nix-channel.md
+++ b/doc/manual/src/command-ref/nix-channel.md
@@ -22,6 +22,9 @@ This command has the following operations:
channels. If *name* is omitted, it defaults to the last component of
*url*, with the suffixes `-stable` or `-unstable` removed.
+ A channel URL must point to a directory containing a file `nixexprs.tar.gz`.
+ At the top level, that tarball must contain a single directory with a `default.nix` file that serves as the channel’s entry point.
+
- `--remove` *name*\
Removes the channel named *name* from the list of subscribed
channels.
@@ -45,6 +48,16 @@ Note that `--add` does not automatically perform an update.
The list of subscribed channels is stored in `~/.nix-channels`.
+{{#include ./opt-common.md}}
+
+{{#include ./env-common.md}}
+
+# Files
+
+`nix-channel` operates on the following files.
+
+{{#include ./files/channels.md}}
+
# Examples
To subscribe to the Nixpkgs channel and install the GNU Hello package:
@@ -52,45 +65,18 @@ To subscribe to the Nixpkgs channel and install the GNU Hello package:
```console
$ nix-channel --add https://nixos.org/channels/nixpkgs-unstable
$ nix-channel --update
-$ nix-env -iA nixpkgs.hello
+$ nix-env --install --attr nixpkgs.hello
```
You can revert channel updates using `--rollback`:
```console
-$ nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'
+$ nix-instantiate --eval --expr '(import <nixpkgs> {}).lib.version'
"14.04.527.0e935f1"
$ nix-channel --rollback
switching from generation 483 to 482
-$ nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'
+$ nix-instantiate --eval --expr '(import <nixpkgs> {}).lib.version'
"14.04.526.dbadfad"
```
-
-# Files
-
- - `/nix/var/nix/profiles/per-user/username/channels`\
- `nix-channel` uses a `nix-env` profile to keep track of previous
- versions of the subscribed channels. Every time you run `nix-channel
- --update`, a new channel generation (that is, a symlink to the
- channel Nix expressions in the Nix store) is created. This enables
- `nix-channel --rollback` to revert to previous versions.
-
- - `~/.nix-defexpr/channels`\
- This is a symlink to
- `/nix/var/nix/profiles/per-user/username/channels`. It ensures that
- `nix-env` can find your channels. In a multi-user installation, you
- may also have `~/.nix-defexpr/channels_root`, which links to the
- channels of the root user.
-
-# Channel format
-
-A channel URL should point to a directory containing the following
-files:
-
- - `nixexprs.tar.xz`\
- A tarball containing Nix expressions and files referenced by them
- (such as build scripts and patches). At the top level, the tarball
- should contain a single directory. That directory must contain a
- file `default.nix` that serves as the channel’s “entry point”.
diff --git a/doc/manual/src/command-ref/nix-collect-garbage.md b/doc/manual/src/command-ref/nix-collect-garbage.md
index 296165993..51db5fc67 100644
--- a/doc/manual/src/command-ref/nix-collect-garbage.md
+++ b/doc/manual/src/command-ref/nix-collect-garbage.md
@@ -9,7 +9,7 @@
# Description
The command `nix-collect-garbage` is mostly an alias of [`nix-store
---gc`](nix-store.md#operation---gc), that is, it deletes all
+--gc`](@docroot@/command-ref/nix-store/gc.md), that is, it deletes all
unreachable paths in the Nix store to clean up your system. However,
it provides two additional options: `-d` (`--delete-old`), which
deletes all old generations of all profiles in `/nix/var/nix/profiles`
@@ -20,6 +20,10 @@ and `--delete-older-than` *period*, where period is a value such as
of days in all profiles in `/nix/var/nix/profiles` (except for the
generations that were active at that point in time).
+{{#include ./opt-common.md}}
+
+{{#include ./env-common.md}}
+
# Example
To delete from the Nix store everything that is not used by the current
diff --git a/doc/manual/src/command-ref/nix-copy-closure.md b/doc/manual/src/command-ref/nix-copy-closure.md
index cd8e351bb..fbf6828da 100644
--- a/doc/manual/src/command-ref/nix-copy-closure.md
+++ b/doc/manual/src/command-ref/nix-copy-closure.md
@@ -63,12 +63,16 @@ authentication, you can avoid typing the passphrase with `ssh-agent`.
- `-v`\
Show verbose output.
+{{#include ./opt-common.md}}
+
# Environment variables
- `NIX_SSHOPTS`\
Additional options to be passed to `ssh` on the command
line.
+{{#include ./env-common.md}}
+
# Examples
Copy Firefox with all its dependencies to a remote machine:
@@ -83,5 +87,5 @@ environment:
```console
$ nix-copy-closure --from alice@itchy.labs \
/nix/store/0dj0503hjxy5mbwlafv1rsbdiyx1gkdy-subversion-1.4.4
-$ nix-env -i /nix/store/0dj0503hjxy5mbwlafv1rsbdiyx1gkdy-subversion-1.4.4
+$ nix-env --install /nix/store/0dj0503hjxy5mbwlafv1rsbdiyx1gkdy-subversion-1.4.4
```
diff --git a/doc/manual/src/command-ref/nix-env.md b/doc/manual/src/command-ref/nix-env.md
index f4fa5b50c..941723216 100644
--- a/doc/manual/src/command-ref/nix-env.md
+++ b/doc/manual/src/command-ref/nix-env.md
@@ -4,15 +4,14 @@
# Synopsis
-`nix-env`
+`nix-env` *operation* [*options*] [*arguments…*]
[`--option` *name* *value*]
[`--arg` *name* *value*]
[`--argstr` *name* *value*]
[{`--file` | `-f`} *path*]
- [{`--profile` | `-p`} *path(]
+ [{`--profile` | `-p`} *path*]
[`--system-filter` *system*]
[`--dry-run`]
- *operation* [*options…*] [*arguments…*]
# Description
@@ -24,11 +23,33 @@ environments: different users can have different environments, and
individual users can switch between different environments.
`nix-env` takes exactly one *operation* flag which indicates the
-subcommand to be performed. These are documented below.
+subcommand to be performed. The following operations are available:
+
+- [`--install`](./nix-env/install.md)
+- [`--upgrade`](./nix-env/upgrade.md)
+- [`--uninstall`](./nix-env/uninstall.md)
+- [`--set`](./nix-env/set.md)
+- [`--set-flag`](./nix-env/set-flag.md)
+- [`--query`](./nix-env/query.md)
+- [`--switch-profile`](./nix-env/switch-profile.md)
+- [`--list-generations`](./nix-env/list-generations.md)
+- [`--delete-generations`](./nix-env/delete-generations.md)
+- [`--switch-generation`](./nix-env/switch-generation.md)
+- [`--rollback`](./nix-env/rollback.md)
+
+These pages can be viewed offline:
+
+- `man nix-env-<operation>`.
+
+ Example: `man nix-env-install`
+
+- `nix-env --help --<operation>`
+
+ Example: `nix-env --help --install`
# Selectors
-Several commands, such as `nix-env -q` and `nix-env -i`, take a list of
+Several commands, such as `nix-env --query` and `nix-env --install`, take a list of
arguments that specify the packages on which to operate. These are
extended regular expressions that must match the entire name of the
package. (For details on regular expressions, see **regex**(7).) The match is
@@ -60,835 +81,10 @@ match. Here are some examples:
Matches any package name containing the strings `firefox` or
`chromium`.
-# Common options
-
-This section lists the options that are common to all operations. These
-options are allowed for every subcommand, though they may not always
-have an effect.
-
- - `--file` / `-f` *path*\
- Specifies the Nix expression (designated below as the *active Nix
- expression*) used by the `--install`, `--upgrade`, and `--query
- --available` operations to obtain derivations. The default is
- `~/.nix-defexpr`.
-
- If the argument starts with `http://` or `https://`, it is
- interpreted as the URL of a tarball that will be downloaded and
- unpacked to a temporary location. The tarball must include a single
- top-level directory containing at least a file named `default.nix`.
-
- - `--profile` / `-p` *path*\
- Specifies the profile to be used by those operations that operate on
- a profile (designated below as the *active profile*). A profile is a
- sequence of user environments called *generations*, one of which is
- the *current generation*.
-
- - `--dry-run`\
- For the `--install`, `--upgrade`, `--uninstall`,
- `--switch-generation`, `--delete-generations` and `--rollback`
- operations, this flag will cause `nix-env` to print what *would* be
- done if this flag had not been specified, without actually doing it.
-
- `--dry-run` also prints out which paths will be
- [substituted](../glossary.md) (i.e., downloaded) and which paths
- will be built from source (because no substitute is available).
-
- - `--system-filter` *system*\
- By default, operations such as `--query
- --available` show derivations matching any platform. This option
- allows you to use derivations for the specified platform *system*.
-
-<!-- end list -->
-
# Files
- - `~/.nix-defexpr`\
- The source for the default Nix expressions used by the
- `--install`, `--upgrade`, and `--query --available` operations to
- obtain derivations. The `--file` option may be used to override
- this default.
-
- If `~/.nix-defexpr` is a file, it is loaded as a Nix expression. If
- the expression is a set, it is used as the default Nix expression.
- If the expression is a function, an empty set is passed as argument
- and the return value is used as the default Nix expression.
-
- If `~/.nix-defexpr` is a directory containing a `default.nix` file,
- that file is loaded as in the above paragraph.
-
- If `~/.nix-defexpr` is a directory without a `default.nix` file,
- then its contents (both files and subdirectories) are loaded as Nix
- expressions. The expressions are combined into a single set, each
- expression under an attribute with the same name as the original
- file or subdirectory.
-
- For example, if `~/.nix-defexpr` contains two files, `foo.nix` and
- `bar.nix`, then the default Nix expression will essentially be
-
- ```nix
- {
- foo = import ~/.nix-defexpr/foo.nix;
- bar = import ~/.nix-defexpr/bar.nix;
- }
- ```
-
- The file `manifest.nix` is always ignored. Subdirectories without a
- `default.nix` file are traversed recursively in search of more Nix
- expressions, but the names of these intermediate directories are not
- added to the attribute paths of the default Nix expression.
-
- The command `nix-channel` places symlinks to the downloaded Nix
- expressions from each subscribed channel in this directory.
-
- - `~/.nix-profile`\
- A symbolic link to the user's current profile. By default, this
- symlink points to `prefix/var/nix/profiles/default`. The `PATH`
- environment variable should include `~/.nix-profile/bin` for the
- user environment to be visible to the user.
-
-# Operation `--install`
-
-## Synopsis
-
-`nix-env` {`--install` | `-i`} *args…*
- [{`--prebuilt-only` | `-b`}]
- [{`--attr` | `-A`}]
- [`--from-expression`] [`-E`]
- [`--from-profile` *path*]
- [`--preserve-installed` | `-P`]
- [`--remove-all` | `-r`]
-
-## Description
-
-The install operation creates a new user environment, based on the
-current generation of the active profile, to which a set of store paths
-described by *args* is added. The arguments *args* map to store paths in
-a number of possible ways:
-
- - By default, *args* is a set of derivation names denoting derivations
- in the active Nix expression. These are realised, and the resulting
- output paths are installed. Currently installed derivations with a
- name equal to the name of a derivation being added are removed
- unless the option `--preserve-installed` is specified.
-
- If there are multiple derivations matching a name in *args* that
- have the same name (e.g., `gcc-3.3.6` and `gcc-4.1.1`), then the
- derivation with the highest *priority* is used. A derivation can
- define a priority by declaring the `meta.priority` attribute. This
- attribute should be a number, with a higher value denoting a lower
- priority. The default priority is `0`.
-
- If there are multiple matching derivations with the same priority,
- then the derivation with the highest version will be installed.
-
- You can force the installation of multiple derivations with the same
- name by being specific about the versions. For instance, `nix-env -i
- gcc-3.3.6 gcc-4.1.1` will install both version of GCC (and will
- probably cause a user environment conflict\!).
-
- - If `--attr` (`-A`) is specified, the arguments are *attribute
- paths* that select attributes from the top-level Nix
- expression. This is faster than using derivation names and
- unambiguous. To find out the attribute paths of available
- packages, use `nix-env -qaP`.
-
- - If `--from-profile` *path* is given, *args* is a set of names
- denoting installed store paths in the profile *path*. This is an
- easy way to copy user environment elements from one profile to
- another.
-
- - If `--from-expression` is given, *args* are Nix
- [functions](../language/constructs.md#functions)
- that are called with the active Nix expression as their single
- argument. The derivations returned by those function calls are
- installed. This allows derivations to be specified in an
- unambiguous way, which is necessary if there are multiple
- derivations with the same name.
-
- - If *args* are [store derivation]s, then these are
- [realised](nix-store.md#operation---realise), and the resulting output paths
- are installed.
-
- [store derivation]: ../glossary.md#gloss-store-derivation
-
- - If *args* are store paths that are not store derivations, then these
- are [realised](nix-store.md#operation---realise) and installed.
-
- - By default all outputs are installed for each derivation. That can
- be reduced by setting `meta.outputsToInstall`.
-
-## Flags
-
- - `--prebuilt-only` / `-b`\
- Use only derivations for which a substitute is registered, i.e.,
- there is a pre-built binary available that can be downloaded in lieu
- of building the derivation. Thus, no packages will be built from
- source.
-
- - `--preserve-installed`; `-P`\
- Do not remove derivations with a name matching one of the
- derivations being installed. Usually, trying to have two versions of
- the same package installed in the same generation of a profile will
- lead to an error in building the generation, due to file name
- clashes between the two versions. However, this is not the case for
- all packages.
-
- - `--remove-all`; `-r`\
- Remove all previously installed packages first. This is equivalent
- to running `nix-env -e '.*'` first, except that everything happens
- in a single transaction.
-
-## Examples
-
-To install a package using a specific attribute path from the active Nix expression:
-
-```console
-$ nix-env -iA gcc40mips
-installing `gcc-4.0.2'
-$ nix-env -iA xorg.xorgserver
-installing `xorg-server-1.2.0'
-```
-
-To install a specific version of `gcc` using the derivation name:
-
-```console
-$ nix-env --install gcc-3.3.2
-installing `gcc-3.3.2'
-uninstalling `gcc-3.1'
-```
-
-Using attribute path for selecting a package is preferred,
-as it is much faster and there will not be multiple matches.
-
-Note the previously installed version is removed, since
-`--preserve-installed` was not specified.
-
-To install an arbitrary version:
-
-```console
-$ nix-env --install gcc
-installing `gcc-3.3.2'
-```
-
-To install all derivations in the Nix expression `foo.nix`:
-
-```console
-$ nix-env -f ~/foo.nix -i '.*'
-```
-
-To copy the store path with symbolic name `gcc` from another profile:
-
-```console
-$ nix-env -i --from-profile /nix/var/nix/profiles/foo gcc
-```
-
-To install a specific [store derivation] (typically created by
-`nix-instantiate`):
-
-```console
-$ nix-env -i /nix/store/fibjb1bfbpm5mrsxc4mh2d8n37sxh91i-gcc-3.4.3.drv
-```
-
-To install a specific output path:
-
-```console
-$ nix-env -i /nix/store/y3cgx0xj1p4iv9x0pnnmdhr8iyg741vk-gcc-3.4.3
-```
-
-To install from a Nix expression specified on the command-line:
-
-```console
-$ nix-env -f ./foo.nix -i -E \
- 'f: (f {system = "i686-linux";}).subversionWithJava'
-```
-
-I.e., this evaluates to `(f: (f {system =
-"i686-linux";}).subversionWithJava) (import ./foo.nix)`, thus selecting
-the `subversionWithJava` attribute from the set returned by calling the
-function defined in `./foo.nix`.
-
-A dry-run tells you which paths will be downloaded or built from source:
-
-```console
-$ nix-env -f '<nixpkgs>' -iA hello --dry-run
-(dry run; not doing anything)
-installing ‘hello-2.10’
-this path will be fetched (0.04 MiB download, 0.19 MiB unpacked):
- /nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10
- ...
-```
-
-To install Firefox from the latest revision in the Nixpkgs/NixOS 14.12
-channel:
-
-```console
-$ nix-env -f https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz -iA firefox
-```
-
-# Operation `--upgrade`
-
-## Synopsis
-
-`nix-env` {`--upgrade` | `-u`} *args*
- [`--lt` | `--leq` | `--eq` | `--always`]
- [{`--prebuilt-only` | `-b`}]
- [{`--attr` | `-A`}]
- [`--from-expression`] [`-E`]
- [`--from-profile` *path*]
- [`--preserve-installed` | `-P`]
-
-## Description
-
-The upgrade operation creates a new user environment, based on the
-current generation of the active profile, in which all store paths are
-replaced for which there are newer versions in the set of paths
-described by *args*. Paths for which there are no newer versions are
-left untouched; this is not an error. It is also not an error if an
-element of *args* matches no installed derivations.
-
-For a description of how *args* is mapped to a set of store paths, see
-[`--install`](#operation---install). If *args* describes multiple
-store paths with the same symbolic name, only the one with the highest
-version is installed.
-
-## Flags
-
- - `--lt`\
- Only upgrade a derivation to newer versions. This is the default.
-
- - `--leq`\
- In addition to upgrading to newer versions, also “upgrade” to
- derivations that have the same version. Version are not a unique
- identification of a derivation, so there may be many derivations
- that have the same version. This flag may be useful to force
- “synchronisation” between the installed and available derivations.
-
- - `--eq`\
- *Only* “upgrade” to derivations that have the same version. This may
- not seem very useful, but it actually is, e.g., when there is a new
- release of Nixpkgs and you want to replace installed applications
- with the same versions built against newer dependencies (to reduce
- the number of dependencies floating around on your system).
-
- - `--always`\
- In addition to upgrading to newer versions, also “upgrade” to
- derivations that have the same or a lower version. I.e., derivations
- may actually be downgraded depending on what is available in the
- active Nix expression.
-
-For the other flags, see `--install`.
-
-## Examples
-
-```console
-$ nix-env --upgrade -A nixpkgs.gcc
-upgrading `gcc-3.3.1' to `gcc-3.4'
-```
-
-When there are no updates available, nothing will happen:
-
-```console
-$ nix-env --upgrade -A nixpkgs.pan
-```
-
-Using `-A` is preferred when possible, as it is faster and unambiguous but
-it is also possible to upgrade to a specific version by matching the derivation name:
-
-```console
-$ nix-env -u gcc-3.3.2 --always
-upgrading `gcc-3.4' to `gcc-3.3.2'
-```
-
-To try to upgrade everything
-(matching packages based on the part of the derivation name without version):
-
-```console
-$ nix-env -u
-upgrading `hello-2.1.2' to `hello-2.1.3'
-upgrading `mozilla-1.2' to `mozilla-1.4'
-```
-
-## Versions
-
-The upgrade operation determines whether a derivation `y` is an upgrade
-of a derivation `x` by looking at their respective `name` attributes.
-The names (e.g., `gcc-3.3.1` are split into two parts: the package name
-(`gcc`), and the version (`3.3.1`). The version part starts after the
-first dash not followed by a letter. `y` is considered an upgrade of `x`
-if their package names match, and the version of `y` is higher than that
-of `x`.
-
-The versions are compared by splitting them into contiguous components
-of numbers and letters. E.g., `3.3.1pre5` is split into `[3, 3, 1,
-"pre", 5]`. These lists are then compared lexicographically (from left
-to right). Corresponding components `a` and `b` are compared as follows.
-If they are both numbers, integer comparison is used. If `a` is an empty
-string and `b` is a number, `a` is considered less than `b`. The special
-string component `pre` (for *pre-release*) is considered to be less than
-other components. String components are considered less than number
-components. Otherwise, they are compared lexicographically (i.e., using
-case-sensitive string comparison).
-
-This is illustrated by the following examples:
-
- 1.0 < 2.3
- 2.1 < 2.3
- 2.3 = 2.3
- 2.5 > 2.3
- 3.1 > 2.3
- 2.3.1 > 2.3
- 2.3.1 > 2.3a
- 2.3pre1 < 2.3
- 2.3pre3 < 2.3pre12
- 2.3a < 2.3c
- 2.3pre1 < 2.3c
- 2.3pre1 < 2.3q
-
-# Operation `--uninstall`
-
-## Synopsis
-
-`nix-env` {`--uninstall` | `-e`} *drvnames…*
-
-## Description
-
-The uninstall operation creates a new user environment, based on the
-current generation of the active profile, from which the store paths
-designated by the symbolic names *drvnames* are removed.
-
-## Examples
-
-```console
-$ nix-env --uninstall gcc
-$ nix-env -e '.*' (remove everything)
-```
-
-# Operation `--set`
-
-## Synopsis
-
-`nix-env` `--set` *drvname*
-
-## Description
-
-The `--set` operation modifies the current generation of a profile so
-that it contains exactly the specified derivation, and nothing else.
-
-## Examples
-
-The following updates a profile such that its current generation will
-contain just Firefox:
-
-```console
-$ nix-env -p /nix/var/nix/profiles/browser --set firefox
-```
-
-# Operation `--set-flag`
-
-## Synopsis
-
-`nix-env` `--set-flag` *name* *value* *drvnames*
-
-## Description
-
-The `--set-flag` operation allows meta attributes of installed packages
-to be modified. There are several attributes that can be usefully
-modified, because they affect the behaviour of `nix-env` or the user
-environment build script:
-
- - `priority` can be changed to resolve filename clashes. The user
- environment build script uses the `meta.priority` attribute of
- derivations to resolve filename collisions between packages. Lower
- priority values denote a higher priority. For instance, the GCC
- wrapper package and the Binutils package in Nixpkgs both have a file
- `bin/ld`, so previously if you tried to install both you would get a
- collision. Now, on the other hand, the GCC wrapper declares a higher
- priority than Binutils, so the former’s `bin/ld` is symlinked in the
- user environment.
-
- - `keep` can be set to `true` to prevent the package from being
- upgraded or replaced. This is useful if you want to hang on to an
- older version of a package.
-
- - `active` can be set to `false` to “disable” the package. That is, no
- symlinks will be generated to the files of the package, but it
- remains part of the profile (so it won’t be garbage-collected). It
- can be set back to `true` to re-enable the package.
-
-## Examples
-
-To prevent the currently installed Firefox from being upgraded:
-
-```console
-$ nix-env --set-flag keep true firefox
-```
-
-After this, `nix-env -u` will ignore Firefox.
-
-To disable the currently installed Firefox, then install a new Firefox
-while the old remains part of the profile:
-
-```console
-$ nix-env -q
-firefox-2.0.0.9 (the current one)
-
-$ nix-env --preserve-installed -i firefox-2.0.0.11
-installing `firefox-2.0.0.11'
-building path(s) `/nix/store/myy0y59q3ig70dgq37jqwg1j0rsapzsl-user-environment'
-collision between `/nix/store/...-firefox-2.0.0.11/bin/firefox'
- and `/nix/store/...-firefox-2.0.0.9/bin/firefox'.
-(i.e., can’t have two active at the same time)
-
-$ nix-env --set-flag active false firefox
-setting flag on `firefox-2.0.0.9'
-
-$ nix-env --preserve-installed -i firefox-2.0.0.11
-installing `firefox-2.0.0.11'
-
-$ nix-env -q
-firefox-2.0.0.11 (the enabled one)
-firefox-2.0.0.9 (the disabled one)
-```
-
-To make files from `binutils` take precedence over files from `gcc`:
-
-```console
-$ nix-env --set-flag priority 5 binutils
-$ nix-env --set-flag priority 10 gcc
-```
-
-# Operation `--query`
-
-## Synopsis
-
-`nix-env` {`--query` | `-q`} *names…*
- [`--installed` | `--available` | `-a`]
- [{`--status` | `-s`}]
- [{`--attr-path` | `-P`}]
- [`--no-name`]
- [{`--compare-versions` | `-c`}]
- [`--system`]
- [`--drv-path`]
- [`--out-path`]
- [`--description`]
- [`--meta`]
- [`--xml`]
- [`--json`]
- [{`--prebuilt-only` | `-b`}]
- [{`--attr` | `-A`} *attribute-path*]
-
-## Description
-
-The query operation displays information about either the store paths
-that are installed in the current generation of the active profile
-(`--installed`), or the derivations that are available for installation
-in the active Nix expression (`--available`). It only prints information
-about derivations whose symbolic name matches one of *names*.
-
-The derivations are sorted by their `name` attributes.
-
-## Source selection
-
-The following flags specify the set of things on which the query
-operates.
-
- - `--installed`\
- The query operates on the store paths that are installed in the
- current generation of the active profile. This is the default.
-
- - `--available`; `-a`\
- The query operates on the derivations that are available in the
- active Nix expression.
-
-## Queries
-
-The following flags specify what information to display about the
-selected derivations. Multiple flags may be specified, in which case the
-information is shown in the order given here. Note that the name of the
-derivation is shown unless `--no-name` is specified.
-
- - `--xml`\
- Print the result in an XML representation suitable for automatic
- processing by other tools. The root element is called `items`, which
- contains a `item` element for each available or installed
- derivation. The fields discussed below are all stored in attributes
- of the `item` elements.
-
- - `--json`\
- Print the result in a JSON representation suitable for automatic
- processing by other tools.
-
- - `--prebuilt-only` / `-b`\
- Show only derivations for which a substitute is registered, i.e.,
- there is a pre-built binary available that can be downloaded in lieu
- of building the derivation. Thus, this shows all packages that
- probably can be installed quickly.
-
- - `--status`; `-s`\
- Print the *status* of the derivation. The status consists of three
- characters. The first is `I` or `-`, indicating whether the
- derivation is currently installed in the current generation of the
- active profile. This is by definition the case for `--installed`,
- but not for `--available`. The second is `P` or `-`, indicating
- whether the derivation is present on the system. This indicates
- whether installation of an available derivation will require the
- derivation to be built. The third is `S` or `-`, indicating whether
- a substitute is available for the derivation.
-
- - `--attr-path`; `-P`\
- Print the *attribute path* of the derivation, which can be used to
- unambiguously select it using the `--attr` option available in
- commands that install derivations like `nix-env --install`. This
- option only works together with `--available`
-
- - `--no-name`\
- Suppress printing of the `name` attribute of each derivation.
-
- - `--compare-versions` / `-c`\
- Compare installed versions to available versions, or vice versa (if
- `--available` is given). This is useful for quickly seeing whether
- upgrades for installed packages are available in a Nix expression. A
- column is added with the following meaning:
-
- - `<` *version*\
- A newer version of the package is available or installed.
-
- - `=` *version*\
- At most the same version of the package is available or
- installed.
-
- - `>` *version*\
- Only older versions of the package are available or installed.
-
- - `- ?`\
- No version of the package is available or installed.
-
- - `--system`\
- Print the `system` attribute of the derivation.
-
- - `--drv-path`\
- Print the path of the [store derivation].
-
- - `--out-path`\
- Print the output path of the derivation.
-
- - `--description`\
- Print a short (one-line) description of the derivation, if
- available. The description is taken from the `meta.description`
- attribute of the derivation.
-
- - `--meta`\
- Print all of the meta-attributes of the derivation. This option is
- only available with `--xml` or `--json`.
-
-## Examples
-
-To show installed packages:
-
-```console
-$ nix-env -q
-bison-1.875c
-docbook-xml-4.2
-firefox-1.0.4
-MPlayer-1.0pre7
-ORBit2-2.8.3
-…
-```
-
-To show available packages:
-
-```console
-$ nix-env -qa
-firefox-1.0.7
-GConf-2.4.0.1
-MPlayer-1.0pre7
-ORBit2-2.8.3
-…
-```
-
-To show the status of available packages:
-
-```console
-$ nix-env -qas
--P- firefox-1.0.7 (not installed but present)
---S GConf-2.4.0.1 (not present, but there is a substitute for fast installation)
---S MPlayer-1.0pre3 (i.e., this is not the installed MPlayer, even though the version is the same!)
-IP- ORBit2-2.8.3 (installed and by definition present)
-…
-```
-
-To show available packages in the Nix expression `foo.nix`:
-
-```console
-$ nix-env -f ./foo.nix -qa
-foo-1.2.3
-```
-
-To compare installed versions to what’s available:
-
-```console
-$ nix-env -qc
-...
-acrobat-reader-7.0 - ? (package is not available at all)
-autoconf-2.59 = 2.59 (same version)
-firefox-1.0.4 < 1.0.7 (a more recent version is available)
-...
-```
-
-To show all packages with “`zip`” in the name:
-
-```console
-$ nix-env -qa '.*zip.*'
-bzip2-1.0.6
-gzip-1.6
-zip-3.0
-…
-```
-
-To show all packages with “`firefox`” or “`chromium`” in the name:
-
-```console
-$ nix-env -qa '.*(firefox|chromium).*'
-chromium-37.0.2062.94
-chromium-beta-38.0.2125.24
-firefox-32.0.3
-firefox-with-plugins-13.0.1
-…
-```
-
-To show all packages in the latest revision of the Nixpkgs repository:
-
-```console
-$ nix-env -f https://github.com/NixOS/nixpkgs/archive/master.tar.gz -qa
-```
-
-# Operation `--switch-profile`
-
-## Synopsis
-
-`nix-env` {`--switch-profile` | `-S`} *path*
-
-## Description
-
-This operation makes *path* the current profile for the user. That is,
-the symlink `~/.nix-profile` is made to point to *path*.
-
-## Examples
-
-```console
-$ nix-env -S ~/my-profile
-```
-
-# Operation `--list-generations`
-
-## Synopsis
-
-`nix-env` `--list-generations`
-
-## Description
-
-This operation print a list of all the currently existing generations
-for the active profile. These may be switched to using the
-`--switch-generation` operation. It also prints the creation date of the
-generation, and indicates the current generation.
-
-## Examples
-
-```console
-$ nix-env --list-generations
- 95 2004-02-06 11:48:24
- 96 2004-02-06 11:49:01
- 97 2004-02-06 16:22:45
- 98 2004-02-06 16:24:33 (current)
-```
-
-# Operation `--delete-generations`
-
-## Synopsis
-
-`nix-env` `--delete-generations` *generations*
-
-## Description
-
-This operation deletes the specified generations of the current profile.
-The generations can be a list of generation numbers, the special value
-`old` to delete all non-current generations, a value such as `30d` to
-delete all generations older than the specified number of days (except
-for the generation that was active at that point in time), or a value
-such as `+5` to keep the last `5` generations ignoring any newer than
-current, e.g., if `30` is the current generation `+5` will delete
-generation `25` and all older generations. Periodically deleting old
-generations is important to make garbage collection effective.
-
-## Examples
-
-```console
-$ nix-env --delete-generations 3 4 8
-```
-
-```console
-$ nix-env --delete-generations +5
-```
-
-```console
-$ nix-env --delete-generations 30d
-```
-
-```console
-$ nix-env -p other_profile --delete-generations old
-```
-
-# Operation `--switch-generation`
-
-## Synopsis
-
-`nix-env` {`--switch-generation` | `-G`} *generation*
-
-## Description
-
-This operation makes generation number *generation* the current
-generation of the active profile. That is, if the `profile` is the path
-to the active profile, then the symlink `profile` is made to point to
-`profile-generation-link`, which is in turn a symlink to the actual user
-environment in the Nix store.
-
-Switching will fail if the specified generation does not exist.
-
-## Examples
-
-```console
-$ nix-env -G 42
-switching from generation 50 to 42
-```
-
-# Operation `--rollback`
-
-## Synopsis
-
-`nix-env` `--rollback`
-
-## Description
-
-This operation switches to the “previous” generation of the active
-profile, that is, the highest numbered generation lower than the current
-generation, if it exists. It is just a convenience wrapper around
-`--list-generations` and `--switch-generation`.
-
-## Examples
-
-```console
-$ nix-env --rollback
-switching from generation 92 to 91
-```
-
-```console
-$ nix-env --rollback
-error: no generation older than the current (91) exists
-```
+`nix-env` operates on the following files.
-# Environment variables
+{{#include ./files/default-nix-expression.md}}
- - `NIX_PROFILE`\
- Location of the Nix profile. Defaults to the target of the symlink
- `~/.nix-profile`, if it exists, or `/nix/var/nix/profiles/default`
- otherwise.
+{{#include ./files/profiles.md}}
diff --git a/doc/manual/src/command-ref/nix-env/delete-generations.md b/doc/manual/src/command-ref/nix-env/delete-generations.md
new file mode 100644
index 000000000..92cb7f0d9
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/delete-generations.md
@@ -0,0 +1,46 @@
+# Name
+
+`nix-env --delete-generations` - delete profile generations
+
+# Synopsis
+
+`nix-env` `--delete-generations` *generations*
+
+# Description
+
+This operation deletes the specified generations of the current profile.
+*generations* can be one of the following:
+
+ - a list of generation numbers
+
+ - the special value `old` to delete all non-current generations
+
+ - a value such as `30d` to delete all generations older than the
+ specified number of days (except for the generation that was active
+ at that point in time)
+
+ - a value such as `+5` to keep the last `5` generations, ignoring any
+ newer than the current one; e.g., if `30` is the current generation,
+ `+5` deletes generation `25` and all older generations
+
+Periodically deleting old generations is important to make garbage
+collection effective.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ./env-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+```console
+$ nix-env --delete-generations 3 4 8
+```
+
+```console
+$ nix-env --delete-generations +5
+```
+
+```console
+$ nix-env --delete-generations 30d
+```
+
+```console
+$ nix-env --profile other_profile --delete-generations old
+```
+
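+Note that deleting generations only removes the corresponding garbage
+collector roots; the disk space is reclaimed by a subsequent garbage
+collection, for example:
+
+```console
+$ nix-env --delete-generations old
+$ nix-store --gc
+```
+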
diff --git a/doc/manual/src/command-ref/nix-env/env-common.md b/doc/manual/src/command-ref/nix-env/env-common.md
new file mode 100644
index 000000000..735817959
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/env-common.md
@@ -0,0 +1,6 @@
+# Environment variables
+
+- `NIX_PROFILE`\
+ Location of the Nix profile. Defaults to the target of the symlink
+ `~/.nix-profile`, if it exists, or `/nix/var/nix/profiles/default`
+ otherwise.
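+
+ For example, to run a query against a different profile for a single
+ invocation (the profile path here is purely illustrative):
+
+ ```console
+ $ NIX_PROFILE=/nix/var/nix/profiles/per-user/alice/profile nix-env --query
+ ```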
diff --git a/doc/manual/src/command-ref/nix-env/install.md b/doc/manual/src/command-ref/nix-env/install.md
new file mode 100644
index 000000000..ad179cbc7
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/install.md
@@ -0,0 +1,187 @@
+# Name
+
+`nix-env --install` - add packages to user environment
+
+# Synopsis
+
+`nix-env` {`--install` | `-i`} *args…*
+ [{`--prebuilt-only` | `-b`}]
+ [{`--attr` | `-A`}]
+ [`--from-expression`] [`-E`]
+ [`--from-profile` *path*]
+ [`--preserve-installed` | `-P`]
+ [`--remove-all` | `-r`]
+
+# Description
+
+The install operation creates a new user environment, based on the
+current generation of the active profile, to which a set of store paths
+described by *args* is added. The arguments *args* map to store paths in
+a number of possible ways:
+
+ - By default, *args* is a set of derivation names denoting derivations
+ in the active Nix expression. These are realised, and the resulting
+ output paths are installed. Currently installed derivations with a
+ name equal to the name of a derivation being added are removed
+ unless the option `--preserve-installed` is specified.
+
+ If multiple derivations match a name in *args* (e.g., `gcc-3.3.6` and
+ `gcc-4.1.1` both match `gcc`), then the derivation with the highest
+ *priority* is used. A derivation can
+ define a priority by declaring the `meta.priority` attribute. This
+ attribute should be a number, with a higher value denoting a lower
+ priority. The default priority is `0`.
+
+ If there are multiple matching derivations with the same priority,
+ then the derivation with the highest version will be installed.
+
+ You can force the installation of multiple derivations with the same
+ name by being specific about the versions. For instance, `nix-env --install
+ gcc-3.3.6 gcc-4.1.1` will install both versions of GCC (and will
+ probably cause a user environment conflict!).
+
+ - If `--attr` (`-A`) is specified, the arguments are *attribute
+ paths* that select attributes from the top-level Nix
+ expression. This is faster than using derivation names and is
+ unambiguous. To find out the attribute paths of available
+ packages, use `nix-env --query --available --attr-path`.
+
+ - If `--from-profile` *path* is given, *args* is a set of names
+ denoting installed store paths in the profile *path*. This is an
+ easy way to copy user environment elements from one profile to
+ another.
+
+ - If `--from-expression` is given, *args* are Nix
+ [functions](@docroot@/language/constructs.md#functions)
+ that are called with the active Nix expression as their single
+ argument. The derivations returned by those function calls are
+ installed. This allows derivations to be specified in an
+ unambiguous way, which is necessary if there are multiple
+ derivations with the same name.
+
+ - If *args* are [store derivations](@docroot@/glossary.md#gloss-store-derivation), then these are
+ [realised](@docroot@/command-ref/nix-store/realise.md), and the resulting output paths
+ are installed.
+
+ - If *args* are store paths that are not store derivations, then these
+ are [realised](@docroot@/command-ref/nix-store/realise.md) and installed.
+
+ - By default all outputs are installed for each derivation. That can
+ be reduced by setting `meta.outputsToInstall`.
+
+# Flags
+
+ - `--prebuilt-only` / `-b`\
+ Use only derivations for which a substitute is registered, i.e.,
+ there is a pre-built binary available that can be downloaded in lieu
+ of building the derivation. Thus, no packages will be built from
+ source.
+
+ - `--preserve-installed` / `-P`\
+ Do not remove derivations with a name matching one of the
+ derivations being installed. Usually, trying to have two versions of
+ the same package installed in the same generation of a profile will
+ lead to an error in building the generation, due to file name
+ clashes between the two versions. However, this is not the case for
+ all packages.
+
+ - `--remove-all` / `-r`\
+ Remove all previously installed packages first. This is equivalent
+ to running `nix-env --uninstall '.*'` first, except that everything happens
+ in a single transaction.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ./env-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+To install a package using a specific attribute path from the active Nix expression:
+
+```console
+$ nix-env --install --attr gcc40mips
+installing `gcc-4.0.2'
+$ nix-env --install --attr xorg.xorgserver
+installing `xorg-server-1.2.0'
+```
+
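+The attribute path of a package can be looked up beforehand with
+`--attr-path` and then passed to `--attr`; for instance (output is
+illustrative, and the `nixpkgs.` prefix assumes a channel named
+`nixpkgs`):
+
+```console
+$ nix-env --query --available --attr-path firefox
+nixpkgs.firefox    firefox-32.0.3
+
+$ nix-env --install --attr nixpkgs.firefox
+installing `firefox-32.0.3'
+```
+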
+To install a specific version of `gcc` using the derivation name:
+
+```console
+$ nix-env --install gcc-3.3.2
+installing `gcc-3.3.2'
+uninstalling `gcc-3.1'
+```
+
+Using an attribute path to select a package is preferred,
+as it is much faster and there will not be multiple matches.
+
+Note the previously installed version is removed, since
+`--preserve-installed` was not specified.
+
+To install an arbitrary version:
+
+```console
+$ nix-env --install gcc
+installing `gcc-3.3.2'
+```
+
+To install all derivations in the Nix expression `foo.nix`:
+
+```console
+$ nix-env --file ~/foo.nix --install '.*'
+```
+
+To copy the store path with symbolic name `gcc` from another profile:
+
+```console
+$ nix-env --install --from-profile /nix/var/nix/profiles/foo gcc
+```
+
+To install a specific [store derivation] (typically created by
+`nix-instantiate`):
+
+```console
+$ nix-env --install /nix/store/fibjb1bfbpm5mrsxc4mh2d8n37sxh91i-gcc-3.4.3.drv
+```
+
+To install a specific output path:
+
+```console
+$ nix-env --install /nix/store/y3cgx0xj1p4iv9x0pnnmdhr8iyg741vk-gcc-3.4.3
+```
+
+To install from a Nix expression specified on the command-line:
+
+```console
+$ nix-env --file ./foo.nix --install --expr \
+ 'f: (f {system = "i686-linux";}).subversionWithJava'
+```
+
+I.e., this evaluates to `(f: (f {system =
+"i686-linux";}).subversionWithJava) (import ./foo.nix)`, thus selecting
+the `subversionWithJava` attribute from the set returned by calling the
+function defined in `./foo.nix`.
+
+A dry-run tells you which paths will be downloaded or built from source:
+
+```console
+$ nix-env --file '<nixpkgs>' --install --attr hello --dry-run
+(dry run; not doing anything)
+installing ‘hello-2.10’
+this path will be fetched (0.04 MiB download, 0.19 MiB unpacked):
+ /nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10
+ ...
+```
+
+To install Firefox from the latest revision in the Nixpkgs/NixOS 14.12
+channel:
+
+```console
+$ nix-env --file https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz --install --attr firefox
+```
+
diff --git a/doc/manual/src/command-ref/nix-env/list-generations.md b/doc/manual/src/command-ref/nix-env/list-generations.md
new file mode 100644
index 000000000..a4881ece8
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/list-generations.md
@@ -0,0 +1,33 @@
+# Name
+
+`nix-env --list-generations` - list profile generations
+
+# Synopsis
+
+`nix-env` `--list-generations`
+
+# Description
+
+This operation prints a list of all the currently existing generations
+for the active profile. These may be switched to using the
+`--switch-generation` operation. It also prints the creation date of the
+generation, and indicates the current generation.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ./env-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+```console
+$ nix-env --list-generations
+ 95 2004-02-06 11:48:24
+ 96 2004-02-06 11:49:01
+ 97 2004-02-06 16:22:45
+ 98 2004-02-06 16:24:33 (current)
+```
+
diff --git a/doc/manual/src/command-ref/nix-env/opt-common.md b/doc/manual/src/command-ref/nix-env/opt-common.md
new file mode 100644
index 000000000..636281b6d
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/opt-common.md
@@ -0,0 +1,35 @@
+# Options
+
+The following options are allowed for all `nix-env` operations, but may not always have an effect.
+
+ - `--file` / `-f` *path*\
+ Specifies the Nix expression (designated below as the *active Nix
+ expression*) used by the `--install`, `--upgrade`, and `--query
+ --available` operations to obtain derivations. The default is
+ `~/.nix-defexpr`.
+
+ If the argument starts with `http://` or `https://`, it is
+ interpreted as the URL of a tarball that will be downloaded and
+ unpacked to a temporary location. The tarball must include a single
+ top-level directory containing at least a file named `default.nix`.
+
+ - `--profile` / `-p` *path*\
+ Specifies the profile to be used by those operations that operate on
+ a profile (designated below as the *active profile*). A profile is a
+ sequence of user environments called *generations*, one of which is
+ the *current generation*.
+
+ - `--dry-run`\
+ For the `--install`, `--upgrade`, `--uninstall`,
+ `--switch-generation`, `--delete-generations` and `--rollback`
+ operations, this flag will cause `nix-env` to print what *would* be
+ done if this flag had not been specified, without actually doing it.
+
+ `--dry-run` also prints out which paths will be
+ [substituted](@docroot@/glossary.md) (i.e., downloaded) and which paths
+ will be built from source (because no substitute is available).
+
+ - `--system-filter` *system*\
+ By default, operations such as `--query
+ --available` show derivations matching any platform. This option
+ allows you to use derivations for the specified platform *system*.
diff --git a/doc/manual/src/command-ref/nix-env/query.md b/doc/manual/src/command-ref/nix-env/query.md
new file mode 100644
index 000000000..c9b4d8513
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/query.md
@@ -0,0 +1,215 @@
+# Name
+
+`nix-env --query` - display information about packages
+
+# Synopsis
+
+`nix-env` {`--query` | `-q`} *names…*
+ [`--installed` | `--available` | `-a`]
+ [{`--status` | `-s`}]
+ [{`--attr-path` | `-P`}]
+ [`--no-name`]
+ [{`--compare-versions` | `-c`}]
+ [`--system`]
+ [`--drv-path`]
+ [`--out-path`]
+ [`--description`]
+ [`--meta`]
+ [`--xml`]
+ [`--json`]
+ [{`--prebuilt-only` | `-b`}]
+ [{`--attr` | `-A`} *attribute-path*]
+
+# Description
+
+The query operation displays information about either the store paths
+that are installed in the current generation of the active profile
+(`--installed`), or the derivations that are available for installation
+in the active Nix expression (`--available`). It only prints information
+about derivations whose symbolic name matches one of *names*.
+
+The derivations are sorted by their `name` attributes.
+
+# Source selection
+
+The following flags specify the set of things on which the query
+operates.
+
+ - `--installed`\
+ The query operates on the store paths that are installed in the
+ current generation of the active profile. This is the default.
+
+ - `--available`; `-a`\
+ The query operates on the derivations that are available in the
+ active Nix expression.
+
+# Queries
+
+The following flags specify what information to display about the
+selected derivations. Multiple flags may be specified, in which case the
+information is shown in the order given here. Note that the name of the
+derivation is shown unless `--no-name` is specified.
+
+ - `--xml`\
+ Print the result in an XML representation suitable for automatic
+ processing by other tools. The root element is called `items`, which
+ contains an `item` element for each available or installed
+ derivation. The fields discussed below are all stored in attributes
+ of the `item` elements.
+
+ - `--json`\
+ Print the result in a JSON representation suitable for automatic
+ processing by other tools.
+
+ - `--prebuilt-only` / `-b`\
+ Show only derivations for which a substitute is registered, i.e.,
+ there is a pre-built binary available that can be downloaded in lieu
+ of building the derivation. Thus, this shows all packages that
+ probably can be installed quickly.
+
+ - `--status`; `-s`\
+ Print the *status* of the derivation. The status consists of three
+ characters. The first is `I` or `-`, indicating whether the
+ derivation is currently installed in the current generation of the
+ active profile. This is by definition the case for `--installed`,
+ but not for `--available`. The second is `P` or `-`, indicating
+ whether the derivation is present on the system. This indicates
+ whether installation of an available derivation will require the
+ derivation to be built. The third is `S` or `-`, indicating whether
+ a substitute is available for the derivation.
+
+ - `--attr-path`; `-P`\
+ Print the *attribute path* of the derivation, which can be used to
+ unambiguously select it using the `--attr` option available in
+ commands that install derivations like `nix-env --install`. This
+ option only works together with `--available`.
+
+ - `--no-name`\
+ Suppress printing of the `name` attribute of each derivation.
+
+ - `--compare-versions` / `-c`\
+ Compare installed versions to available versions, or vice versa (if
+ `--available` is given). This is useful for quickly seeing whether
+ upgrades for installed packages are available in a Nix expression. A
+ column is added with the following meaning:
+
+ - `<` *version*\
+ A newer version of the package is available or installed.
+
+ - `=` *version*\
+ At most the same version of the package is available or
+ installed.
+
+ - `>` *version*\
+ Only older versions of the package are available or installed.
+
+ - `- ?`\
+ No version of the package is available or installed.
+
+ - `--system`\
+ Print the `system` attribute of the derivation.
+
+ - `--drv-path`\
+ Print the path of the [store derivation](@docroot@/glossary.md#gloss-store-derivation).
+
+ - `--out-path`\
+ Print the output path of the derivation.
+
+ - `--description`\
+ Print a short (one-line) description of the derivation, if
+ available. The description is taken from the `meta.description`
+ attribute of the derivation.
+
+ - `--meta`\
+ Print all of the meta-attributes of the derivation. This option is
+ only available with `--xml` or `--json`.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ./env-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+To show installed packages:
+
+```console
+$ nix-env --query
+bison-1.875c
+docbook-xml-4.2
+firefox-1.0.4
+MPlayer-1.0pre7
+ORBit2-2.8.3
+…
+```
+
+To show available packages:
+
+```console
+$ nix-env --query --available
+firefox-1.0.7
+GConf-2.4.0.1
+MPlayer-1.0pre7
+ORBit2-2.8.3
+…
+```
+
+To show the status of available packages:
+
+```console
+$ nix-env --query --available --status
+-P- firefox-1.0.7 (not installed but present)
+--S GConf-2.4.0.1 (not present, but there is a substitute for fast installation)
+--S MPlayer-1.0pre3 (i.e., this is not the installed MPlayer, even though the version is the same!)
+IP- ORBit2-2.8.3 (installed and by definition present)
+…
+```
+
+To show available packages in the Nix expression `foo.nix`:
+
+```console
+$ nix-env --file ./foo.nix --query --available
+foo-1.2.3
+```
+
+To compare installed versions to what’s available:
+
+```console
+$ nix-env --query --compare-versions
+...
+acrobat-reader-7.0 - ? (package is not available at all)
+autoconf-2.59 = 2.59 (same version)
+firefox-1.0.4 < 1.0.7 (a more recent version is available)
+...
+```
+
+To show all packages with “`zip`” in the name:
+
+```console
+$ nix-env --query --available '.*zip.*'
+bzip2-1.0.6
+gzip-1.6
+zip-3.0
+…
+```
+
+To show all packages with “`firefox`” or “`chromium`” in the name:
+
+```console
+$ nix-env --query --available '.*(firefox|chromium).*'
+chromium-37.0.2062.94
+chromium-beta-38.0.2125.24
+firefox-32.0.3
+firefox-with-plugins-13.0.1
+…
+```
+
+To show all packages in the latest revision of the Nixpkgs repository:
+
+```console
+$ nix-env --file https://github.com/NixOS/nixpkgs/archive/master.tar.gz --query --available
+```
+
diff --git a/doc/manual/src/command-ref/nix-env/rollback.md b/doc/manual/src/command-ref/nix-env/rollback.md
new file mode 100644
index 000000000..1e3958cfc
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/rollback.md
@@ -0,0 +1,34 @@
+# Name
+
+`nix-env --rollback` - set user environment to previous generation
+
+# Synopsis
+
+`nix-env` `--rollback`
+
+# Description
+
+This operation switches to the “previous” generation of the active
+profile, that is, the highest numbered generation lower than the current
+generation, if it exists. It is just a convenience wrapper around
+`--list-generations` and `--switch-generation`.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ./env-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+```console
+$ nix-env --rollback
+switching from generation 92 to 91
+```
+
+```console
+$ nix-env --rollback
+error: no generation older than the current (91) exists
+```
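+
+Since `--rollback` is only a thin wrapper, the first example above
+corresponds roughly to the following manual steps (output is
+illustrative):
+
+```console
+$ nix-env --list-generations
+ ...
+ 91 2004-02-06 11:49:01
+ 92 2004-02-06 16:22:45 (current)
+
+$ nix-env --switch-generation 91
+switching from generation 92 to 91
+```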
diff --git a/doc/manual/src/command-ref/nix-env/set-flag.md b/doc/manual/src/command-ref/nix-env/set-flag.md
new file mode 100644
index 000000000..e04b22a91
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/set-flag.md
@@ -0,0 +1,82 @@
+# Name
+
+`nix-env --set-flag` - modify meta attributes of installed packages
+
+# Synopsis
+
+`nix-env` `--set-flag` *name* *value* *drvnames*
+
+# Description
+
+The `--set-flag` operation allows meta attributes of installed packages
+to be modified. There are several attributes that can be usefully
+modified, because they affect the behaviour of `nix-env` or the user
+environment build script:
+
+ - `priority` can be changed to resolve filename clashes. The user
+ environment build script uses the `meta.priority` attribute of
+ derivations to resolve filename collisions between packages. Lower
+ priority values denote a higher priority. For instance, the GCC
+ wrapper package and the Binutils package in Nixpkgs both have a file
+ `bin/ld`, so previously if you tried to install both you would get a
+ collision. Now, on the other hand, the GCC wrapper declares a higher
+ priority than Binutils, so the former’s `bin/ld` is symlinked in the
+ user environment.
+
+ - `keep` can be set to `true` to prevent the package from being
+ upgraded or replaced. This is useful if you want to hang on to an
+ older version of a package.
+
+ - `active` can be set to `false` to “disable” the package. That is, no
+ symlinks will be generated to the files of the package, but it
+ remains part of the profile (so it won’t be garbage-collected). It
+ can be set back to `true` to re-enable the package.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+To prevent the currently installed Firefox from being upgraded:
+
+```console
+$ nix-env --set-flag keep true firefox
+```
+
+After this, `nix-env --upgrade` will ignore Firefox.
+
+To disable the currently installed Firefox, then install a new Firefox
+while the old remains part of the profile:
+
+```console
+$ nix-env --query
+firefox-2.0.0.9 (the current one)
+
+$ nix-env --preserve-installed --install firefox-2.0.0.11
+installing `firefox-2.0.0.11'
+building path(s) `/nix/store/myy0y59q3ig70dgq37jqwg1j0rsapzsl-user-environment'
+collision between `/nix/store/...-firefox-2.0.0.11/bin/firefox'
+ and `/nix/store/...-firefox-2.0.0.9/bin/firefox'.
+(i.e., can’t have two active at the same time)
+
+$ nix-env --set-flag active false firefox
+setting flag on `firefox-2.0.0.9'
+
+$ nix-env --preserve-installed --install firefox-2.0.0.11
+installing `firefox-2.0.0.11'
+
+$ nix-env --query
+firefox-2.0.0.11 (the enabled one)
+firefox-2.0.0.9 (the disabled one)
+```
+
+To make files from `binutils` take precedence over files from `gcc`:
+
+```console
+$ nix-env --set-flag priority 5 binutils
+$ nix-env --set-flag priority 10 gcc
+```
+
diff --git a/doc/manual/src/command-ref/nix-env/set.md b/doc/manual/src/command-ref/nix-env/set.md
new file mode 100644
index 000000000..b9950eeab
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/set.md
@@ -0,0 +1,30 @@
+# Name
+
+`nix-env --set` - set profile to contain a specified derivation
+
+## Synopsis
+
+`nix-env` `--set` *drvname*
+
+## Description
+
+The `--set` operation modifies the current generation of a profile so
+that it contains exactly the specified derivation, and nothing else.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ./env-common.md}}
+
+{{#include ../env-common.md}}
+
+## Examples
+
+The following updates a profile such that its current generation will
+contain just Firefox:
+
+```console
+$ nix-env --profile /nix/var/nix/profiles/browser --set firefox
+```
+
diff --git a/doc/manual/src/command-ref/nix-env/switch-generation.md b/doc/manual/src/command-ref/nix-env/switch-generation.md
new file mode 100644
index 000000000..38cf0534d
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/switch-generation.md
@@ -0,0 +1,33 @@
+# Name
+
+`nix-env --switch-generation` - set user environment to given profile generation
+
+# Synopsis
+
+`nix-env` {`--switch-generation` | `-G`} *generation*
+
+# Description
+
+This operation makes generation number *generation* the current
+generation of the active profile. That is, if `profile` is the path
+to the active profile, then the symlink `profile` is made to point to
+`profile-generation-link`, which is in turn a symlink to the actual user
+environment in the Nix store.
+
+Switching will fail if the specified generation does not exist.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ./env-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+```console
+$ nix-env --switch-generation 42
+switching from generation 50 to 42
+```
+
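+The symlink chain described above can be inspected directly. For a
+profile named `default`, the layout might look roughly like this
+(illustrative output; store hashes elided):
+
+```console
+$ ls -l /nix/var/nix/profiles/default*
+lrwxrwxrwx ... default -> default-42-link
+lrwxrwxrwx ... default-42-link -> /nix/store/...-user-environment
+lrwxrwxrwx ... default-50-link -> /nix/store/...-user-environment
+```
+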
diff --git a/doc/manual/src/command-ref/nix-env/switch-profile.md b/doc/manual/src/command-ref/nix-env/switch-profile.md
new file mode 100644
index 000000000..5ae2fdced
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/switch-profile.md
@@ -0,0 +1,26 @@
+# Name
+
+`nix-env --switch-profile` - set user environment to given profile
+
+# Synopsis
+
+`nix-env` {`--switch-profile` | `-S`} *path*
+
+# Description
+
+This operation makes *path* the current profile for the user. That is,
+the symlink `~/.nix-profile` is made to point to *path*.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ./env-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+```console
+$ nix-env --switch-profile ~/my-profile
+```
diff --git a/doc/manual/src/command-ref/nix-env/uninstall.md b/doc/manual/src/command-ref/nix-env/uninstall.md
new file mode 100644
index 000000000..734cc7675
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/uninstall.md
@@ -0,0 +1,28 @@
+# Name
+
+`nix-env --uninstall` - remove packages from user environment
+
+# Synopsis
+
+`nix-env` {`--uninstall` | `-e`} *drvnames…*
+
+# Description
+
+The uninstall operation creates a new user environment, based on the
+current generation of the active profile, from which the store paths
+designated by the symbolic names *drvnames* are removed.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ./env-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+```console
+$ nix-env --uninstall gcc
+$ nix-env --uninstall '.*' (remove everything)
+```
diff --git a/doc/manual/src/command-ref/nix-env/upgrade.md b/doc/manual/src/command-ref/nix-env/upgrade.md
new file mode 100644
index 000000000..322dfbda2
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-env/upgrade.md
@@ -0,0 +1,141 @@
+# Name
+
+`nix-env --upgrade` - upgrade packages in user environment
+
+# Synopsis
+
+`nix-env` {`--upgrade` | `-u`} *args*
+ [`--lt` | `--leq` | `--eq` | `--always`]
+ [{`--prebuilt-only` | `-b`}]
+ [{`--attr` | `-A`}]
+ [`--from-expression`] [`-E`]
+ [`--from-profile` *path*]
+ [`--preserve-installed` | `-P`]
+
+# Description
+
+The upgrade operation creates a new user environment, based on the
+current generation of the active profile. Every installed store path
+for which the set of paths described by *args* contains a newer
+version is replaced by that newer version. Paths for which there are
+no newer versions are left untouched; this is not an error. It is also
+not an error if an element of *args* matches no installed derivations.
+
+For a description of how *args* is mapped to a set of store paths, see
+[`--install`](./install.md). If *args* describes multiple
+store paths with the same symbolic name, only the one with the highest
+version is installed.
+
+# Flags
+
+ - `--lt`\
+ Only upgrade a derivation to newer versions. This is the default.
+
+ - `--leq`\
+ In addition to upgrading to newer versions, also “upgrade” to
+ derivations that have the same version. Versions are not a unique
+ identification of a derivation, so there may be many derivations
+ that have the same version. This flag may be useful to force
+ “synchronisation” between the installed and available derivations.
+
+ - `--eq`\
+ *Only* “upgrade” to derivations that have the same version. This may
+ not seem very useful, but it actually is, e.g., when there is a new
+ release of Nixpkgs and you want to replace installed applications
+ with the same versions built against newer dependencies (to reduce
+ the number of dependencies floating around on your system).
+
+ - `--always`\
+ In addition to upgrading to newer versions, also “upgrade” to
+ derivations that have the same or a lower version. I.e., derivations
+ may actually be downgraded depending on what is available in the
+ active Nix expression.
+
+ - `--prebuilt-only` / `-b`\
+ Use only derivations for which a substitute is registered, i.e.,
+ there is a pre-built binary available that can be downloaded in lieu
+ of building the derivation. Thus, no packages will be built from
+ source.
+
+ - `--preserve-installed` / `-P`\
+ Do not remove derivations with a name matching one of the
+ derivations being installed. Usually, trying to have two versions of
+ the same package installed in the same generation of a profile will
+ lead to an error in building the generation, due to file name
+ clashes between the two versions. However, this is not the case for
+ all packages.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ./env-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+```console
+$ nix-env --upgrade --attr nixpkgs.gcc
+upgrading `gcc-3.3.1' to `gcc-3.4'
+```
+
+When there are no updates available, nothing will happen:
+
+```console
+$ nix-env --upgrade --attr nixpkgs.pan
+```
+
+Using `-A` is preferred when possible, as it is faster and unambiguous,
+but it is also possible to upgrade to a specific version by matching the derivation name:
+
+```console
+$ nix-env --upgrade gcc-3.3.2 --always
+upgrading `gcc-3.4' to `gcc-3.3.2'
+```
+
+To try to upgrade everything
+(matching packages based on the part of the derivation name without version):
+
+```console
+$ nix-env --upgrade
+upgrading `hello-2.1.2' to `hello-2.1.3'
+upgrading `mozilla-1.2' to `mozilla-1.4'
+```
+
+# Versions
+
+The upgrade operation determines whether a derivation `y` is an upgrade
+of a derivation `x` by looking at their respective `name` attributes.
+The names (e.g., `gcc-3.3.1`) are split into two parts: the package name
+(`gcc`), and the version (`3.3.1`). The version part starts after the
+first dash not followed by a letter. `y` is considered an upgrade of `x`
+if their package names match, and the version of `y` is higher than that
+of `x`.
+
+The versions are compared by splitting them into contiguous components
+of numbers and letters. E.g., `3.3.1pre5` is split into `[3, 3, 1,
+"pre", 5]`. These lists are then compared lexicographically (from left
+to right). Corresponding components `a` and `b` are compared as follows.
+If they are both numbers, integer comparison is used. If `a` is an empty
+string and `b` is a number, `a` is considered less than `b`. The special
+string component `pre` (for *pre-release*) is considered to be less than
+other components. String components are considered less than number
+components. Otherwise, they are compared lexicographically (i.e., using
+case-sensitive string comparison).
+
+This is illustrated by the following examples:
+
+ 1.0 < 2.3
+ 2.1 < 2.3
+ 2.3 = 2.3
+ 2.5 > 2.3
+ 3.1 > 2.3
+ 2.3.1 > 2.3
+ 2.3.1 > 2.3a
+ 2.3pre1 < 2.3
+ 2.3pre3 < 2.3pre12
+ 2.3a < 2.3c
+ 2.3pre1 < 2.3c
+ 2.3pre1 < 2.3q
+
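+The same version ordering is exposed in the Nix language as
+`builtins.compareVersions`, which returns `-1`, `0` or `1`; it can be
+used to check how two version strings compare, for example:
+
+```console
+$ nix-instantiate --eval --expr 'builtins.compareVersions "2.3pre1" "2.3"'
+-1
+```
+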
diff --git a/doc/manual/src/command-ref/nix-hash.md b/doc/manual/src/command-ref/nix-hash.md
index 45f67f1c5..37c8facec 100644
--- a/doc/manual/src/command-ref/nix-hash.md
+++ b/doc/manual/src/command-ref/nix-hash.md
@@ -6,9 +6,7 @@
`nix-hash` [`--flat`] [`--base32`] [`--truncate`] [`--type` *hashAlgo*] *path…*
-`nix-hash` `--to-base16` *hash…*
-
-`nix-hash` `--to-base32` *hash…*
+`nix-hash` [`--to-base16`|`--to-base32`|`--to-base64`|`--to-sri`] [`--type` *hashAlgo*] *hash…*
# Description
@@ -23,7 +21,7 @@ The hash is computed over a *serialisation* of each path: a dump of
the file system tree rooted at the path. This allows directories and
symlinks to be hashed as well as regular files. The dump is in the
*NAR format* produced by [`nix-store
---dump`](nix-store.md#operation---dump). Thus, `nix-hash path`
+--dump`](@docroot@/command-ref/nix-store/dump.md). Thus, `nix-hash path`
yields the same cryptographic hash as `nix-store --dump path |
md5sum`.
@@ -35,11 +33,23 @@ md5sum`.
The result is identical to that produced by the GNU commands
`md5sum` and `sha1sum`.
+ - `--base16`\
+ Print the hash in a hexadecimal representation (default).
+
- `--base32`\
Print the hash in a base-32 representation rather than hexadecimal.
This base-32 representation is more compact and can be used in Nix
expressions (such as in calls to `fetchurl`).
+ - `--base64`\
+ Similar to `--base32`, but print the hash in a base-64 representation,
+ which is more compact than the base-32 one.
+
+ - `--sri`\
+ Print the hash in SRI format with base-64 encoding.
+ The type of hash algorithm will be prepended to the hash string,
+ followed by a hyphen (-) and the base-64 hash body.
+
- `--truncate`\
Truncate hashes longer than 160 bits (such as SHA-256) to 160 bits.
@@ -55,6 +65,14 @@ md5sum`.
Don’t hash anything, but convert the hexadecimal hash representation
*hash* to base-32.
+ - `--to-base64`\
+ Don’t hash anything, but convert the hexadecimal hash representation
+ *hash* to base-64.
+
+ - `--to-sri`\
+ Don’t hash anything, but convert the hexadecimal hash representation
+ *hash* to SRI.
+
# Examples
Computing the same hash as `nix-prefetch-url`:
@@ -81,9 +99,18 @@ $ nix-store --dump test/ | md5sum (for comparison)
$ nix-hash --type sha1 test/
e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6
+$ nix-hash --type sha1 --base16 test/
+e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6
+
$ nix-hash --type sha1 --base32 test/
nvd61k9nalji1zl9rrdfmsmvyyjqpzg4
+$ nix-hash --type sha1 --base64 test/
+5P2Lpfe76upazon+ECVVNs1g2rY=
+
+$ nix-hash --type sha1 --sri test/
+sha1-5P2Lpfe76upazon+ECVVNs1g2rY=
+
$ nix-hash --type sha256 --flat test/
error: reading file `test/': Is a directory
@@ -91,7 +118,7 @@ $ nix-hash --type sha256 --flat test/world
5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03
```
-Converting between hexadecimal and base-32:
+Converting between hexadecimal, base-32, base-64, and SRI:
```console
$ nix-hash --type sha1 --to-base32 e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6
@@ -99,4 +126,13 @@ nvd61k9nalji1zl9rrdfmsmvyyjqpzg4
$ nix-hash --type sha1 --to-base16 nvd61k9nalji1zl9rrdfmsmvyyjqpzg4
e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6
+
+$ nix-hash --type sha1 --to-base64 e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6
+5P2Lpfe76upazon+ECVVNs1g2rY=
+
+$ nix-hash --type sha1 --to-sri nvd61k9nalji1zl9rrdfmsmvyyjqpzg4
+sha1-5P2Lpfe76upazon+ECVVNs1g2rY=
+
+$ nix-hash --to-base16 sha1-5P2Lpfe76upazon+ECVVNs1g2rY=
+e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6
```
diff --git a/doc/manual/src/command-ref/nix-instantiate.md b/doc/manual/src/command-ref/nix-instantiate.md
index 432fb2608..e1b4a3e80 100644
--- a/doc/manual/src/command-ref/nix-instantiate.md
+++ b/doc/manual/src/command-ref/nix-instantiate.md
@@ -76,7 +76,9 @@ standard input.
this option is not enabled, there may be uninstantiated store paths
in the final output.
-<!-- end list -->
+{{#include ./opt-common.md}}
+
+{{#include ./env-common.md}}
# Examples
@@ -86,7 +88,7 @@ Instantiate [store derivation]s from a Nix expression, and build them using `nix
$ nix-instantiate test.nix (instantiate)
/nix/store/cigxbmvy6dzix98dxxh9b6shg7ar5bvs-perl-BerkeleyDB-0.26.drv
-$ nix-store -r $(nix-instantiate test.nix) (build)
+$ nix-store --realise $(nix-instantiate test.nix) (build)
...
/nix/store/qhqk4n8ci095g3sdp93x7rgwyh9rdvgk-perl-BerkeleyDB-0.26 (output path)
@@ -98,30 +100,30 @@ dr-xr-xr-x 2 eelco users 4096 1970-01-01 01:00 lib
You can also give a Nix expression on the command line:
```console
-$ nix-instantiate -E 'with import <nixpkgs> { }; hello'
+$ nix-instantiate --expr 'with import <nixpkgs> { }; hello'
/nix/store/j8s4zyv75a724q38cb0r87rlczaiag4y-hello-2.8.drv
```
This is equivalent to:
```console
-$ nix-instantiate '<nixpkgs>' -A hello
+$ nix-instantiate '<nixpkgs>' --attr hello
```
Parsing and evaluating Nix expressions:
```console
-$ nix-instantiate --parse -E '1 + 2'
+$ nix-instantiate --parse --expr '1 + 2'
1 + 2
```
```console
-$ nix-instantiate --eval -E '1 + 2'
+$ nix-instantiate --eval --expr '1 + 2'
3
```
```console
-$ nix-instantiate --eval --xml -E '1 + 2'
+$ nix-instantiate --eval --xml --expr '1 + 2'
<?xml version='1.0' encoding='utf-8'?>
<expr>
<int value="3" />
@@ -131,7 +133,7 @@ $ nix-instantiate --eval --xml -E '1 + 2'
The difference between non-strict and strict evaluation:
```console
-$ nix-instantiate --eval --xml -E 'rec { x = "foo"; y = x; }'
+$ nix-instantiate --eval --xml --expr 'rec { x = "foo"; y = x; }'
...
<attr name="x">
<string value="foo" />
@@ -146,7 +148,7 @@ Note that `y` is left unevaluated (the XML representation doesn’t
attempt to show non-normal forms).
```console
-$ nix-instantiate --eval --xml --strict -E 'rec { x = "foo"; y = x; }'
+$ nix-instantiate --eval --xml --strict --expr 'rec { x = "foo"; y = x; }'
...
<attr name="x">
<string value="foo" />
diff --git a/doc/manual/src/command-ref/nix-shell.md b/doc/manual/src/command-ref/nix-shell.md
index 840bccd25..195b72be5 100644
--- a/doc/manual/src/command-ref/nix-shell.md
+++ b/doc/manual/src/command-ref/nix-shell.md
@@ -89,7 +89,7 @@ All options not listed here are passed to `nix-store
- `--packages` / `-p` *packages*…\
Set up an environment in which the specified packages are present.
The command line arguments are interpreted as attribute names inside
- the Nix Packages collection. Thus, `nix-shell -p libjpeg openjdk`
+ the Nix Packages collection. Thus, `nix-shell --packages libjpeg openjdk`
will start a shell in which the packages denoted by the attribute
names `libjpeg` and `openjdk` are present.
@@ -101,7 +101,7 @@ All options not listed here are passed to `nix-store
When a `--pure` shell is started, keep the listed environment
variables.
-The following common options are supported:
+{{#include ./opt-common.md}}
# Environment variables
@@ -110,15 +110,18 @@ The following common options are supported:
`bash` found in `<nixpkgs>`, falling back to the `bash` found in
`PATH` if not found.
+{{#include ./env-common.md}}
+
# Examples
To build the dependencies of the package Pan, and start an interactive
shell in which to build it:
```console
-$ nix-shell '<nixpkgs>' -A pan
+$ nix-shell '<nixpkgs>' --attr pan
[nix-shell]$ eval ${unpackPhase:-unpackPhase}
-[nix-shell]$ cd pan-*
+[nix-shell]$ cd $sourceRoot
+[nix-shell]$ eval ${patchPhase:-patchPhase}
[nix-shell]$ eval ${configurePhase:-configurePhase}
[nix-shell]$ eval ${buildPhase:-buildPhase}
[nix-shell]$ ./pan/gui/pan
@@ -134,7 +137,7 @@ To clear the environment first, and do some additional automatic
initialisation of the interactive shell:
```console
-$ nix-shell '<nixpkgs>' -A pan --pure \
+$ nix-shell '<nixpkgs>' --attr pan --pure \
--command 'export NIX_DEBUG=1; export NIX_CORES=8; return'
```
@@ -143,13 +146,13 @@ Nix expressions can also be given on the command line using the `-E` and
packages `sqlite` and `libX11`:
```console
-$ nix-shell -E 'with import <nixpkgs> { }; runCommand "dummy" { buildInputs = [ sqlite xorg.libX11 ]; } ""'
+$ nix-shell --expr 'with import <nixpkgs> { }; runCommand "dummy" { buildInputs = [ sqlite xorg.libX11 ]; } ""'
```
A shorter way to do the same is:
```console
-$ nix-shell -p sqlite xorg.libX11
+$ nix-shell --packages sqlite xorg.libX11
[nix-shell]$ echo $NIX_LDFLAGS
… -L/nix/store/j1zg5v…-sqlite-3.8.0.2/lib -L/nix/store/0gmcz9…-libX11-1.6.1/lib …
```
@@ -159,7 +162,7 @@ the `buildInputs = [ ... ]` shown above, not only package names. So the
following is also legal:
```console
-$ nix-shell -p sqlite 'git.override { withManual = false; }'
+$ nix-shell --packages sqlite 'git.override { withManual = false; }'
```
The `-p` flag looks up Nixpkgs in the Nix search path. You can override
@@ -168,7 +171,7 @@ gives you a shell containing the Pan package from a specific revision of
Nixpkgs:
```console
-$ nix-shell -p pan -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/8a3eea054838b55aca962c3fbde9c83c102b8bf2.tar.gz
+$ nix-shell --packages pan -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/8a3eea054838b55aca962c3fbde9c83c102b8bf2.tar.gz
[nix-shell:~]$ pan --version
Pan 0.139
@@ -182,7 +185,7 @@ done by starting the script with the following lines:
```bash
#! /usr/bin/env nix-shell
-#! nix-shell -i real-interpreter -p packages
+#! nix-shell -i real-interpreter --packages packages
```
where *real-interpreter* is the “real” script interpreter that will be
@@ -199,7 +202,7 @@ For example, here is a Python script that depends on Python and the
```python
#! /usr/bin/env nix-shell
-#! nix-shell -i python -p python pythonPackages.prettytable
+#! nix-shell -i python --packages python pythonPackages.prettytable
import prettytable
@@ -214,7 +217,7 @@ requires Perl and the `HTML::TokeParser::Simple` and `LWP` packages:
```perl
#! /usr/bin/env nix-shell
-#! nix-shell -i perl -p perl perlPackages.HTMLTokeParserSimple perlPackages.LWP
+#! nix-shell -i perl --packages perl perlPackages.HTMLTokeParserSimple perlPackages.LWP
use HTML::TokeParser::Simple;
@@ -232,7 +235,7 @@ package like Terraform:
```bash
#! /usr/bin/env nix-shell
-#! nix-shell -i bash -p "terraform.withPlugins (plugins: [ plugins.openstack ])"
+#! nix-shell -i bash --packages "terraform.withPlugins (plugins: [ plugins.openstack ])"
terraform apply
```
@@ -248,7 +251,7 @@ branch):
```haskell
#! /usr/bin/env nix-shell
-#! nix-shell -i runghc -p "haskellPackages.ghcWithPackages (ps: [ps.download-curl ps.tagsoup])"
+#! nix-shell -i runghc --packages "haskellPackages.ghcWithPackages (ps: [ps.download-curl ps.tagsoup])"
#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-20.03.tar.gz
import Network.Curl.Download
diff --git a/doc/manual/src/command-ref/nix-store.md b/doc/manual/src/command-ref/nix-store.md
index 17cbd0461..c7c5fdd2f 100644
--- a/doc/manual/src/command-ref/nix-store.md
+++ b/doc/manual/src/command-ref/nix-store.md
@@ -13,838 +13,35 @@
The command `nix-store` performs primitive operations on the Nix store.
You generally do not need to run this command manually.
-`nix-store` takes exactly one *operation* flag which indicates the
-subcommand to be performed. These are documented below.
-
-# Common options
-
-This section lists the options that are common to all operations. These
-options are allowed for every subcommand, though they may not always
-have an effect.
-
- - <span id="opt-add-root">[`--add-root`](#opt-add-root)</span> *path*
-
- Causes the result of a realisation (`--realise` and
- `--force-realise`) to be registered as a root of the garbage
- collector. *path* will be created as a symlink to the resulting
- store path. In addition, a uniquely named symlink to *path* will
- be created in `/nix/var/nix/gcroots/auto/`. For instance,
-
- ```console
- $ nix-store --add-root /home/eelco/bla/result -r ...
-
- $ ls -l /nix/var/nix/gcroots/auto
- lrwxrwxrwx 1 ... 2005-03-13 21:10 dn54lcypm8f8... -> /home/eelco/bla/result
-
- $ ls -l /home/eelco/bla/result
- lrwxrwxrwx 1 ... 2005-03-13 21:10 /home/eelco/bla/result -> /nix/store/1r11343n6qd4...-f-spot-0.0.10
- ```
-
- Thus, when `/home/eelco/bla/result` is removed, the GC root in the
- `auto` directory becomes a dangling symlink and will be ignored by
- the collector.
-
- > **Warning**
- >
- > Note that it is not possible to move or rename GC roots, since
- > the symlink in the `auto` directory will still point to the old
- > location.
-
- If there are multiple results, then multiple symlinks will be
- created by sequentially numbering symlinks beyond the first one
- (e.g., `foo`, `foo-2`, `foo-3`, and so on).
-
- - <span id="opt-stdin">[`--stdin`](#opt-stdin)</span>
-
- Read *paths…* from the standard input.
- Useful for chaining nix-store commands.
-
-# Operation `--realise`
-
-## Synopsis
-
-`nix-store` {`--realise` | `-r`} *paths…* [`--dry-run`]
-
-## Description
-
-The operation `--realise` essentially “builds” the specified store
-paths. Realisation is a somewhat overloaded term:
-
- - If the store path is a *derivation*, realisation ensures that the
- output paths of the derivation are [valid] (i.e.,
- the output path and its closure exist in the file system). This
- can be done in several ways. First, it is possible that the
- outputs are already valid, in which case we are done
- immediately. Otherwise, there may be [substitutes]
- that produce the outputs (e.g., by downloading them). Finally, the
- outputs can be produced by running the build task described
- by the derivation.
-
- - If the store path is not a derivation, realisation ensures that the
- specified path is valid (i.e., it and its closure exist in the file
- system). If the path is already valid, we are done immediately.
- Otherwise, the path and any missing paths in its closure may be
- produced through substitutes. If there are no (successful)
- substitutes, realisation fails.
-
-[valid]: ../glossary.md#gloss-validity
-[substitutes]: ../glossary.md#gloss-substitute
-
-The output path of each derivation is printed on standard output. (For
-non-derivations argument, the argument itself is printed.)
-
-The following flags are available:
-
- - `--dry-run`\
- Print on standard error a description of what packages would be
- built or downloaded, without actually performing the operation.
-
- - `--ignore-unknown`\
- If a non-derivation path does not have a substitute, then silently
- ignore it.
-
- - `--check`\
- This option allows you to check whether a derivation is
- deterministic. It rebuilds the specified derivation and checks
- whether the result is bitwise-identical with the existing outputs,
- printing an error if that’s not the case. The outputs of the
- specified derivation must already exist. When used with `-K`, if an
- output path is not identical to the corresponding output from the
- previous build, the new output path is left in
- `/nix/store/name.check.`
-
-Special exit codes:
-
- - `100`\
- Generic build failure, the builder process returned with a non-zero
- exit code.
-
- - `101`\
- Build timeout, the build was aborted because it did not complete
- within the specified `timeout`.
-
- - `102`\
- Hash mismatch, the build output was rejected because it does not
- match the [`outputHash` attribute of the
- derivation](../language/advanced-attributes.md).
-
- - `104`\
- Not deterministic, the build succeeded in check mode but the
- resulting output is not binary reproducible.
-
-With the `--keep-going` flag it's possible for multiple failures to
-occur, in this case the 1xx status codes are or combined using binary
-or.
-
- 1100100
- ^^^^
- |||`- timeout
- ||`-- output hash mismatch
- |`--- build failure
- `---- not deterministic
-
-## Examples
-
-This operation is typically used to build [store derivation]s produced by
-[`nix-instantiate`](./nix-instantiate.md):
-
-[store derivation]: ../glossary.md#gloss-store-derivation
-
-```console
-$ nix-store -r $(nix-instantiate ./test.nix)
-/nix/store/31axcgrlbfsxzmfff1gyj1bf62hvkby2-aterm-2.3.1
-```
-
-This is essentially what [`nix-build`](nix-build.md) does.
-
-To test whether a previously-built derivation is deterministic:
-
-```console
-$ nix-build '<nixpkgs>' -A hello --check -K
-```
-
-Use [`--read-log`](#operation---read-log) to show the stderr and stdout of a build:
-
-```console
-$ nix-store --read-log $(nix-instantiate ./test.nix)
-```
-
-# Operation `--serve`
-
-## Synopsis
-
-`nix-store` `--serve` [`--write`]
-
-## Description
-
-The operation `--serve` provides access to the Nix store over stdin and
-stdout, and is intended to be used as a means of providing Nix store
-access to a restricted ssh user.
-
-The following flags are available:
-
- - `--write`\
- Allow the connected client to request the realization of
- derivations. In effect, this can be used to make the host act as a
- remote builder.
-
-## Examples
-
-To turn a host into a build server, the `authorized_keys` file can be
-used to provide build access to a given SSH public key:
-
-```console
-$ cat <<EOF >>/root/.ssh/authorized_keys
-command="nice -n20 nix-store --serve --write" ssh-rsa AAAAB3NzaC1yc2EAAAA...
-EOF
-```
-
-# Operation `--gc`
-
-## Synopsis
-
-`nix-store` `--gc` [`--print-roots` | `--print-live` | `--print-dead`] [`--max-freed` *bytes*]
-
-## Description
-
-Without additional flags, the operation `--gc` performs a garbage
-collection on the Nix store. That is, all paths in the Nix store not
-reachable via file system references from a set of “roots”, are deleted.
-
-The following suboperations may be specified:
-
- - `--print-roots`\
- This operation prints on standard output the set of roots used by
- the garbage collector.
-
- - `--print-live`\
- This operation prints on standard output the set of “live” store
- paths, which are all the store paths reachable from the roots. Live
- paths should never be deleted, since that would break consistency —
- it would become possible that applications are installed that
- reference things that are no longer present in the store.
-
- - `--print-dead`\
- This operation prints out on standard output the set of “dead” store
- paths, which is just the opposite of the set of live paths: any path
- in the store that is not live (with respect to the roots) is dead.
-
-By default, all unreachable paths are deleted. The following options
-control what gets deleted and in what order:
-
- - `--max-freed` *bytes*\
- Keep deleting paths until at least *bytes* bytes have been deleted,
- then stop. The argument *bytes* can be followed by the
- multiplicative suffix `K`, `M`, `G` or `T`, denoting KiB, MiB, GiB
- or TiB units.
-
-The behaviour of the collector is also influenced by the
-`keep-outputs` and `keep-derivations` settings in the Nix
-configuration file.
-
-By default, the collector prints the total number of freed bytes when it
-finishes (or when it is interrupted). With `--print-dead`, it prints the
-number of bytes that would be freed.
-
-## Examples
-
-To delete all unreachable paths, just do:
-
-```console
-$ nix-store --gc
-deleting `/nix/store/kq82idx6g0nyzsp2s14gfsc38npai7lf-cairo-1.0.4.tar.gz.drv'
-...
-8825586 bytes freed (8.42 MiB)
-```
-
-To delete at least 100 MiBs of unreachable paths:
-
-```console
-$ nix-store --gc --max-freed $((100 * 1024 * 1024))
-```
-
-# Operation `--delete`
-
-## Synopsis
-
-`nix-store` `--delete` [`--ignore-liveness`] *paths…*
-
-## Description
-
-The operation `--delete` deletes the store paths *paths* from the Nix
-store, but only if it is safe to do so; that is, when the path is not
-reachable from a root of the garbage collector. This means that you can
-only delete paths that would also be deleted by `nix-store --gc`. Thus,
-`--delete` is a more targeted version of `--gc`.
-
-With the option `--ignore-liveness`, reachability from the roots is
-ignored. However, the path still won’t be deleted if there are other
-paths in the store that refer to it (i.e., depend on it).
-
-## Example
-
-```console
-$ nix-store --delete /nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4
-0 bytes freed (0.00 MiB)
-error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4' since it is still alive
-```
-
-# Operation `--query`
-
-## Synopsis
-
-`nix-store` {`--query` | `-q`}
- {`--outputs` | `--requisites` | `-R` | `--references` |
- `--referrers` | `--referrers-closure` | `--deriver` | `-d` |
- `--graph` | `--tree` | `--binding` *name* | `-b` *name* | `--hash` |
- `--size` | `--roots`}
- [`--use-output`] [`-u`] [`--force-realise`] [`-f`]
- *paths…*
-
-## Description
-
-The operation `--query` displays information about [store path]s.
-The queries are described below. At most one query can be
-specified. The default query is `--outputs`.
-
-The paths *paths* may also be symlinks from outside of the Nix store, to
-the Nix store. In that case, the query is applied to the target of the
-symlink.
-
-## Common query options
-
- - `--use-output`; `-u`\
- For each argument to the query that is a [store derivation], apply the
- query to the output path of the derivation instead.
-
- - `--force-realise`; `-f`\
- Realise each argument to the query first (see [`nix-store
- --realise`](#operation---realise)).
-
-## Queries
-
- - `--outputs`\
- Prints out the [output path]s of the store
- derivations *paths*. These are the paths that will be produced when
- the derivation is built.
-
- - `--requisites`; `-R`\
- Prints out the [closure] of the given *paths*.
-
- This query has one option:
-
- - `--include-outputs`
- Also include the existing output paths of [store derivation]s,
- and their closures.
-
- This query can be used to implement various kinds of deployment. A
- *source deployment* is obtained by distributing the closure of a
- store derivation. A *binary deployment* is obtained by distributing
- the closure of an output path. A *cache deployment* (combined
- source/binary deployment, including binaries of build-time-only
- dependencies) is obtained by distributing the closure of a store
- derivation and specifying the option `--include-outputs`.
-
- - `--references`\
- Prints the set of [references]s of the store paths
- *paths*, that is, their immediate dependencies. (For *all*
- dependencies, use `--requisites`.)
-
- [reference]: ../glossary.md#gloss-reference
-
- - `--referrers`\
- Prints the set of *referrers* of the store paths *paths*, that is,
- the store paths currently existing in the Nix store that refer to
- one of *paths*. Note that contrary to the references, the set of
- referrers is not constant; it can change as store paths are added or
- removed.
-
- - `--referrers-closure`\
- Prints the closure of the set of store paths *paths* under the
- referrers relation; that is, all store paths that directly or
- indirectly refer to one of *paths*. These are all the path currently
- in the Nix store that are dependent on *paths*.
-
- - `--deriver`; `-d`\
- Prints the [deriver] of the store paths *paths*. If
- the path has no deriver (e.g., if it is a source file), or if the
- deriver is not known (e.g., in the case of a binary-only
- deployment), the string `unknown-deriver` is printed.
-
- [deriver]: ../glossary.md#gloss-deriver
-
- - `--graph`\
- Prints the references graph of the store paths *paths* in the format
- of the `dot` tool of AT\&T's [Graphviz
- package](http://www.graphviz.org/). This can be used to visualise
- dependency graphs. To obtain a build-time dependency graph, apply
- this to a store derivation. To obtain a runtime dependency graph,
- apply it to an output path.
-
- - `--tree`\
- Prints the references graph of the store paths *paths* as a nested
- ASCII tree. References are ordered by descending closure size; this
- tends to flatten the tree, making it more readable. The query only
- recurses into a store path when it is first encountered; this
- prevents a blowup of the tree representation of the graph.
-
- - `--graphml`\
- Prints the references graph of the store paths *paths* in the
- [GraphML](http://graphml.graphdrawing.org/) file format. This can be
- used to visualise dependency graphs. To obtain a build-time
- dependency graph, apply this to a [store derivation]. To obtain a
- runtime dependency graph, apply it to an output path.
-
- - `--binding` *name*; `-b` *name*\
- Prints the value of the attribute *name* (i.e., environment
- variable) of the [store derivation]s *paths*. It is an error for a
- derivation to not have the specified attribute.
-
- - `--hash`\
- Prints the SHA-256 hash of the contents of the store paths *paths*
- (that is, the hash of the output of `nix-store --dump` on the given
- paths). Since the hash is stored in the Nix database, this is a fast
- operation.
-
- - `--size`\
- Prints the size in bytes of the contents of the store paths *paths*
- — to be precise, the size of the output of `nix-store --dump` on
- the given paths. Note that the actual disk space required by the
- store paths may be higher, especially on filesystems with large
- cluster sizes.
-
- - `--roots`\
- Prints the garbage collector roots that point, directly or
- indirectly, at the store paths *paths*.
-
-## Examples
-
-Print the closure (runtime dependencies) of the `svn` program in the
-current user environment:
-
-```console
-$ nix-store -qR $(which svn)
-/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4
-/nix/store/9lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4
-...
-```
-
-Print the build-time dependencies of `svn`:
-
-```console
-$ nix-store -qR $(nix-store -qd $(which svn))
-/nix/store/02iizgn86m42q905rddvg4ja975bk2i4-grep-2.5.1.tar.bz2.drv
-/nix/store/07a2bzxmzwz5hp58nf03pahrv2ygwgs3-gcc-wrapper.sh
-/nix/store/0ma7c9wsbaxahwwl04gbw3fcd806ski4-glibc-2.3.4.drv
-... lots of other paths ...
-```
-
-The difference with the previous example is that we ask the closure of
-the derivation (`-qd`), not the closure of the output path that contains
-`svn`.
-
-Show the build-time dependencies as a tree:
-
-```console
-$ nix-store -q --tree $(nix-store -qd $(which svn))
-/nix/store/7i5082kfb6yjbqdbiwdhhza0am2xvh6c-subversion-1.1.4.drv
-+---/nix/store/d8afh10z72n8l1cr5w42366abiblgn54-builder.sh
-+---/nix/store/fmzxmpjx2lh849ph0l36snfj9zdibw67-bash-3.0.drv
-| +---/nix/store/570hmhmx3v57605cqg9yfvvyh0nnb8k8-bash
-| +---/nix/store/p3srsbd8dx44v2pg6nbnszab5mcwx03v-builder.sh
-...
-```
-
-Show all paths that depend on the same OpenSSL library as `svn`:
-
-```console
-$ nix-store -q --referrers $(nix-store -q --binding openssl $(nix-store -qd $(which svn)))
-/nix/store/23ny9l9wixx21632y2wi4p585qhva1q8-sylpheed-1.0.0
-/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4
-/nix/store/dpmvp969yhdqs7lm2r1a3gng7pyq6vy4-subversion-1.1.3
-/nix/store/l51240xqsgg8a7yrbqdx1rfzyv6l26fx-lynx-2.8.5
-```
-
-Show all paths that directly or indirectly depend on the Glibc (C
-library) used by `svn`:
-
-```console
-$ nix-store -q --referrers-closure $(ldd $(which svn) | grep /libc.so | awk '{print $3}')
-/nix/store/034a6h4vpz9kds5r6kzb9lhh81mscw43-libgnomeprintui-2.8.2
-/nix/store/15l3yi0d45prm7a82pcrknxdh6nzmxza-gawk-3.1.4
-...
-```
-
-Note that `ldd` is a command that prints out the dynamic libraries used
-by an ELF executable.
-
-Make a picture of the runtime dependency graph of the current user
-environment:
-
-```console
-$ nix-store -q --graph ~/.nix-profile | dot -Tps > graph.ps
-$ gv graph.ps
-```
-
-Show every garbage collector root that points to a store path that
-depends on `svn`:
-
-```console
-$ nix-store -q --roots $(which svn)
-/nix/var/nix/profiles/default-81-link
-/nix/var/nix/profiles/default-82-link
-/nix/var/nix/profiles/per-user/eelco/profile-97-link
-```
-
-# Operation `--add`
-
-## Synopsis
-
-`nix-store` `--add` *paths…*
-
-## Description
-
-The operation `--add` adds the specified paths to the Nix store. It
-prints the resulting paths in the Nix store on standard output.
-
-## Example
-
-```console
-$ nix-store --add ./foo.c
-/nix/store/m7lrha58ph6rcnv109yzx1nk1cj7k7zf-foo.c
-```
-
-# Operation `--add-fixed`
-
-## Synopsis
-
-`nix-store` `--add-fixed` [`--recursive`] *algorithm* *paths…*
-
-## Description
-
-The operation `--add-fixed` adds the specified paths to the Nix store.
-Unlike `--add` paths are registered using the specified hashing
-algorithm, resulting in the same output path as a fixed-output
-derivation. This can be used for sources that are not available from a
-public url or broke since the download expression was written.
-
-This operation has the following options:
-
- - `--recursive`\
- Use recursive instead of flat hashing mode, used when adding
- directories to the store.
-
-## Example
-
-```console
-$ nix-store --add-fixed sha256 ./hello-2.10.tar.gz
-/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz
-```
-
-# Operation `--verify`
-
-## Synopsis
-
-`nix-store` `--verify` [`--check-contents`] [`--repair`]
-
-## Description
-
-The operation `--verify` verifies the internal consistency of the Nix
-database, and the consistency between the Nix database and the Nix
-store. Any inconsistencies encountered are automatically repaired.
-Inconsistencies are generally the result of the Nix store or database
-being modified by non-Nix tools, or of bugs in Nix itself.
-
-This operation has the following options:
-
- - `--check-contents`\
- Checks that the contents of every valid store path has not been
- altered by computing a SHA-256 hash of the contents and comparing it
- with the hash stored in the Nix database at build time. Paths that
- have been modified are printed out. For large stores,
- `--check-contents` is obviously quite slow.
-
- - `--repair`\
- If any valid path is missing from the store, or (if
- `--check-contents` is given) the contents of a valid path has been
- modified, then try to repair the path by redownloading it. See
- `nix-store --repair-path` for details.
-
-# Operation `--verify-path`
-
-## Synopsis
-
-`nix-store` `--verify-path` *paths…*
-
-## Description
-
-The operation `--verify-path` compares the contents of the given store
-paths to their cryptographic hashes stored in Nix’s database. For every
-changed path, it prints a warning message. The exit status is 0 if no
-path has changed, and 1 otherwise.
-
-## Example
-
-To verify the integrity of the `svn` command and all its dependencies:
-
-```console
-$ nix-store --verify-path $(nix-store -qR $(which svn))
-```
-
-# Operation `--repair-path`
-
-## Synopsis
-
-`nix-store` `--repair-path` *paths…*
-
-## Description
-
-The operation `--repair-path` attempts to “repair” the specified paths
-by redownloading them using the available substituters. If no
-substitutes are available, then repair is not possible.
-
-> **Warning**
->
-> During repair, there is a very small time window during which the old
-> path (if it exists) is moved out of the way and replaced with the new
-> path. If repair is interrupted in between, then the system may be left
-> in a broken state (e.g., if the path contains a critical system
-> component like the GNU C Library).
-
-## Example
-
-```console
-$ nix-store --verify-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13
-path `/nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13' was modified!
- expected hash `2db57715ae90b7e31ff1f2ecb8c12ec1cc43da920efcbe3b22763f36a1861588',
- got `481c5aa5483ebc97c20457bb8bca24deea56550d3985cda0027f67fe54b808e4'
-
-$ nix-store --repair-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13
-fetching path `/nix/store/d7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13'...
-…
-```
-
-# Operation `--dump`
-
-## Synopsis
-
-`nix-store` `--dump` *path*
-
-## Description
-
-The operation `--dump` produces a NAR (Nix ARchive) file containing the
-contents of the file system tree rooted at *path*. The archive is
-written to standard output.
-
-A NAR archive is like a TAR or Zip archive, but it contains only the
-information that Nix considers important. For instance, timestamps are
-elided because all files in the Nix store have their timestamp set to 1
-anyway. Likewise, all permissions are left out except for the execute
-bit, because all files in the Nix store have 444 or 555 permission.
-
-Also, a NAR archive is *canonical*, meaning that “equal” paths always
-produce the same NAR archive. For instance, directory entries are
-always sorted so that the actual on-disk order doesn’t influence the
-result. This means that the cryptographic hash of a NAR dump of a
-path is usable as a fingerprint of the contents of the path. Indeed,
-the hashes of store paths stored in Nix’s database (see `nix-store -q
---hash`) are SHA-256 hashes of the NAR dump of each store path.
-
-NAR archives support filenames of unlimited length and 64-bit file
-sizes. They can contain regular files, directories, and symbolic links,
-but not other types of files (such as device nodes).
-
-A Nix archive can be unpacked using `nix-store
---restore`.
-
-# Operation `--restore`
-
-## Synopsis
-
-`nix-store` `--restore` *path*
-
-## Description
-
-The operation `--restore` unpacks a NAR archive to *path*, which must
-not already exist. The archive is read from standard input.
-
-# Operation `--export`
-
-## Synopsis
-
-`nix-store` `--export` *paths…*
-
-## Description
-
-The operation `--export` writes a serialisation of the specified store
-paths to standard output in a format that can be imported into another
-Nix store with `nix-store --import`. This is like `nix-store
---dump`, except that the NAR archive produced by that command doesn’t
-contain the necessary meta-information to allow it to be imported into
-another Nix store (namely, the set of references of the path).
-
-This command does not produce a *closure* of the specified paths, so if
-a store path references other store paths that are missing in the target
-Nix store, the import will fail. To copy a whole closure, do something
-like:
-
-```console
-$ nix-store --export $(nix-store -qR paths) > out
-```
-
-To import the whole closure again, run:
-
-```console
-$ nix-store --import < out
-```
-
-# Operation `--import`
-
-## Synopsis
-
-`nix-store` `--import`
-
-## Description
-
-The operation `--import` reads a serialisation of a set of store paths
-produced by `nix-store --export` from standard input and adds those
-store paths to the Nix store. Paths that already exist in the Nix store
-are ignored. If a path refers to another path that doesn’t exist in the
-Nix store, the import fails.
-
-# Operation `--optimise`
-
-## Synopsis
-
-`nix-store` `--optimise`
-
-## Description
-
-The operation `--optimise` reduces Nix store disk space usage by finding
-identical files in the store and hard-linking them to each other. It
-typically reduces the size of the store by something like 25-35%. Only
-regular files and symlinks are hard-linked in this manner. Files are
-considered identical when they have the same NAR archive serialisation:
-that is, regular files must have the same contents and permission
-(executable or non-executable), and symlinks must have the same
-contents.
-
-After completion, or when the command is interrupted, a report on the
-achieved savings is printed on standard error.
-
-Use `-vv` or `-vvv` to get some progress indication.
-
-## Example
-
-```console
-$ nix-store --optimise
-hashing files in `/nix/store/qhqx7l2f1kmwihc9bnxs7rc159hsxnf3-gcc-4.1.1'
-...
-541838819 bytes (516.74 MiB) freed by hard-linking 54143 files;
-there are 114486 files with equal contents out of 215894 files in total
-```
-
-# Operation `--read-log`
-
-## Synopsis
-
-`nix-store` {`--read-log` | `-l`} *paths…*
-
-## Description
-
-The operation `--read-log` prints the build log of the specified store
-paths on standard output. The build log is whatever the builder of a
-derivation wrote to standard output and standard error. If a store path
-is not a derivation, the deriver of the store path is used.
-
-Build logs are kept in `/nix/var/log/nix/drvs`. However, there is no
-guarantee that a build log is available for any particular store path.
-For instance, if the path was downloaded as a pre-built binary through a
-substitute, then the log is unavailable.
-
-## Example
-
-```console
-$ nix-store -l $(which ktorrent)
-building /nix/store/dhc73pvzpnzxhdgpimsd9sw39di66ph1-ktorrent-2.2.1
-unpacking sources
-unpacking source archive /nix/store/p8n1jpqs27mgkjw07pb5269717nzf5f8-ktorrent-2.2.1.tar.gz
-ktorrent-2.2.1/
-ktorrent-2.2.1/NEWS
-...
-```
-
-# Operation `--dump-db`
-
-## Synopsis
-
-`nix-store` `--dump-db` [*paths…*]
-
-## Description
-
-The operation `--dump-db` writes a dump of the Nix database to standard
-output. It can be loaded into an empty Nix store using `--load-db`. This
-is useful for making backups and when migrating to different database
-schemas.
-
-By default, `--dump-db` will dump the entire Nix database. When one or
-more store paths is passed, only the subset of the Nix database for
-those store paths is dumped. As with `--export`, the user is responsible
-for passing all the store paths for a closure. See `--export` for an
-example.
-
-# Operation `--load-db`
-
-## Synopsis
-
-`nix-store` `--load-db`
-
-## Description
-
-The operation `--load-db` reads a dump of the Nix database created by
-`--dump-db` from standard input and loads it into the Nix database.
-
-# Operation `--print-env`
-
-## Synopsis
-
-`nix-store` `--print-env` *drvpath*
-
-## Description
-
-The operation `--print-env` prints out the environment of a derivation
-in a format that can be evaluated by a shell. The command line arguments
-of the builder are placed in the variable `_args`.
-
-## Example
-
-```console
-$ nix-store --print-env $(nix-instantiate '<nixpkgs>' -A firefox)
-…
-export src; src='/nix/store/plpj7qrwcz94z2psh6fchsi7s8yihc7k-firefox-12.0.source.tar.bz2'
-export stdenv; stdenv='/nix/store/7c8asx3yfrg5dg1gzhzyq2236zfgibnn-stdenv'
-export system; system='x86_64-linux'
-export _args; _args='-e /nix/store/9krlzvny65gdc8s7kpb6lkx8cd02c25c-default-builder.sh'
-```
-
-# Operation `--generate-binary-cache-key`
-
-## Synopsis
-
-`nix-store` `--generate-binary-cache-key` *key-name* *secret-key-file* *public-key-file*
-
-## Description
-
-This command generates an [Ed25519 key pair](http://ed25519.cr.yp.to/)
-that can be used to create a signed binary cache. It takes three
-mandatory parameters:
-
-1. A key name, such as `cache.example.org-1`, that is used to look up
- keys on the client when it verifies signatures. It can be anything,
- but it’s suggested to use the host name of your cache (e.g.
- `cache.example.org`) with a suffix denoting the number of the key
- (to be incremented every time you need to revoke a key).
-
-2. The file name where the secret key is to be stored.
-
-3. The file name where the public key is to be stored.
+`nix-store` takes exactly one *operation* flag which indicates the subcommand to be performed. The following operations are available:
+
+- [`--realise`](./nix-store/realise.md)
+- [`--serve`](./nix-store/serve.md)
+- [`--gc`](./nix-store/gc.md)
+- [`--delete`](./nix-store/delete.md)
+- [`--query`](./nix-store/query.md)
+- [`--add`](./nix-store/add.md)
+- [`--add-fixed`](./nix-store/add-fixed.md)
+- [`--verify`](./nix-store/verify.md)
+- [`--verify-path`](./nix-store/verify-path.md)
+- [`--repair-path`](./nix-store/repair-path.md)
+- [`--dump`](./nix-store/dump.md)
+- [`--restore`](./nix-store/restore.md)
+- [`--export`](./nix-store/export.md)
+- [`--import`](./nix-store/import.md)
+- [`--optimise`](./nix-store/optimise.md)
+- [`--read-log`](./nix-store/read-log.md)
+- [`--dump-db`](./nix-store/dump-db.md)
+- [`--load-db`](./nix-store/load-db.md)
+- [`--print-env`](./nix-store/print-env.md)
+- [`--generate-binary-cache-key`](./nix-store/generate-binary-cache-key.md)
+
+These pages can be viewed offline:
+
+- `man nix-store-<operation>`.
+
+ Example: `man nix-store-realise`
+
+- `nix-store --help --<operation>`
+
+ Example: `nix-store --help --realise`
diff --git a/doc/manual/src/command-ref/nix-store/add-fixed.md b/doc/manual/src/command-ref/nix-store/add-fixed.md
new file mode 100644
index 000000000..d25db091c
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/add-fixed.md
@@ -0,0 +1,35 @@
+# Name
+
+`nix-store --add-fixed` - add paths to store using given hashing algorithm
+
+## Synopsis
+
+`nix-store` `--add-fixed` [`--recursive`] *algorithm* *paths…*
+
+## Description
+
+The operation `--add-fixed` adds the specified paths to the Nix store.
+Unlike `--add`, paths are registered using the specified hashing
+algorithm, resulting in the same output path as a fixed-output
+derivation. This can be used for sources that are not available from a
+public URL or that have broken since the download expression was written.
+
+This operation has the following options:
+
+ - `--recursive`\
+ Use recursive instead of flat hashing mode, used when adding
+ directories to the store.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+## Example
+
+```console
+$ nix-store --add-fixed sha256 ./hello-2.10.tar.gz
+/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz
+```
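+
+As a sketch, a directory can be added with recursive hashing as well; the
+directory name `./hello-2.10` here is only an assumed example:
+
+```console
+$ nix-store --add-fixed --recursive sha256 ./hello-2.10
+```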
+
diff --git a/doc/manual/src/command-ref/nix-store/add.md b/doc/manual/src/command-ref/nix-store/add.md
new file mode 100644
index 000000000..87d504cd3
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/add.md
@@ -0,0 +1,25 @@
+# Name
+
+`nix-store --add` - add paths to Nix store
+
+# Synopsis
+
+`nix-store` `--add` *paths…*
+
+# Description
+
+The operation `--add` adds the specified paths to the Nix store. It
+prints the resulting paths in the Nix store on standard output.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+# Example
+
+```console
+$ nix-store --add ./foo.c
+/nix/store/m7lrha58ph6rcnv109yzx1nk1cj7k7zf-foo.c
+```
diff --git a/doc/manual/src/command-ref/nix-store/delete.md b/doc/manual/src/command-ref/nix-store/delete.md
new file mode 100644
index 000000000..550c5ea29
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/delete.md
@@ -0,0 +1,33 @@
+# Name
+
+`nix-store --delete` - delete store paths
+
+# Synopsis
+
+`nix-store` `--delete` [`--ignore-liveness`] *paths…*
+
+# Description
+
+The operation `--delete` deletes the store paths *paths* from the Nix
+store, but only if it is safe to do so; that is, when the path is not
+reachable from a root of the garbage collector. This means that you can
+only delete paths that would also be deleted by `nix-store --gc`. Thus,
+`--delete` is a more targeted version of `--gc`.
+
+With the option `--ignore-liveness`, reachability from the roots is
+ignored. However, the path still won’t be deleted if there are other
+paths in the store that refer to it (i.e., depend on it).
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+# Example
+
+```console
+$ nix-store --delete /nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4
+0 bytes freed (0.00 MiB)
+error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4' since it is still alive
+```
diff --git a/doc/manual/src/command-ref/nix-store/dump-db.md b/doc/manual/src/command-ref/nix-store/dump-db.md
new file mode 100644
index 000000000..b2c77ced0
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/dump-db.md
@@ -0,0 +1,26 @@
+# Name
+
+`nix-store --dump-db` - export Nix database
+
+# Synopsis
+
+`nix-store` `--dump-db` [*paths…*]
+
+# Description
+
+The operation `--dump-db` writes a dump of the Nix database to standard
+output. It can be loaded into an empty Nix store using `--load-db`. This
+is useful for making backups and when migrating to different database
+schemas.
+
+By default, `--dump-db` will dump the entire Nix database. When one or
+more store paths are passed, only the subset of the Nix database for
+those store paths is dumped. As with `--export`, the user is responsible
+for passing all the store paths for a closure. See `--export` for an
+example.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
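+
+# Example
+
+A sketch of backing up the entire database to an assumed file
+`/tmp/nix-db-dump`, which can later be read back with `--load-db`:
+
+```console
+$ nix-store --dump-db > /tmp/nix-db-dump
+```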
diff --git a/doc/manual/src/command-ref/nix-store/dump.md b/doc/manual/src/command-ref/nix-store/dump.md
new file mode 100644
index 000000000..c2f3c42ef
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/dump.md
@@ -0,0 +1,40 @@
+# Name
+
+`nix-store --dump` - write a single path to a Nix Archive
+
+## Synopsis
+
+`nix-store` `--dump` *path*
+
+## Description
+
+The operation `--dump` produces a NAR (Nix ARchive) file containing the
+contents of the file system tree rooted at *path*. The archive is
+written to standard output.
+
+A NAR archive is like a TAR or Zip archive, but it contains only the
+information that Nix considers important. For instance, timestamps are
+elided because all files in the Nix store have their timestamp set to 1
+anyway. Likewise, all permissions are left out except for the execute
+bit, because all files in the Nix store have 444 or 555 permission.
+
+Also, a NAR archive is *canonical*, meaning that “equal” paths always
+produce the same NAR archive. For instance, directory entries are
+always sorted so that the actual on-disk order doesn’t influence the
+result. This means that the cryptographic hash of a NAR dump of a
+path is usable as a fingerprint of the contents of the path. Indeed,
+the hashes of store paths stored in Nix’s database (see `nix-store --query
+--hash`) are SHA-256 hashes of the NAR dump of each store path.
+
+NAR archives support filenames of unlimited length and 64-bit file
+sizes. They can contain regular files, directories, and symbolic links,
+but not other types of files (such as device nodes).
+
+A Nix archive can be unpacked using `nix-store
+--restore`.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
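+
+## Example
+
+A sketch of archiving an assumed local directory `./pkg` into a NAR file,
+which can later be unpacked with `nix-store --restore`:
+
+```console
+$ nix-store --dump ./pkg > pkg.nar
+```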
diff --git a/doc/manual/src/command-ref/nix-store/export.md b/doc/manual/src/command-ref/nix-store/export.md
new file mode 100644
index 000000000..1bc46f53b
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/export.md
@@ -0,0 +1,41 @@
+# Name
+
+`nix-store --export` - export store paths to a Nix Archive
+
+## Synopsis
+
+`nix-store` `--export` *paths…*
+
+## Description
+
+The operation `--export` writes a serialisation of the specified store
+paths to standard output in a format that can be imported into another
+Nix store with `nix-store --import`. This is like `nix-store
+--dump`, except that the NAR archive produced by that command doesn’t
+contain the necessary meta-information to allow it to be imported into
+another Nix store (namely, the set of references of the path).
+
+This command does not produce a *closure* of the specified paths, so if
+a store path references other store paths that are missing in the target
+Nix store, the import will fail.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+## Examples
+
+To copy a whole closure, do something
+like:
+
+```console
+$ nix-store --export $(nix-store --query --requisites paths) > out
+```
+
+To import the whole closure again, run:
+
+```console
+$ nix-store --import < out
+```
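+
+As a sketch, the two commands can also be combined to copy a closure to
+another machine over SSH (assuming a reachable host named `mars` with Nix
+installed):
+
+```console
+$ nix-store --export $(nix-store --query --requisites paths) | ssh mars nix-store --import
+```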
diff --git a/doc/manual/src/command-ref/nix-store/gc.md b/doc/manual/src/command-ref/nix-store/gc.md
new file mode 100644
index 000000000..7be0d559a
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/gc.md
@@ -0,0 +1,72 @@
+# Name
+
+`nix-store --gc` - run garbage collection
+
+# Synopsis
+
+`nix-store` `--gc` [`--print-roots` | `--print-live` | `--print-dead`] [`--max-freed` *bytes*]
+
+# Description
+
+Without additional flags, the operation `--gc` performs a garbage
+collection on the Nix store. That is, all paths in the Nix store not
+reachable via file system references from a set of “roots” are deleted.
+
+The following suboperations may be specified:
+
+ - `--print-roots`\
+ This operation prints on standard output the set of roots used by
+ the garbage collector.
+
+ - `--print-live`\
+ This operation prints on standard output the set of “live” store
+ paths, which are all the store paths reachable from the roots. Live
+ paths should never be deleted, since that would break consistency —
+ it would become possible that applications are installed that
+ reference things that are no longer present in the store.
+
+ - `--print-dead`\
+ This operation prints out on standard output the set of “dead” store
+ paths, which is just the opposite of the set of live paths: any path
+ in the store that is not live (with respect to the roots) is dead.
+
+By default, all unreachable paths are deleted. The following options
+control what gets deleted and in what order:
+
+ - `--max-freed` *bytes*\
+ Keep deleting paths until at least *bytes* bytes have been deleted,
+ then stop. The argument *bytes* can be followed by the
+ multiplicative suffix `K`, `M`, `G` or `T`, denoting KiB, MiB, GiB
+ or TiB units.
+
+The behaviour of the collector is also influenced by the
+`keep-outputs` and `keep-derivations` settings in the Nix
+configuration file.
+
+By default, the collector prints the total number of freed bytes when it
+finishes (or when it is interrupted). With `--print-dead`, it prints the
+number of bytes that would be freed.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+To delete all unreachable paths, just do:
+
+```console
+$ nix-store --gc
+deleting `/nix/store/kq82idx6g0nyzsp2s14gfsc38npai7lf-cairo-1.0.4.tar.gz.drv'
+...
+8825586 bytes freed (8.42 MiB)
+```
+
+To delete at least 100 MiBs of unreachable paths:
+
+```console
+$ nix-store --gc --max-freed $((100 * 1024 * 1024))
+```
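+
+For example, to print the garbage collector roots without deleting
+anything:
+
+```console
+$ nix-store --gc --print-roots
+```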
+
diff --git a/doc/manual/src/command-ref/nix-store/generate-binary-cache-key.md b/doc/manual/src/command-ref/nix-store/generate-binary-cache-key.md
new file mode 100644
index 000000000..8085d877b
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/generate-binary-cache-key.md
@@ -0,0 +1,29 @@
+# Name
+
+`nix-store --generate-binary-cache-key` - generate key pair to use for a binary cache
+
+## Synopsis
+
+`nix-store` `--generate-binary-cache-key` *key-name* *secret-key-file* *public-key-file*
+
+## Description
+
+This command generates an [Ed25519 key pair](http://ed25519.cr.yp.to/)
+that can be used to create a signed binary cache. It takes three
+mandatory parameters:
+
+1. A key name, such as `cache.example.org-1`, that is used to look up
+ keys on the client when it verifies signatures. It can be anything,
+ but it’s suggested to use the host name of your cache (e.g.
+ `cache.example.org`) with a suffix denoting the number of the key
+ (to be incremented every time you need to revoke a key).
+
+2. The file name where the secret key is to be stored.
+
+3. The file name where the public key is to be stored.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
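+
+## Example
+
+A sketch using the key name from above; the file names for the secret and
+public key are only assumed examples:
+
+```console
+$ nix-store --generate-binary-cache-key cache.example.org-1 cache-priv-key.pem cache-pub-key.pem
+```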
diff --git a/doc/manual/src/command-ref/nix-store/import.md b/doc/manual/src/command-ref/nix-store/import.md
new file mode 100644
index 000000000..2711316a7
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/import.md
@@ -0,0 +1,21 @@
+# Name
+
+`nix-store --import` - import Nix Archive into the store
+
+# Synopsis
+
+`nix-store` `--import`
+
+# Description
+
+The operation `--import` reads a serialisation of a set of store paths
+produced by `nix-store --export` from standard input and adds those
+store paths to the Nix store. Paths that already exist in the Nix store
+are ignored. If a path refers to another path that doesn’t exist in the
+Nix store, the import fails.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
diff --git a/doc/manual/src/command-ref/nix-store/load-db.md b/doc/manual/src/command-ref/nix-store/load-db.md
new file mode 100644
index 000000000..e2f438ed6
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/load-db.md
@@ -0,0 +1,18 @@
+# Name
+
+`nix-store --load-db` - import Nix database
+
+# Synopsis
+
+`nix-store` `--load-db`
+
+# Description
+
+The operation `--load-db` reads a dump of the Nix database created by
+`--dump-db` from standard input and loads it into the Nix database.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
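+
+# Example
+
+A sketch of restoring a database dump from an assumed file
+`/tmp/nix-db-dump` created earlier with `nix-store --dump-db`:
+
+```console
+$ nix-store --load-db < /tmp/nix-db-dump
+```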
diff --git a/doc/manual/src/command-ref/nix-store/opt-common.md b/doc/manual/src/command-ref/nix-store/opt-common.md
new file mode 100644
index 000000000..dd9a6bf21
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/opt-common.md
@@ -0,0 +1,36 @@
+# Options
+
+The following options are allowed for all `nix-store` operations, but may not always have an effect.
+
+- <span id="opt-add-root">[`--add-root`](#opt-add-root)</span> *path*
+
+ Causes the result of a realisation (`--realise` and
+ `--force-realise`) to be registered as a root of the garbage
+ collector. *path* will be created as a symlink to the resulting
+ store path. In addition, a uniquely named symlink to *path* will
+ be created in `/nix/var/nix/gcroots/auto/`. For instance,
+
+ ```console
+ $ nix-store --add-root /home/eelco/bla/result --realise ...
+
+ $ ls -l /nix/var/nix/gcroots/auto
+ lrwxrwxrwx 1 ... 2005-03-13 21:10 dn54lcypm8f8... -> /home/eelco/bla/result
+
+ $ ls -l /home/eelco/bla/result
+ lrwxrwxrwx 1 ... 2005-03-13 21:10 /home/eelco/bla/result -> /nix/store/1r11343n6qd4...-f-spot-0.0.10
+ ```
+
+ Thus, when `/home/eelco/bla/result` is removed, the GC root in the
+ `auto` directory becomes a dangling symlink and will be ignored by
+ the collector.
+
+ > **Warning**
+ >
+ > Note that it is not possible to move or rename GC roots, since
+ > the symlink in the `auto` directory will still point to the old
+ > location.
+
+ If there are multiple results, then multiple symlinks will be
+ created by sequentially numbering symlinks beyond the first one
+ (e.g., `foo`, `foo-2`, `foo-3`, and so on).
+
diff --git a/doc/manual/src/command-ref/nix-store/optimise.md b/doc/manual/src/command-ref/nix-store/optimise.md
new file mode 100644
index 000000000..dc392aeb8
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/optimise.md
@@ -0,0 +1,40 @@
+# Name
+
+`nix-store --optimise` - reduce disk space usage
+
+## Synopsis
+
+`nix-store` `--optimise`
+
+## Description
+
+The operation `--optimise` reduces Nix store disk space usage by finding
+identical files in the store and hard-linking them to each other. It
+typically reduces the size of the store by something like 25-35%. Only
+regular files and symlinks are hard-linked in this manner. Files are
+considered identical when they have the same NAR archive serialisation:
+that is, regular files must have the same contents and permission
+(executable or non-executable), and symlinks must have the same
+contents.
+
+After completion, or when the command is interrupted, a report on the
+achieved savings is printed on standard error.
+
+Use `-vv` or `-vvv` to get some progress indication.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+## Example
+
+```console
+$ nix-store --optimise
+hashing files in `/nix/store/qhqx7l2f1kmwihc9bnxs7rc159hsxnf3-gcc-4.1.1'
+...
+541838819 bytes (516.74 MiB) freed by hard-linking 54143 files;
+there are 114486 files with equal contents out of 215894 files in total
+```
+
diff --git a/doc/manual/src/command-ref/nix-store/print-env.md b/doc/manual/src/command-ref/nix-store/print-env.md
new file mode 100644
index 000000000..bd2084ef6
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/print-env.md
@@ -0,0 +1,31 @@
+# Name
+
+`nix-store --print-env` - print the build environment of a derivation
+
+## Synopsis
+
+`nix-store` `--print-env` *drvpath*
+
+## Description
+
+The operation `--print-env` prints out the environment of a derivation
+in a format that can be evaluated by a shell. The command line arguments
+of the builder are placed in the variable `_args`.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+## Example
+
+```console
+$ nix-store --print-env $(nix-instantiate '<nixpkgs>' -A firefox)
+…
+export src; src='/nix/store/plpj7qrwcz94z2psh6fchsi7s8yihc7k-firefox-12.0.source.tar.bz2'
+export stdenv; stdenv='/nix/store/7c8asx3yfrg5dg1gzhzyq2236zfgibnn-stdenv'
+export system; system='x86_64-linux'
+export _args; _args='-e /nix/store/9krlzvny65gdc8s7kpb6lkx8cd02c25c-default-builder.sh'
+```
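+
+Since the output is shell code, it can be loaded into the current shell; a
+sketch reusing the invocation above:
+
+```console
+$ eval "$(nix-store --print-env $(nix-instantiate '<nixpkgs>' -A firefox))"
+$ echo $system
+x86_64-linux
+```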
+
diff --git a/doc/manual/src/command-ref/nix-store/query.md b/doc/manual/src/command-ref/nix-store/query.md
new file mode 100644
index 000000000..cd45a4932
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/query.md
@@ -0,0 +1,220 @@
+# Name
+
+`nix-store --query` - display information about store paths
+
+# Synopsis
+
+`nix-store` {`--query` | `-q`}
+ {`--outputs` | `--requisites` | `-R` | `--references` |
+ `--referrers` | `--referrers-closure` | `--deriver` | `-d` |
+ `--graph` | `--tree` | `--binding` *name* | `-b` *name* | `--hash` |
+ `--size` | `--roots`}
+ [`--use-output`] [`-u`] [`--force-realise`] [`-f`]
+ *paths…*
+
+# Description
+
+The operation `--query` displays various bits of information about the
+store paths *paths*. The queries are described below. At most one query can be
+specified. The default query is `--outputs`.
+
+The paths *paths* may also be symlinks from outside of the Nix store, to
+the Nix store. In that case, the query is applied to the target of the
+symlink.
+
+# Common query options
+
+ - `--use-output`; `-u`\
+ For each argument to the query that is a [store derivation], apply the
+ query to the output path of the derivation instead.
+
+ - `--force-realise`; `-f`\
+ Realise each argument to the query first (see [`nix-store --realise`](./realise.md)).
+
+[store derivation]: @docroot@/glossary.md#gloss-store-derivation
+
+# Queries
+
+ - `--outputs`\
+ Prints out the [output paths] of the store
+ derivations *paths*. These are the paths that will be produced when
+ the derivation is built.
+
+ [output paths]: ../../glossary.md#gloss-output-path
+
+ - `--requisites`; `-R`\
+ Prints out the [closure] of the store paths *paths*.
+
+ [closure]: ../../glossary.md#gloss-closure
+
+ This query has one option:
+
+ - `--include-outputs`
+ Also include the existing output paths of [store derivation]s,
+ and their closures.
+
+ This query can be used to implement various kinds of deployment. A
+ *source deployment* is obtained by distributing the closure of a
+ store derivation. A *binary deployment* is obtained by distributing
+ the closure of an output path. A *cache deployment* (combined
+ source/binary deployment, including binaries of build-time-only
+ dependencies) is obtained by distributing the closure of a store
+ derivation and specifying the option `--include-outputs`.
+
+ - `--references`\
+ Prints the set of [references] of the store paths
+ *paths*, that is, their immediate dependencies. (For *all*
+ dependencies, use `--requisites`.)
+
+ [references]: ../../glossary.md#gloss-reference
+
+ - `--referrers`\
+ Prints the set of *referrers* of the store paths *paths*, that is,
+ the store paths currently existing in the Nix store that refer to
+ one of *paths*. Note that contrary to the references, the set of
+ referrers is not constant; it can change as store paths are added or
+ removed.
+
+ - `--referrers-closure`\
+ Prints the closure of the set of store paths *paths* under the
+ referrers relation; that is, all store paths that directly or
+ indirectly refer to one of *paths*. These are all the paths currently
+ in the Nix store that are dependent on *paths*.
+
+ - `--deriver`; `-d`\
+ Prints the [deriver] of the store paths *paths*. If
+ the path has no deriver (e.g., if it is a source file), or if the
+ deriver is not known (e.g., in the case of a binary-only
+ deployment), the string `unknown-deriver` is printed.
+
+ [deriver]: ../../glossary.md#gloss-deriver
+
+ - `--graph`\
+ Prints the references graph of the store paths *paths* in the format
+ of the `dot` tool of AT\&T's [Graphviz
+ package](http://www.graphviz.org/). This can be used to visualise
+ dependency graphs. To obtain a build-time dependency graph, apply
+ this to a store derivation. To obtain a runtime dependency graph,
+ apply it to an output path.
+
+ - `--tree`\
+ Prints the references graph of the store paths *paths* as a nested
+ ASCII tree. References are ordered by descending closure size; this
+ tends to flatten the tree, making it more readable. The query only
+ recurses into a store path when it is first encountered; this
+ prevents a blowup of the tree representation of the graph.
+
+ - `--graphml`\
+ Prints the references graph of the store paths *paths* in the
+ [GraphML](http://graphml.graphdrawing.org/) file format. This can be
+ used to visualise dependency graphs. To obtain a build-time
+ dependency graph, apply this to a [store derivation]. To obtain a
+ runtime dependency graph, apply it to an output path.
+
+ - `--binding` *name*; `-b` *name*\
+ Prints the value of the attribute *name* (i.e., environment
+ variable) of the [store derivation]s *paths*. It is an error for a
+ derivation to not have the specified attribute.
+
+ - `--hash`\
+ Prints the SHA-256 hash of the contents of the store paths *paths*
+ (that is, the hash of the output of `nix-store --dump` on the given
+ paths). Since the hash is stored in the Nix database, this is a fast
+ operation.
+
+ - `--size`\
+ Prints the size in bytes of the contents of the store paths *paths*
+ — to be precise, the size of the output of `nix-store --dump` on
+ the given paths. Note that the actual disk space required by the
+ store paths may be higher, especially on filesystems with large
+ cluster sizes.
+
+ - `--roots`\
+ Prints the garbage collector roots that point, directly or
+ indirectly, at the store paths *paths*.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+Print the closure (runtime dependencies) of the `svn` program in the
+current user environment:
+
+```console
+$ nix-store --query --requisites $(which svn)
+/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4
+/nix/store/9lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4
+...
+```
+
+Print the build-time dependencies of `svn`:
+
+```console
+$ nix-store --query --requisites $(nix-store --query --deriver $(which svn))
+/nix/store/02iizgn86m42q905rddvg4ja975bk2i4-grep-2.5.1.tar.bz2.drv
+/nix/store/07a2bzxmzwz5hp58nf03pahrv2ygwgs3-gcc-wrapper.sh
+/nix/store/0ma7c9wsbaxahwwl04gbw3fcd806ski4-glibc-2.3.4.drv
+... lots of other paths ...
+```
+
+The difference from the previous example is that we ask for the closure of
+the derivation (`--query --deriver`), not the closure of the output path that contains
+`svn`.
+
+Show the build-time dependencies as a tree:
+
+```console
+$ nix-store --query --tree $(nix-store --query --deriver $(which svn))
+/nix/store/7i5082kfb6yjbqdbiwdhhza0am2xvh6c-subversion-1.1.4.drv
++---/nix/store/d8afh10z72n8l1cr5w42366abiblgn54-builder.sh
++---/nix/store/fmzxmpjx2lh849ph0l36snfj9zdibw67-bash-3.0.drv
+| +---/nix/store/570hmhmx3v57605cqg9yfvvyh0nnb8k8-bash
+| +---/nix/store/p3srsbd8dx44v2pg6nbnszab5mcwx03v-builder.sh
+...
+```
+
+Show all paths that depend on the same OpenSSL library as `svn`:
+
+```console
+$ nix-store --query --referrers $(nix-store --query --binding openssl $(nix-store --query --deriver $(which svn)))
+/nix/store/23ny9l9wixx21632y2wi4p585qhva1q8-sylpheed-1.0.0
+/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4
+/nix/store/dpmvp969yhdqs7lm2r1a3gng7pyq6vy4-subversion-1.1.3
+/nix/store/l51240xqsgg8a7yrbqdx1rfzyv6l26fx-lynx-2.8.5
+```
+
+Show all paths that directly or indirectly depend on the Glibc (C
+library) used by `svn`:
+
+```console
+$ nix-store --query --referrers-closure $(ldd $(which svn) | grep /libc.so | awk '{print $3}')
+/nix/store/034a6h4vpz9kds5r6kzb9lhh81mscw43-libgnomeprintui-2.8.2
+/nix/store/15l3yi0d45prm7a82pcrknxdh6nzmxza-gawk-3.1.4
+...
+```
+
+Note that `ldd` is a command that prints out the dynamic libraries used
+by an ELF executable.
+
+Make a picture of the runtime dependency graph of the current user
+environment:
+
+```console
+$ nix-store --query --graph ~/.nix-profile | dot -Tps > graph.ps
+$ gv graph.ps
+```
+
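+As a sketch, the same graph can also be emitted in GraphML format, here
+written to an assumed file `graph.graphml`:
+
+```console
+$ nix-store --query --graphml ~/.nix-profile > graph.graphml
+```
+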
+Show every garbage collector root that points to a store path that
+depends on `svn`:
+
+```console
+$ nix-store --query --roots $(which svn)
+/nix/var/nix/profiles/default-81-link
+/nix/var/nix/profiles/default-82-link
+/home/eelco/.local/state/nix/profiles/profile-97-link
+```
+
diff --git a/doc/manual/src/command-ref/nix-store/read-log.md b/doc/manual/src/command-ref/nix-store/read-log.md
new file mode 100644
index 000000000..d1ff17891
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/read-log.md
@@ -0,0 +1,38 @@
+# Name
+
+`nix-store --read-log` - print build log
+
+# Synopsis
+
+`nix-store` {`--read-log` | `-l`} *paths…*
+
+# Description
+
+The operation `--read-log` prints the build log of the specified store
+paths on standard output. The build log is whatever the builder of a
+derivation wrote to standard output and standard error. If a store path
+is not a derivation, the deriver of the store path is used.
+
+Build logs are kept in `/nix/var/log/nix/drvs`. However, there is no
+guarantee that a build log is available for any particular store path.
+For instance, if the path was downloaded as a pre-built binary through a
+substitute, then the log is unavailable.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+# Example
+
+```console
+$ nix-store --read-log $(which ktorrent)
+building /nix/store/dhc73pvzpnzxhdgpimsd9sw39di66ph1-ktorrent-2.2.1
+unpacking sources
+unpacking source archive /nix/store/p8n1jpqs27mgkjw07pb5269717nzf5f8-ktorrent-2.2.1.tar.gz
+ktorrent-2.2.1/
+ktorrent-2.2.1/NEWS
+...
+```
+
diff --git a/doc/manual/src/command-ref/nix-store/realise.md b/doc/manual/src/command-ref/nix-store/realise.md
new file mode 100644
index 000000000..6b50d2145
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/realise.md
@@ -0,0 +1,118 @@
+# Name
+
+`nix-store --realise` - realise specified store paths
+
+# Synopsis
+
+`nix-store` {`--realise` | `-r`} *paths…* [`--dry-run`]
+
+# Description
+
+The operation `--realise` essentially “builds” the specified store
+paths. Realisation is a somewhat overloaded term:
+
+ - If the store path is a *derivation*, realisation ensures that the
+ output paths of the derivation are [valid] (i.e.,
+ the output path and its closure exist in the file system). This
+ can be done in several ways. First, it is possible that the
+ outputs are already valid, in which case we are done
+ immediately. Otherwise, there may be [substitutes]
+ that produce the outputs (e.g., by downloading them). Finally, the
+ outputs can be produced by running the build task described
+ by the derivation.
+
+ - If the store path is not a derivation, realisation ensures that the
+ specified path is valid (i.e., it and its closure exist in the file
+ system). If the path is already valid, we are done immediately.
+ Otherwise, the path and any missing paths in its closure may be
+ produced through substitutes. If there are no (successful)
+ substitutes, realisation fails.
+
+[valid]: @docroot@/glossary.md#gloss-validity
+[substitutes]: @docroot@/glossary.md#gloss-substitute
+
+The output path of each derivation is printed on standard output. (For
+non-derivation arguments, the argument itself is printed.)
+
+The following flags are available:
+
+ - `--dry-run`\
+ Print on standard error a description of what packages would be
+ built or downloaded, without actually performing the operation.
+
+ - `--ignore-unknown`\
+ If a non-derivation path does not have a substitute, then silently
+ ignore it.
+
+ - `--check`\
+ This option allows you to check whether a derivation is
+ deterministic. It rebuilds the specified derivation and checks
+ whether the result is bitwise-identical with the existing outputs,
+ printing an error if that’s not the case. The outputs of the
+ specified derivation must already exist. When used with `-K`, if an
+ output path is not identical to the corresponding output from the
+ previous build, the new output path is left in
+ `/nix/store/name.check`.
+
+Special exit codes:
+
+ - `100`\
+ Generic build failure, the builder process returned with a non-zero
+ exit code.
+
+ - `101`\
+ Build timeout, the build was aborted because it did not complete
+ within the specified `timeout`.
+
+ - `102`\
+ Hash mismatch, the build output was rejected because it does not
+ match the [`outputHash` attribute of the
+ derivation](@docroot@/language/advanced-attributes.md).
+
+ - `104`\
+ Not deterministic, the build succeeded in check mode but the
+ resulting output is not binary reproducible.
+
+With the `--keep-going` flag it's possible for multiple failures to
+occur. In that case the 1xx status codes are combined using binary
+OR:
+
+ 1100100
+ ^^^^
+ |||`- timeout
+ ||`-- output hash mismatch
+ |`--- build failure
+ `---- not deterministic
+
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+This operation is typically used to build [store derivation]s produced by
+[`nix-instantiate`](@docroot@/command-ref/nix-instantiate.md):
+
+[store derivation]: @docroot@/glossary.md#gloss-store-derivation
+
+```console
+$ nix-store --realise $(nix-instantiate ./test.nix)
+/nix/store/31axcgrlbfsxzmfff1gyj1bf62hvkby2-aterm-2.3.1
+```
+
+This is essentially what [`nix-build`](@docroot@/command-ref/nix-build.md) does.
+
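+To see what would be built or downloaded without actually performing the
+operation, a sketch using the same expression:
+
+```console
+$ nix-store --realise --dry-run $(nix-instantiate ./test.nix)
+```
+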
+To test whether a previously-built derivation is deterministic:
+
+```console
+$ nix-build '<nixpkgs>' --attr hello --check -K
+```
+
+Use [`nix-store --read-log`](./read-log.md) to show the stderr and stdout of a build:
+
+```console
+$ nix-store --read-log $(nix-instantiate ./test.nix)
+```
diff --git a/doc/manual/src/command-ref/nix-store/repair-path.md b/doc/manual/src/command-ref/nix-store/repair-path.md
new file mode 100644
index 000000000..9c3d9f7cd
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/repair-path.md
@@ -0,0 +1,35 @@
+# Name
+
+`nix-store --repair-path` - re-download path from substituter
+
+# Synopsis
+
+`nix-store` `--repair-path` *paths…*
+
+# Description
+
+The operation `--repair-path` attempts to “repair” the specified paths
+by redownloading them using the available substituters. If no
+substitutes are available, then repair is not possible.
+
+> **Warning**
+>
+> During repair, there is a very small time window during which the old
+> path (if it exists) is moved out of the way and replaced with the new
+> path. If repair is interrupted in between, then the system may be left
+> in a broken state (e.g., if the path contains a critical system
+> component like the GNU C Library).
+
+# Example
+
+```console
+$ nix-store --verify-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13
+path `/nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13' was modified!
+ expected hash `2db57715ae90b7e31ff1f2ecb8c12ec1cc43da920efcbe3b22763f36a1861588',
+ got `481c5aa5483ebc97c20457bb8bca24deea56550d3985cda0027f67fe54b808e4'
+
+$ nix-store --repair-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13
+fetching path `/nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13'...
+…
+```
+
diff --git a/doc/manual/src/command-ref/nix-store/restore.md b/doc/manual/src/command-ref/nix-store/restore.md
new file mode 100644
index 000000000..fcba43df4
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/restore.md
@@ -0,0 +1,18 @@
+# Name
+
+`nix-store --restore` - extract a Nix archive
+
+## Synopsis
+
+`nix-store` `--restore` *path*
+
+## Description
+
+The operation `--restore` unpacks a NAR archive to *path*, which must
+not already exist. The archive is read from standard input.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
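+
+## Example
+
+A sketch of round-tripping an assumed local directory `./pkg` through a NAR
+archive using `nix-store --dump` and `nix-store --restore`:
+
+```console
+$ nix-store --dump ./pkg > pkg.nar
+$ nix-store --restore ./pkg-copy < pkg.nar
+```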
diff --git a/doc/manual/src/command-ref/nix-store/serve.md b/doc/manual/src/command-ref/nix-store/serve.md
new file mode 100644
index 000000000..0f90f65ae
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/serve.md
@@ -0,0 +1,38 @@
+# Name
+
+`nix-store --serve` - serve local Nix store over SSH
+
+# Synopsis
+
+`nix-store` `--serve` [`--write`]
+
+# Description
+
+The operation `--serve` provides access to the Nix store over stdin and
+stdout, and is intended to be used as a means of providing Nix store
+access to a restricted ssh user.
+
+The following flags are available:
+
+ - `--write`\
+ Allow the connected client to request the realization of
+ derivations. In effect, this can be used to make the host act as a
+ remote builder.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+To turn a host into a build server, the `authorized_keys` file can be
+used to provide build access to a given SSH public key:
+
+```console
+$ cat <<EOF >>/root/.ssh/authorized_keys
+command="nice -n20 nix-store --serve --write" ssh-rsa AAAAB3NzaC1yc2EAAAA...
+EOF
+```
+
diff --git a/doc/manual/src/command-ref/nix-store/verify-path.md b/doc/manual/src/command-ref/nix-store/verify-path.md
new file mode 100644
index 000000000..927201599
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/verify-path.md
@@ -0,0 +1,29 @@
+# Name
+
+`nix-store --verify-path` - check path contents against Nix database
+
+## Synopsis
+
+`nix-store` `--verify-path` *paths…*
+
+## Description
+
+The operation `--verify-path` compares the contents of the given store
+paths to their cryptographic hashes stored in Nix’s database. For every
+changed path, it prints a warning message. The exit status is 0 if no
+path has changed, and 1 otherwise.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+## Example
+
+To verify the integrity of the `svn` command and all its dependencies:
+
+```console
+$ nix-store --verify-path $(nix-store --query --requisites $(which svn))
+```
+
diff --git a/doc/manual/src/command-ref/nix-store/verify.md b/doc/manual/src/command-ref/nix-store/verify.md
new file mode 100644
index 000000000..2695b3361
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/verify.md
@@ -0,0 +1,36 @@
+# Name
+
+`nix-store --verify` - check Nix database for consistency
+
+# Synopsis
+
+`nix-store` `--verify` [`--check-contents`] [`--repair`]
+
+# Description
+
+The operation `--verify` verifies the internal consistency of the Nix
+database, and the consistency between the Nix database and the Nix
+store. Any inconsistencies encountered are automatically repaired.
+Inconsistencies are generally the result of the Nix store or database
+being modified by non-Nix tools, or of bugs in Nix itself.
+
+This operation has the following options:
+
+ - `--check-contents`\
+ Checks that the contents of every valid store path have not been
+ altered by computing a SHA-256 hash of the contents and comparing it
+ with the hash stored in the Nix database at build time. Paths that
+ have been modified are printed out. For large stores,
+ `--check-contents` is obviously quite slow.
+
+ - `--repair`\
+ If any valid path is missing from the store, or (if
+ `--check-contents` is given) the contents of a valid path have been
+ modified, then try to repair the path by redownloading it. See
+ `nix-store --repair-path` for details.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
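+
+# Example
+
+A typical invocation (a minimal sketch using only the flags described above) checks both the Nix database and the contents of every valid store path, attempting repairs where possible:
+
+```console
+$ nix-store --verify --check-contents --repair
+```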
diff --git a/doc/manual/src/command-ref/opt-common.md b/doc/manual/src/command-ref/opt-common.md
index e612c416f..54c0a1d0d 100644
--- a/doc/manual/src/command-ref/opt-common.md
+++ b/doc/manual/src/command-ref/opt-common.md
@@ -2,13 +2,13 @@
Most Nix commands accept the following command-line options:
- - [`--help`]{#opt-help}\
+ - <span id="opt-help">[`--help`](#opt-help)</span>\
Prints out a summary of the command syntax and exits.
- - [`--version`]{#opt-version}\
+ - <span id="opt-version">[`--version`](#opt-version)</span>\
Prints out the Nix version number on standard output and exits.
- - [`--verbose`]{#opt-verbose} / `-v`\
+ - <span id="opt-verbose">[`--verbose`](#opt-verbose)</span> / `-v`\
Increases the level of verbosity of diagnostic messages printed on
standard error. For each Nix operation, the information printed on
standard output is well-defined; any diagnostic information is
@@ -37,14 +37,14 @@ Most Nix commands accept the following command-line options:
- 5\
“Vomit”: print vast amounts of debug information.
- - [`--quiet`]{#opt-quiet}\
+ - <span id="opt-quiet">[`--quiet`](#opt-quiet)</span>\
Decreases the level of verbosity of diagnostic messages printed on
standard error. This is the inverse option to `-v` / `--verbose`.
This option may be specified repeatedly. See the previous verbosity
levels list.
- - [`--log-format`]{#opt-log-format} *format*\
+ - <span id="opt-log-format">[`--log-format`](#opt-log-format)</span> *format*\
This option can be used to change the output of the log format, with
*format* being one of:
@@ -66,14 +66,14 @@ Most Nix commands accept the following command-line options:
- bar-with-logs\
Display the raw logs, with the progress bar at the bottom.
- - [`--no-build-output`]{#opt-no-build-output} / `-Q`\
+ - <span id="opt-no-build-output">[`--no-build-output`](#opt-no-build-output)</span> / `-Q`\
By default, output written by builders to standard output and
standard error is echoed to the Nix command's standard error. This
option suppresses this behaviour. Note that the builder's standard
output and error are always written to a log file in
`prefix/nix/var/log/nix`.
- - [`--max-jobs`]{#opt-max-jobs} / `-j` *number*\
+ - <span id="opt-max-jobs">[`--max-jobs`](#opt-max-jobs)</span> / `-j` *number*\
Sets the maximum number of build jobs that Nix will perform in
parallel to the specified number. Specify `auto` to use the number
of CPUs in the system. The default is specified by the `max-jobs`
@@ -83,7 +83,7 @@ Most Nix commands accept the following command-line options:
Setting it to `0` disallows building on the local machine, which is
useful when you want builds to happen only on remote builders.
- - [`--cores`]{#opt-cores}\
+ - <span id="opt-cores">[`--cores`](#opt-cores)</span>\
Sets the value of the `NIX_BUILD_CORES` environment variable in
the invocation of builders. Builders can use this variable at
their discretion to control the maximum amount of parallelism. For
@@ -94,18 +94,18 @@ Most Nix commands accept the following command-line options:
means that the builder should use all available CPU cores in the
system.
- - [`--max-silent-time`]{#opt-max-silent-time}\
+ - <span id="opt-max-silent-time">[`--max-silent-time`](#opt-max-silent-time)</span>\
Sets the maximum number of seconds that a builder can go without
producing any data on standard output or standard error. The
default is specified by the `max-silent-time` configuration
setting. `0` means no time-out.
- - [`--timeout`]{#opt-timeout}\
+ - <span id="opt-timeout">[`--timeout`](#opt-timeout)</span>\
Sets the maximum number of seconds that a builder can run. The
default is specified by the `timeout` configuration setting. `0`
means no timeout.
- - [`--keep-going`]{#opt-keep-going} / `-k`\
+ - <span id="opt-keep-going">[`--keep-going`](#opt-keep-going)</span> / `-k`\
Keep going in case of failed builds, to the greatest extent
possible. That is, if building an input of some derivation fails,
Nix will still build the other inputs, but not the derivation
@@ -113,13 +113,13 @@ Most Nix commands accept the following command-line options:
for builds of substitutes), possibly killing builds in progress (in
case of parallel or distributed builds).
- - [`--keep-failed`]{#opt-keep-failed} / `-K`\
+ - <span id="opt-keep-failed">[`--keep-failed`](#opt-keep-failed)</span> / `-K`\
Specifies that in case of a build failure, the temporary directory
(usually in `/tmp`) in which the build takes place should not be
deleted. The path of the build directory is printed as an
informational message.
- - [`--fallback`]{#opt-fallback}\
+ - <span id="opt-fallback">[`--fallback`](#opt-fallback)</span>\
Whenever Nix attempts to build a derivation for which substitutes
are known for each output path, but realising the output paths
through the substitutes fails, fall back on building the derivation.
@@ -134,18 +134,18 @@ Most Nix commands accept the following command-line options:
failure in obtaining the substitutes to lead to a full build from
source (with the related consumption of resources).
- - [`--readonly-mode`]{#opt-readonly-mode}\
+ - <span id="opt-readonly-mode">[`--readonly-mode`](#opt-readonly-mode)</span>\
When this option is used, no attempt is made to open the Nix
database. Most Nix operations do need database access, so those
operations will fail.
- - [`--arg`]{#opt-arg} *name* *value*\
+ - <span id="opt-arg">[`--arg`](#opt-arg)</span> *name* *value*\
This option is accepted by `nix-env`, `nix-instantiate`,
`nix-shell` and `nix-build`. When evaluating Nix expressions, the
expression evaluator will automatically try to call functions that
it encounters. It can automatically call functions for which every
argument has a [default
- value](../language/constructs.md#functions) (e.g.,
+ value](@docroot@/language/constructs.md#functions) (e.g.,
`{ argName ? defaultValue }: ...`). With `--arg`, you can also
call functions that have arguments without a default value (or
override a default value). That is, if the evaluator encounters a
@@ -162,28 +162,28 @@ Most Nix commands accept the following command-line options:
}: ...
```
- So if you call this Nix expression (e.g., when you do `nix-env -iA
+ So if you call this Nix expression (e.g., when you do `nix-env --install --attr
pkgname`), the function will be called automatically using the
- value [`builtins.currentSystem`](../language/builtins.md) for
+ value [`builtins.currentSystem`](@docroot@/language/builtins.md) for
the `system` argument. You can override this using `--arg`, e.g.,
- `nix-env -iA pkgname --arg system \"i686-freebsd\"`. (Note that
+ `nix-env --install --attr pkgname --arg system \"i686-freebsd\"`. (Note that
since the argument is a Nix string literal, you have to escape the
quotes.)
- - [`--argstr`]{#opt-argstr} *name* *value*\
+ - <span id="opt-argstr">[`--argstr`](#opt-argstr)</span> *name* *value*\
This option is like `--arg`, only the value is not a Nix
expression but a string. So instead of `--arg system
\"i686-linux\"` (the outer quotes are to keep the shell happy) you
can say `--argstr system i686-linux`.
- - [`--attr`]{#opt-attr} / `-A` *attrPath*\
+ - <span id="opt-attr">[`--attr`](#opt-attr)</span> / `-A` *attrPath*\
Select an attribute from the top-level Nix expression being
evaluated. (`nix-env`, `nix-instantiate`, `nix-build` and
`nix-shell` only.) The *attribute path* *attrPath* is a sequence
of attribute names separated by dots. For instance, given a
top-level Nix expression *e*, the attribute path `xorg.xorgserver`
would cause the expression `e.xorg.xorgserver` to be used. See
- [`nix-env --install`](nix-env.md#operation---install) for some
+ [`nix-env --install`](@docroot@/command-ref/nix-env/install.md) for some
concrete examples.
In addition to attribute names, you can also specify array indices.
@@ -191,7 +191,7 @@ Most Nix commands accept the following command-line options:
attribute of the fourth element of the array in the `foo` attribute
of the top-level expression.
- - [`--expr`]{#opt-expr} / `-E`\
+ - <span id="opt-expr">[`--expr`](#opt-expr)</span> / `-E`\
Interpret the command line arguments as a list of Nix expressions to
be parsed and evaluated, rather than as a list of file names of Nix
expressions. (`nix-instantiate`, `nix-build` and `nix-shell` only.)
@@ -199,20 +199,19 @@ Most Nix commands accept the following command-line options:
For `nix-shell`, this option is commonly used to give you a shell in
which you can build the packages returned by the expression. If you
want to get a shell which contain the *built* packages ready for
- use, give your expression to the `nix-shell -p` convenience flag
+ use, give your expression to the `nix-shell --packages` convenience flag
instead.
- - [`-I`]{#opt-I} *path*\
- Add a path to the Nix expression search path. This option may be
- given multiple times. See the `NIX_PATH` environment variable for
- information on the semantics of the Nix search path. Paths added
- through `-I` take precedence over `NIX_PATH`.
+ - <span id="opt-I">[`-I`](#opt-I)</span> *path*\
+ Add an entry to the [Nix expression search path](@docroot@/command-ref/conf-file.md#conf-nix-path).
+ This option may be given multiple times.
+ Paths added through `-I` take precedence over [`NIX_PATH`](@docroot@/command-ref/env-common.md#env-NIX_PATH).
- - [`--option`]{#opt-option} *name* *value*\
+ - <span id="opt-option">[`--option`](#opt-option)</span> *name* *value*\
Set the Nix configuration option *name* to *value*. This overrides
settings in the Nix configuration file (see nix.conf5).
- - [`--repair`]{#opt-repair}\
+ - <span id="opt-repair">[`--repair`](#opt-repair)</span>\
Fix corrupted or missing store paths by redownloading or rebuilding
them. Note that this is slow because it requires computing a
cryptographic hash of the contents of every path in the closure of
diff --git a/doc/manual/src/contributing/experimental-features.md b/doc/manual/src/contributing/experimental-features.md
new file mode 100644
index 000000000..ad5cffa91
--- /dev/null
+++ b/doc/manual/src/contributing/experimental-features.md
@@ -0,0 +1,95 @@
+This section describes the notion of *experimental features*, and how it fits into the big picture of the development of Nix.
+
+# What are experimental features?
+
+Experimental features are considered unstable, which means that they can be changed or removed at any time.
+Users must explicitly enable them by toggling the associated [experimental feature flags](@docroot@/command-ref/conf-file.md#conf-experimental-features).
+This allows accessing unstable functionality without unwittingly relying on it.
+
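+For example, a feature such as `flakes` can be enabled by adding a line like the following to [`nix.conf`](@docroot@/command-ref/conf-file.md) (the feature name here is only an illustration; see the list at the end of this page for the features currently available):
+
+```
+extra-experimental-features = flakes
+```
+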
+Experimental feature flags were first introduced in [Nix 2.4](@docroot@/release-notes/rl-2.4.md).
+Before that, Nix did have experimental features, but they were not guarded by flags and were merely documented as unstable.
+This was a source of confusion and controversy.
+
+# When should a new feature be marked experimental?
+
+A change in the Nix codebase should be guarded by an experimental feature flag if it is considered likely to be reverted or adapted in a backwards-incompatible manner after gathering more experience with it in practice.
+
+Examples:
+
+- Changes to the Nix language, such as new built-ins, syntactic or semantic changes, etc.
+- Changes to the command-line interface
+
+# Lifecycle of an experimental feature
+
+Experimental features have to be treated on a case-by-case basis.
+However, the standard workflow for an experimental feature is as follows:
+
+- A new feature is implemented in a *pull request*
+ - It is guarded by an experimental feature flag that is disabled by default
+- The pull request is merged, the *experimental* feature ends up in a release
+ - Using the feature requires explicitly enabling it, signifying awareness of the potential risks
+ - Being experimental, the feature can still be changed arbitrarily
+- The feature can be *removed*
+ - The associated experimental feature flag is also removed
+- The feature can be declared *stable*
+ - The associated experimental feature flag is removed
+ - There should be enough evidence of users having tried the feature, such as feedback, fixed bugs, demonstrations of how it is put to use
+ - Maintainers must feel confident that:
+ - The feature is designed and implemented sensibly, that it is fit for purpose
+ - Potential interactions are well-understood
+ - Stabilising the feature will not incur an outsized maintenance burden in the future
+
+The following diagram illustrates the process:
+
+```
+ .------.
+ | idea |
+ '------'
+ |
+ discussion, design, implementation
+ |
+ | .-------.
+ | | |
+ v v |
+ .--------------. review
+ | pull request | |
+ '--------------' |
+ | ^ | |
+ | | '-------'
+ .---' '----.
+ | |
+ merge user feedback,
+ | (breaking) changes
+ | |
+ '---. .----'
+ | |
+ v |
+ +--------------+
+ .---| experimental |----.
+ | +--------------+ |
+ | |
+decision to stabilise decision against
+ | keeping the feature
+ | |
+ v v
+ +--------+ +---------+
+ | stable | | removed |
+ +--------+ +---------+
+```
+
+# Relation to the RFC process
+
+Experimental features and [RFCs](https://github.com/NixOS/rfcs/) both allow approaching substantial changes while minimizing the risk.
+However, they serve different purposes:
+
+- An experimental feature enables developers to iterate on and deliver a new idea without committing to it or requiring a costly long-running fork.
+ It is primarily an issue of *implementation*, targeting Nix developers and early testers.
+- The goal of an RFC is to make explicit all the implications of a change:
+ Explain why it is wanted, which new use-cases it enables, which interface changes it requires, etc.
+ It is primarily an issue of *design* and *communication*, targeting the broader community.
+
+This means that experimental features and RFCs are orthogonal mechanisms, and can be used independently or together as needed.
+
+# Currently available experimental features
+
+{{#include ./experimental-feature-descriptions.md}}
diff --git a/doc/manual/src/contributing/hacking.md b/doc/manual/src/contributing/hacking.md
index 3869c37a4..b954a2167 100644
--- a/doc/manual/src/contributing/hacking.md
+++ b/doc/manual/src/contributing/hacking.md
@@ -77,7 +77,7 @@ $ nix-shell
To get a shell with one of the other [supported compilation environments](#compilation-environments):
```console
-$ nix-shell -A devShells.x86_64-linux.native-clang11StdenvPackages
+$ nix-shell --attr devShells.x86_64-linux.native-clang11StdenvPackages
```
> **Note**
@@ -139,7 +139,7 @@ $ nix build .#packages.aarch64-linux.default
for flake-enabled Nix, or
```console
-$ nix-build -A packages.aarch64-linux.default
+$ nix-build --attr packages.aarch64-linux.default
```
for classic Nix.
@@ -166,7 +166,7 @@ $ nix build .#nix-ccacheStdenv
for flake-enabled Nix, or
```console
-$ nix-build -A nix-ccacheStdenv
+$ nix-build --attr nix-ccacheStdenv
```
for classic Nix.
@@ -389,3 +389,35 @@ If a broken link occurs in a snippet that was inserted into multiple generated f
If the `@docroot@` literal appears in an error message from the `mdbook-linkcheck` tool, the `@docroot@` replacement needs to be applied to the generated source file that mentions it.
See existing `@docroot@` logic in the [Makefile].
Regular markdown files used for the manual have a base path of their own and they can use relative paths instead of `@docroot@`.
+
+## API documentation
+
+Doxygen API documentation is [available
+online](https://hydra.nixos.org/job/nix/master/internal-api-docs/latest/download-by-type/doc/internal-api-docs). You
+can also build and view it yourself:
+
+```console
+# nix build .#hydraJobs.internal-api-docs
+# xdg-open ./result/share/doc/nix/internal-api/html/index.html
+```
+
+or inside a `nix develop` shell by running:
+
+```console
+# make internal-api-html
+# xdg-open ./outputs/doc/share/doc/nix/internal-api/html/index.html
+```
+
+## Coverage analysis
+
+A coverage analysis report is [available
+online](https://hydra.nixos.org/job/nix/master/coverage/latest/download-by-type/report/coverage). You
+can build it yourself:
+
+```console
+# nix build .#hydraJobs.coverage
+# xdg-open ./result/coverage/index.html
+```
+
+Metrics about the change in line/function coverage over time are also
+[available](https://hydra.nixos.org/job/nix/master/coverage#tabs-charts).
diff --git a/doc/manual/src/glossary.md b/doc/manual/src/glossary.md
index d0aff34e2..e142bd415 100644
--- a/doc/manual/src/glossary.md
+++ b/doc/manual/src/glossary.md
@@ -15,7 +15,7 @@
Example: `/nix/store/g946hcz4c8mdvq2g8vxx42z51qb71rvp-git-2.38.1.drv`
- See [`nix show-derivation`](./command-ref/new-cli/nix3-show-derivation.md) (experimental) for displaying the contents of store derivations.
+ See [`nix derivation show`](./command-ref/new-cli/nix3-derivation-show.md) (experimental) for displaying the contents of store derivations.
[store derivation]: #gloss-store-derivation
@@ -31,7 +31,7 @@
This means either running the `builder` executable as specified in the corresponding [derivation] or fetching a pre-built [store object] from a [substituter].
- See [`nix-build`](./command-ref/nix-build.md) and [`nix-store --realise`](./command-ref/nix-store.md#operation---realise).
+ See [`nix-build`](./command-ref/nix-build.md) and [`nix-store --realise`](@docroot@/command-ref/nix-store/realise.md).
See [`nix build`](./command-ref/new-cli/nix3-build.md) (experimental).
@@ -54,7 +54,7 @@
invoked, the Nix store can be referred to
as a "_local_" or a "_remote_" one:
- + A *local store* exists on the filesystem of
+ + A [local store]{#gloss-local-store} exists on the filesystem of
the machine where Nix is invoked. You can use other
local stores by passing the `--store` flag to the
`nix` command. Local stores can be used for building derivations.
@@ -65,17 +65,17 @@
served by the `nix-serve` Perl script.
[store]: #gloss-store
+ [local store]: #gloss-local-store
- [chroot store]{#gloss-chroot-store}\
- A local store whose canonical path is anything other than `/nix/store`.
+ A [local store] whose canonical path is anything other than `/nix/store`.
- [binary cache]{#gloss-binary-cache}\
A *binary cache* is a Nix store which uses a different format: its
metadata and signatures are kept in `.narinfo` files rather than in a
- Nix database. This different format simplifies serving store objects
- over the network, but cannot host builds. Examples of binary caches
- include S3 buckets and the [NixOS binary
- cache](https://cache.nixos.org).
+ [Nix database]. This different format simplifies serving store objects
+ over the network, but cannot host builds. Examples of binary caches
+ include S3 buckets and the [NixOS binary cache](https://cache.nixos.org).
- [store path]{#gloss-store-path}\
The location of a [store object] in the file system, i.e., an
@@ -101,14 +101,11 @@
derivation.
- [output-addressed store object]{#gloss-output-addressed-store-object}\
- A store object whose store path hashes its content. This
- includes derivations, the outputs of
- [content-addressed derivations](#gloss-content-addressed-derivation),
- and the outputs of
- [fixed-output derivations](#gloss-fixed-output-derivation).
+ A [store object] whose [store path] is determined by its contents.
+ This includes derivations, the outputs of [content-addressed derivations](#gloss-content-addressed-derivation), and the outputs of [fixed-output derivations](#gloss-fixed-output-derivation).
- [substitute]{#gloss-substitute}\
- A substitute is a command invocation stored in the Nix database that
+ A substitute is a command invocation stored in the [Nix database] that
describes how to build a store object, bypassing the normal build
mechanism (i.e., derivations). Typically, the substitute builds the
store object by downloading a pre-built version of the store object
@@ -127,6 +124,14 @@
builder can rely on external inputs such as the network or the
system time) but the Nix model assumes it.
+ - [Nix database]{#gloss-nix-database}\
+ An SQLite database to track [reference]s between [store object]s.
+ This is an implementation detail of the [local store].
+
+ Default location: `/nix/var/nix/db`.
+
+ [Nix database]: #gloss-nix-database
+
- [Nix expression]{#gloss-nix-expression}\
A high-level description of software packages and compositions
thereof. Deploying software using Nix entails writing Nix
@@ -135,14 +140,13 @@
then be built.
- [reference]{#gloss-reference}\
- A store path `P` is said to have a reference to a store path `Q` if
- the store object at `P` contains the path `Q` somewhere. The
- *references* of a store path are the set of store paths to which it
- has a reference.
+ A [store object] `O` is said to have a *reference* to a store object `P` if a [store path] to `P` appears in the contents of `O`.
+
+ Store objects can refer to both other store objects and themselves.
+ References from a store object to itself are called *self-references*.
+ References other than a self-reference must not form a cycle.
- A derivation can reference other derivations and sources (but not
- output paths), whereas an output path only references other output
- paths.
+ [reference]: #gloss-reference
- [reachable]{#gloss-reachable}\
A store path `Q` is reachable from another store path `P` if `Q`
@@ -156,11 +160,11 @@
build-time dependencies, while the closure of its output path is
equivalent to its runtime dependencies. For correct deployment it
is necessary to deploy whole closures, since otherwise at runtime
- files could be missing. The command `nix-store -qR` prints out
+ files could be missing. The command `nix-store --query --requisites` prints out
closures of store paths.
- As an example, if the store object at path `P` contains a reference
- to path `Q`, then `Q` is in the closure of `P`. Further, if `Q`
+ As an example, if the [store object] at path `P` contains a [reference]
+ to a store object at path `Q`, then `Q` is in the closure of `P`. Further, if `Q`
references `R` then `R` is also in the closure of `P`.
[closure]: #gloss-closure
@@ -176,9 +180,9 @@
- [validity]{#gloss-validity}\
A store path is valid if all [store object]s in its [closure] can be read from the [store].
- For a local store, this means:
+ For a [local store], this means:
- The store path leads to an existing [store object] in that [store].
- - The store path is listed in the Nix database as being valid.
+ - The store path is listed in the [Nix database] as being valid.
- All paths in the store path's [closure] are valid.
[validity]: #gloss-validity
@@ -193,6 +197,11 @@
A symlink to the current *user environment* of a user, e.g.,
`/nix/var/nix/profiles/default`.
+ - [installable]{#gloss-installable}\
+ Something that can be realised in the Nix store.
+
+ See [installables](./command-ref/new-cli/nix.md#installables) for [`nix` commands](./command-ref/new-cli/nix.md) (experimental) for details.
+
- [NAR]{#gloss-nar}\
A *N*ix *AR*chive. This is a serialisation of a path in the Nix
store. It can contain regular files, directories and symbolic
@@ -213,3 +222,9 @@
[string]: ./language/values.md#type-string
[path]: ./language/values.md#type-path
[attribute name]: ./language/values.md#attribute-set
+
+ - [experimental feature]{#gloss-experimental-feature}\
+ Not yet stabilized functionality guarded by named experimental feature flags.
+ These flags are enabled or disabled with the [`experimental-features`](./command-ref/conf-file.md#conf-experimental-features) setting.
+
+ See the contribution guide on the [purpose and lifecycle of experimental features](@docroot@/contributing/experimental-features.md).
diff --git a/doc/manual/src/installation/env-variables.md b/doc/manual/src/installation/env-variables.md
index fb8155a80..db98f52ff 100644
--- a/doc/manual/src/installation/env-variables.md
+++ b/doc/manual/src/installation/env-variables.md
@@ -42,14 +42,11 @@ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
> You must not add the export and then do the install, as the Nix
> installer will detect the presence of Nix configuration, and abort.
-## `NIX_SSL_CERT_FILE` with macOS and the Nix daemon
+If you use the Nix daemon, you should also add the following to
+`/etc/nix/nix.conf`:
-On macOS you must specify the environment variable for the Nix daemon
-service, then restart it:
-
-```console
-$ sudo launchctl setenv NIX_SSL_CERT_FILE /etc/ssl/my-certificate-bundle.crt
-$ sudo launchctl kickstart -k system/org.nixos.nix-daemon
+```
+ssl-cert-file = /etc/ssl/my-certificate-bundle.crt
```
## Proxy Environment Variables
diff --git a/doc/manual/src/installation/installing-binary.md b/doc/manual/src/installation/installing-binary.md
index e3fd962bd..ffabb250a 100644
--- a/doc/manual/src/installation/installing-binary.md
+++ b/doc/manual/src/installation/installing-binary.md
@@ -47,12 +47,6 @@ The install script will modify the first writable file from amongst
`NIX_INSTALLER_NO_MODIFY_PROFILE` environment variable before executing
the install script to disable this behaviour.
-You can uninstall Nix simply by running:
-
-```console
-$ rm -rf /nix
-```
-
# Multi User Installation
The multi-user Nix installation creates system users, and a system
@@ -84,154 +78,8 @@ The installer will modify `/etc/bashrc`, and `/etc/zshrc` if they exist.
The installer will first back up these files with a `.backup-before-nix`
extension. The installer will also create `/etc/profile.d/nix.sh`.
-## Uninstalling
-
-### Linux
-
-If you are on Linux with systemd:
-
-1. Remove the Nix daemon service:
-
- ```console
- sudo systemctl stop nix-daemon.service
- sudo systemctl disable nix-daemon.socket nix-daemon.service
- sudo systemctl daemon-reload
- ```
-
-1. Remove systemd service files:
-
- ```console
- sudo rm /etc/systemd/system/nix-daemon.service /etc/systemd/system/nix-daemon.socket
- ```
-
-1. The installer script uses systemd-tmpfiles to create the socket directory.
- You may also want to remove the configuration for that:
-
- ```console
- sudo rm /etc/tmpfiles.d/nix-daemon.conf
- ```
-
-Remove files created by Nix:
-
-```console
-sudo rm -rf /nix /etc/nix /etc/profile/nix.sh ~root/.nix-profile ~root/.nix-defexpr ~root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels
-```
-
-Remove build users and their group:
-
-```console
-for i in $(seq 1 32); do
- sudo userdel nixbld$i
-done
-sudo groupdel nixbld
-```
-
-There may also be references to Nix in
-
-- `/etc/profile`
-- `/etc/bashrc`
-- `/etc/zshrc`
-
-which you may remove.
-
-### macOS
-
-1. Edit `/etc/zshrc` and `/etc/bashrc` to remove the lines sourcing
- `nix-daemon.sh`, which should look like this:
-
- ```bash
- # Nix
- if [ -e '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' ]; then
- . '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh'
- fi
- # End Nix
- ```
-
- If these files haven't been altered since installing Nix you can simply put
- the backups back in place:
-
- ```console
- sudo mv /etc/zshrc.backup-before-nix /etc/zshrc
- sudo mv /etc/bashrc.backup-before-nix /etc/bashrc
- ```
-
- This will stop shells from sourcing the file and bringing everything you
- installed using Nix in scope.
-
-2. Stop and remove the Nix daemon services:
-
- ```console
- sudo launchctl unload /Library/LaunchDaemons/org.nixos.nix-daemon.plist
- sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist
- sudo launchctl unload /Library/LaunchDaemons/org.nixos.darwin-store.plist
- sudo rm /Library/LaunchDaemons/org.nixos.darwin-store.plist
- ```
-
- This stops the Nix daemon and prevents it from being started next time you
- boot the system.
-
-3. Remove the `nixbld` group and the `_nixbuildN` users:
-
- ```console
- sudo dscl . -delete /Groups/nixbld
- for u in $(sudo dscl . -list /Users | grep _nixbld); do sudo dscl . -delete /Users/$u; done
- ```
-
- This will remove all the build users that no longer serve a purpose.
-
-4. Edit fstab using `sudo vifs` to remove the line mounting the Nix Store
- volume on `/nix`, which looks like
- `UUID=<uuid> /nix apfs rw,noauto,nobrowse,suid,owners` or
- `LABEL=Nix\040Store /nix apfs rw,nobrowse`. This will prevent automatic
- mounting of the Nix Store volume.
-
-5. Edit `/etc/synthetic.conf` to remove the `nix` line. If this is the only
- line in the file you can remove it entirely, `sudo rm /etc/synthetic.conf`.
- This will prevent the creation of the empty `/nix` directory to provide a
- mountpoint for the Nix Store volume.
-
-6. Remove the files Nix added to your system:
-
- ```console
- sudo rm -rf /etc/nix /var/root/.nix-profile /var/root/.nix-defexpr /var/root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels
- ```
-
- This gets rid of any data Nix may have created except for the store which is
- removed next.
-
-7. Remove the Nix Store volume:
-
- ```console
- sudo diskutil apfs deleteVolume /nix
- ```
-
- This will remove the Nix Store volume and everything that was added to the
- store.
-
- If the output indicates that the command couldn't remove the volume, you should
- make sure you don't have an _unmounted_ Nix Store volume. Look for a
- "Nix Store" volume in the output of the following command:
-
- ```console
- diskutil list
- ```
-
- If you _do_ see a "Nix Store" volume, delete it by re-running the diskutil
- deleteVolume command, but replace `/nix` with the store volume's `diskXsY`
- identifier.
-
-> **Note**
->
-> After you complete the steps here, you will still have an empty `/nix`
-> directory. This is an expected sign of a successful uninstall. The empty
-> `/nix` directory will disappear the next time you reboot.
->
-> You do not have to reboot to finish uninstalling Nix. The uninstall is
-> complete. macOS (Catalina+) directly controls root directories and its
-> read-only root will prevent you from manually deleting the empty `/nix`
-> mountpoint.
-
# macOS Installation
+
[]{#sect-macos-installation-change-store-prefix}[]{#sect-macos-installation-encrypted-volume}[]{#sect-macos-installation-symlink}[]{#sect-macos-installation-recommended-notes}
<!-- Note: anchors above to catch permalinks to old explanations -->
@@ -280,19 +128,16 @@ this to run the installer, but it may help if you run into trouble:
# Installing a pinned Nix version from a URL
-NixOS.org hosts version-specific installation URLs for all Nix versions
-since 1.11.16, at `https://releases.nixos.org/nix/nix-version/install`.
+Version-specific installation URLs for all Nix versions
+since 1.11.16 can be found at [releases.nixos.org](https://releases.nixos.org/?prefix=nix/).
+The corresponding SHA-256 hash can be found in the directory for the given version.
-These install scripts can be used the same as the main NixOS.org
-installation script:
+These install scripts can be used in the same way as the main installation script:
```console
-$ curl -L https://nixos.org/nix/install | sh
+$ curl -L https://releases.nixos.org/nix/nix-<version>/install | sh
```
-In the same directory of the install script are sha256 sums, and gpg
-signature files.
-
# Installing from a binary tarball
You can also download a binary tarball that contains Nix and all its
diff --git a/doc/manual/src/installation/uninstall.md b/doc/manual/src/installation/uninstall.md
new file mode 100644
index 000000000..9ead5e53c
--- /dev/null
+++ b/doc/manual/src/installation/uninstall.md
@@ -0,0 +1,148 @@
+# Uninstalling Nix
+
+## Single User
+
+If you have a [single-user installation](./installing-binary.md#single-user-installation) of Nix, uninstall it by running:
+
+```console
+$ rm -rf /nix
+```
+
+## Multi User
+
+Removing a [multi-user installation](./installing-binary.md#multi-user-installation) of Nix is more involved, and depends on the operating system.
+
+### Linux
+
+If you are on Linux with systemd:
+
+1. Remove the Nix daemon service:
+
+ ```console
+ sudo systemctl stop nix-daemon.service
+ sudo systemctl disable nix-daemon.socket nix-daemon.service
+ sudo systemctl daemon-reload
+ ```
+
+Remove files created by Nix:
+
+```console
+sudo rm -rf /etc/nix /etc/profile.d/nix.sh /etc/tmpfiles.d/nix-daemon.conf /nix ~root/.nix-channels ~root/.nix-defexpr ~root/.nix-profile
+```
+
+Remove build users and their group:
+
+```console
+for i in $(seq 1 32); do
+ sudo userdel nixbld$i
+done
+sudo groupdel nixbld
+```
+
+There may also be references to Nix in
+
+- `/etc/bash.bashrc`
+- `/etc/bashrc`
+- `/etc/profile`
+- `/etc/zsh/zshrc`
+- `/etc/zshrc`
+
+which you may remove.
+
+### macOS
+
+1. Edit `/etc/zshrc`, `/etc/bashrc`, and `/etc/bash.bashrc` to remove the lines sourcing `nix-daemon.sh`, which should look like this:
+
+ ```bash
+ # Nix
+ if [ -e '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' ]; then
+ . '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh'
+ fi
+ # End Nix
+ ```
+
+ If these files haven't been altered since installing Nix you can simply put
+ the backups back in place:
+
+ ```console
+ sudo mv /etc/zshrc.backup-before-nix /etc/zshrc
+ sudo mv /etc/bashrc.backup-before-nix /etc/bashrc
+ sudo mv /etc/bash.bashrc.backup-before-nix /etc/bash.bashrc
+ ```
+
+ This will stop shells from sourcing the file and bringing everything you
+ installed using Nix in scope.
+
+2. Stop and remove the Nix daemon services:
+
+ ```console
+ sudo launchctl unload /Library/LaunchDaemons/org.nixos.nix-daemon.plist
+ sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist
+ sudo launchctl unload /Library/LaunchDaemons/org.nixos.darwin-store.plist
+ sudo rm /Library/LaunchDaemons/org.nixos.darwin-store.plist
+ ```
+
+ This stops the Nix daemon and prevents it from being started next time you
+ boot the system.
+
+3. Remove the `nixbld` group and the `_nixbuildN` users:
+
+ ```console
+ sudo dscl . -delete /Groups/nixbld
+ for u in $(sudo dscl . -list /Users | grep _nixbld); do sudo dscl . -delete /Users/$u; done
+ ```
+
+ This will remove all the build users that no longer serve a purpose.
+
+4. Edit fstab using `sudo vifs` to remove the line mounting the Nix Store
+ volume on `/nix`, which looks like
+ `UUID=<uuid> /nix apfs rw,noauto,nobrowse,suid,owners` or
+ `LABEL=Nix\040Store /nix apfs rw,nobrowse`. This will prevent automatic
+ mounting of the Nix Store volume.
+
+5. Edit `/etc/synthetic.conf` to remove the `nix` line. If this is the only
+ line in the file you can remove it entirely, `sudo rm /etc/synthetic.conf`.
+ This will prevent the creation of the empty `/nix` directory to provide a
+ mountpoint for the Nix Store volume.
+
+6. Remove the files Nix added to your system:
+
+ ```console
+ sudo rm -rf /etc/nix /var/root/.nix-profile /var/root/.nix-defexpr /var/root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels
+ ```
+
+ This gets rid of any data Nix may have created except for the store which is
+ removed next.
+
+7. Remove the Nix Store volume:
+
+ ```console
+ sudo diskutil apfs deleteVolume /nix
+ ```
+
+ This will remove the Nix Store volume and everything that was added to the
+ store.
+
+ If the output indicates that the command couldn't remove the volume, you should
+ make sure you don't have an _unmounted_ Nix Store volume. Look for a
+ "Nix Store" volume in the output of the following command:
+
+ ```console
+ diskutil list
+ ```
+
+ If you _do_ see a "Nix Store" volume, delete it by re-running the diskutil
+ deleteVolume command, but replace `/nix` with the store volume's `diskXsY`
+ identifier.
+
+> **Note**
+>
+> After you complete the steps here, you will still have an empty `/nix`
+> directory. This is an expected sign of a successful uninstall. The empty
+> `/nix` directory will disappear the next time you reboot.
+>
+> You do not have to reboot to finish uninstalling Nix. The uninstall is
+> complete. macOS (Catalina+) directly controls root directories and its
+> read-only root will prevent you from manually deleting the empty `/nix`
+> mountpoint.
+
diff --git a/doc/manual/src/installation/upgrading.md b/doc/manual/src/installation/upgrading.md
index 24efc4681..6d09f54d8 100644
--- a/doc/manual/src/installation/upgrading.md
+++ b/doc/manual/src/installation/upgrading.md
@@ -2,13 +2,13 @@
Multi-user Nix users on macOS can upgrade Nix by running: `sudo -i sh -c
'nix-channel --update &&
-nix-env -iA nixpkgs.nix &&
+nix-env --install --attr nixpkgs.nix &&
launchctl remove org.nixos.nix-daemon &&
launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist'`
Single-user installations of Nix should run this: `nix-channel --update;
-nix-env -iA nixpkgs.nix nixpkgs.cacert`
+nix-env --install --attr nixpkgs.nix nixpkgs.cacert`
Multi-user Nix users on Linux should run this with sudo: `nix-channel
---update; nix-env -iA nixpkgs.nix nixpkgs.cacert; systemctl
+--update; nix-env --install --attr nixpkgs.nix nixpkgs.cacert; systemctl
daemon-reload; systemctl restart nix-daemon`
diff --git a/doc/manual/src/introduction.md b/doc/manual/src/introduction.md
index b54346db8..76489bc1b 100644
--- a/doc/manual/src/introduction.md
+++ b/doc/manual/src/introduction.md
@@ -76,7 +76,7 @@ there after an upgrade. This means that you can _roll back_ to the
old version:
```console
-$ nix-env --upgrade -A nixpkgs.some-package
+$ nix-env --upgrade --attr nixpkgs.some-package
$ nix-env --rollback
```
@@ -122,7 +122,7 @@ Nix expressions generally describe how to build a package from
source, so an installation action like
```console
-$ nix-env --install -A nixpkgs.firefox
+$ nix-env --install --attr nixpkgs.firefox
```
_could_ cause quite a bit of build activity, as not only Firefox but
@@ -158,7 +158,7 @@ Pan newsreader, as described by [its
Nix expression](https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/newsreaders/pan/default.nix):
```console
-$ nix-shell '<nixpkgs>' -A pan
+$ nix-shell '<nixpkgs>' --attr pan
```
You’re then dropped into a shell where you can edit, build and test
diff --git a/doc/manual/src/language/advanced-attributes.md b/doc/manual/src/language/advanced-attributes.md
index 5a63236e5..307971434 100644
--- a/doc/manual/src/language/advanced-attributes.md
+++ b/doc/manual/src/language/advanced-attributes.md
@@ -198,8 +198,7 @@ Derivations can declare some infrequently used optional attributes.
- `"recursive"`\
The hash is computed over the NAR archive dump of the output
- (i.e., the result of [`nix-store
- --dump`](../command-ref/nix-store.md#operation---dump)). In
+ (i.e., the result of [`nix-store --dump`](@docroot@/command-ref/nix-store/dump.md)). In
this case, the output can be anything, including a directory
tree.
@@ -209,12 +208,26 @@ Derivations can declare some infrequently used optional attributes.
about converting to and from base-32 notation.)
- [`__contentAddressed`]{#adv-attr-__contentAddressed}
- If this **experimental** attribute is set to true, then the derivation
+ > **Warning**
+ > This attribute is part of an [experimental feature](@docroot@/contributing/experimental-features.md).
+ >
+ > To use this attribute, you must enable the
+ > [`ca-derivations`](@docroot@/contributing/experimental-features.md#xp-feature-ca-derivations) experimental feature.
+ > For example, in [nix.conf](../command-ref/conf-file.md) you could add:
+ >
+ > ```
+ > extra-experimental-features = ca-derivations
+ > ```
+
+ If this attribute is set to `true`, then the derivation
outputs will be stored in a content-addressed location rather than the
traditional input-addressed one.
- This only has an effect if the `ca-derivations` experimental feature is enabled.
- Setting this attribute also requires setting `outputHashMode` and `outputHashAlgo` like for *fixed-output derivations* (see above).
+ Setting this attribute also requires setting
+ [`outputHashMode`](#adv-attr-outputHashMode)
+ and
+ [`outputHashAlgo`](#adv-attr-outputHashAlgo)
+ like for *fixed-output derivations* (see above).
- [`passAsFile`]{#adv-attr-passAsFile}\
A list of names of attributes that should be passed via files rather
@@ -308,9 +321,11 @@ Derivations can declare some infrequently used optional attributes.
- [`unsafeDiscardReferences`]{#adv-attr-unsafeDiscardReferences}\
> **Warning**
- > This is an experimental feature.
+ > This attribute is part of an [experimental feature](@docroot@/contributing/experimental-features.md).
>
- > To enable it, add the following to [nix.conf](../command-ref/conf-file.md):
+ > To use this attribute, you must enable the
+ > [`discard-references`](@docroot@/contributing/experimental-features.md#xp-feature-discard-references) experimental feature.
+ > For example, in [nix.conf](../command-ref/conf-file.md) you could add:
>
> ```
> extra-experimental-features = discard-references
diff --git a/doc/manual/src/language/builtin-constants.md b/doc/manual/src/language/builtin-constants.md
index 78d066a82..c6bc9b74c 100644
--- a/doc/manual/src/language/builtin-constants.md
+++ b/doc/manual/src/language/builtin-constants.md
@@ -1,20 +1,19 @@
# Built-in Constants
-Here are the constants built into the Nix expression evaluator:
+These constants are built into the Nix language evaluator:
- - `builtins`\
- The set `builtins` contains all the built-in functions and values.
- You can use `builtins` to test for the availability of features in
- the Nix installation, e.g.,
-
- ```nix
- if builtins ? getEnv then builtins.getEnv "PATH" else ""
- ```
-
- This allows a Nix expression to fall back gracefully on older Nix
- installations that don’t have the desired built-in function.
+- [`builtins`]{#builtins-builtins} (attribute set)
- - [`builtins.currentSystem`]{#builtins-currentSystem}\
- The built-in value `currentSystem` evaluates to the Nix platform
- identifier for the Nix installation on which the expression is being
- evaluated, such as `"i686-linux"` or `"x86_64-darwin"`.
+ Contains all the [built-in functions](./builtins.md) and values, in order to avoid polluting the global scope.
+
+ Since built-in functions were added over time, [testing for attributes](./operators.md#has-attribute) in `builtins` can be used for graceful fallback on older Nix installations:
+
+ ```nix
+ if builtins ? getEnv then builtins.getEnv "PATH" else ""
+ ```
+
+- [`builtins.currentSystem`]{#builtins-currentSystem} (string)
+
+ The built-in value `currentSystem` evaluates to the Nix platform
+ identifier for the Nix installation on which the expression is being
+ evaluated, such as `"i686-linux"` or `"x86_64-darwin"`.
diff --git a/doc/manual/src/language/builtins-prefix.md b/doc/manual/src/language/builtins-prefix.md
index c631a8453..35e3dccc3 100644
--- a/doc/manual/src/language/builtins-prefix.md
+++ b/doc/manual/src/language/builtins-prefix.md
@@ -1,16 +1,16 @@
# Built-in Functions
-This section lists the functions built into the Nix expression
-evaluator. (The built-in function `derivation` is discussed above.)
-Some built-ins, such as `derivation`, are always in scope of every Nix
-expression; you can just access them right away. But to prevent
-polluting the namespace too much, most built-ins are not in
-scope. Instead, you can access them through the `builtins` built-in
-value, which is a set that contains all built-in functions and values.
-For instance, `derivation` is also available as `builtins.derivation`.
+This section lists the functions built into the Nix language evaluator.
+All built-in functions are available through the global [`builtins`](./builtin-constants.md#builtins-builtins) constant.
+
+For convenience, some built-ins can be accessed directly:
+
+- [`derivation`](#builtins-derivation)
+- [`import`](#builtins-import)
+- [`abort`](#builtins-abort)
+- [`throw`](#builtins-throw)
<dl>
- <dt><code>derivation <var>attrs</var></code>;
- <code>builtins.derivation <var>attrs</var></code></dt>
+ <dt id="builtins-derivation"><a href="#builtins-derivation"><code>derivation <var>attrs</var></code></a></dt>
<dd><p><var>derivation</var> is described in
<a href="derivations.md">its own section</a>.</p></dd>
diff --git a/doc/manual/src/language/operators.md b/doc/manual/src/language/operators.md
index 90b325597..3e929724d 100644
--- a/doc/manual/src/language/operators.md
+++ b/doc/manual/src/language/operators.md
@@ -36,15 +36,15 @@
## Attribute selection
Select the attribute denoted by attribute path *attrpath* from [attribute set] *attrset*.
-If the attribute doesn’t exist, return *value* if provided, otherwise abort evaluation.
+If the attribute doesn’t exist, return the *expr* after `or` if provided, otherwise abort evaluation.
<!-- FIXME: the following should to into its own language syntax section, but that needs more work to fit in well -->
An attribute path is a dot-separated list of attribute names.
An attribute name can be an identifier or a string.
-> *attrpath* = *name* [ `.` *name* ]...
-> *name* = *identifier* | *string*
+> *attrpath* = *name* [ `.` *name* ]... \
+> *name* = *identifier* | *string* \
> *identifier* ~ `[a-zA-Z_][a-zA-Z0-9_'-]*`
[Attribute selection]: #attribute-selection
diff --git a/doc/manual/src/language/values.md b/doc/manual/src/language/values.md
index 3973518ca..9d0301753 100644
--- a/doc/manual/src/language/values.md
+++ b/doc/manual/src/language/values.md
@@ -190,13 +190,17 @@ instance,
```
evaluates to `"Foo"`. It is possible to provide a default value in an
-attribute selection using the `or` keyword. For example,
+attribute selection using the `or` keyword:
```nix
{ a = "Foo"; b = "Bar"; }.c or "Xyzzy"
```
-will evaluate to `"Xyzzy"` because there is no `c` attribute in the set.
+```nix
+{ a = "Foo"; b = "Bar"; }.c.d.e.f.g or "Xyzzy"
+```
+
+will both evaluate to `"Xyzzy"` because there is no `c` attribute in the set.
You can use arbitrary double-quoted strings as attribute names:
@@ -205,7 +209,7 @@ You can use arbitrary double-quoted strings as attribute names:
```
```nix
-let bar = "bar";
+let bar = "bar"; in
{ "foo ${bar}" = 123; }."foo ${bar}"
```
diff --git a/doc/manual/src/package-management/basic-package-mgmt.md b/doc/manual/src/package-management/basic-package-mgmt.md
index 5f1d7a89c..6b86e763e 100644
--- a/doc/manual/src/package-management/basic-package-mgmt.md
+++ b/doc/manual/src/package-management/basic-package-mgmt.md
@@ -47,7 +47,7 @@ $ nix-channel --update
You can view the set of available packages in Nixpkgs:
```console
-$ nix-env -qaP
+$ nix-env --query --available --attr-path
nixpkgs.aterm aterm-2.2
nixpkgs.bash bash-3.0
nixpkgs.binutils binutils-2.15
@@ -65,7 +65,7 @@ If you downloaded Nixpkgs yourself, or if you checked it out from GitHub,
then you need to pass the path to your Nixpkgs tree using the `-f` flag:
```console
-$ nix-env -qaPf /path/to/nixpkgs
+$ nix-env --query --available --attr-path --file /path/to/nixpkgs
aterm aterm-2.2
bash bash-3.0
@@ -77,7 +77,7 @@ Nixpkgs.
You can filter the packages by name:
```console
-$ nix-env -qaP firefox
+$ nix-env --query --available --attr-path firefox
nixpkgs.firefox-esr firefox-91.3.0esr
nixpkgs.firefox firefox-94.0.1
```
@@ -85,7 +85,7 @@ nixpkgs.firefox firefox-94.0.1
and using regular expressions:
```console
-$ nix-env -qaP 'firefox.*'
+$ nix-env --query --available --attr-path 'firefox.*'
```
It is also possible to see the *status* of available packages, i.e.,
@@ -93,7 +93,7 @@ whether they are installed into the user environment and/or present in
the system:
```console
-$ nix-env -qaPs
+$ nix-env --query --available --attr-path --status
-PS nixpkgs.bash bash-3.0
--S nixpkgs.binutils binutils-2.15
@@ -110,10 +110,10 @@ which is Nix’s mechanism for doing binary deployment. It just means that
Nix knows that it can fetch a pre-built package from somewhere
(typically a network server) instead of building it locally.
-You can install a package using `nix-env -iA`. For instance,
+You can install a package using `nix-env --install --attr`. For instance,
```console
-$ nix-env -iA nixpkgs.subversion
+$ nix-env --install --attr nixpkgs.subversion
```
will install the package called `subversion` from `nixpkgs` channel (which is, of course, the
@@ -143,14 +143,14 @@ instead of the attribute path, as `nix-env` does not record which attribute
was used for installing:
```console
-$ nix-env -e subversion
+$ nix-env --uninstall subversion
```
Upgrading to a new version is just as easy. If you have a new release of
Nix Packages, you can do:
```console
-$ nix-env -uA nixpkgs.subversion
+$ nix-env --upgrade --attr nixpkgs.subversion
```
This will *only* upgrade Subversion if there is a “newer” version in the
@@ -163,15 +163,15 @@ whatever version is in the Nix expressions, use `-i` instead of `-u`;
You can also upgrade all packages for which there are newer versions:
```console
-$ nix-env -u
+$ nix-env --upgrade
```
Sometimes it’s useful to be able to ask what `nix-env` would do, without
actually doing it. For instance, to find out what packages would be
-upgraded by `nix-env -u`, you can do
+upgraded by `nix-env --upgrade`, you can do
```console
-$ nix-env -u --dry-run
+$ nix-env --upgrade --dry-run
(dry run; not doing anything)
upgrading `libxslt-1.1.0' to `libxslt-1.1.10'
upgrading `graphviz-1.10' to `graphviz-1.12'
diff --git a/doc/manual/src/package-management/binary-cache-substituter.md b/doc/manual/src/package-management/binary-cache-substituter.md
index 5befad9f8..855eaf470 100644
--- a/doc/manual/src/package-management/binary-cache-substituter.md
+++ b/doc/manual/src/package-management/binary-cache-substituter.md
@@ -9,7 +9,7 @@ The daemon that handles binary cache requests via HTTP, `nix-serve`, is
not part of the Nix distribution, but you can install it from Nixpkgs:
```console
-$ nix-env -iA nixpkgs.nix-serve
+$ nix-env --install --attr nixpkgs.nix-serve
```
You can then start the server, listening for HTTP connections on
@@ -35,7 +35,7 @@ On the client side, you can tell Nix to use your binary cache using
`--substituters`, e.g.:
```console
-$ nix-env -iA nixpkgs.firefox --substituters http://avalon:8080/
+$ nix-env --install --attr nixpkgs.firefox --substituters http://avalon:8080/
```
The option `substituters` tells Nix to use this binary cache in
diff --git a/doc/manual/src/package-management/channels.md b/doc/manual/src/package-management/channels.md
index 93c8b41a6..8e4da180b 100644
--- a/doc/manual/src/package-management/channels.md
+++ b/doc/manual/src/package-management/channels.md
@@ -43,7 +43,7 @@ operations (via the symlink `~/.nix-defexpr/channels`). Consequently,
you can then say
```console
-$ nix-env -u
+$ nix-env --upgrade
```
to upgrade all packages in your profile to the latest versions available
diff --git a/doc/manual/src/package-management/copy-closure.md b/doc/manual/src/package-management/copy-closure.md
index d3fac4d76..14326298b 100644
--- a/doc/manual/src/package-management/copy-closure.md
+++ b/doc/manual/src/package-management/copy-closure.md
@@ -15,7 +15,7 @@ With `nix-store
path (that is, the path and all its dependencies) to a file, and then
unpack that file into another Nix store. For example,
- $ nix-store --export $(nix-store -qR $(type -p firefox)) > firefox.closure
+ $ nix-store --export $(nix-store --query --requisites $(type -p firefox)) > firefox.closure
writes the closure of Firefox to a file. You can then copy this file to
another machine and install the closure:
@@ -27,7 +27,7 @@ store are ignored. It is also possible to pipe the export into another
command, e.g. to copy and install a closure directly to/on another
machine:
- $ nix-store --export $(nix-store -qR $(type -p firefox)) | bzip2 | \
+ $ nix-store --export $(nix-store --query --requisites $(type -p firefox)) | bzip2 | \
ssh alice@itchy.example.org "bunzip2 | nix-store --import"
However, `nix-copy-closure` is generally more efficient because it only
diff --git a/doc/manual/src/package-management/profiles.md b/doc/manual/src/package-management/profiles.md
index d1a2580d4..1d9e672a8 100644
--- a/doc/manual/src/package-management/profiles.md
+++ b/doc/manual/src/package-management/profiles.md
@@ -39,7 +39,7 @@ just Subversion 1.1.2 (arrows in the figure indicate symlinks). This
would be what we would obtain if we had done
```console
-$ nix-env -iA nixpkgs.subversion
+$ nix-env --install --attr nixpkgs.subversion
```
on a set of Nix expressions that contained Subversion 1.1.2.
@@ -54,7 +54,7 @@ environment is generated based on the current one. For instance,
generation 43 was created from generation 42 when we did
```console
-$ nix-env -iA nixpkgs.subversion nixpkgs.firefox
+$ nix-env --install --attr nixpkgs.subversion nixpkgs.firefox
```
on a set of Nix expressions that contained Firefox and a new version of
@@ -127,7 +127,7 @@ All `nix-env` operations work on the profile pointed to by
(abbreviation `-p`):
```console
-$ nix-env -p /nix/var/nix/profiles/other-profile -iA nixpkgs.subversion
+$ nix-env --profile /nix/var/nix/profiles/other-profile --install --attr nixpkgs.subversion
```
This will *not* change the `~/.nix-profile` symlink.
diff --git a/doc/manual/src/package-management/s3-substituter.md b/doc/manual/src/package-management/s3-substituter.md
index 30f2b2e11..d8a1d9105 100644
--- a/doc/manual/src/package-management/s3-substituter.md
+++ b/doc/manual/src/package-management/s3-substituter.md
@@ -1,41 +1,11 @@
# Serving a Nix store via S3
-Nix has built-in support for storing and fetching store paths from
+Nix has [built-in support](@docroot@/command-ref/new-cli/nix3-help-stores.md#s3-binary-cache-store)
+for storing and fetching store paths from
Amazon S3 and S3-compatible services. This uses the same *binary*
cache mechanism that Nix usually uses to fetch prebuilt binaries from
[cache.nixos.org](https://cache.nixos.org/).
-The following options can be specified as URL parameters to the S3 URL:
-
- - `profile`\
- The name of the AWS configuration profile to use. By default Nix
- will use the `default` profile.
-
- - `region`\
- The region of the S3 bucket. `us–east-1` by default.
-
- If your bucket is not in `us–east-1`, you should always explicitly
- specify the region parameter.
-
- - `endpoint`\
- The URL to your S3-compatible service, for when not using Amazon S3.
- Do not specify this value if you're using Amazon S3.
-
- > **Note**
- >
- > This endpoint must support HTTPS and will use path-based
- > addressing instead of virtual host based addressing.
-
- - `scheme`\
- The scheme used for S3 requests, `https` (default) or `http`. This
- option allows you to disable HTTPS for binary caches which don't
- support it.
-
- > **Note**
- >
- > HTTPS should be used if the cache might contain sensitive
- > information.
-
In this example we will use the bucket named `example-nix-cache`.
## Anonymous Reads to your S3-compatible binary cache
diff --git a/doc/manual/src/package-management/ssh-substituter.md b/doc/manual/src/package-management/ssh-substituter.md
index c59933f61..7014c3cc8 100644
--- a/doc/manual/src/package-management/ssh-substituter.md
+++ b/doc/manual/src/package-management/ssh-substituter.md
@@ -6,7 +6,7 @@ automatically fetching any store paths in Firefox’s closure if they are
available on the server `avalon`:
```console
-$ nix-env -iA nixpkgs.firefox --substituters ssh://alice@avalon
+$ nix-env --install --attr nixpkgs.firefox --substituters ssh://alice@avalon
```
This works similar to the binary cache substituter that Nix usually
@@ -25,7 +25,7 @@ You can also copy the closure of some store path, without installing it
into your profile, e.g.
```console
-$ nix-store -r /nix/store/m85bxg…-firefox-34.0.5 --substituters
+$ nix-store --realise /nix/store/m85bxg…-firefox-34.0.5 --substituters
ssh://alice@avalon
```
diff --git a/doc/manual/src/quick-start.md b/doc/manual/src/quick-start.md
index 651134c25..1d2688ede 100644
--- a/doc/manual/src/quick-start.md
+++ b/doc/manual/src/quick-start.md
@@ -19,7 +19,7 @@ to subsequent chapters.
channel:
```console
- $ nix-env -qaP
+ $ nix-env --query --available --attr-path
nixpkgs.docbook_xml_dtd_43 docbook-xml-4.3
nixpkgs.docbook_xml_dtd_45 docbook-xml-4.5
nixpkgs.firefox firefox-33.0.2
@@ -31,7 +31,7 @@ to subsequent chapters.
1. Install some packages from the channel:
```console
- $ nix-env -iA nixpkgs.hello
+ $ nix-env --install --attr nixpkgs.hello
```
This should download pre-built packages; it should not build them
@@ -49,13 +49,13 @@ to subsequent chapters.
1. Uninstall a package:
```console
- $ nix-env -e hello
+ $ nix-env --uninstall hello
```
1. You can also test a package without installing it:
```console
- $ nix-shell -p hello
+ $ nix-shell --packages hello
```
This builds or downloads GNU Hello and its dependencies, then drops
@@ -76,7 +76,7 @@ to subsequent chapters.
```console
$ nix-channel --update nixpkgs
- $ nix-env -u '*'
+ $ nix-env --upgrade '*'
```
The latter command will upgrade each installed package for which
@@ -95,5 +95,5 @@ to subsequent chapters.
them:
```console
- $ nix-collect-garbage -d
+ $ nix-collect-garbage --delete-old
```
diff --git a/doc/manual/src/release-notes/rl-2.15.md b/doc/manual/src/release-notes/rl-2.15.md
new file mode 100644
index 000000000..133121999
--- /dev/null
+++ b/doc/manual/src/release-notes/rl-2.15.md
@@ -0,0 +1,58 @@
+# Release 2.15 (2023-04-11)
+
+* Commands which take installables on the command line can now read them from the standard input if
+  passed the `--stdin` flag. This is primarily useful when you have a large number of paths that
+  exceed the OS argument limit.
+
+* The `nix-hash` command now supports Base64 and SRI. Use the flags `--base64`
+ or `--sri` to specify the format of output hash as Base64 or SRI, and `--to-base64`
+ or `--to-sri` to convert a hash to Base64 or SRI format, respectively.
+
+ As the choice of hash formats is no longer binary, the `--base16` flag is also added
+ to explicitly specify the Base16 format, which is still the default.
+
+* The special handling of an [installable](../command-ref/new-cli/nix.md#installables) with `.drv` suffix being interpreted as all of the given [store derivation](../glossary.md#gloss-store-derivation)'s output paths is removed, and instead taken as the literal store path that it represents.
+
+ The new `^` syntax for store paths introduced in Nix 2.13 allows explicitly referencing output paths of a derivation.
+  Using this is clearer than relying on the now-removed `.drv` special handling.
+
+ For example,
+ ```shell-session
+ $ nix path-info /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv
+ ```
+
+ now gives info about the derivation itself, while
+
+ ```shell-session
+ $ nix path-info /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^*
+ ```
+ provides information about each of its outputs.
+
+* The experimental command `nix describe-stores` has been removed.
+
+* Nix stores and their settings are now documented in [`nix help-stores`](@docroot@/command-ref/new-cli/nix3-help-stores.md).
+
+* Documentation for operations of `nix-store` and `nix-env` is now available on separate pages of the manual.
+ They include all common options that can be specified and common environment variables that affect these commands.
+
+ These pages can be viewed offline with `man` using
+
+ * `man nix-store-<operation>` and `man nix-env-<operation>`
+ * `nix-store --help --<operation>` and `nix-env --help --<operation>`.
+
+* Nix when used as a client now checks whether the store (the server) trusts the client.
+ (The store always had to check whether it trusts the client, but now the client is informed of the store's decision.)
+ This is useful for scripting interactions with (non-legacy-ssh) remote Nix stores.
+
+ `nix store ping` and `nix doctor` now display this information.
+
+* The new command `nix derivation add` allows adding derivations to the store without involving the Nix language.
+ It exists to round out our collection of basic utility/plumbing commands, and allow for a low barrier-to-entry way of experimenting with alternative front-ends to the Nix Store.
+ It uses the same JSON layout as `nix derivation show`, and is its inverse.
+
+* `nix show-derivation` has been renamed to `nix derivation show`.
+ This matches `nix derivation add`, and avoids bloating the top-level namespace.
+ The old name is still kept as an alias for compatibility, however.
+
+* The `nix derivation {add,show}` JSON format now includes the derivation name as a top-level field.
+ This is useful in general, but especially necessary for the `add` direction, as otherwise we would need to pass in the name out of band for certain cases.
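As a rough illustration of the `nix-hash` changes above (the file name is arbitrary and `<base16-hash>` is a placeholder):

```console
# Print a file's SHA-256 hash in Base64 instead of the default Base16
$ nix-hash --type sha256 --flat --base64 ./example.txt

# Convert an existing Base16 hash to SRI form
$ nix-hash --type sha256 --to-sri <base16-hash>
```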
diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md
index c78b20eed..bc0d41bdf 100644
--- a/doc/manual/src/release-notes/rl-next.md
+++ b/doc/manual/src/release-notes/rl-next.md
@@ -1,22 +1,6 @@
# Release X.Y (202?-??-??)
-* Commands which take installables on the command line can now read them from the standard input if
- passed the `--stdin` flag. This is primarily useful when you have a large amount of paths which
- exceed the OS arg limit.
-
-* The special handling of an [installable](../command-ref/new-cli/nix.md#installables) with `.drv` suffix being interpreted as all of the given [store derivation](../glossary.md#gloss-store-derivation)'s output paths is removed, and instead taken as the literal store path that it represents.
-
- The new `^` syntax for store paths introduced in Nix 2.13 allows explicitly referencing output paths of a derivation.
- Using this is better and more clear than relying on the now-removed `.drv` special handling.
-
- For example,
- ```shell-session
- $ nix path-info /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv
- ```
-
- now gives info about the derivation itself, while
-
- ```shell-session
- $ nix path-info /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^*
- ```
- provides information about each of its outputs. \ No newline at end of file
+- Speed-up of downloads from binary caches.
+ The number of parallel downloads (also known as substitutions) has been separated from the [`--max-jobs` setting](../command-ref/conf-file.md#conf-max-jobs).
+ The new setting is called [`max-substitution-jobs`](../command-ref/conf-file.md#conf-max-substitution-jobs).
+ The number of parallel downloads is now set to 16 by default (previously, the default was 1 due to the coupling to build jobs).
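If the new default of 16 parallel substitutions does not suit a particular machine, the setting can be changed like any other Nix setting, for example with `max-substitution-jobs = 32` in `nix.conf`, or ad hoc on the command line (a sketch; the value 32 is arbitrary):

```console
$ nix-build '<nixpkgs>' -A hello --option max-substitution-jobs 32
```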
diff --git a/doc/manual/utils.nix b/doc/manual/utils.nix
index d0643ef46..9043dd8cd 100644
--- a/doc/manual/utils.nix
+++ b/doc/manual/utils.nix
@@ -5,6 +5,9 @@ rec {
concatStrings = concatStringsSep "";
+ attrsToList = a:
+ map (name: { inherit name; value = a.${name}; }) (builtins.attrNames a);
+
replaceStringsRec = from: to: string:
# recursively replace occurrences of `from` with `to` within `string`
# example:
@@ -38,4 +41,66 @@ rec {
filterAttrs = pred: set:
listToAttrs (concatMap (name: let v = set.${name}; in if pred name v then [(nameValuePair name v)] else []) (attrNames set));
+
+ optionalString = cond: string: if cond then string else "";
+
+ showSetting = { useAnchors }: name: { description, documentDefault, defaultValue, aliases, value, experimentalFeature }:
+ let
+ result = squash ''
+ - ${if useAnchors
+ then ''<span id="conf-${name}">[`${name}`](#conf-${name})</span>''
+ else ''`${name}`''}
+
+ ${indent " " body}
+ '';
+
+ experimentalFeatureNote = optionalString (experimentalFeature != null) ''
+ > **Warning**
+ > This setting is part of an
+ > [experimental feature](@docroot@/contributing/experimental-features.md).
+
+ To change this setting, you need to make sure the corresponding experimental feature,
+ [`${experimentalFeature}`](@docroot@/contributing/experimental-features.md#xp-feature-${experimentalFeature}),
+ is enabled.
+ For example, include the following in [`nix.conf`](#):
+
+ ```
+ extra-experimental-features = ${experimentalFeature}
+ ${name} = ...
+ ```
+ '';
+
+ # separate body to cleanly handle indentation
+ body = ''
+ ${description}
+
+ ${experimentalFeatureNote}
+
+ **Default:** ${showDefault documentDefault defaultValue}
+
+ ${showAliases aliases}
+ '';
+
+ showDefault = documentDefault: defaultValue:
+ if documentDefault then
+ # a StringMap value type is specified as a string, but
+ # this shows the value type. The empty stringmap is `null` in
+ # JSON, but that converts to `{ }` here.
+ if defaultValue == "" || defaultValue == [] || isAttrs defaultValue
+ then "*empty*"
+ else if isBool defaultValue then
+ if defaultValue then "`true`" else "`false`"
+ else "`${toString defaultValue}`"
+ else "*machine-specific*";
+
+ showAliases = aliases:
+ optionalString (aliases != [])
+ "**Deprecated alias:** ${(concatStringsSep ", " (map (s: "`${s}`") aliases))}";
+
+ in result;
+
+ indent = prefix: s:
+ concatStringsSep "\n" (map (x: if x == "" then x else "${prefix}${x}") (splitLines s));
+
+ showSettings = args: settingsInfo: concatStrings (attrValues (mapAttrs (showSetting args) settingsInfo));
}
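For reference, the transformation performed by the new `attrsToList` helper can be reproduced with a standalone expression (a sketch over an arbitrary attribute set):

```console
$ nix-instantiate --eval --strict --expr \
    'let a = { x = 1; y = 2; }; in map (name: { inherit name; value = a.${name}; }) (builtins.attrNames a)'
[ { name = "x"; value = 1; } { name = "y"; value = 2; } ]
```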
diff --git a/docker.nix b/docker.nix
index 203a06b53..bd16b71cd 100644
--- a/docker.nix
+++ b/docker.nix
@@ -8,6 +8,7 @@
, extraPkgs ? []
, maxLayers ? 100
, nixConf ? {}
+, flake-registry ? null
}:
let
defaultPkgs = with pkgs; [
@@ -189,6 +190,12 @@ let
cp -a ${rootEnv}/* $out/
ln -s ${manifest} $out/manifest.nix
'';
+ flake-registry-path = if (flake-registry == null) then
+ null
+ else if (builtins.readFileType (toString flake-registry)) == "directory" then
+ "${flake-registry}/flake-registry.json"
+ else
+ flake-registry;
in
pkgs.runCommand "base-system"
{
@@ -201,7 +208,7 @@ let
];
allowSubstitutes = false;
preferLocalBuild = true;
- } ''
+ } (''
env
set -x
mkdir -p $out/etc
@@ -247,7 +254,16 @@ let
mkdir -p $out/bin $out/usr/bin
ln -s ${pkgs.coreutils}/bin/env $out/usr/bin/env
ln -s ${pkgs.bashInteractive}/bin/bash $out/bin/sh
- '';
+
+ '' + (lib.optionalString (flake-registry-path != null) ''
+ nixCacheDir="/root/.cache/nix"
+ mkdir -p $out$nixCacheDir
+ globalFlakeRegistryPath="$nixCacheDir/flake-registry.json"
+ ln -s ${flake-registry-path} $out$globalFlakeRegistryPath
+ mkdir -p $out/nix/var/nix/gcroots/auto
+ rootName=$(${pkgs.nix}/bin/nix --extra-experimental-features nix-command hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath))
+ ln -s $globalFlakeRegistryPath $out/nix/var/nix/gcroots/auto/$rootName
+ ''));
in
pkgs.dockerTools.buildLayeredImageWithNixDb {
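The new `flake-registry` argument accepts either a registry JSON file or a directory containing `flake-registry.json`. A sketch of building the image pinned to a local registry file, assuming that file name and leaving the other arguments at their defaults:

```console
$ nix-build docker.nix --arg flake-registry ./flake-registry.json
```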
diff --git a/flake.lock b/flake.lock
index 4490b5ead..1d2aab5ed 100644
--- a/flake.lock
+++ b/flake.lock
@@ -1,5 +1,21 @@
{
"nodes": {
+ "flake-compat": {
+ "flake": false,
+ "locked": {
+ "lastModified": 1673956053,
+ "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
+ "type": "github"
+ },
+ "original": {
+ "owner": "edolstra",
+ "repo": "flake-compat",
+ "type": "github"
+ }
+ },
"lowdown-src": {
"flake": false,
"locked": {
@@ -50,6 +66,7 @@
},
"root": {
"inputs": {
+ "flake-compat": "flake-compat",
"lowdown-src": "lowdown-src",
"nixpkgs": "nixpkgs",
"nixpkgs-regression": "nixpkgs-regression"
diff --git a/flake.nix b/flake.nix
index 5e0504a59..a4ee80b32 100644
--- a/flake.nix
+++ b/flake.nix
@@ -4,8 +4,9 @@
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-22.11-small";
inputs.nixpkgs-regression.url = "github:NixOS/nixpkgs/215d4d0fd80ca5163643b03a33fde804a29cc1e2";
inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; };
+ inputs.flake-compat = { url = "github:edolstra/flake-compat"; flake = false; };
- outputs = { self, nixpkgs, nixpkgs-regression, lowdown-src }:
+ outputs = { self, nixpkgs, nixpkgs-regression, lowdown-src, flake-compat }:
let
inherit (nixpkgs) lib;
@@ -98,7 +99,11 @@
];
testConfigureFlags = [
- "CXXFLAGS=-I${lib.getDev rapidcheck}/extras/gtest/include"
+ "RAPIDCHECK_HEADERS=${lib.getDev rapidcheck}/extras/gtest/include"
+ ];
+
+ internalApiDocsConfigureFlags = [
+ "--enable-internal-api-docs"
];
nativeBuildDeps =
@@ -136,6 +141,10 @@
rapidcheck
];
+ internalApiDocsDeps = [
+ buildPackages.doxygen
+ ];
+
awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin)
(aws-sdk-cpp.override {
apis = ["s3" "transfer"];
@@ -210,6 +219,7 @@
enableParallelBuilding = true;
+ configureFlags = testConfigureFlags; # otherwise configure fails
dontBuild = true;
doInstallCheck = true;
@@ -310,12 +320,18 @@
};
let
canRunInstalled = currentStdenv.buildPlatform.canExecute currentStdenv.hostPlatform;
+
+ sourceByRegexInverted = rxs: origSrc: final.lib.cleanSourceWith {
+ filter = (path: type:
+ let relPath = final.lib.removePrefix (toString origSrc + "/") (toString path);
+ in ! lib.any (re: builtins.match re relPath != null) rxs);
+ src = origSrc;
+ };
in currentStdenv.mkDerivation (finalAttrs: {
name = "nix-${version}";
inherit version;
- src = self;
-
+ src = sourceByRegexInverted [ "tests/nixos/.*" "tests/installer/.*" ] self;
VERSION_SUFFIX = versionSuffix;
outputs = [ "out" "dev" "doc" ];
@@ -369,6 +385,10 @@
postInstall = ''
mkdir -p $doc/nix-support
echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
+ ${lib.optionalString currentStdenv.hostPlatform.isStatic ''
+ mkdir -p $out/nix-support
+ echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products
+ ''}
${lib.optionalString currentStdenv.isDarwin ''
install_name_tool \
-change ${boost}/lib/libboost_context.dylib \
@@ -379,6 +399,7 @@
doInstallCheck = finalAttrs.doCheck;
installCheckFlags = "sysconfdir=$(out)/etc";
+ installCheckTarget = "installcheck"; # work around buggy detection in stdenv
separateDebugInfo = !currentStdenv.hostPlatform.isStatic;
@@ -452,8 +473,6 @@
};
in {
- inherit nixpkgsFor;
-
# A Nixpkgs overlay that overrides the 'nix' and
# 'nix.perl-bindings' packages.
overlays.default = overlayFor (p: p.stdenv);
@@ -512,25 +531,48 @@
src = self;
- configureFlags = [
- "CXXFLAGS=-I${lib.getDev pkgs.rapidcheck}/extras/gtest/include"
- ];
+ configureFlags = testConfigureFlags;
enableParallelBuilding = true;
nativeBuildInputs = nativeBuildDeps;
- buildInputs = buildDeps ++ propagatedDeps ++ awsDeps;
+ buildInputs = buildDeps ++ propagatedDeps ++ awsDeps ++ checkDeps;
dontInstall = false;
doInstallCheck = true;
+ installCheckTarget = "installcheck"; # work around buggy detection in stdenv
lcovFilter = [ "*/boost/*" "*-tab.*" ];
- # We call `dot', and even though we just use it to
- # syntax-check generated dot files, it still requires some
- # fonts. So provide those.
- FONTCONFIG_FILE = texFunctions.fontsConf;
+ hardeningDisable = ["fortify"];
+ };
+
+ # API docs for Nix's unstable internal C++ interfaces.
+ internal-api-docs =
+ with nixpkgsFor.x86_64-linux.native;
+ with commonDeps { inherit pkgs; };
+
+ stdenv.mkDerivation {
+ pname = "nix-internal-api-docs";
+ inherit version;
+
+ src = self;
+
+ configureFlags = testConfigureFlags ++ internalApiDocsConfigureFlags;
+
+ nativeBuildInputs = nativeBuildDeps;
+ buildInputs = buildDeps ++ propagatedDeps
+ ++ awsDeps ++ checkDeps ++ internalApiDocsDeps;
+
+ dontBuild = true;
+
+ installTargets = [ "internal-api-html" ];
+
+ postInstall = ''
+ mkdir -p $out/nix-support
+ echo "doc internal-api-docs $out/share/doc/nix/internal-api/html" >> $out/nix-support/hydra-build-products
+ '';
};
# System tests.
@@ -540,6 +582,8 @@
tests.nix-copy-closure = runNixOSTestFor "x86_64-linux" ./tests/nixos/nix-copy-closure.nix;
+ tests.nix-copy = runNixOSTestFor "x86_64-linux" ./tests/nixos/nix-copy.nix;
+
tests.nssPreload = runNixOSTestFor "x86_64-linux" ./tests/nixos/nss-preload.nix;
tests.githubFlakes = runNixOSTestFor "x86_64-linux" ./tests/nixos/github-flakes.nix;
@@ -650,9 +694,11 @@
nativeBuildInputs = nativeBuildDeps
++ (lib.optionals stdenv.cc.isClang [ pkgs.bear pkgs.clang-tools ]);
- buildInputs = buildDeps ++ propagatedDeps ++ awsDeps ++ checkDeps;
+ buildInputs = buildDeps ++ propagatedDeps
+ ++ awsDeps ++ checkDeps ++ internalApiDocsDeps;
- configureFlags = configureFlags ++ testConfigureFlags;
+ configureFlags = configureFlags
+ ++ testConfigureFlags ++ internalApiDocsConfigureFlags;
enableParallelBuilding = true;
diff --git a/local.mk b/local.mk
index 6a7074e8e..6951c179e 100644
--- a/local.mk
+++ b/local.mk
@@ -1,6 +1,8 @@
clean-files += Makefile.config
-GLOBAL_CXXFLAGS += -Wno-deprecated-declarations
+GLOBAL_CXXFLAGS += -Wno-deprecated-declarations -Werror=switch
+# Allow switch-enum to be overridden for files that do not support it, usually because of dependency headers.
+ERROR_SWITCH_ENUM = -Werror=switch-enum
$(foreach i, config.h $(wildcard src/lib*/*.hh), \
$(eval $(call install-file-in, $(i), $(includedir)/nix, 0644)))
diff --git a/maintainers/README.md b/maintainers/README.md
index 08d197c1b..d13349438 100644
--- a/maintainers/README.md
+++ b/maintainers/README.md
@@ -2,7 +2,30 @@
## Motivation
-The goal of the team is to help other people to contribute to Nix.
+The team's main responsibility is to set a direction for the development of Nix and ensure that the code is in good shape.
+
+We aim to achieve this by improving the contributor experience and attracting more maintainers, that is, by helping other people contribute to Nix and eventually take responsibility, in order to scale the development process to match users' needs.
+
+### Objectives
+
+- It is obvious what is worthwhile to work on.
+- It is easy to find the right place in the code to make a change.
+- It is clear what is expected of a pull request.
+- It is predictable how to get a change merged and released.
+
+### Tasks
+
+- Establish, communicate, and maintain a technical roadmap
+- Improve documentation targeted at contributors
+ - Record architecture and design decisions
+  - Elaborate contribution guides and abide by them
+ - Define and assert quality criteria for contributions
+- Maintain the issue tracker and triage pull requests
+- Help contributors succeed with pull requests that address roadmap milestones
+- Manage the release lifecycle
+- Regularly publish reports on work done
+- Engage with third parties in the interest of the project
+- Ensure the required maintainer capacity for all of the above
## Members
@@ -11,6 +34,7 @@ The goal of the team is to help other people to contribute to Nix.
- Valentin Gagarin (@fricklerhandwerk)
- Thomas Bereknyei (@tomberek)
- Robert Hensing (@roberth)
+- John Ericson (@Ericson2314)
## Meeting protocol
@@ -18,12 +42,12 @@ The team meets twice a week:
- Discussion meeting: [Fridays 13:00-14:00 CET](https://calendar.google.com/calendar/event?eid=MHNtOGVuNWtrZXNpZHR2bW1sM3QyN2ZjaGNfMjAyMjExMjVUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn)
- 1. Triage issues and pull requests from the _No Status_ column (30 min)
- 2. Discuss issues and pull requests from the _To discuss_ column (30 min)
+ 1. Triage issues and pull requests from the [No Status](#no-status) column (30 min)
+ 2. Discuss issues and pull requests from the [To discuss](#to-discuss) column (30 min)
- Work meeting: [Mondays 13:00-15:00 CET](https://calendar.google.com/calendar/event?eid=NTM1MG1wNGJnOGpmOTZhYms3bTB1bnY5cWxfMjAyMjExMjFUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn)
- 1. Code review on pull requests from _In review_.
+ 1. Code review on pull requests from [In review](#in-review).
2. Other chores and tasks.
Meeting notes are collected on a [collaborative scratchpad](https://pad.lassul.us/Cv7FpYx-Ri-4VjUykQOLAw), and published on Discourse under the [Nix category](https://discourse.nixos.org/c/dev/nix/50).
@@ -32,62 +56,76 @@ Meeting notes are collected on a [collaborative scratchpad](https://pad.lassul.u
The team uses a [GitHub project board](https://github.com/orgs/NixOS/projects/19/views/1) for tracking its work.
-Issues on the board progress through the following states:
+Items on the board progress through the following states:
+
+### No Status
+
+During the discussion meeting, the team triages new items.
+To be considered, issues and pull requests must have a high-level description to provide the whole team with the necessary context at a glance.
-- No Status
+At every meeting, at least one item from each of the following categories is inspected:
- During the discussion meeting, the team triages new items.
- To be considered, issues and pull requests must have a high-level description to provide the whole team with the necessary context at a glance.
+1. [critical](https://github.com/NixOS/nix/labels/critical)
+2. [security](https://github.com/NixOS/nix/labels/security)
+3. [regression](https://github.com/NixOS/nix/labels/regression)
+4. [bug](https://github.com/NixOS/nix/issues?q=is%3Aopen+label%3Abug+sort%3Areactions-%2B1-desc)
+5. [tests of existing functionality](https://github.com/NixOS/nix/issues?q=is%3Aopen+label%3Atests+-label%3Afeature+sort%3Areactions-%2B1-desc)
- On every meeting, at least one item from each of the following categories is inspected:
+- [oldest pull requests](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+sort%3Acreated-asc)
+- [most popular pull requests](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+sort%3Areactions-%2B1-desc)
+- [oldest issues](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Acreated-asc)
+- [most popular issues](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc)
- 1. [critical](https://github.com/NixOS/nix/labels/critical)
- 2. [security](https://github.com/NixOS/nix/labels/security)
- 3. [regression](https://github.com/NixOS/nix/labels/regression)
- 4. [bug](https://github.com/NixOS/nix/issues?q=is%3Aopen+label%3Abug+sort%3Areactions-%2B1-desc)
+Team members can also add pull requests or issues they would like the whole team to consider.
+To ensure process quality and reliability, all non-trivial pull requests must be triaged before merging.
- - [oldest pull requests](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+sort%3Acreated-asc)
- - [most popular pull requests](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+sort%3Areactions-%2B1-desc)
- - [oldest issues](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Acreated-asc)
- - [most popular issues](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc)
+If there is disagreement on the general idea behind an issue or pull request, it is moved to [To discuss](#to-discuss).
+Otherwise, the issue or pull request in question gets the label [`idea approved`](https://github.com/NixOS/nix/labels/idea%20approved).
+For issues this means that an implementation is welcome and will be prioritised for review.
+For pull requests this means that:
+- Unfinished work is encouraged to be continued.
+- A reviewer is assigned to take responsibility for getting the pull request merged.
+ The item is moved to the [Assigned](#assigned) column.
+- If needed, the team can decide to do a collaborative review.
+  Then the item is moved to the [In review](#in-review) column, and a review session is scheduled.
- Team members can also add pull requests or issues they would like the whole team to consider.
+What constitutes a trivial pull request is up to maintainers' judgement.
- If there is disagreement on the general idea behind an issue or pull request, it is moved to _To discuss_, otherwise to _In review_.
+### To discuss
-- To discuss
+Pull requests and issues that are deemed important and controversial are discussed by the team during discussion meetings.
- Pull requests and issues that are deemed important and controversial are discussed by the team during discussion meetings.
+This may be where the merit of the change itself or the implementation strategy is contested by a team member.
- This may be where the merit of the change itself or the implementation strategy is contested by a team member.
+As a general guideline, the order of items is determined as follows:
- As a general guideline, the order of items is determined as follows:
+- Prioritise pull requests over issues
- - Prioritise pull requests over issues
+ Contributors who took the time to implement concrete change proposals should not wait indefinitely.
- Contributors who took the time to implement concrete change proposals should not wait indefinitely.
+- Prioritise fixing bugs and testing over documentation, improvements or new features
- - Prioritise fixing bugs over documentation, improvements or new features
+ The team values stability and accessibility higher than raw functionality.
- The team values stability and accessibility higher than raw functionality.
+- Interleave issues and PRs
- - Interleave issues and PRs
+  This way, issues without attempts at a solution get a chance to be addressed.
- This way issues without attempts at a solution get a chance to get addressed.
+### In review
-- In review
+Pull requests in this column are reviewed together during work meetings.
+This is both for spreading implementation knowledge and for establishing common values in code reviews.
- Pull requests in this column are reviewed together during work meetings.
- This is both for spreading implementation knowledge and for establishing common values in code reviews.
+When the overall direction is agreed upon, even when further changes are required, the pull request is assigned to one team member.
- When the overall direction is agreed upon, even when further changes are required, the pull request is assigned to one team member.
+### Assigned
-- Assigned for merging
+One team member is assigned to each of these pull requests.
+They will communicate with the authors, and make the final approval once all remaining issues are addressed.
- One team member is assigned to each of these pull requests.
- They will communicate with the authors, and make the final approval once all remaining issues are addressed.
+If more substantive issues arise, the assignee can move the pull request back to [To discuss](#to-discuss) or [In review](#in-review) to involve the team again.
- If more substantive issues arise, the assignee can move the pull request back to _To discuss_ to involve the team again.
+### Flowchart
The process is illustrated in the following diagram:
diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in
index 5fa489b20..e1470cf99 100644
--- a/misc/launchd/org.nixos.nix-daemon.plist.in
+++ b/misc/launchd/org.nixos.nix-daemon.plist.in
@@ -4,8 +4,6 @@
<dict>
<key>EnvironmentVariables</key>
<dict>
- <key>NIX_SSL_CERT_FILE</key>
- <string>/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt</string>
<key>OBJC_DISABLE_INITIALIZE_FORK_SAFETY</key>
<string>YES</string>
</dict>
diff --git a/mk/cxx-big-literal.mk b/mk/cxx-big-literal.mk
new file mode 100644
index 000000000..85365df8e
--- /dev/null
+++ b/mk/cxx-big-literal.mk
@@ -0,0 +1,5 @@
+%.gen.hh: %
+ @echo 'R"foo(' >> $@.tmp
+ $(trace-gen) cat $< >> $@.tmp
+ @echo ')foo"' >> $@.tmp
+ @mv $@.tmp $@
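To illustrate what the new pattern rule emits, generating a wrapper for an arbitrary text file would look roughly like this (the target name is hypothetical and assumes the rule is reachable from the including makefile):

```console
$ echo 'Hello, world.' > example.md
$ make example.md.gen.hh
$ cat example.md.gen.hh
R"foo(
Hello, world.
)foo"
```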
diff --git a/mk/debug-test.sh b/mk/debug-test.sh
index 6299e68a0..b5b628ecd 100755
--- a/mk/debug-test.sh
+++ b/mk/debug-test.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
-set -eu
+set -eu -o pipefail
test=$1
diff --git a/mk/lib.mk b/mk/lib.mk
index 92f0983d5..34fa624d8 100644
--- a/mk/lib.mk
+++ b/mk/lib.mk
@@ -101,6 +101,7 @@ include mk/libraries.mk
include mk/programs.mk
include mk/patterns.mk
include mk/templates.mk
+include mk/cxx-big-literal.mk
include mk/tests.mk
diff --git a/mk/libraries.mk b/mk/libraries.mk
index 02e4d47f9..1bc73d7f7 100644
--- a/mk/libraries.mk
+++ b/mk/libraries.mk
@@ -126,7 +126,7 @@ define build-library
$(1)_PATH := $$(_d)/$$($(1)_NAME).a
$$($(1)_PATH): $$($(1)_OBJS) | $$(_d)/
- +$$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$^
+ $$(trace-ld) $(LD) $$(ifndef $(HOST_DARWIN),-U) -r -o $$(_d)/$$($(1)_NAME).o $$^
$$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o
$(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE))
diff --git a/mk/patterns.mk b/mk/patterns.mk
index 86a724806..c81150260 100644
--- a/mk/patterns.mk
+++ b/mk/patterns.mk
@@ -1,10 +1,10 @@
$(buildprefix)%.o: %.cc
@mkdir -p "$(dir $@)"
- $(trace-cxx) $(CXX) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) -MMD -MF $(call filename-to-dep, $@) -MP
+ $(trace-cxx) $(CXX) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) $(ERROR_SWITCH_ENUM) -MMD -MF $(call filename-to-dep, $@) -MP
$(buildprefix)%.o: %.cpp
@mkdir -p "$(dir $@)"
- $(trace-cxx) $(CXX) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) -MMD -MF $(call filename-to-dep, $@) -MP
+ $(trace-cxx) $(CXX) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) $(ERROR_SWITCH_ENUM) -MMD -MF $(call filename-to-dep, $@) -MP
$(buildprefix)%.o: %.c
@mkdir -p "$(dir $@)"
diff --git a/mk/run-test.sh b/mk/run-test.sh
index 219c8577f..1a1d65930 100755
--- a/mk/run-test.sh
+++ b/mk/run-test.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
-set -u
+set -eu -o pipefail
red=""
green=""
@@ -22,24 +22,11 @@ fi
run_test () {
(init_test 2>/dev/null > /dev/null)
- log="$(run_test_proper 2>&1)"
- status=$?
+ log="$(run_test_proper 2>&1)" && status=0 || status=$?
}
run_test
-# Hack: Retry the test if it fails with “unexpected EOF reading a line” as these
-# appear randomly without anyone knowing why.
-# See https://github.com/NixOS/nix/issues/3605 for more info
-if [[ $status -ne 0 && $status -ne 99 && \
- "$(uname)" == "Darwin" && \
- "$log" =~ "unexpected EOF reading a line" \
-]]; then
- echo "$post_run_msg [${yellow}FAIL$normal] (possibly flaky, so will be retried)"
- echo "$log" | sed 's/^/ /'
- run_test
-fi
-
if [ $status -eq 0 ]; then
echo "$post_run_msg [${green}PASS$normal]"
elif [ $status -eq 99 ]; then
diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs
index de91dc28d..41ecbbeb4 100644
--- a/perl/lib/Nix/Store.xs
+++ b/perl/lib/Nix/Store.xs
@@ -27,8 +27,6 @@ static ref<Store> store()
if (!_store) {
try {
initLibStore();
- loadConfFile();
- settings.lockCPU = false;
_store = openStore();
} catch (Error & e) {
croak("%s", e.what());
@@ -295,7 +293,13 @@ SV * makeFixedOutputPath(int recursive, char * algo, char * hash, char * name)
try {
auto h = Hash::parseAny(hash, parseHashType(algo));
auto method = recursive ? FileIngestionMethod::Recursive : FileIngestionMethod::Flat;
- auto path = store()->makeFixedOutputPath(method, h, name);
+ auto path = store()->makeFixedOutputPath(name, FixedOutputInfo {
+ .hash = {
+ .method = method,
+ .hash = h,
+ },
+ .references = {},
+ });
XPUSHs(sv_2mortal(newSVpv(store()->printStorePath(path).c_str(), 0)));
} catch (Error & e) {
croak("%s", e.what());
diff --git a/scripts/install-systemd-multi-user.sh b/scripts/install-systemd-multi-user.sh
index 7dd567747..07b34033a 100755
--- a/scripts/install-systemd-multi-user.sh
+++ b/scripts/install-systemd-multi-user.sh
@@ -92,7 +92,7 @@ poly_configure_nix_daemon_service() {
task "Setting up the nix-daemon systemd service"
_sudo "to create the nix-daemon tmpfiles config" \
- ln -sfn /nix/var/nix/profiles/default/$TMPFILES_SRC $TMPFILES_DEST
+ ln -sfn "/nix/var/nix/profiles/default$TMPFILES_SRC" "$TMPFILES_DEST"
_sudo "to run systemd-tmpfiles once to pick that path up" \
systemd-tmpfiles --create --prefix=/nix/var/nix
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc
index 174435e7c..323e04fdb 100644
--- a/src/build-remote/build-remote.cc
+++ b/src/build-remote/build-remote.cc
@@ -219,7 +219,7 @@ static int main_build_remote(int argc, char * * argv)
% concatStringsSep<StringSet>(", ", m.supportedFeatures)
% concatStringsSep<StringSet>(", ", m.mandatoryFeatures);
- printMsg(couldBuildLocally ? lvlChatty : lvlWarn, error);
+ printMsg(couldBuildLocally ? lvlChatty : lvlWarn, error.str());
std::cerr << "# decline\n";
}
@@ -258,6 +258,8 @@ static int main_build_remote(int argc, char * * argv)
connected:
close(5);
+ assert(sshStore);
+
std::cerr << "# accept\n" << storeUri << "\n";
auto inputs = readStrings<PathSet>(source);
@@ -286,33 +288,61 @@ connected:
uploadLock = -1;
auto drv = store->readDerivation(*drvPath);
- auto outputHashes = staticOutputHashes(*store, drv);
-
- // Hijack the inputs paths of the derivation to include all the paths
- // that come from the `inputDrvs` set.
- // We don’t do that for the derivations whose `inputDrvs` is empty
- // because
- // 1. It’s not needed
- // 2. Changing the `inputSrcs` set changes the associated output ids,
- // which break CA derivations
- if (!drv.inputDrvs.empty())
- drv.inputSrcs = store->parseStorePathSet(inputs);
- auto result = sshStore->buildDerivation(*drvPath, drv);
+ std::optional<BuildResult> optResult;
+
+ // If we don't know whether we are trusted (e.g. `ssh://`
+ // stores), we assume we are. This is necessary for backwards
+ // compat.
+ bool trustedOrLegacy = ({
+ std::optional trusted = sshStore->isTrustedClient();
+ !trusted || *trusted;
+ });
+
+ // See the very large comment in `case wopBuildDerivation:` in
+ // `src/libstore/daemon.cc` that explains the trust model here.
+ //
+ // This condition mirrors that: that code enforces the "rules" outlined there;
+ // we do the best we can given those "rules".
+ if (trustedOrLegacy || drv.type().isCA()) {
+ // Hijack the inputs paths of the derivation to include all
+ // the paths that come from the `inputDrvs` set. We don’t do
+ // that for the derivations whose `inputDrvs` is empty
+ // because:
+ //
+ // 1. It’s not needed
+ //
+ // 2. Changing the `inputSrcs` set changes the associated
+ // output ids, which break CA derivations
+ if (!drv.inputDrvs.empty())
+ drv.inputSrcs = store->parseStorePathSet(inputs);
+ optResult = sshStore->buildDerivation(*drvPath, (const BasicDerivation &) drv);
+ auto & result = *optResult;
+ if (!result.success())
+ throw Error("build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, result.errorMsg);
+ } else {
+ copyClosure(*store, *sshStore, StorePathSet {*drvPath}, NoRepair, NoCheckSigs, substitute);
+ auto res = sshStore->buildPathsWithResults({ DerivedPath::Built { *drvPath, OutputsSpec::All {} } });
+ // One path to build should produce exactly one build result
+ assert(res.size() == 1);
+ optResult = std::move(res[0]);
+ }
- if (!result.success())
- throw Error("build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, result.errorMsg);
+ auto outputHashes = staticOutputHashes(*store, drv);
std::set<Realisation> missingRealisations;
StorePathSet missingPaths;
- if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) && !drv.type().hasKnownOutputPaths()) {
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations) && !drv.type().hasKnownOutputPaths()) {
for (auto & outputName : wantedOutputs) {
auto thisOutputHash = outputHashes.at(outputName);
auto thisOutputId = DrvOutput{ thisOutputHash, outputName };
if (!store->queryRealisation(thisOutputId)) {
debug("missing output %s", outputName);
- assert(result.builtOutputs.count(thisOutputId));
- auto newRealisation = result.builtOutputs.at(thisOutputId);
+ assert(optResult);
+ auto & result = *optResult;
+ auto i = result.builtOutputs.find(outputName);
+ assert(i != result.builtOutputs.end());
+ auto & newRealisation = i->second;
missingRealisations.insert(newRealisation);
missingPaths.insert(newRealisation.outPath);
}
@@ -337,7 +367,7 @@ connected:
for (auto & realisation : missingRealisations) {
// Should hold, because if the feature isn't enabled the set
// of missing realisations should be empty
- settings.requireExperimentalFeature(Xp::CaDerivations);
+ experimentalFeatureSettings.require(Xp::CaDerivations);
store->registerDrvOutput(realisation);
}
diff --git a/src/libcmd/command-installable-value.cc b/src/libcmd/command-installable-value.cc
new file mode 100644
index 000000000..7e0c15eb8
--- /dev/null
+++ b/src/libcmd/command-installable-value.cc
@@ -0,0 +1,11 @@
+#include "command-installable-value.hh"
+
+namespace nix {
+
+void InstallableValueCommand::run(ref<Store> store, ref<Installable> installable)
+{
+ auto installableValue = InstallableValue::require(installable);
+ run(store, installableValue);
+}
+
+}
diff --git a/src/libcmd/command-installable-value.hh b/src/libcmd/command-installable-value.hh
new file mode 100644
index 000000000..7880d4119
--- /dev/null
+++ b/src/libcmd/command-installable-value.hh
@@ -0,0 +1,23 @@
+#pragma once
+///@file
+
+#include "installable-value.hh"
+#include "command.hh"
+
+namespace nix {
+
+/**
+ * An InstallableCommand where the single positional argument must be an
+ * InstallableValue in particular.
+ */
+struct InstallableValueCommand : InstallableCommand
+{
+ /**
+ * Entry point to this command
+ */
+ virtual void run(ref<Store> store, ref<InstallableValue> installable) = 0;
+
+ void run(ref<Store> store, ref<Installable> installable) override;
+};
+
+}
diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc
index ab51c229d..6c4648b34 100644
--- a/src/libcmd/command.cc
+++ b/src/libcmd/command.cc
@@ -121,6 +121,8 @@ ref<EvalState> EvalCommand::getEvalState()
#endif
;
+ evalState->repair = repair;
+
if (startReplOnEvalErrors) {
evalState->debugRepl = &AbstractNixRepl::runSimple;
};
@@ -165,7 +167,7 @@ BuiltPathsCommand::BuiltPathsCommand(bool recursive)
});
}
-void BuiltPathsCommand::run(ref<Store> store)
+void BuiltPathsCommand::run(ref<Store> store, Installables && installables)
{
BuiltPaths paths;
if (all) {
@@ -211,7 +213,7 @@ void StorePathsCommand::run(ref<Store> store, BuiltPaths && paths)
run(store, std::move(sorted));
}
-void StorePathCommand::run(ref<Store> store, std::vector<StorePath> && storePaths)
+void StorePathCommand::run(ref<Store> store, StorePaths && storePaths)
{
if (storePaths.size() != 1)
throw UsageError("this command requires exactly one store path");
@@ -246,7 +248,7 @@ void MixProfile::updateProfile(const BuiltPaths & buildables)
{
if (!profile) return;
- std::vector<StorePath> result;
+ StorePaths result;
for (auto & buildable : buildables) {
std::visit(overloaded {
diff --git a/src/libcmd/command.hh b/src/libcmd/command.hh
index 49c7b4f9b..96236b987 100644
--- a/src/libcmd/command.hh
+++ b/src/libcmd/command.hh
@@ -1,6 +1,7 @@
#pragma once
+///@file
-#include "installables.hh"
+#include "installable-value.hh"
#include "args.hh"
#include "common-eval-args.hh"
#include "path.hh"
@@ -18,17 +19,21 @@ class EvalState;
struct Pos;
class Store;
+static constexpr Command::Category catHelp = -1;
static constexpr Command::Category catSecondary = 100;
static constexpr Command::Category catUtility = 101;
static constexpr Command::Category catNixInstallation = 102;
-static constexpr auto installablesCategory = "Options that change the interpretation of installables";
+static constexpr auto installablesCategory = "Options that change the interpretation of [installables](@docroot@/command-ref/new-cli/nix.md#installables)";
struct NixMultiCommand : virtual MultiCommand, virtual Command
{
nlohmann::json toJSON() override;
};
+// For the overloaded run methods
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+
/* A command that requires a Nix store. */
struct StoreCommand : virtual Command
{
@@ -97,10 +102,10 @@ struct SourceExprCommand : virtual Args, MixFlakeOptions
SourceExprCommand();
- std::vector<std::shared_ptr<Installable>> parseInstallables(
+ Installables parseInstallables(
ref<Store> store, std::vector<std::string> ss);
- std::shared_ptr<Installable> parseInstallable(
+ ref<Installable> parseInstallable(
ref<Store> store, const std::string & installable);
virtual Strings getDefaultFlakeAttrPaths();
@@ -115,36 +120,43 @@ struct MixReadOnlyOption : virtual Args
MixReadOnlyOption();
};
-/* A command that operates on a list of "installables", which can be
- store paths, attribute paths, Nix expressions, etc. */
-struct InstallablesCommand : virtual Args, SourceExprCommand
+/* Like InstallablesCommand but the installables are not loaded */
+struct RawInstallablesCommand : virtual Args, SourceExprCommand
{
- std::vector<std::shared_ptr<Installable>> installables;
+ RawInstallablesCommand();
- InstallablesCommand();
+ virtual void run(ref<Store> store, std::vector<std::string> && rawInstallables) = 0;
- void prepare() override;
- Installables load();
+ void run(ref<Store> store) override;
- virtual bool useDefaultInstallables() { return true; }
+ // FIXME make const after CmdRepl's override is fixed up
+ virtual void applyDefaultInstallables(std::vector<std::string> & rawInstallables);
- bool readFromStdIn;
+ bool readFromStdIn = false;
std::vector<std::string> getFlakesForCompletion() override;
-protected:
+private:
+
+ std::vector<std::string> rawInstallables;
+};
+/* A command that operates on a list of "installables", which can be
+ store paths, attribute paths, Nix expressions, etc. */
+struct InstallablesCommand : RawInstallablesCommand
+{
+ virtual void run(ref<Store> store, Installables && installables) = 0;
- std::vector<std::string> _installables;
+ void run(ref<Store> store, std::vector<std::string> && rawInstallables) override;
};
/* A command that operates on exactly one "installable" */
struct InstallableCommand : virtual Args, SourceExprCommand
{
- std::shared_ptr<Installable> installable;
-
InstallableCommand();
- void prepare() override;
+ virtual void run(ref<Store> store, ref<Installable> installable) = 0;
+
+ void run(ref<Store> store) override;
std::vector<std::string> getFlakesForCompletion() override
{
@@ -179,22 +191,18 @@ public:
BuiltPathsCommand(bool recursive = false);
- using StoreCommand::run;
-
virtual void run(ref<Store> store, BuiltPaths && paths) = 0;
- void run(ref<Store> store) override;
+ void run(ref<Store> store, Installables && installables) override;
- bool useDefaultInstallables() override { return !all; }
+ void applyDefaultInstallables(std::vector<std::string> & rawInstallables) override;
};
struct StorePathsCommand : public BuiltPathsCommand
{
StorePathsCommand(bool recursive = false);
- using BuiltPathsCommand::run;
-
- virtual void run(ref<Store> store, std::vector<StorePath> && storePaths) = 0;
+ virtual void run(ref<Store> store, StorePaths && storePaths) = 0;
void run(ref<Store> store, BuiltPaths && paths) override;
};
@@ -202,11 +210,9 @@ struct StorePathsCommand : public BuiltPathsCommand
/* A command that operates on exactly one store path. */
struct StorePathCommand : public StorePathsCommand
{
- using StorePathsCommand::run;
-
virtual void run(ref<Store> store, const StorePath & storePath) = 0;
- void run(ref<Store> store, std::vector<StorePath> && storePaths) override;
+ void run(ref<Store> store, StorePaths && storePaths) override;
};
/* A helper class for registering commands globally. */
diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc
index 908127b4d..ff3abd534 100644
--- a/src/libcmd/common-eval-args.cc
+++ b/src/libcmd/common-eval-args.cc
@@ -136,7 +136,11 @@ MixEvalArgs::MixEvalArgs()
addFlag({
.longName = "eval-store",
- .description = "The Nix store to use for evaluations.",
+ .description =
+ R"(
+ The [URL of the Nix store](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
+ to use for evaluation, i.e. to store derivations (`.drv` files) and inputs referenced by them.
+ )",
.category = category,
.labels = {"store-url"},
.handler = {&evalStoreUrl},
@@ -149,7 +153,7 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
for (auto & i : autoArgs) {
auto v = state.allocValue();
if (i.second[0] == 'E')
- state.mkThunk_(*v, state.parseExprFromString(i.second.substr(1), absPath(".")));
+ state.mkThunk_(*v, state.parseExprFromString(i.second.substr(1), state.rootPath(CanonPath::fromCwd())));
else
v->mkString(((std::string_view) i.second).substr(1));
res.insert(state.symbols.create(i.first), v);
@@ -157,19 +161,19 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
return res.finish();
}
-Path lookupFileArg(EvalState & state, std::string_view s)
+SourcePath lookupFileArg(EvalState & state, std::string_view s)
{
if (EvalSettings::isPseudoUrl(s)) {
auto storePath = fetchers::downloadTarball(
state.store, EvalSettings::resolvePseudoUrl(s), "source", false).first.storePath;
- return state.store->toRealPath(storePath);
+ return state.rootPath(CanonPath(state.store->toRealPath(storePath)));
}
else if (hasPrefix(s, "flake:")) {
- settings.requireExperimentalFeature(Xp::Flakes);
+ experimentalFeatureSettings.require(Xp::Flakes);
auto flakeRef = parseFlakeRef(std::string(s.substr(6)), {}, true, false);
auto storePath = flakeRef.resolve(state.store).fetchTree(state.store).first.storePath;
- return state.store->toRealPath(storePath);
+ return state.rootPath(CanonPath(state.store->toRealPath(storePath)));
}
else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
@@ -178,7 +182,7 @@ Path lookupFileArg(EvalState & state, std::string_view s)
}
else
- return absPath(std::string(s));
+ return state.rootPath(CanonPath::fromCwd(s));
}
}
diff --git a/src/libcmd/common-eval-args.hh b/src/libcmd/common-eval-args.hh
index 1ec800613..b65cb5b20 100644
--- a/src/libcmd/common-eval-args.hh
+++ b/src/libcmd/common-eval-args.hh
@@ -1,14 +1,17 @@
#pragma once
+///@file
#include "args.hh"
+#include "common-args.hh"
namespace nix {
class Store;
class EvalState;
class Bindings;
+struct SourcePath;
-struct MixEvalArgs : virtual Args
+struct MixEvalArgs : virtual Args, virtual MixRepair
{
static constexpr auto category = "Common evaluation options";
@@ -24,6 +27,6 @@ private:
std::map<std::string, std::string> autoArgs;
};
-Path lookupFileArg(EvalState & state, std::string_view s);
+SourcePath lookupFileArg(EvalState & state, std::string_view s);
}
diff --git a/src/libcmd/editor-for.cc b/src/libcmd/editor-for.cc
index f674f32bd..a17c6f12a 100644
--- a/src/libcmd/editor-for.cc
+++ b/src/libcmd/editor-for.cc
@@ -3,8 +3,11 @@
namespace nix {
-Strings editorFor(const Path & file, uint32_t line)
+Strings editorFor(const SourcePath & file, uint32_t line)
{
+ auto path = file.getPhysicalPath();
+ if (!path)
+ throw Error("cannot open '%s' in an editor because it has no physical path", file);
auto editor = getEnv("EDITOR").value_or("cat");
auto args = tokenizeString<Strings>(editor);
if (line > 0 && (
@@ -13,7 +16,7 @@ Strings editorFor(const Path & file, uint32_t line)
editor.find("vim") != std::string::npos ||
editor.find("kak") != std::string::npos))
args.push_back(fmt("+%d", line));
- args.push_back(file);
+ args.push_back(path->abs());
return args;
}
diff --git a/src/libcmd/editor-for.hh b/src/libcmd/editor-for.hh
index 8fbd08792..fbf4307c9 100644
--- a/src/libcmd/editor-for.hh
+++ b/src/libcmd/editor-for.hh
@@ -1,11 +1,15 @@
#pragma once
+///@file
#include "types.hh"
+#include "input-accessor.hh"
namespace nix {
-/* Helper function to generate args that invoke $EDITOR on
- filename:lineno. */
-Strings editorFor(const Path & file, uint32_t line);
+/**
+ * Helper function to generate args that invoke $EDITOR on
+ * filename:lineno.
+ */
+Strings editorFor(const SourcePath & file, uint32_t line);
}
diff --git a/src/libcmd/installable-attr-path.cc b/src/libcmd/installable-attr-path.cc
index d9377f0d6..b35ca2910 100644
--- a/src/libcmd/installable-attr-path.cc
+++ b/src/libcmd/installable-attr-path.cc
@@ -46,7 +46,15 @@ std::pair<Value *, PosIdx> InstallableAttrPath::toValue(EvalState & state)
DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths()
{
- auto v = toValue(*state).first;
+ auto [v, pos] = toValue(*state);
+
+ if (std::optional derivedPathWithInfo = trySinglePathToDerivedPaths(
+ *v,
+ pos,
+ fmt("while evaluating the attribute '%s'", attrPath)))
+ {
+ return { *derivedPathWithInfo };
+ }
Bindings & autoArgs = *cmd.getAutoArgs(*state);
@@ -87,6 +95,10 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths()
.drvPath = drvPath,
.outputs = outputs,
},
+ .info = make_ref<ExtraPathInfoValue>(ExtraPathInfoValue::Value {
+ /* FIXME: reconsider backwards compatibility above
+ so we can fill in this info. */
+ }),
});
return res;
diff --git a/src/libcmd/installable-attr-path.hh b/src/libcmd/installable-attr-path.hh
index c06132ec8..e9f0c33da 100644
--- a/src/libcmd/installable-attr-path.hh
+++ b/src/libcmd/installable-attr-path.hh
@@ -1,3 +1,6 @@
+#pragma once
+///@file
+
#include "globals.hh"
#include "installable-value.hh"
#include "outputs-spec.hh"
diff --git a/src/libcmd/installable-derived-path.cc b/src/libcmd/installable-derived-path.cc
index 729dc7d31..6ecf54b7c 100644
--- a/src/libcmd/installable-derived-path.cc
+++ b/src/libcmd/installable-derived-path.cc
@@ -10,7 +10,10 @@ std::string InstallableDerivedPath::what() const
DerivedPathsWithInfo InstallableDerivedPath::toDerivedPaths()
{
- return {{.path = derivedPath, .info = {} }};
+ return {{
+ .path = derivedPath,
+ .info = make_ref<ExtraPathInfo>(),
+ }};
}
std::optional<StorePath> InstallableDerivedPath::getStorePath()
diff --git a/src/libcmd/installable-derived-path.hh b/src/libcmd/installable-derived-path.hh
index 042878b91..e0b4f18b3 100644
--- a/src/libcmd/installable-derived-path.hh
+++ b/src/libcmd/installable-derived-path.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "installables.hh"
diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc
index 60a97deaf..eb944240b 100644
--- a/src/libcmd/installable-flake.cc
+++ b/src/libcmd/installable-flake.cc
@@ -95,30 +95,13 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths()
// FIXME: use eval cache?
auto v = attr->forceValue();
- if (v.type() == nPath) {
- PathSet context;
- auto storePath = state->copyPathToStore(context, Path(v.path));
- return {{
- .path = DerivedPath::Opaque {
- .path = std::move(storePath),
- }
- }};
+ if (std::optional derivedPathWithInfo = trySinglePathToDerivedPaths(
+ v,
+ noPos,
+ fmt("while evaluating the flake output attribute '%s'", attrPath)))
+ {
+ return { *derivedPathWithInfo };
}
-
- else if (v.type() == nString) {
- PathSet context;
- auto s = state->forceString(v, context, noPos, fmt("while evaluating the flake output attribute '%s'", attrPath));
- auto storePath = state->store->maybeParseStorePath(s);
- if (storePath && context.count(std::string(s))) {
- return {{
- .path = DerivedPath::Opaque {
- .path = std::move(*storePath),
- }
- }};
- } else
- throw Error("flake output attribute '%s' evaluates to the string '%s' which is not a store path", attrPath, s);
- }
-
else
throw Error("flake output attribute '%s' is not a derivation or path", attrPath);
}
@@ -160,13 +143,16 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths()
},
}, extendedOutputsSpec.raw()),
},
- .info = {
- .priority = priority,
- .originalRef = flakeRef,
- .resolvedRef = getLockedFlake()->flake.lockedRef,
- .attrPath = attrPath,
- .extendedOutputsSpec = extendedOutputsSpec,
- }
+ .info = make_ref<ExtraPathInfoFlake>(
+ ExtraPathInfoValue::Value {
+ .priority = priority,
+ .attrPath = attrPath,
+ .extendedOutputsSpec = extendedOutputsSpec,
+ },
+ ExtraPathInfoFlake::Flake {
+ .originalRef = flakeRef,
+ .resolvedRef = getLockedFlake()->flake.lockedRef,
+ }),
}};
}
@@ -178,8 +164,7 @@ std::pair<Value *, PosIdx> InstallableFlake::toValue(EvalState & state)
std::vector<ref<eval_cache::AttrCursor>>
InstallableFlake::getCursors(EvalState & state)
{
- auto evalCache = openEvalCache(state,
- std::make_shared<flake::LockedFlake>(lockFlake(state, flakeRef, lockFlags)));
+ auto evalCache = openEvalCache(state, getLockedFlake());
auto root = evalCache->getRoot();
@@ -213,6 +198,7 @@ std::shared_ptr<flake::LockedFlake> InstallableFlake::getLockedFlake() const
{
if (!_lockedFlake) {
flake::LockFlags lockFlagsApplyConfig = lockFlags;
+ // FIXME why this side effect?
lockFlagsApplyConfig.applyNixConfig = true;
_lockedFlake = std::make_shared<flake::LockedFlake>(lockFlake(*state, flakeRef, lockFlagsApplyConfig));
}
@@ -230,7 +216,7 @@ FlakeRef InstallableFlake::nixpkgsFlakeRef() const
}
}
- return Installable::nixpkgsFlakeRef();
+ return defaultNixpkgsFlakeRef();
}
}
diff --git a/src/libcmd/installable-flake.hh b/src/libcmd/installable-flake.hh
index c75765086..7ac4358d2 100644
--- a/src/libcmd/installable-flake.hh
+++ b/src/libcmd/installable-flake.hh
@@ -1,9 +1,34 @@
#pragma once
+///@file
#include "installable-value.hh"
namespace nix {
+/**
+ * Extra info about a \ref DerivedPath "derived path" that ultimately
+ * come from a Flake.
+ *
+ * Invariant: every ExtraPathInfo gotten from an InstallableFlake should
+ * be possible to downcast to an ExtraPathInfoFlake.
+ */
+struct ExtraPathInfoFlake : ExtraPathInfoValue
+{
+ /**
+ * Extra struct to get around C++ designated initializer limitations
+ */
+ struct Flake {
+ FlakeRef originalRef;
+ FlakeRef resolvedRef;
+ };
+
+ Flake flake;
+
+ ExtraPathInfoFlake(Value && v, Flake && f)
+ : ExtraPathInfoValue(std::move(v)), flake(f)
+ { }
+};
+
struct InstallableFlake : InstallableValue
{
FlakeRef flakeRef;
@@ -33,16 +58,31 @@ struct InstallableFlake : InstallableValue
std::pair<Value *, PosIdx> toValue(EvalState & state) override;
- /* Get a cursor to every attrpath in getActualAttrPaths()
- that exists. However if none exists, throw an exception. */
+ /**
+ * Get a cursor to every attrpath in getActualAttrPaths() that
+ * exists. However if none exists, throw an exception.
+ */
std::vector<ref<eval_cache::AttrCursor>>
getCursors(EvalState & state) override;
std::shared_ptr<flake::LockedFlake> getLockedFlake() const;
- FlakeRef nixpkgsFlakeRef() const override;
+ FlakeRef nixpkgsFlakeRef() const;
};
+/**
+ * Default flake ref for referring to Nixpkgs. For flakes that don't
+ * have their own Nixpkgs input, or other installables.
+ *
+ * It is a layer violation for Nix to know about Nixpkgs; currently just
+ * `nix develop` does. Be wary of using this /
+ * `InstallableFlake::nixpkgsFlakeRef` in more places.
+ */
+static inline FlakeRef defaultNixpkgsFlakeRef()
+{
+ return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}});
+}
+
ref<eval_cache::EvalCache> openEvalCache(
EvalState & state,
std::shared_ptr<flake::LockedFlake> lockedFlake);
diff --git a/src/libcmd/installable-value.cc b/src/libcmd/installable-value.cc
new file mode 100644
index 000000000..1eff293cc
--- /dev/null
+++ b/src/libcmd/installable-value.cc
@@ -0,0 +1,66 @@
+#include "installable-value.hh"
+#include "eval-cache.hh"
+
+namespace nix {
+
+std::vector<ref<eval_cache::AttrCursor>>
+InstallableValue::getCursors(EvalState & state)
+{
+ auto evalCache =
+ std::make_shared<nix::eval_cache::EvalCache>(std::nullopt, state,
+ [&]() { return toValue(state).first; });
+ return {evalCache->getRoot()};
+}
+
+ref<eval_cache::AttrCursor>
+InstallableValue::getCursor(EvalState & state)
+{
+ /* Although getCursors should return at least one element, in case it doesn't,
+ bounds-check to avoid undefined behavior when indexing vector[0] */
+ return getCursors(state).at(0);
+}
+
+static UsageError nonValueInstallable(Installable & installable)
+{
+ return UsageError("installable '%s' does not correspond to a Nix language value", installable.what());
+}
+
+InstallableValue & InstallableValue::require(Installable & installable)
+{
+ auto * castedInstallable = dynamic_cast<InstallableValue *>(&installable);
+ if (!castedInstallable)
+ throw nonValueInstallable(installable);
+ return *castedInstallable;
+}
+
+ref<InstallableValue> InstallableValue::require(ref<Installable> installable)
+{
+ auto castedInstallable = installable.dynamic_pointer_cast<InstallableValue>();
+ if (!castedInstallable)
+ throw nonValueInstallable(*installable);
+ return ref { castedInstallable };
+}
+
+std::optional<DerivedPathWithInfo> InstallableValue::trySinglePathToDerivedPaths(Value & v, const PosIdx pos, std::string_view errorCtx)
+{
+ if (v.type() == nPath) {
+ auto storePath = v.path().fetchToStore(state->store);
+ return {{
+ .path = DerivedPath::Opaque {
+ .path = std::move(storePath),
+ },
+ .info = make_ref<ExtraPathInfo>(),
+ }};
+ }
+
+ else if (v.type() == nString) {
+ return {{
+ .path = state->coerceToDerivedPath(pos, v, errorCtx),
+ .info = make_ref<ExtraPathInfo>(),
+ }};
+ }
+
+ else return std::nullopt;
+}
+
+}
diff --git a/src/libcmd/installable-value.hh b/src/libcmd/installable-value.hh
index c6cdc4797..3138ce8ec 100644
--- a/src/libcmd/installable-value.hh
+++ b/src/libcmd/installable-value.hh
@@ -1,14 +1,121 @@
#pragma once
+///@file
#include "installables.hh"
+#include "flake/flake.hh"
namespace nix {
+struct DrvInfo;
+struct SourceExprCommand;
+
+namespace eval_cache { class EvalCache; class AttrCursor; }
+
+struct App
+{
+ std::vector<DerivedPath> context;
+ Path program;
+ // FIXME: add args, sandbox settings, metadata, ...
+};
+
+struct UnresolvedApp
+{
+ App unresolved;
+ App resolve(ref<Store> evalStore, ref<Store> store);
+};
+
+/**
+ * Extra info about a \ref DerivedPath "derived path" that ultimately
+ * comes from a Nix language value.
+ *
+ * Invariant: every ExtraPathInfo gotten from an InstallableValue should
+ * be possible to downcast to an ExtraPathInfoValue.
+ */
+struct ExtraPathInfoValue : ExtraPathInfo
+{
+ /**
+ * Extra struct to get around C++ designated initializer limitations
+ */
+ struct Value {
+ /**
+ * An optional priority for use with "build envs". See Package
+ */
+ std::optional<NixInt> priority;
+
+ /**
+ * The attribute path associated with this value. The idea is
+ * that an installable referring to a value typically refers to
+ * a larger value, from which we project a smaller value out
+ * with this.
+ */
+ std::string attrPath;
+
+ /**
+ * \todo merge with DerivedPath's 'outputs' field?
+ */
+ ExtendedOutputsSpec extendedOutputsSpec;
+ };
+
+ Value value;
+
+ ExtraPathInfoValue(Value && v)
+ : value(v)
+ { }
+
+ virtual ~ExtraPathInfoValue() = default;
+};
+
+/**
+ * An Installable which corresponds to a Nix language value, in addition to
+ * a collection of \ref DerivedPath "derived paths".
+ */
struct InstallableValue : Installable
{
ref<EvalState> state;
InstallableValue(ref<EvalState> state) : state(state) {}
+
+ virtual ~InstallableValue() { }
+
+ virtual std::pair<Value *, PosIdx> toValue(EvalState & state) = 0;
+
+ /**
+ * Get a cursor to each value this Installable could refer to.
+ * However, if none exists, throw an exception instead of returning
+ * an empty vector.
+ */
+ virtual std::vector<ref<eval_cache::AttrCursor>>
+ getCursors(EvalState & state);
+
+ /**
+ * Get the first and most preferred cursor this Installable could
+ * refer to, or throw an exception if none exists.
+ */
+ virtual ref<eval_cache::AttrCursor>
+ getCursor(EvalState & state);
+
+ UnresolvedApp toApp(EvalState & state);
+
+ static InstallableValue & require(Installable & installable);
+ static ref<InstallableValue> require(ref<Installable> installable);
+
+protected:
+
+ /**
+ * Handles either a plain path, or a string with a single string
+ * context elem in the right format. The latter case is handled by
+ * `EvalState::coerceToDerivedPath()`; see it for details.
+ *
+ * @param v Value that is hopefully a string or path per the above.
+ *
+ * @param pos Position of value to aid with diagnostics.
+ *
+ * @param errorCtx Arbitrary message for use in a potential error message when something is wrong with `v`.
+ *
+ * @result A derived path (with empty info, for now) if the value
+ * matched the above criteria.
+ */
+ std::optional<DerivedPathWithInfo> trySinglePathToDerivedPaths(Value & v, const PosIdx pos, std::string_view errorCtx);
};
}
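
The two require() overloads declared above are a downcasting convention: a command that needs evaluation asks for an InstallableValue and gets a uniform UsageError when the installable cannot produce one. A self-contained sketch of that pattern with ordinary std::shared_ptr and hypothetical type names (the real code uses nix's ref<T> instead):

    #include <memory>
    #include <stdexcept>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { void use() {} };

    // Mirrors the shape of InstallableValue::require(): downcast, and turn
    // a failed cast into a user-facing error instead of a null pointer.
    std::shared_ptr<Derived> require(std::shared_ptr<Base> b)
    {
        auto d = std::dynamic_pointer_cast<Derived>(b);
        if (!d)
            throw std::runtime_error("argument is not a Derived");
        return d;
    }

    int main()
    {
        require(std::make_shared<Derived>())->use();   // fine
        // require(std::make_shared<Base>());          // would throw
    }
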
diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc
index 7d444aac0..a2b882355 100644
--- a/src/libcmd/installables.cc
+++ b/src/libcmd/installables.cc
@@ -103,6 +103,28 @@ MixFlakeOptions::MixFlakeOptions()
});
addFlag({
+ .longName = "reference-lock-file",
+ .description = "Read the given lock file instead of `flake.lock` within the top-level flake.",
+ .category = category,
+ .labels = {"flake-lock-path"},
+ .handler = {[&](std::string lockFilePath) {
+ lockFlags.referenceLockFilePath = lockFilePath;
+ }},
+ .completer = completePath
+ });
+
+ addFlag({
+ .longName = "output-lock-file",
+ .description = "Write the given lock file instead of `flake.lock` within the top-level flake.",
+ .category = category,
+ .labels = {"flake-lock-path"},
+ .handler = {[&](std::string lockFilePath) {
+ lockFlags.outputLockFilePath = lockFilePath;
+ }},
+ .completer = completePath
+ });
+
+ addFlag({
.longName = "inputs-from",
.description = "Use the inputs of the specified flake as registry entries.",
.category = category,
@@ -153,7 +175,7 @@ SourceExprCommand::SourceExprCommand()
.longName = "file",
.shortName = 'f',
.description =
- "Interpret installables as attribute paths relative to the Nix expression stored in *file*. "
+ "Interpret [*installables*](@docroot@/command-ref/new-cli/nix.md#installables) as attribute paths relative to the Nix expression stored in *file*. "
"If *file* is the character -, then a Nix expression will be read from standard input. "
"Implies `--impure`.",
.category = installablesCategory,
@@ -164,7 +186,7 @@ SourceExprCommand::SourceExprCommand()
addFlag({
.longName = "expr",
- .description = "Interpret installables as attribute paths relative to the Nix expression *expr*.",
+ .description = "Interpret [*installables*](@docroot@/command-ref/new-cli/nix.md#installables) as attribute paths relative to the Nix expression *expr*.",
.category = installablesCategory,
.labels = {"expr"},
.handler = {&expr}
@@ -332,7 +354,7 @@ void completeFlakeRefWithFragment(
void completeFlakeRef(ref<Store> store, std::string_view prefix)
{
- if (!settings.isExperimentalFeatureEnabled(Xp::Flakes))
+ if (!experimentalFeatureSettings.isEnabled(Xp::Flakes))
return;
if (prefix == "")
@@ -364,23 +386,6 @@ DerivedPathWithInfo Installable::toDerivedPath()
return std::move(buildables[0]);
}
-std::vector<ref<eval_cache::AttrCursor>>
-Installable::getCursors(EvalState & state)
-{
- auto evalCache =
- std::make_shared<nix::eval_cache::EvalCache>(std::nullopt, state,
- [&]() { return toValue(state).first; });
- return {evalCache->getRoot()};
-}
-
-ref<eval_cache::AttrCursor>
-Installable::getCursor(EvalState & state)
-{
- /* Although getCursors should return at least one element, in case it doesn't,
- bound check to avoid an undefined behavior for vector[0] */
- return getCursors(state).at(0);
-}
-
static StorePath getDeriver(
ref<Store> store,
const Installable & i,
@@ -422,10 +427,10 @@ ref<eval_cache::EvalCache> openEvalCache(
});
}
-std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
+Installables SourceExprCommand::parseInstallables(
ref<Store> store, std::vector<std::string> ss)
{
- std::vector<std::shared_ptr<Installable>> result;
+ Installables result;
if (file || expr) {
if (file && expr)
@@ -444,14 +449,14 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
else if (file)
state->evalFile(lookupFileArg(*state, *file), *vFile);
else {
- auto e = state->parseExprFromString(*expr, absPath("."));
+ auto e = state->parseExprFromString(*expr, state->rootPath(CanonPath::fromCwd()));
state->eval(e, *vFile);
}
for (auto & s : ss) {
auto [prefix, extendedOutputsSpec] = ExtendedOutputsSpec::parse(s);
result.push_back(
- std::make_shared<InstallableAttrPath>(
+ make_ref<InstallableAttrPath>(
InstallableAttrPath::parse(
state, *this, vFile, prefix, extendedOutputsSpec)));
}
@@ -468,7 +473,7 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
if (prefix.find('/') != std::string::npos) {
try {
- result.push_back(std::make_shared<InstallableDerivedPath>(
+ result.push_back(make_ref<InstallableDerivedPath>(
InstallableDerivedPath::parse(store, prefix, extendedOutputsSpec)));
continue;
} catch (BadStorePath &) {
@@ -480,7 +485,7 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
try {
auto [flakeRef, fragment] = parseFlakeRefWithFragment(std::string { prefix }, absPath("."));
- result.push_back(std::make_shared<InstallableFlake>(
+ result.push_back(make_ref<InstallableFlake>(
this,
getEvalState(),
std::move(flakeRef),
@@ -501,7 +506,7 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
return result;
}
-std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
+ref<Installable> SourceExprCommand::parseInstallable(
ref<Store> store, const std::string & installable)
{
auto installables = parseInstallables(store, {installable});
@@ -513,7 +518,7 @@ std::vector<BuiltPathWithResult> Installable::build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
- const std::vector<std::shared_ptr<Installable>> & installables,
+ const Installables & installables,
BuildMode bMode)
{
std::vector<BuiltPathWithResult> res;
@@ -522,11 +527,11 @@ std::vector<BuiltPathWithResult> Installable::build(
return res;
}
-std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> Installable::build2(
+std::vector<std::pair<ref<Installable>, BuiltPathWithResult>> Installable::build2(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
- const std::vector<std::shared_ptr<Installable>> & installables,
+ const Installables & installables,
BuildMode bMode)
{
if (mode == Realise::Nothing)
@@ -534,8 +539,8 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> Instal
struct Aux
{
- ExtraPathInfo info;
- std::shared_ptr<Installable> installable;
+ ref<ExtraPathInfo> info;
+ ref<Installable> installable;
};
std::vector<DerivedPath> pathsToBuild;
@@ -548,7 +553,7 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> Instal
}
}
- std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> res;
+ std::vector<std::pair<ref<Installable>, BuiltPathWithResult>> res;
switch (mode) {
@@ -588,8 +593,8 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> Instal
std::visit(overloaded {
[&](const DerivedPath::Built & bfd) {
std::map<std::string, StorePath> outputs;
- for (auto & path : buildResult.builtOutputs)
- outputs.emplace(path.first.outputName, path.second.outPath);
+ for (auto & [outputName, realisation] : buildResult.builtOutputs)
+ outputs.emplace(outputName, realisation.outPath);
res.push_back({aux.installable, {
.path = BuiltPath::Built { bfd.drvPath, outputs },
.info = aux.info,
@@ -620,7 +625,7 @@ BuiltPaths Installable::toBuiltPaths(
ref<Store> store,
Realise mode,
OperateOn operateOn,
- const std::vector<std::shared_ptr<Installable>> & installables)
+ const Installables & installables)
{
if (operateOn == OperateOn::Output) {
BuiltPaths res;
@@ -642,7 +647,7 @@ StorePathSet Installable::toStorePaths(
ref<Store> evalStore,
ref<Store> store,
Realise mode, OperateOn operateOn,
- const std::vector<std::shared_ptr<Installable>> & installables)
+ const Installables & installables)
{
StorePathSet outPaths;
for (auto & path : toBuiltPaths(evalStore, store, mode, operateOn, installables)) {
@@ -656,7 +661,7 @@ StorePath Installable::toStorePath(
ref<Store> evalStore,
ref<Store> store,
Realise mode, OperateOn operateOn,
- std::shared_ptr<Installable> installable)
+ ref<Installable> installable)
{
auto paths = toStorePaths(evalStore, store, mode, operateOn, {installable});
@@ -668,7 +673,7 @@ StorePath Installable::toStorePath(
StorePathSet Installable::toDerivations(
ref<Store> store,
- const std::vector<std::shared_ptr<Installable>> & installables,
+ const Installables & installables,
bool useDeriver)
{
StorePathSet drvPaths;
@@ -692,9 +697,8 @@ StorePathSet Installable::toDerivations(
return drvPaths;
}
-InstallablesCommand::InstallablesCommand()
+RawInstallablesCommand::RawInstallablesCommand()
{
-
addFlag({
.longName = "stdin",
.description = "Read installables from the standard input.",
@@ -703,40 +707,45 @@ InstallablesCommand::InstallablesCommand()
expectArgs({
.label = "installables",
- .handler = {&_installables},
+ .handler = {&rawInstallables},
.completer = {[&](size_t, std::string_view prefix) {
completeInstallable(prefix);
}}
});
}
-void InstallablesCommand::prepare()
-{
- installables = load();
-}
-
-Installables InstallablesCommand::load()
+void RawInstallablesCommand::applyDefaultInstallables(std::vector<std::string> & rawInstallables)
{
- if (_installables.empty() && useDefaultInstallables() && !readFromStdIn)
+ if (rawInstallables.empty()) {
// FIXME: commands like "nix profile install" should not have a
// default, probably.
- _installables.push_back(".");
+ rawInstallables.push_back(".");
+ }
+}
+void RawInstallablesCommand::run(ref<Store> store)
+{
if (readFromStdIn && !isatty(STDIN_FILENO)) {
std::string word;
while (std::cin >> word) {
- _installables.emplace_back(std::move(word));
+ rawInstallables.emplace_back(std::move(word));
}
}
- return parseInstallables(getStore(), _installables);
+ applyDefaultInstallables(rawInstallables);
+ run(store, std::move(rawInstallables));
+}
+
+std::vector<std::string> RawInstallablesCommand::getFlakesForCompletion()
+{
+ applyDefaultInstallables(rawInstallables);
+ return rawInstallables;
}
-std::vector<std::string> InstallablesCommand::getFlakesForCompletion()
+void InstallablesCommand::run(ref<Store> store, std::vector<std::string> && rawInstallables)
{
- if (_installables.empty() && useDefaultInstallables())
- return {"."};
- return _installables;
+ auto installables = parseInstallables(store, rawInstallables);
+ run(store, std::move(installables));
}
InstallableCommand::InstallableCommand()
@@ -752,9 +761,16 @@ InstallableCommand::InstallableCommand()
});
}
-void InstallableCommand::prepare()
+void InstallableCommand::run(ref<Store> store)
+{
+ auto installable = parseInstallable(store, _installable);
+ run(store, std::move(installable));
+}
+
+void BuiltPathsCommand::applyDefaultInstallables(std::vector<std::string> & rawInstallables)
{
- installable = parseInstallable(getStore(), _installable);
+ if (rawInstallables.empty() && !all)
+ rawInstallables.push_back(".");
}
}
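
One behavioral detail of the reworked RawInstallablesCommand::run() above: installables are only read from standard input when --stdin was given and stdin is not a terminal, and the default "." is applied afterwards. A standalone approximation of that input handling, stripped of all Nix types (an assumed simplification; POSIX isatty):

    #include <iostream>
    #include <string>
    #include <vector>
    #include <unistd.h>

    int main()
    {
        std::vector<std::string> rawInstallables;

        // Mirror the guard in the patch: only read from stdin when it is
        // not an interactive terminal.
        if (!isatty(STDIN_FILENO)) {
            std::string word;
            while (std::cin >> word)
                rawInstallables.emplace_back(std::move(word));
        }

        // Default installable, as applyDefaultInstallables() does.
        if (rawInstallables.empty())
            rawInstallables.push_back(".");

        for (auto & s : rawInstallables)
            std::cout << s << "\n";
    }
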
diff --git a/src/libcmd/installables.hh b/src/libcmd/installables.hh
index be77fdc81..42d6c7c7c 100644
--- a/src/libcmd/installables.hh
+++ b/src/libcmd/installables.hh
@@ -1,12 +1,11 @@
#pragma once
+///@file
#include "util.hh"
#include "path.hh"
#include "outputs-spec.hh"
#include "derived-path.hh"
-#include "eval.hh"
#include "store-api.hh"
-#include "flake/flake.hh"
#include "build-result.hh"
#include <optional>
@@ -14,122 +13,156 @@
namespace nix {
struct DrvInfo;
-struct SourceExprCommand;
-
-namespace eval_cache { class EvalCache; class AttrCursor; }
-
-struct App
-{
- std::vector<DerivedPath> context;
- Path program;
- // FIXME: add args, sandbox settings, metadata, ...
-};
-
-struct UnresolvedApp
-{
- App unresolved;
- App resolve(ref<Store> evalStore, ref<Store> store);
-};
enum class Realise {
- /* Build the derivation. Postcondition: the
- derivation outputs exist. */
+ /**
+ * Build the derivation.
+ *
+ * Postcondition: the derivation outputs exist.
+ */
Outputs,
- /* Don't build the derivation. Postcondition: the store derivation
- exists. */
+ /**
+ * Don't build the derivation.
+ *
+ * Postcondition: the store derivation exists.
+ */
Derivation,
- /* Evaluate in dry-run mode. Postcondition: nothing. */
- // FIXME: currently unused, but could be revived if we can
- // evaluate derivations in-memory.
+ /**
+ * Evaluate in dry-run mode.
+ *
+ * Postcondition: nothing.
+ *
+ * \todo currently unused, but could be revived if we can evaluate
+ * derivations in-memory.
+ */
Nothing
};
-/* How to handle derivations in commands that operate on store paths. */
+/**
+ * How to handle derivations in commands that operate on store paths.
+ */
enum class OperateOn {
- /* Operate on the output path. */
+ /**
+ * Operate on the output path.
+ */
Output,
- /* Operate on the .drv path. */
+ /**
+ * Operate on the .drv path.
+ */
Derivation
};
+/**
+ * Extra info about a DerivedPath
+ *
+ * Yes, this is empty, but that is intended. The subclasses of
+ * Installable pair it with their own subclasses to provide more info.
+ * Certain commands will make use of this info.
+ */
struct ExtraPathInfo
{
- std::optional<NixInt> priority;
- std::optional<FlakeRef> originalRef;
- std::optional<FlakeRef> resolvedRef;
- std::optional<std::string> attrPath;
- // FIXME: merge with DerivedPath's 'outputs' field?
- std::optional<ExtendedOutputsSpec> extendedOutputsSpec;
+ virtual ~ExtraPathInfo() = default;
};
-/* A derived path with any additional info that commands might
- need from the derivation. */
+/**
+ * A DerivedPath with \ref ExtraPathInfo "any additional info" that
+ * commands might need from the derivation.
+ */
struct DerivedPathWithInfo
{
DerivedPath path;
- ExtraPathInfo info;
+ ref<ExtraPathInfo> info;
};
+/**
+ * Like DerivedPathWithInfo but extending BuiltPath with \ref
+ * ExtraPathInfo "extra info" and also possibly the \ref BuildResult
+ * "result of building".
+ */
struct BuiltPathWithResult
{
BuiltPath path;
- ExtraPathInfo info;
+ ref<ExtraPathInfo> info;
std::optional<BuildResult> result;
};
+/**
+ * Shorthand, for less typing and helping us keep the choice of
+ * collection in sync.
+ */
typedef std::vector<DerivedPathWithInfo> DerivedPathsWithInfo;
+struct Installable;
+
+/**
+ * Shorthand, for less typing and helping us keep the choice of
+ * collection in sync.
+ */
+typedef std::vector<ref<Installable>> Installables;
+
+/**
+ * Installables are the main positional arguments for the Nix
+ * Command-line.
+ *
+ * This base class is very flexible, and just assumes that the
+ * Installable refers to a collection of \ref DerivedPath "derived paths" with
+ * \ref ExtraPathInfo "extra info".
+ */
struct Installable
{
virtual ~Installable() { }
+ /**
+ * What Installable is this?
+ *
+ * Prints back valid CLI syntax that would result in this same
+ * installable. It doesn't need to be exactly what the user wrote,
+ * just something that means the same thing.
+ */
virtual std::string what() const = 0;
+ /**
+ * Get the collection of \ref DerivedPathWithInfo "derived paths
+ * with info" that this \ref Installable installable denotes.
+ *
+ * This is the main method of this class
+ */
virtual DerivedPathsWithInfo toDerivedPaths() = 0;
+ /**
+ * A convenience wrapper of the above for when we expect an
+ * installable to produce a single \ref DerivedPath "derived path"
+ * only.
+ *
+ * If no or multiple \ref DerivedPath "derived paths" are produced,
+ * an error is raised.
+ */
DerivedPathWithInfo toDerivedPath();
- UnresolvedApp toApp(EvalState & state);
-
- virtual std::pair<Value *, PosIdx> toValue(EvalState & state)
- {
- throw Error("argument '%s' cannot be evaluated", what());
- }
-
- /* Return a value only if this installable is a store path or a
- symlink to it. */
+ /**
+ * Return a value only if this installable is a store path or a
+ * symlink to it.
+ *
+ * \todo should we move this to InstallableDerivedPath? It is only
+ * supposed to work there anyways. Can always downcast.
+ */
virtual std::optional<StorePath> getStorePath()
{
return {};
}
- /* Get a cursor to each value this Installable could refer to. However
- if none exists, throw exception instead of returning empty vector. */
- virtual std::vector<ref<eval_cache::AttrCursor>>
- getCursors(EvalState & state);
-
- /* Get the first and most preferred cursor this Installable could refer
- to, or throw an exception if none exists. */
- virtual ref<eval_cache::AttrCursor>
- getCursor(EvalState & state);
-
- virtual FlakeRef nixpkgsFlakeRef() const
- {
- return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}});
- }
-
static std::vector<BuiltPathWithResult> build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
- const std::vector<std::shared_ptr<Installable>> & installables,
+ const Installables & installables,
BuildMode bMode = bmNormal);
- static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> build2(
+ static std::vector<std::pair<ref<Installable>, BuiltPathWithResult>> build2(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
- const std::vector<std::shared_ptr<Installable>> & installables,
+ const Installables & installables,
BuildMode bMode = bmNormal);
static std::set<StorePath> toStorePaths(
@@ -137,18 +170,18 @@ struct Installable
ref<Store> store,
Realise mode,
OperateOn operateOn,
- const std::vector<std::shared_ptr<Installable>> & installables);
+ const Installables & installables);
static StorePath toStorePath(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
- std::shared_ptr<Installable> installable);
+ ref<Installable> installable);
static std::set<StorePath> toDerivations(
ref<Store> store,
- const std::vector<std::shared_ptr<Installable>> & installables,
+ const Installables & installables,
bool useDeriver = false);
static BuiltPaths toBuiltPaths(
@@ -156,9 +189,7 @@ struct Installable
ref<Store> store,
Realise mode,
OperateOn operateOn,
- const std::vector<std::shared_ptr<Installable>> & installables);
+ const Installables & installables);
};
-typedef std::vector<std::shared_ptr<Installable>> Installables;
-
}
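
ExtraPathInfo becoming an empty base with just a virtual destructor is what lets DerivedPathWithInfo carry a ref<ExtraPathInfo> that richer installables can fill with a subclass (e.g. ExtraPathInfoValue) and that commands can downcast when they care. A small standalone illustration of that pattern with hypothetical names and std::shared_ptr in place of ref:

    #include <iostream>
    #include <memory>
    #include <optional>
    #include <string>

    struct Info { virtual ~Info() = default; };   // stands in for ExtraPathInfo

    struct ValueInfo : Info {                      // stands in for ExtraPathInfoValue
        std::optional<int> priority;
        std::string attrPath;
    };

    void consume(const std::shared_ptr<Info> & info)
    {
        // Commands that want evaluator-provided metadata downcast; all
        // other consumers treat the info as opaque.
        if (auto v = std::dynamic_pointer_cast<ValueInfo>(info))
            std::cout << "attrPath = " << v->attrPath << "\n";
    }

    int main()
    {
        auto i = std::make_shared<ValueInfo>();
        i->attrPath = "packages.x86_64-linux.hello";
        consume(i);
    }
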
diff --git a/src/libcmd/legacy.hh b/src/libcmd/legacy.hh
index f503b0da3..357500a4d 100644
--- a/src/libcmd/legacy.hh
+++ b/src/libcmd/legacy.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <functional>
#include <map>
diff --git a/src/libcmd/markdown.hh b/src/libcmd/markdown.hh
index 78320fcf5..a04d32a4f 100644
--- a/src/libcmd/markdown.hh
+++ b/src/libcmd/markdown.hh
@@ -1,3 +1,6 @@
+#pragma once
+///@file
+
#include "types.hh"
namespace nix {
diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc
index e3afb1531..4b160a100 100644
--- a/src/libcmd/repl.cc
+++ b/src/libcmd/repl.cc
@@ -40,6 +40,7 @@ extern "C" {
#include "markdown.hh"
#include "local-fs-store.hh"
#include "progress-bar.hh"
+#include "print.hh"
#if HAVE_BOEHMGC
#define GC_INCLUDE_NEW
@@ -54,8 +55,6 @@ struct NixRepl
, gc
#endif
{
- std::string curDir;
-
size_t debugTraceIndex;
Strings loadedFiles;
@@ -113,7 +112,6 @@ NixRepl::NixRepl(const Strings & searchPath, nix::ref<Store> store, ref<EvalStat
, staticEnv(new StaticEnv(false, state->staticBaseEnv.get()))
, historyFile(getDataDir() + "/nix/repl-history")
{
- curDir = absPath(".");
}
@@ -252,7 +250,9 @@ void NixRepl::mainLoop()
el_hist_size = 1000;
#endif
read_history(historyFile.c_str());
+ auto oldRepl = curRepl;
curRepl = this;
+ Finally restoreRepl([&] { curRepl = oldRepl; });
#ifndef READLINE
rl_set_complete_func(completionCallback);
rl_set_list_possib_func(listPossibleCallback);
@@ -423,6 +423,7 @@ StringSet NixRepl::completePrefix(const std::string & prefix)
}
+// FIXME: DRY and match or use the parser
static bool isVarName(std::string_view s)
{
if (s.size() == 0) return false;
@@ -590,14 +591,14 @@ bool NixRepl::processLine(std::string line)
Value v;
evalString(arg, v);
- const auto [path, line] = [&] () -> std::pair<Path, uint32_t> {
+ const auto [path, line] = [&] () -> std::pair<SourcePath, uint32_t> {
if (v.type() == nPath || v.type() == nString) {
- PathSet context;
+ NixStringContext context;
auto path = state->coerceToPath(noPos, v, context, "while evaluating the filename to edit");
return {path, 0};
} else if (v.isLambda()) {
auto pos = state->positions[v.lambda.fun->pos];
- if (auto path = std::get_if<Path>(&pos.origin))
+ if (auto path = std::get_if<SourcePath>(&pos.origin))
return {*path, pos.line};
else
throw Error("'%s' cannot be shown in an editor", pos);
@@ -872,8 +873,7 @@ void NixRepl::addVarToScope(const Symbol name, Value & v)
Expr * NixRepl::parseString(std::string s)
{
- Expr * e = state->parseExprFromString(std::move(s), curDir, staticEnv);
- return e;
+ return state->parseExprFromString(std::move(s), state->rootPath(CanonPath::fromCwd()), staticEnv);
}
@@ -892,17 +892,6 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m
}
-std::ostream & printStringValue(std::ostream & str, const char * string) {
- str << "\"";
- for (const char * i = string; *i; i++)
- if (*i == '\"' || *i == '\\') str << "\\" << *i;
- else if (*i == '\n') str << "\\n";
- else if (*i == '\r') str << "\\r";
- else if (*i == '\t') str << "\\t";
- else str << *i;
- str << "\"";
- return str;
-}
// FIXME: lot of cut&paste from Nix's eval.cc.
@@ -920,17 +909,19 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m
break;
case nBool:
- str << ANSI_CYAN << (v.boolean ? "true" : "false") << ANSI_NORMAL;
+ str << ANSI_CYAN;
+ printLiteralBool(str, v.boolean);
+ str << ANSI_NORMAL;
break;
case nString:
str << ANSI_WARNING;
- printStringValue(str, v.string.s);
+ printLiteralString(str, v.string.s);
str << ANSI_NORMAL;
break;
case nPath:
- str << ANSI_GREEN << v.path << ANSI_NORMAL; // !!! escaping?
+ str << ANSI_GREEN << v.path().to_string() << ANSI_NORMAL; // !!! escaping?
break;
case nNull:
@@ -945,7 +936,7 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m
if (isDrv) {
str << "«derivation ";
Bindings::iterator i = v.attrs->find(state->sDrvPath);
- PathSet context;
+ NixStringContext context;
if (i != v.attrs->end())
str << state->store->printStorePath(state->coerceToStorePath(i->pos, *i->value, context, "while evaluating the drvPath of a derivation"));
else
@@ -962,10 +953,7 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m
sorted.emplace(state->symbols[i.name], i.value);
for (auto & i : sorted) {
- if (isVarName(i.first))
- str << i.first;
- else
- printStringValue(str, i.first.c_str());
+ printAttributeName(str, i.first);
str << " = ";
if (seen.count(i.second))
str << "«repeated»";
@@ -1024,6 +1012,8 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m
str << v.fpoint;
break;
+ case nThunk:
+ case nExternal:
default:
str << ANSI_RED "«unknown»" ANSI_NORMAL;
break;
diff --git a/src/libcmd/repl.hh b/src/libcmd/repl.hh
index dfccc93e7..731c8e6db 100644
--- a/src/libcmd/repl.hh
+++ b/src/libcmd/repl.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "eval.hh"
diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc
index 7c0705091..ab654c1b0 100644
--- a/src/libexpr/attr-path.cc
+++ b/src/libexpr/attr-path.cc
@@ -106,7 +106,7 @@ std::pair<Value *, PosIdx> findAlongAttrPath(EvalState & state, const std::strin
}
-std::pair<std::string, uint32_t> findPackageFilename(EvalState & state, Value & v, std::string what)
+std::pair<SourcePath, uint32_t> findPackageFilename(EvalState & state, Value & v, std::string what)
{
Value * v2;
try {
@@ -118,21 +118,25 @@ std::pair<std::string, uint32_t> findPackageFilename(EvalState & state, Value &
// FIXME: is it possible to extract the Pos object instead of doing this
// toString + parsing?
- auto pos = state.forceString(*v2, noPos, "while evaluating the 'meta.position' attribute of a derivation");
+ NixStringContext context;
+ auto path = state.coerceToPath(noPos, *v2, context, "while evaluating the 'meta.position' attribute of a derivation");
- auto colon = pos.rfind(':');
- if (colon == std::string::npos)
- throw ParseError("cannot parse meta.position attribute '%s'", pos);
+ auto fn = path.path.abs();
+
+ auto fail = [fn]() {
+ throw ParseError("cannot parse 'meta.position' attribute '%s'", fn);
+ };
- std::string filename(pos, 0, colon);
- unsigned int lineno;
try {
- lineno = std::stoi(std::string(pos, colon + 1, std::string::npos));
+ auto colon = fn.rfind(':');
+ if (colon == std::string::npos) fail();
+ std::string filename(fn, 0, colon);
+ auto lineno = std::stoi(std::string(fn, colon + 1, std::string::npos));
+ return {CanonPath(fn.substr(0, colon)), lineno};
} catch (std::invalid_argument & e) {
- throw ParseError("cannot parse line number '%s'", pos);
+ fail();
+ abort();
}
-
- return { std::move(filename), lineno };
}
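
The rewritten findPackageFilename() still expects meta.position to look like '<file>:<line>'; it splits on the last ':' (so colons earlier in the path are harmless) and treats a missing separator or a non-numeric line number as a parse error. A self-contained sketch of just that parsing step, with a hypothetical helper name:

    #include <iostream>
    #include <optional>
    #include <stdexcept>
    #include <string>
    #include <utility>

    std::optional<std::pair<std::string, unsigned int>>
    parsePosition(const std::string & fn)
    {
        auto colon = fn.rfind(':');
        if (colon == std::string::npos) return std::nullopt;
        try {
            // stoi throws std::invalid_argument for a non-numeric line.
            unsigned int lineno = std::stoi(fn.substr(colon + 1));
            return std::pair{fn.substr(0, colon), lineno};
        } catch (std::invalid_argument &) {
            return std::nullopt;
        }
    }

    int main()
    {
        if (auto p = parsePosition("/etc/nixpkgs/pkgs/hello/default.nix:7"))
            std::cout << p->first << ", line " << p->second << "\n";
    }
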
diff --git a/src/libexpr/attr-path.hh b/src/libexpr/attr-path.hh
index 117e0051b..eb00ffb93 100644
--- a/src/libexpr/attr-path.hh
+++ b/src/libexpr/attr-path.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "eval.hh"
@@ -16,8 +17,10 @@ std::pair<Value *, PosIdx> findAlongAttrPath(
Bindings & autoArgs,
Value & vIn);
-/* Heuristic to find the filename and lineno or a nix value. */
-std::pair<std::string, uint32_t> findPackageFilename(EvalState & state, Value & v, std::string what);
+/**
+ * Heuristic to find the filename and lineno of a nix value.
+ */
+std::pair<SourcePath, uint32_t> findPackageFilename(EvalState & state, Value & v, std::string what);
std::vector<Symbol> parseAttrPath(EvalState & state, std::string_view s);
diff --git a/src/libexpr/attr-set.hh b/src/libexpr/attr-set.hh
index dcc73b506..31215f880 100644
--- a/src/libexpr/attr-set.hh
+++ b/src/libexpr/attr-set.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "nixexpr.hh"
#include "symbol-table.hh"
@@ -12,7 +13,9 @@ namespace nix {
class EvalState;
struct Value;
-/* Map one attribute name to its value. */
+/**
+ * Map one attribute name to its value.
+ */
struct Attr
{
/* the placement of `name` and `pos` in this struct is important.
@@ -36,10 +39,12 @@ static_assert(sizeof(Attr) == 2 * sizeof(uint32_t) + sizeof(Value *),
"avoid introducing any padding into Attr if at all possible, and do not "
"introduce new fields that need not be present for almost every instance.");
-/* Bindings contains all the attributes of an attribute set. It is defined
- by its size and its capacity, the capacity being the number of Attr
- elements allocated after this structure, while the size corresponds to
- the number of elements already inserted in this structure. */
+/**
+ * Bindings contains all the attributes of an attribute set. It is defined
+ * by its size and its capacity, the capacity being the number of Attr
+ * elements allocated after this structure, while the size corresponds to
+ * the number of elements already inserted in this structure.
+ */
class Bindings
{
public:
@@ -94,7 +99,9 @@ public:
size_t capacity() { return capacity_; }
- /* Returns the attributes in lexicographically sorted order. */
+ /**
+ * Returns the attributes in lexicographically sorted order.
+ */
std::vector<const Attr *> lexicographicOrder(const SymbolTable & symbols) const
{
std::vector<const Attr *> res;
@@ -111,9 +118,11 @@ public:
friend class EvalState;
};
-/* A wrapper around Bindings that ensures that its always in sorted
- order at the end. The only way to consume a BindingsBuilder is to
- call finish(), which sorts the bindings. */
+/**
+ * A wrapper around Bindings that ensures that it is always in sorted
+ * order at the end. The only way to consume a BindingsBuilder is to
+ * call finish(), which sorts the bindings.
+ */
class BindingsBuilder
{
Bindings * bindings;
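
The new comment on BindingsBuilder above pins down its contract: attributes may be appended in any order, and the only way to consume the builder is finish(), which sorts. A standalone sketch of that shape with hypothetical types (the real Bindings stores its Attr elements inline after the structure, not in a std::vector):

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    struct AttrsBuilder {
        std::vector<std::pair<std::string, int>> attrs;

        void insert(std::string name, int value)
        {
            attrs.emplace_back(std::move(name), value);
        }

        // Consuming the builder: sort once at the end, as finish() does.
        std::vector<std::pair<std::string, int>> finish()
        {
            std::sort(attrs.begin(), attrs.end());
            return std::move(attrs);
        }
    };

    int main()
    {
        AttrsBuilder b;
        b.insert("z", 1);
        b.insert("a", 2);
        for (const auto & [name, value] : b.finish())
            std::cout << name << " = " << value << "\n";
    }
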
diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc
index 1219b2471..9e734e654 100644
--- a/src/libexpr/eval-cache.cc
+++ b/src/libexpr/eval-cache.cc
@@ -47,7 +47,7 @@ struct AttrDb
{
auto state(_state->lock());
- Path cacheDir = getCacheDir() + "/nix/eval-cache-v4";
+ Path cacheDir = getCacheDir() + "/nix/eval-cache-v5";
createDirs(cacheDir);
Path dbPath = cacheDir + "/" + fingerprint.to_string(Base16, false) + ".sqlite";
@@ -300,7 +300,7 @@ struct AttrDb
NixStringContext context;
if (!queryAttribute.isNull(3))
for (auto & s : tokenizeString<std::vector<std::string>>(queryAttribute.getStr(3), ";"))
- context.push_back(NixStringContextElem::parse(cfg, s));
+ context.insert(NixStringContextElem::parse(s));
return {{rowId, string_t{queryAttribute.getStr(2), context}}};
}
case AttrType::Bool:
@@ -442,8 +442,10 @@ Value & AttrCursor::forceValue()
if (v.type() == nString)
cachedValue = {root->db->setString(getKey(), v.string.s, v.string.context),
string_t{v.string.s, {}}};
- else if (v.type() == nPath)
- cachedValue = {root->db->setString(getKey(), v.path), string_t{v.path, {}}};
+ else if (v.type() == nPath) {
+ auto path = v.path().path;
+ cachedValue = {root->db->setString(getKey(), path.abs()), string_t{path.abs(), {}}};
+ }
else if (v.type() == nBool)
cachedValue = {root->db->setBool(getKey(), v.boolean), v.boolean};
else if (v.type() == nInt)
@@ -580,7 +582,7 @@ std::string AttrCursor::getString()
if (v.type() != nString && v.type() != nPath)
root->state.error("'%s' is not a string but %s", getAttrPathStr()).debugThrow<TypeError>();
- return v.type() == nString ? v.string.s : v.path;
+ return v.type() == nString ? v.string.s : v.path().to_string();
}
string_t AttrCursor::getStringWithContext()
@@ -619,10 +621,13 @@ string_t AttrCursor::getStringWithContext()
auto & v = forceValue();
- if (v.type() == nString)
- return {v.string.s, v.getContext(*root->state.store)};
+ if (v.type() == nString) {
+ NixStringContext context;
+ copyContext(v, context);
+ return {v.string.s, std::move(context)};
+ }
else if (v.type() == nPath)
- return {v.path, {}};
+ return {v.path().to_string(), {}};
else
root->state.error("'%s' is not a string but %s", getAttrPathStr()).debugThrow<TypeError>();
}
diff --git a/src/libexpr/eval-cache.hh b/src/libexpr/eval-cache.hh
index c93e55b93..46c4999c8 100644
--- a/src/libexpr/eval-cache.hh
+++ b/src/libexpr/eval-cache.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "sync.hh"
#include "hash.hh"
@@ -109,8 +110,10 @@ public:
ref<AttrCursor> getAttr(std::string_view name);
- /* Get an attribute along a chain of attrsets. Note that this does
- not auto-call functors or functions. */
+ /**
+ * Get an attribute along a chain of attrsets. Note that this does
+ * not auto-call functors or functions.
+ */
OrSuggestions<ref<AttrCursor>> findAlongAttrPath(const std::vector<Symbol> & attrPath, bool force = false);
std::string getString();
@@ -129,7 +132,9 @@ public:
Value & forceValue();
- /* Force creation of the .drv file in the Nix store. */
+ /**
+ * Force creation of the .drv file in the Nix store.
+ */
StorePath forceDerivation();
};
diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh
index f0da688db..a988fa40c 100644
--- a/src/libexpr/eval-inline.hh
+++ b/src/libexpr/eval-inline.hh
@@ -1,10 +1,13 @@
#pragma once
+///@file
#include "eval.hh"
namespace nix {
-/* Note: Various places expect the allocated memory to be zeroed. */
+/**
+ * Note: Various places expect the allocated memory to be zeroed.
+ */
[[gnu::always_inline]]
inline void * allocBytes(size_t n)
{
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 21fc4d0fe..585670e69 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -4,10 +4,13 @@
#include "util.hh"
#include "store-api.hh"
#include "derivations.hh"
+#include "downstream-placeholder.hh"
#include "globals.hh"
#include "eval-inline.hh"
#include "filetransfer.hh"
#include "function-trace.hh"
+#include "profiles.hh"
+#include "print.hh"
#include <algorithm>
#include <chrono>
@@ -92,7 +95,6 @@ RootValue allocRootValue(Value * v)
#endif
}
-
void Value::print(const SymbolTable & symbols, std::ostream & str,
std::set<const void *> * seen) const
{
@@ -103,21 +105,13 @@ void Value::print(const SymbolTable & symbols, std::ostream & str,
str << integer;
break;
case tBool:
- str << (boolean ? "true" : "false");
+ printLiteralBool(str, boolean);
break;
case tString:
- str << "\"";
- for (const char * i = string.s; *i; i++)
- if (*i == '\"' || *i == '\\') str << "\\" << *i;
- else if (*i == '\n') str << "\\n";
- else if (*i == '\r') str << "\\r";
- else if (*i == '\t') str << "\\t";
- else if (*i == '$' && *(i+1) == '{') str << "\\" << *i;
- else str << *i;
- str << "\"";
+ printLiteralString(str, string.s);
break;
case tPath:
- str << path; // !!! escaping?
+ str << path().to_string(); // !!! escaping?
break;
case tNull:
str << "null";
@@ -172,7 +166,17 @@ void Value::print(const SymbolTable & symbols, std::ostream & str,
case tFloat:
str << fpoint;
break;
+ case tBlackhole:
+ // Although we know for sure that it's going to be an infinite recursion
+ // when this value is accessed _in the current context_, it's likely
+ // that the user will misinterpret a simpler «infinite recursion» output
+ // as a definitive statement about the value, while in fact it may be
+ // a valid value after `builtins.trace` and perhaps some other steps
+ // have completed.
+ str << "«potential infinite recursion»";
+ break;
default:
+ printError("Nix evaluator internal error: Value::print(): invalid value type %1%", internalType);
abort();
}
}
@@ -228,6 +232,9 @@ std::string_view showType(ValueType type)
std::string showType(const Value & v)
{
+ // Allow selecting a subset of enum values
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wswitch-enum"
switch (v.internalType) {
case tString: return v.string.context ? "a string with context" : "a string";
case tPrimOp:
@@ -241,16 +248,21 @@ std::string showType(const Value & v)
default:
return std::string(showType(v.type()));
}
+ #pragma GCC diagnostic pop
}
PosIdx Value::determinePos(const PosIdx pos) const
{
+ // Allow selecting a subset of enum values
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wswitch-enum"
switch (internalType) {
case tAttrs: return attrs->pos;
case tLambda: return lambda.fun->pos;
case tApp: return app.left->determinePos(pos);
default: return pos;
}
+ #pragma GCC diagnostic pop
}
bool Value::isTrivial() const
@@ -325,6 +337,22 @@ static Symbol getName(const AttrName & name, EvalState & state, Env & env)
}
}
+#if HAVE_BOEHMGC
+/* Disable GC while this object lives. Used by CoroutineContext.
+ *
+ * Boehm keeps a count of GC_disable() and GC_enable() calls,
+ * and only enables GC when the count matches.
+ */
+class BoehmDisableGC {
+public:
+ BoehmDisableGC() {
+ GC_disable();
+ };
+ ~BoehmDisableGC() {
+ GC_enable();
+ };
+};
+#endif
static bool gcInitialised = false;
@@ -349,6 +377,15 @@ void initGC()
StackAllocator::defaultAllocator = &boehmGCStackAllocator;
+
+#if NIX_BOEHM_PATCH_VERSION != 1
+ printTalkative("Unpatched BoehmGC, disabling GC inside coroutines");
+ /* Used to disable GC when entering coroutines on macOS */
+ create_coro_gc_hook = []() -> std::shared_ptr<void> {
+ return std::make_shared<BoehmDisableGC>();
+ };
+#endif
+
/* Set the initial heap size to something fairly big (25% of
physical RAM, up to a maximum of 384 MiB) so that in most cases
we don't need to garbage collect at all. (Collection has a
@@ -368,7 +405,7 @@ void initGC()
size = (pageSize * pages) / 4; // 25% of RAM
if (size > maxSize) size = maxSize;
#endif
- debug(format("setting initial heap size to %1% bytes") % size);
+ debug("setting initial heap size to %1% bytes", size);
GC_expand_hp(size);
}
@@ -498,6 +535,7 @@ EvalState::EvalState(
, sOutputSpecified(symbols.create("outputSpecified"))
, repair(NoRepair)
, emptyBindings(0)
+ , derivationInternal(rootPath(CanonPath("/builtin/derivation.nix")))
, store(store)
, buildStore(buildStore ? buildStore : store)
, debugRepl(nullptr)
@@ -572,15 +610,14 @@ void EvalState::allowAndSetStorePathString(const StorePath & storePath, Value &
{
allowPath(storePath);
- auto path = store->printStorePath(storePath);
- v.mkString(path, PathSet({path}));
+ mkStorePathString(storePath, v);
}
-Path EvalState::checkSourcePath(const Path & path_)
+SourcePath EvalState::checkSourcePath(const SourcePath & path_)
{
if (!allowedPaths) return path_;
- auto i = resolvedPaths.find(path_);
+ auto i = resolvedPaths.find(path_.path.abs());
if (i != resolvedPaths.end())
return i->second;
@@ -590,9 +627,9 @@ Path EvalState::checkSourcePath(const Path & path_)
* attacker can't append ../../... to a path that would be in allowedPaths
* and thus leak symlink targets.
*/
- Path abspath = canonPath(path_);
+ Path abspath = canonPath(path_.path.abs());
- if (hasPrefix(abspath, corepkgsPrefix)) return abspath;
+ if (hasPrefix(abspath, corepkgsPrefix)) return CanonPath(abspath);
for (auto & i : *allowedPaths) {
if (isDirOrInDir(abspath, i)) {
@@ -609,12 +646,12 @@ Path EvalState::checkSourcePath(const Path & path_)
}
/* Resolve symlinks. */
- debug(format("checking access to '%s'") % abspath);
- Path path = canonPath(abspath, true);
+ debug("checking access to '%s'", abspath);
+ SourcePath path = CanonPath(canonPath(abspath, true));
for (auto & i : *allowedPaths) {
- if (isDirOrInDir(path, i)) {
- resolvedPaths[path_] = path;
+ if (isDirOrInDir(path.path.abs(), i)) {
+ resolvedPaths.insert_or_assign(path_.path.abs(), path);
return path;
}
}
@@ -642,12 +679,12 @@ void EvalState::checkURI(const std::string & uri)
/* If the URI is a path, then check it against allowedPaths as
well. */
if (hasPrefix(uri, "/")) {
- checkSourcePath(uri);
+ checkSourcePath(CanonPath(uri));
return;
}
if (hasPrefix(uri, "file://")) {
- checkSourcePath(std::string(uri, 7));
+ checkSourcePath(CanonPath(std::string(uri, 7)));
return;
}
@@ -655,7 +692,7 @@ void EvalState::checkURI(const std::string & uri)
}
-Path EvalState::toRealPath(const Path & path, const PathSet & context)
+Path EvalState::toRealPath(const Path & path, const NixStringContext & context)
{
// FIXME: check whether 'path' is in 'context'.
return
@@ -907,34 +944,34 @@ void Value::mkString(std::string_view s)
}
-static void copyContextToValue(Value & v, const PathSet & context)
+static void copyContextToValue(Value & v, const NixStringContext & context)
{
if (!context.empty()) {
size_t n = 0;
v.string.context = (const char * *)
allocBytes((context.size() + 1) * sizeof(char *));
for (auto & i : context)
- v.string.context[n++] = dupString(i.c_str());
+ v.string.context[n++] = dupString(i.to_string().c_str());
v.string.context[n] = 0;
}
}
-void Value::mkString(std::string_view s, const PathSet & context)
+void Value::mkString(std::string_view s, const NixStringContext & context)
{
mkString(s);
copyContextToValue(*this, context);
}
-void Value::mkStringMove(const char * s, const PathSet & context)
+void Value::mkStringMove(const char * s, const NixStringContext & context)
{
mkString(s);
copyContextToValue(*this, context);
}
-void Value::mkPath(std::string_view s)
+void Value::mkPath(const SourcePath & path)
{
- mkPath(makeImmutableString(s));
+ mkPath(makeImmutableString(path.path.abs()));
}
@@ -990,9 +1027,9 @@ void EvalState::mkThunk_(Value & v, Expr * expr)
void EvalState::mkPos(Value & v, PosIdx p)
{
auto pos = positions[p];
- if (auto path = std::get_if<Path>(&pos.origin)) {
+ if (auto path = std::get_if<SourcePath>(&pos.origin)) {
auto attrs = buildBindings(3);
- attrs.alloc(sFile).mkString(*path);
+ attrs.alloc(sFile).mkString(path->path.abs());
attrs.alloc(sLine).mkInt(pos.line);
attrs.alloc(sColumn).mkInt(pos.column);
v.mkAttrs(attrs);
@@ -1001,6 +1038,37 @@ void EvalState::mkPos(Value & v, PosIdx p)
}
+void EvalState::mkStorePathString(const StorePath & p, Value & v)
+{
+ v.mkString(
+ store->printStorePath(p),
+ NixStringContext {
+ NixStringContextElem::Opaque { .path = p },
+ });
+}
+
+
+void EvalState::mkOutputString(
+ Value & value,
+ const StorePath & drvPath,
+ const std::string outputName,
+ std::optional<StorePath> optOutputPath)
+{
+ value.mkString(
+ optOutputPath
+ ? store->printStorePath(*std::move(optOutputPath))
+ /* Downstream we would substitute this for an actual path once
+ we build the floating CA derivation */
+ : DownstreamPlaceholder::unknownCaOutput(drvPath, outputName).render(),
+ NixStringContext {
+ NixStringContextElem::Built {
+ .drvPath = drvPath,
+ .output = outputName,
+ }
+ });
+}
+
+
/* Create a thunk for the delayed computation of the given expression
in the given environment. But if the expression is a variable,
then look it up right away. This significantly reduces the number
@@ -1048,7 +1116,7 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env)
}
-void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial)
+void EvalState::evalFile(const SourcePath & path_, Value & v, bool mustBeTrivial)
{
auto path = checkSourcePath(path_);
@@ -1058,7 +1126,7 @@ void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial)
return;
}
- Path resolvedPath = resolveExprPath(path);
+ auto resolvedPath = resolveExprPath(path);
if ((i = fileEvalCache.find(resolvedPath)) != fileEvalCache.end()) {
v = i->second;
return;
@@ -1086,8 +1154,8 @@ void EvalState::resetFileCache()
void EvalState::cacheFile(
- const Path & path,
- const Path & resolvedPath,
+ const SourcePath & path,
+ const SourcePath & resolvedPath,
Expr * e,
Value & v,
bool mustBeTrivial)
@@ -1101,7 +1169,7 @@ void EvalState::cacheFile(
*e,
this->baseEnv,
e->getPos() ? static_cast<std::shared_ptr<AbstractPos>>(positions[e->getPos()]) : nullptr,
- "while evaluating the file '%1%':", resolvedPath)
+ "while evaluating the file '%1%':", resolvedPath.to_string())
: nullptr;
// Enforce that 'flake.nix' is a direct attrset, not a
@@ -1111,7 +1179,7 @@ void EvalState::cacheFile(
error("file '%s' must be an attribute set", path).debugThrow<EvalError>();
eval(e, v);
} catch (Error & e) {
- addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath);
+ addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath.to_string());
throw;
}
@@ -1372,8 +1440,8 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
} catch (Error & e) {
if (pos2) {
auto pos2r = state.positions[pos2];
- auto origin = std::get_if<Path>(&pos2r.origin);
- if (!(origin && *origin == state.derivationNixPath))
+ auto origin = std::get_if<SourcePath>(&pos2r.origin);
+ if (!(origin && *origin == state.derivationInternal))
state.addErrorTrace(e, pos2, "while evaluating the attribute '%1%'",
showAttrPath(state, env, attrPath));
}
@@ -1863,7 +1931,7 @@ void EvalState::concatLists(Value & v, size_t nrLists, Value * * lists, const Po
void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
{
- PathSet context;
+ NixStringContext context;
std::vector<BackedStringView> s;
size_t sSize = 0;
NixInt n = 0;
@@ -1946,7 +2014,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
else if (firstType == nPath) {
if (!context.empty())
state.error("a string that refers to a store path cannot be appended to a path").atPos(pos).withFrame(env, *this).debugThrow<EvalError>();
- v.mkPath(canonPath(str()));
+ v.mkPath(CanonPath(canonPath(str())));
} else
v.mkStringMove(c_str(), context);
}
@@ -2072,26 +2140,15 @@ std::string_view EvalState::forceString(Value & v, const PosIdx pos, std::string
}
-void copyContext(const Value & v, PathSet & context)
+void copyContext(const Value & v, NixStringContext & context)
{
if (v.string.context)
for (const char * * p = v.string.context; *p; ++p)
- context.insert(*p);
+ context.insert(NixStringContextElem::parse(*p));
}
-NixStringContext Value::getContext(const Store & store)
-{
- NixStringContext res;
- assert(internalType == tString);
- if (string.context)
- for (const char * * p = string.context; *p; ++p)
- res.push_back(NixStringContextElem::parse(store, *p));
- return res;
-}
-
-
-std::string_view EvalState::forceString(Value & v, PathSet & context, const PosIdx pos, std::string_view errorCtx)
+std::string_view EvalState::forceString(Value & v, NixStringContext & context, const PosIdx pos, std::string_view errorCtx)
{
auto s = forceString(v, pos, errorCtx);
copyContext(v, context);
@@ -2121,7 +2178,7 @@ bool EvalState::isDerivation(Value & v)
std::optional<std::string> EvalState::tryAttrsToString(const PosIdx pos, Value & v,
- PathSet & context, bool coerceMore, bool copyToStore)
+ NixStringContext & context, bool coerceMore, bool copyToStore)
{
auto i = v.attrs->find(sToString);
if (i != v.attrs->end()) {
@@ -2135,8 +2192,14 @@ std::optional<std::string> EvalState::tryAttrsToString(const PosIdx pos, Value &
return {};
}
-BackedStringView EvalState::coerceToString(const PosIdx pos, Value &v, PathSet &context,
- std::string_view errorCtx, bool coerceMore, bool copyToStore, bool canonicalizePath)
+BackedStringView EvalState::coerceToString(
+ const PosIdx pos,
+ Value & v,
+ NixStringContext & context,
+ std::string_view errorCtx,
+ bool coerceMore,
+ bool copyToStore,
+ bool canonicalizePath)
{
forceValue(v, pos);
@@ -2146,12 +2209,14 @@ BackedStringView EvalState::coerceToString(const PosIdx pos, Value &v, PathSet &
}
if (v.type() == nPath) {
- BackedStringView path(PathView(v.path));
- if (canonicalizePath)
- path = canonPath(*path);
- if (copyToStore)
- path = store->printStorePath(copyPathToStore(context, std::move(path).toOwned()));
- return path;
+ return
+ !canonicalizePath && !copyToStore
+ ? // FIXME: hack to preserve path literals that end in a
+ // slash, as in /foo/${x}.
+ v._path
+ : copyToStore
+ ? store->printStorePath(copyPathToStore(context, v.path()))
+ : std::string(v.path().path.abs());
}
if (v.type() == nAttrs) {
@@ -2212,40 +2277,40 @@ BackedStringView EvalState::coerceToString(const PosIdx pos, Value &v, PathSet &
}
-StorePath EvalState::copyPathToStore(PathSet & context, const Path & path)
+StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePath & path)
{
- if (nix::isDerivation(path))
+ if (nix::isDerivation(path.path.abs()))
error("file names are not allowed to end in '%1%'", drvExtension).debugThrow<EvalError>();
- auto dstPath = [&]() -> StorePath
- {
- auto i = srcToStore.find(path);
- if (i != srcToStore.end()) return i->second;
-
- auto dstPath = settings.readOnlyMode
- ? store->computeStorePathForPath(std::string(baseNameOf(path)), checkSourcePath(path)).first
- : store->addToStore(std::string(baseNameOf(path)), checkSourcePath(path), FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, repair);
- allowPath(dstPath);
- srcToStore.insert_or_assign(path, dstPath);
- printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, store->printStorePath(dstPath));
- return dstPath;
- }();
-
- context.insert(store->printStorePath(dstPath));
+ auto i = srcToStore.find(path);
+
+ auto dstPath = i != srcToStore.end()
+ ? i->second
+ : [&]() {
+ auto dstPath = path.fetchToStore(store, path.baseName(), nullptr, repair);
+ allowPath(dstPath);
+ srcToStore.insert_or_assign(path, dstPath);
+ printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, store->printStorePath(dstPath));
+ return dstPath;
+ }();
+
+ context.insert(NixStringContextElem::Opaque {
+ .path = dstPath
+ });
return dstPath;
}
-Path EvalState::coerceToPath(const PosIdx pos, Value & v, PathSet & context, std::string_view errorCtx)
+SourcePath EvalState::coerceToPath(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx)
{
auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned();
if (path == "" || path[0] != '/')
error("string '%1%' doesn't represent an absolute path", path).withTrace(pos, errorCtx).debugThrow<EvalError>();
- return path;
+ return CanonPath(path);
}
-StorePath EvalState::coerceToStorePath(const PosIdx pos, Value & v, PathSet & context, std::string_view errorCtx)
+StorePath EvalState::coerceToStorePath(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx)
{
auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned();
if (auto storePath = store->maybeParseStorePath(path))
@@ -2254,6 +2319,80 @@ StorePath EvalState::coerceToStorePath(const PosIdx pos, Value & v, PathSet & co
}
+std::pair<DerivedPath, std::string_view> EvalState::coerceToDerivedPathUnchecked(const PosIdx pos, Value & v, std::string_view errorCtx)
+{
+ NixStringContext context;
+ auto s = forceString(v, context, pos, errorCtx);
+ auto csize = context.size();
+ if (csize != 1)
+ error(
+ "string '%s' has %d entries in its context. It should only have exactly one entry",
+ s, csize)
+ .withTrace(pos, errorCtx).debugThrow<EvalError>();
+ auto derivedPath = std::visit(overloaded {
+ [&](NixStringContextElem::Opaque && o) -> DerivedPath {
+ return DerivedPath::Opaque {
+ .path = std::move(o.path),
+ };
+ },
+ [&](NixStringContextElem::DrvDeep &&) -> DerivedPath {
+ error(
+ "string '%s' has a context which refers to a complete source and binary closure. This is not supported at this time",
+ s).withTrace(pos, errorCtx).debugThrow<EvalError>();
+ },
+ [&](NixStringContextElem::Built && b) -> DerivedPath {
+ return DerivedPath::Built {
+ .drvPath = std::move(b.drvPath),
+ .outputs = OutputsSpec::Names { std::move(b.output) },
+ };
+ },
+ }, ((NixStringContextElem &&) *context.begin()).raw());
+ return {
+ std::move(derivedPath),
+ std::move(s),
+ };
+}
+
+
+DerivedPath EvalState::coerceToDerivedPath(const PosIdx pos, Value & v, std::string_view errorCtx)
+{
+ auto [derivedPath, s_] = coerceToDerivedPathUnchecked(pos, v, errorCtx);
+ auto s = s_;
+ std::visit(overloaded {
+ [&](const DerivedPath::Opaque & o) {
+ auto sExpected = store->printStorePath(o.path);
+ if (s != sExpected)
+ error(
+ "path string '%s' has context with the different path '%s'",
+ s, sExpected)
+ .withTrace(pos, errorCtx).debugThrow<EvalError>();
+ },
+ [&](const DerivedPath::Built & b) {
+ // TODO need derived path with single output to make this
+ // total. Will add as part of RFC 92 work and then this is
+ // cleaned up.
+ auto output = *std::get<OutputsSpec::Names>(b.outputs).begin();
+
+ auto drv = store->readDerivation(b.drvPath);
+ auto i = drv.outputs.find(output);
+ if (i == drv.outputs.end())
+ throw Error("derivation '%s' does not have output '%s'", store->printStorePath(b.drvPath), output);
+ auto optOutputPath = i->second.path(*store, drv.name, output);
+ // This is testing for the case of CA derivations
+ auto sExpected = optOutputPath
+ ? store->printStorePath(*optOutputPath)
+ : DownstreamPlaceholder::unknownCaOutput(b.drvPath, output).render();
+ if (s != sExpected)
+ error(
+ "string '%s' has context with the output '%s' from derivation '%s', but the string is not the right placeholder for this derivation output. It should be '%s'",
+ s, output, store->printStorePath(b.drvPath), sExpected)
+ .withTrace(pos, errorCtx).debugThrow<EvalError>();
+ }
+ }, derivedPath.raw());
+ return derivedPath;
+}
+
+
bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_view errorCtx)
{
forceValue(v1, noPos);
@@ -2284,7 +2423,7 @@ bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_v
return strcmp(v1.string.s, v2.string.s) == 0;
case nPath:
- return strcmp(v1.path, v2.path) == 0;
+ return strcmp(v1._path, v2._path) == 0;
case nNull:
return true;
@@ -2326,6 +2465,7 @@ bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_v
case nFloat:
return v1.fpoint == v2.fpoint;
+ case nThunk: // Must not be left by forceValue
default:
error("cannot compare %1% with %2%", showType(v1), showType(v2)).withTrace(pos, errorCtx).debugThrow<EvalError>();
}
@@ -2410,8 +2550,8 @@ void EvalState::printStats()
else
obj["name"] = nullptr;
if (auto pos = positions[fun->pos]) {
- if (auto path = std::get_if<Path>(&pos.origin))
- obj["file"] = *path;
+ if (auto path = std::get_if<SourcePath>(&pos.origin))
+ obj["file"] = path->to_string();
obj["line"] = pos.line;
obj["column"] = pos.column;
}
@@ -2425,8 +2565,8 @@ void EvalState::printStats()
for (auto & i : attrSelects) {
json obj = json::object();
if (auto pos = positions[i.first]) {
- if (auto path = std::get_if<Path>(&pos.origin))
- obj["file"] = *path;
+ if (auto path = std::get_if<SourcePath>(&pos.origin))
+ obj["file"] = path->to_string();
obj["line"] = pos.line;
obj["column"] = pos.column;
}
@@ -2451,7 +2591,7 @@ void EvalState::printStats()
}
-std::string ExternalValueBase::coerceToString(const Pos & pos, PathSet & context, bool copyMore, bool copyToStore) const
+std::string ExternalValueBase::coerceToString(const Pos & pos, NixStringContext & context, bool copyMore, bool copyToStore) const
{
throw TypeError({
.msg = hintfmt("cannot coerce %1% to a string", showType())
@@ -2491,8 +2631,8 @@ Strings EvalSettings::getDefaultNixPath()
if (!evalSettings.restrictEval && !evalSettings.pureEval) {
add(settings.useXDGBaseDirectories ? getStateDir() + "/nix/defexpr/channels" : getHome() + "/.nix-defexpr/channels");
- add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
- add(settings.nixStateDir + "/profiles/per-user/root/channels");
+ add(rootChannelsDir() + "/nixpkgs", "nixpkgs");
+ add(rootChannelsDir());
}
return res;
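
Much of the new eval.cc code (coerceToDerivedPathUnchecked(), and the build-result handling earlier in this patch) destructures variants via the std::visit + overloaded idiom. That helper is plain C++17; a self-contained sketch with generic stand-in types rather than the Nix variants:

    #include <iostream>
    #include <string>
    #include <variant>

    // The usual 'overloaded' helper: aggregate several lambdas into one
    // visitor whose operator() overloads cover every alternative.
    template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
    template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

    struct Opaque { std::string path; };
    struct Built  { std::string drvPath; std::string output; };

    int main()
    {
        std::variant<Opaque, Built> elem = Built{"/nix/store/...-hello.drv", "out"};

        std::visit(overloaded {
            [](const Opaque & o) { std::cout << "opaque: " << o.path << "\n"; },
            [](const Built & b)  { std::cout << b.drvPath << "!" << b.output << "\n"; },
        }, elem);
    }
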
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
index e4d5906bd..62b380929 100644
--- a/src/libexpr/eval.hh
+++ b/src/libexpr/eval.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "attr-set.hh"
#include "types.hh"
@@ -7,6 +8,7 @@
#include "symbol-table.hh"
#include "config.hh"
#include "experimental-features.hh"
+#include "input-accessor.hh"
#include <map>
#include <optional>
@@ -19,6 +21,7 @@ namespace nix {
class Store;
class EvalState;
class StorePath;
+struct DerivedPath;
enum RepairFlag : bool;
@@ -42,7 +45,10 @@ struct PrimOp
struct Env
{
Env * up;
- unsigned short prevWith:14; // nr of levels up to next `with' environment
+ /**
+ * Number of levels up to the next `with` environment
+ */
+ unsigned short prevWith:14;
enum { Plain = 0, HasWithExpr, HasWithAttrs } type:2;
Value * values[0];
};
@@ -52,23 +58,21 @@ void printEnvBindings(const SymbolTable & st, const StaticEnv & se, const Env &
std::unique_ptr<ValMap> mapStaticEnvBindings(const SymbolTable & st, const StaticEnv & se, const Env & env);
-void copyContext(const Value & v, PathSet & context);
-
-
-/* Cache for calls to addToStore(); maps source paths to the store
- paths. */
-typedef std::map<Path, StorePath> SrcToStore;
+void copyContext(const Value & v, NixStringContext & context);
std::string printValue(const EvalState & state, const Value & v);
std::ostream & operator << (std::ostream & os, const ValueType t);
+// FIXME: maybe change this to an std::variant<SourcePath, URL>.
typedef std::pair<std::string, std::string> SearchPathElem;
typedef std::list<SearchPathElem> SearchPath;
-/* Initialise the Boehm GC, if applicable. */
+/**
+ * Initialise the Boehm GC, if applicable.
+ */
void initGC();
@@ -129,8 +133,6 @@ public:
SymbolTable symbols;
PosTable positions;
- static inline std::string derivationNixPath = "//builtin/derivation.nix";
-
const Symbol sWith, sOutPath, sDrvPath, sType, sMeta, sName, sValue,
sSystem, sOverrides, sOutputs, sOutputName, sIgnoreNulls,
sFile, sLine, sColumn, sFunctor, sToString,
@@ -141,28 +143,39 @@ public:
sDescription, sSelf, sEpsilon, sStartSet, sOperator, sKey, sPath,
sPrefix,
sOutputSpecified;
- Symbol sDerivationNix;
- /* If set, force copying files to the Nix store even if they
- already exist there. */
+ /**
+ * If set, force copying files to the Nix store even if they
+ * already exist there.
+ */
RepairFlag repair;
- /* The allowed filesystem paths in restricted or pure evaluation
- mode. */
+ /**
+ * The allowed filesystem paths in restricted or pure evaluation
+ * mode.
+ */
std::optional<PathSet> allowedPaths;
Bindings emptyBindings;
- /* Store used to materialise .drv files. */
+ const SourcePath derivationInternal;
+
+ /**
+ * Store used to materialise .drv files.
+ */
const ref<Store> store;
- /* Store used to build stuff. */
+ /**
+ * Store used to build stuff.
+ */
const ref<Store> buildStore;
RootValue vCallFlake = nullptr;
RootValue vImportedDrvToDerivation = nullptr;
- /* Debugger */
+ /**
+ * Debugger
+ */
void (* debugRepl)(ref<EvalState> es, const ValMap & extraEnv);
bool debugStop;
bool debugQuit;
@@ -216,21 +229,28 @@ public:
}
private:
- SrcToStore srcToStore;
- /* A cache from path names to parse trees. */
+ /* Cache for calls to addToStore(); maps source paths to the store
+ paths. */
+ std::map<SourcePath, StorePath> srcToStore;
+
+ /**
+ * A cache from path names to parse trees.
+ */
#if HAVE_BOEHMGC
- typedef std::map<Path, Expr *, std::less<Path>, traceable_allocator<std::pair<const Path, Expr *>>> FileParseCache;
+ typedef std::map<SourcePath, Expr *, std::less<SourcePath>, traceable_allocator<std::pair<const SourcePath, Expr *>>> FileParseCache;
#else
- typedef std::map<Path, Expr *> FileParseCache;
+ typedef std::map<SourcePath, Expr *> FileParseCache;
#endif
FileParseCache fileParseCache;
- /* A cache from path names to values. */
+ /**
+ * A cache from path names to values.
+ */
#if HAVE_BOEHMGC
- typedef std::map<Path, Value, std::less<Path>, traceable_allocator<std::pair<const Path, Value>>> FileEvalCache;
+ typedef std::map<SourcePath, Value, std::less<SourcePath>, traceable_allocator<std::pair<const SourcePath, Value>>> FileEvalCache;
#else
- typedef std::map<Path, Value> FileEvalCache;
+ typedef std::map<SourcePath, Value> FileEvalCache;
#endif
FileEvalCache fileEvalCache;
@@ -238,17 +258,25 @@ private:
std::map<std::string, std::pair<bool, std::string>> searchPathResolved;
- /* Cache used by checkSourcePath(). */
- std::unordered_map<Path, Path> resolvedPaths;
+ /**
+ * Cache used by checkSourcePath().
+ */
+ std::unordered_map<Path, SourcePath> resolvedPaths;
- /* Cache used by prim_match(). */
+ /**
+ * Cache used by prim_match().
+ */
std::shared_ptr<RegexCache> regexCache;
#if HAVE_BOEHMGC
- /* Allocation cache for GC'd Value objects. */
+ /**
+ * Allocation cache for GC'd Value objects.
+ */
std::shared_ptr<void *> valueAllocCache;
- /* Allocation cache for size-1 Env objects. */
+ /**
+ * Allocation cache for size-1 Env objects.
+ */
std::shared_ptr<void *> env1AllocCache;
#endif
@@ -264,87 +292,126 @@ public:
SearchPath getSearchPath() { return searchPath; }
- /* Allow access to a path. */
+ /**
+ * Return a `SourcePath` that refers to `path` in the root
+ * filesystem.
+ */
+ SourcePath rootPath(CanonPath path);
+
+ /**
+ * Allow access to a path.
+ */
void allowPath(const Path & path);
- /* Allow access to a store path. Note that this gets remapped to
- the real store path if `store` is a chroot store. */
+ /**
+ * Allow access to a store path. Note that this gets remapped to
+ * the real store path if `store` is a chroot store.
+ */
void allowPath(const StorePath & storePath);
- /* Allow access to a store path and return it as a string. */
+ /**
+ * Allow access to a store path and return it as a string.
+ */
void allowAndSetStorePathString(const StorePath & storePath, Value & v);
- /* Check whether access to a path is allowed and throw an error if
- not. Otherwise return the canonicalised path. */
- Path checkSourcePath(const Path & path);
+ /**
+ * Check whether access to a path is allowed and throw an error if
+ * not. Otherwise return the canonicalised path.
+ */
+ SourcePath checkSourcePath(const SourcePath & path);
void checkURI(const std::string & uri);
- /* When using a diverted store and 'path' is in the Nix store, map
- 'path' to the diverted location (e.g. /nix/store/foo is mapped
- to /home/alice/my-nix/nix/store/foo). However, this is only
- done if the context is not empty, since otherwise we're
- probably trying to read from the actual /nix/store. This is
- intended to distinguish between import-from-derivation and
- sources stored in the actual /nix/store. */
- Path toRealPath(const Path & path, const PathSet & context);
+ /**
+ * When using a diverted store and 'path' is in the Nix store, map
+ * 'path' to the diverted location (e.g. /nix/store/foo is mapped
+ * to /home/alice/my-nix/nix/store/foo). However, this is only
+ * done if the context is not empty, since otherwise we're
+ * probably trying to read from the actual /nix/store. This is
+ * intended to distinguish between import-from-derivation and
+ * sources stored in the actual /nix/store.
+ */
+ Path toRealPath(const Path & path, const NixStringContext & context);
- /* Parse a Nix expression from the specified file. */
- Expr * parseExprFromFile(const Path & path);
- Expr * parseExprFromFile(const Path & path, std::shared_ptr<StaticEnv> & staticEnv);
+ /**
+ * Parse a Nix expression from the specified file.
+ */
+ Expr * parseExprFromFile(const SourcePath & path);
+ Expr * parseExprFromFile(const SourcePath & path, std::shared_ptr<StaticEnv> & staticEnv);
- /* Parse a Nix expression from the specified string. */
- Expr * parseExprFromString(std::string s, const Path & basePath, std::shared_ptr<StaticEnv> & staticEnv);
- Expr * parseExprFromString(std::string s, const Path & basePath);
+ /**
+ * Parse a Nix expression from the specified string.
+ */
+ Expr * parseExprFromString(std::string s, const SourcePath & basePath, std::shared_ptr<StaticEnv> & staticEnv);
+ Expr * parseExprFromString(std::string s, const SourcePath & basePath);
Expr * parseStdin();
- /* Evaluate an expression read from the given file to normal
- form. Optionally enforce that the top-level expression is
- trivial (i.e. doesn't require arbitrary computation). */
- void evalFile(const Path & path, Value & v, bool mustBeTrivial = false);
+ /**
+ * Evaluate an expression read from the given file to normal
+ * form. Optionally enforce that the top-level expression is
+ * trivial (i.e. doesn't require arbitrary computation).
+ */
+ void evalFile(const SourcePath & path, Value & v, bool mustBeTrivial = false);
- /* Like `evalFile`, but with an already parsed expression. */
+ /**
+ * Like `evalFile`, but with an already parsed expression.
+ */
void cacheFile(
- const Path & path,
- const Path & resolvedPath,
+ const SourcePath & path,
+ const SourcePath & resolvedPath,
Expr * e,
Value & v,
bool mustBeTrivial = false);
void resetFileCache();
- /* Look up a file in the search path. */
- Path findFile(const std::string_view path);
- Path findFile(SearchPath & searchPath, const std::string_view path, const PosIdx pos = noPos);
+ /**
+ * Look up a file in the search path.
+ */
+ SourcePath findFile(const std::string_view path);
+ SourcePath findFile(SearchPath & searchPath, const std::string_view path, const PosIdx pos = noPos);
- /* If the specified search path element is a URI, download it. */
+ /**
+ * If the specified search path element is a URI, download it.
+ */
std::pair<bool, std::string> resolveSearchPathElem(const SearchPathElem & elem);
- /* Evaluate an expression to normal form, storing the result in
- value `v'. */
+ /**
+ * Evaluate an expression to normal form
+ *
+ * @param [out] v The result is stored here.
+ */
void eval(Expr * e, Value & v);
- /* Evaluation the expression, then verify that it has the expected
- type. */
+ /**
+ * Evaluate the expression, then verify that it has the expected
+ * type.
+ */
inline bool evalBool(Env & env, Expr * e);
inline bool evalBool(Env & env, Expr * e, const PosIdx pos, std::string_view errorCtx);
inline void evalAttrs(Env & env, Expr * e, Value & v, const PosIdx pos, std::string_view errorCtx);
- /* If `v' is a thunk, enter it and overwrite `v' with the result
- of the evaluation of the thunk. If `v' is a delayed function
- application, call the function and overwrite `v' with the
- result. Otherwise, this is a no-op. */
+ /**
+ * If `v` is a thunk, enter it and overwrite `v` with the result
+ * of the evaluation of the thunk. If `v` is a delayed function
+ * application, call the function and overwrite `v` with the
+ * result. Otherwise, this is a no-op.
+ */
inline void forceValue(Value & v, const PosIdx pos);
template <typename Callable>
inline void forceValue(Value & v, Callable getPos);
- /* Force a value, then recursively force list elements and
- attributes. */
+ /**
+ * Force a value, then recursively force list elements and
+ * attributes.
+ */
void forceValueDeep(Value & v);
- /* Force `v', and then verify that it has the expected type. */
+ /**
+ * Force `v`, and then verify that it has the expected type.
+ */
NixInt forceInt(Value & v, const PosIdx pos, std::string_view errorCtx);
NixFloat forceFloat(Value & v, const PosIdx pos, std::string_view errorCtx);
bool forceBool(Value & v, const PosIdx pos, std::string_view errorCtx);
@@ -355,9 +422,12 @@ public:
inline void forceAttrs(Value & v, Callable getPos, std::string_view errorCtx);
inline void forceList(Value & v, const PosIdx pos, std::string_view errorCtx);
- void forceFunction(Value & v, const PosIdx pos, std::string_view errorCtx); // either lambda or primop
+ /**
+ * @param v either lambda or primop
+ */
+ void forceFunction(Value & v, const PosIdx pos, std::string_view errorCtx);
std::string_view forceString(Value & v, const PosIdx pos, std::string_view errorCtx);
- std::string_view forceString(Value & v, PathSet & context, const PosIdx pos, std::string_view errorCtx);
+ std::string_view forceString(Value & v, NixStringContext & context, const PosIdx pos, std::string_view errorCtx);
std::string_view forceStringNoCtx(Value & v, const PosIdx pos, std::string_view errorCtx);
[[gnu::noinline]]
@@ -366,39 +436,77 @@ public:
void addErrorTrace(Error & e, const PosIdx pos, const char * s, const std::string & s2, bool frame = false) const;
public:
- /* Return true iff the value `v' denotes a derivation (i.e. a
- set with attribute `type = "derivation"'). */
+ /**
+ * @return true iff the value `v` denotes a derivation (i.e. a
+ * set with attribute `type = "derivation"`).
+ */
bool isDerivation(Value & v);
std::optional<std::string> tryAttrsToString(const PosIdx pos, Value & v,
- PathSet & context, bool coerceMore = false, bool copyToStore = true);
-
- /* String coercion. Converts strings, paths and derivations to a
- string. If `coerceMore' is set, also converts nulls, integers,
- booleans and lists to a string. If `copyToStore' is set,
- referenced paths are copied to the Nix store as a side effect. */
- BackedStringView coerceToString(const PosIdx pos, Value & v, PathSet & context,
+ NixStringContext & context, bool coerceMore = false, bool copyToStore = true);
+
+ /**
+ * String coercion.
+ *
+ * Converts strings, paths and derivations to a
+ * string. If `coerceMore` is set, also converts nulls, integers,
+ * booleans and lists to a string. If `copyToStore` is set,
+ * referenced paths are copied to the Nix store as a side effect.
+ */
+ BackedStringView coerceToString(const PosIdx pos, Value & v, NixStringContext & context,
std::string_view errorCtx,
bool coerceMore = false, bool copyToStore = true,
bool canonicalizePath = true);
- StorePath copyPathToStore(PathSet & context, const Path & path);
+ StorePath copyPathToStore(NixStringContext & context, const SourcePath & path);
+
+ /**
+ * Path coercion.
+ *
+ * Converts strings, paths and derivations to a
+ * path. The result is guaranteed to be a canonicalised, absolute
+ * path. Nothing is copied to the store.
+ */
+ SourcePath coerceToPath(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx);
- /* Path coercion. Converts strings, paths and derivations to a
- path. The result is guaranteed to be a canonicalised, absolute
- path. Nothing is copied to the store. */
- Path coerceToPath(const PosIdx pos, Value & v, PathSet & context, std::string_view errorCtx);
+ /**
+ * Like coerceToPath, but the result must be a store path.
+ */
+ StorePath coerceToStorePath(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx);
- /* Like coerceToPath, but the result must be a store path. */
- StorePath coerceToStorePath(const PosIdx pos, Value & v, PathSet & context, std::string_view errorCtx);
+ /**
+ * Part of `coerceToDerivedPath()` without any store IO which is exposed for unit testing only.
+ */
+ std::pair<DerivedPath, std::string_view> coerceToDerivedPathUnchecked(const PosIdx pos, Value & v, std::string_view errorCtx);
+
+ /**
+ * Coerce to `DerivedPath`.
+ *
+ * Must be a string which is either a literal store path or a
+ * "placeholder (see `DownstreamPlaceholder`).
+ *
+ * Even more importantly, the string context must be exactly one
+ * element, which is either a `NixStringContextElem::Opaque` or
+ * `NixStringContextElem::Built`. (`NixStringContextElem::DrvDeep`
+ * is not permitted).
+ *
+ * The string is parsed based on the context --- the context is the
+ * source of truth, and ultimately tells us what we want, and then
+ * we ensure the string corresponds to it.
+ */
+ DerivedPath coerceToDerivedPath(const PosIdx pos, Value & v, std::string_view errorCtx);
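+
A minimal standalone sketch of the "exactly one context element" rule documented above; `Opaque`/`Built` are simplified stand-ins for the real `NixStringContextElem` variants and the check is illustrative only:

```
// Standalone sketch of the single-context-element rule; Opaque/Built are
// simplified stand-ins for the real NixStringContextElem variants.
#include <stdexcept>
#include <string>
#include <variant>
#include <vector>

struct Opaque { std::string path; };
struct Built  { std::string drvPath; std::string output; };
using ContextElem = std::variant<Opaque, Built>;

ContextElem singleContextElem(const std::vector<ContextElem> & context)
{
    if (context.size() != 1)
        throw std::runtime_error("expected a string with exactly one context element");
    return context.front();   // the context, not the string, is authoritative
}

int main()
{
    std::vector<ContextElem> ctx{Built{"/nix/store/xxxx-foo.drv", "out"}};
    auto elem = singleContextElem(ctx);
    return std::holds_alternative<Built>(elem) ? 0 : 1;
}
```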
public:
- /* The base environment, containing the builtin functions and
- values. */
+ /**
+ * The base environment, containing the builtin functions and
+ * values.
+ */
Env & baseEnv;
- /* The same, but used during parsing to resolve variables. */
+ /**
+ * The same, but used during parsing to resolve variables.
+ */
std::shared_ptr<StaticEnv> staticBaseEnv; // !!! should be private
private:
@@ -443,13 +551,15 @@ private:
char * text,
size_t length,
Pos::Origin origin,
- Path basePath,
+ const SourcePath & basePath,
std::shared_ptr<StaticEnv> & staticEnv);
public:
- /* Do a deep equality test between two values. That is, list
- elements and attributes are compared recursively. */
+ /**
+ * Do a deep equality test between two values. That is, list
+ * elements and attributes are compared recursively.
+ */
bool eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_view errorCtx);
bool isFunctor(Value & fun);
@@ -463,11 +573,15 @@ public:
callFunction(fun, 1, args, vRes, pos);
}
- /* Automatically call a function for which each argument has a
- default value or has a binding in the `args' map. */
+ /**
+ * Automatically call a function for which each argument has a
+ * default value or has a binding in the `args` map.
+ */
void autoCallFunction(Bindings & args, Value & fun, Value & res);
- /* Allocation primitives. */
+ /**
+ * Allocation primitives.
+ */
inline Value * allocValue();
inline Env & allocEnv(size_t size);
@@ -485,15 +599,49 @@ public:
void mkThunk_(Value & v, Expr * expr);
void mkPos(Value & v, PosIdx pos);
+ /**
+ * Create a string representing a store path.
+ *
+ * The string is the printed store path with a context containing a single
+ * `NixStringContextElem::Opaque` element of that store path.
+ */
+ void mkStorePathString(const StorePath & storePath, Value & v);
+
+ /**
+ * Create a string representing a `DerivedPath::Built`.
+ *
+ * The string is the printed store path with a context containing a single
+ * `NixStringContextElem::Built` element of the drv path and output name.
+ *
+ * @param value Value we are setting
+ *
+ * @param drvPath Path of the drv whose output we are making a string for
+ *
+ * @param outputName Name of the output
+ *
+ * @param optOutputPath Optional output path for that string. Must
+ * be passed if and only if output store object is input-addressed.
+ * Will be printed to form string if passed, otherwise a placeholder
+ * will be used (see `DownstreamPlaceholder`).
+ */
+ void mkOutputString(
+ Value & value,
+ const StorePath & drvPath,
+ const std::string outputName,
+ std::optional<StorePath> optOutputPath);
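
A minimal standalone sketch of the choice the `optOutputPath` parameter documents: use the known output path when it is passed, otherwise fall back to a placeholder. `makeOutputString` and the placeholder format are illustrative stand-ins, not the real implementation:

```
// Standalone sketch of "known output path or placeholder"; makeOutputString
// and the placeholder format are illustrative, not the real implementation.
#include <iostream>
#include <optional>
#include <string>

std::string makeOutputString(const std::string & drvPath,
                             const std::string & outputName,
                             const std::optional<std::string> & optOutputPath)
{
    // Input-addressed outputs have a known store path; floating CA outputs get
    // a placeholder that is rewritten to the real path before the build runs.
    return optOutputPath ? *optOutputPath
                         : "/placeholder-for-" + drvPath + "!" + outputName;
}

int main()
{
    std::cout << makeOutputString("foo.drv", "out", std::nullopt) << "\n";
    std::cout << makeOutputString("foo.drv", "out", std::string("/nix/store/xxxx-foo")) << "\n";
}
```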
+
void concatLists(Value & v, size_t nrLists, Value * * lists, const PosIdx pos, std::string_view errorCtx);
- /* Print statistics. */
+ /**
+ * Print statistics.
+ */
void printStats();
- /* Realise the given context, and return a mapping from the placeholders
+ /**
+ * Realise the given context, and return a mapping from the placeholders
* used to construct the associated value to their final store path
*/
- [[nodiscard]] StringMap realiseContext(const PathSet & context);
+ [[nodiscard]] StringMap realiseContext(const NixStringContext & context);
private:
@@ -550,12 +698,16 @@ struct DebugTraceStacker {
DebugTrace trace;
};
-/* Return a string representing the type of the value `v'. */
+/**
+ * @return A string representing the type of the value `v`.
+ */
std::string_view showType(ValueType type);
std::string showType(const Value & v);
-/* If `path' refers to a directory, then append "/default.nix". */
-Path resolveExprPath(Path path);
+/**
+ * If `path` refers to a directory, then append "/default.nix".
+ */
+SourcePath resolveExprPath(const SourcePath & path);
struct InvalidPathError : EvalError
{
diff --git a/src/libexpr/flake/config.cc b/src/libexpr/flake/config.cc
index 89ddbde7e..e89014862 100644
--- a/src/libexpr/flake/config.cc
+++ b/src/libexpr/flake/config.cc
@@ -31,7 +31,7 @@ static void writeTrustedList(const TrustedList & trustedList)
void ConfigFile::apply()
{
- std::set<std::string> whitelist{"bash-prompt", "bash-prompt-prefix", "bash-prompt-suffix", "flake-registry"};
+ std::set<std::string> whitelist{"bash-prompt", "bash-prompt-prefix", "bash-prompt-suffix", "flake-registry", "commit-lockfile-summary"};
for (auto & [name, value] : settings) {
diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc
index 336eb274d..60bb6a71e 100644
--- a/src/libexpr/flake/flake.cc
+++ b/src/libexpr/flake/flake.cc
@@ -125,6 +125,9 @@ static FlakeInput parseFlakeInput(EvalState & state,
follows.insert(follows.begin(), lockRootPath.begin(), lockRootPath.end());
input.follows = follows;
} else {
+ // Allow selecting a subset of enum values
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wswitch-enum"
switch (attr.value->type()) {
case nString:
attrs.emplace(state.symbols[attr.name], attr.value->string.s);
@@ -139,6 +142,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
throw TypeError("flake input attribute '%s' is %s while a string, Boolean, or integer is expected",
state.symbols[attr.name], showType(*attr.value));
}
+ #pragma GCC diagnostic pop
}
} catch (Error & e) {
e.addTrace(
@@ -218,9 +222,9 @@ static Flake getFlake(
throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", lockedRef, lockedRef.subdir);
Value vInfo;
- state.evalFile(flakeFile, vInfo, true); // FIXME: symlink attack
+ state.evalFile(CanonPath(flakeFile), vInfo, true); // FIXME: symlink attack
- expectType(state, nAttrs, vInfo, state.positions.add({flakeFile}, 1, 1));
+ expectType(state, nAttrs, vInfo, state.positions.add({CanonPath(flakeFile)}, 1, 1));
if (auto description = vInfo.attrs->get(state.sDescription)) {
expectType(state, nString, *description->value, description->pos);
@@ -261,7 +265,7 @@ static Flake getFlake(
state.symbols[setting.name],
std::string(state.forceStringNoCtx(*setting.value, setting.pos, "")));
else if (setting.value->type() == nPath) {
- PathSet emptyContext = {};
+ NixStringContext emptyContext = {};
flake.config.settings.emplace(
state.symbols[setting.name],
state.coerceToString(setting.pos, *setting.value, emptyContext, "", false, true, true) .toOwned());
@@ -320,7 +324,7 @@ LockedFlake lockFlake(
const FlakeRef & topRef,
const LockFlags & lockFlags)
{
- settings.requireExperimentalFeature(Xp::Flakes);
+ experimentalFeatureSettings.require(Xp::Flakes);
FlakeCache flakeCache;
@@ -334,10 +338,14 @@ LockedFlake lockFlake(
}
try {
+ if (!fetchSettings.allowDirty && lockFlags.referenceLockFilePath) {
+ throw Error("reference lock file was provided, but the `allow-dirty` setting is set to false");
+ }
// FIXME: symlink attack
auto oldLockFile = LockFile::read(
- flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir + "/flake.lock");
+ lockFlags.referenceLockFilePath.value_or(
+ flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir + "/flake.lock"));
debug("old lock file: %s", oldLockFile);
@@ -619,13 +627,20 @@ LockedFlake lockFlake(
debug("new lock file: %s", newLockFile);
+ auto relPath = (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock";
+ auto sourcePath = topRef.input.getSourcePath();
+ auto outputLockFilePath = sourcePath ? std::optional{*sourcePath + "/" + relPath} : std::nullopt;
+ if (lockFlags.outputLockFilePath) {
+ outputLockFilePath = lockFlags.outputLockFilePath;
+ }
+
/* Check whether we need to / can write the new lock file. */
- if (!(newLockFile == oldLockFile)) {
+ if (newLockFile != oldLockFile || lockFlags.outputLockFilePath) {
auto diff = LockFile::diff(oldLockFile, newLockFile);
if (lockFlags.writeLockFile) {
- if (auto sourcePath = topRef.input.getSourcePath()) {
+ if (outputLockFilePath) {
if (auto unlockedInput = newLockFile.isUnlocked()) {
if (fetchSettings.warnDirty)
warn("will not write lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput);
@@ -633,25 +648,24 @@ LockedFlake lockFlake(
if (!lockFlags.updateLockFile)
throw Error("flake '%s' requires lock file changes but they're not allowed due to '--no-update-lock-file'", topRef);
- auto relPath = (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock";
-
- auto path = *sourcePath + "/" + relPath;
-
- bool lockFileExists = pathExists(path);
+ bool lockFileExists = pathExists(*outputLockFilePath);
if (lockFileExists) {
auto s = chomp(diff);
if (s.empty())
- warn("updating lock file '%s'", path);
+ warn("updating lock file '%s'", *outputLockFilePath);
else
- warn("updating lock file '%s':\n%s", path, s);
+ warn("updating lock file '%s':\n%s", *outputLockFilePath, s);
} else
- warn("creating lock file '%s'", path);
+ warn("creating lock file '%s'", *outputLockFilePath);
- newLockFile.write(path);
+ newLockFile.write(*outputLockFilePath);
std::optional<std::string> commitMessage = std::nullopt;
if (lockFlags.commitLockFile) {
+ if (lockFlags.outputLockFilePath) {
+ throw Error("--commit-lock-file and --output-lock-file are currently incompatible");
+ }
std::string cm;
cm = fetchSettings.commitLockFileSummary.get();
@@ -731,7 +745,7 @@ void callFlake(EvalState & state,
state.vCallFlake = allocRootValue(state.allocValue());
state.eval(state.parseExprFromString(
#include "call-flake.nix.gen.hh"
- , "/"), **state.vCallFlake);
+ , CanonPath::root), **state.vCallFlake);
}
state.callFunction(**state.vCallFlake, *vLocks, *vTmp1, noPos);
diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh
index 10301d8aa..c1d1b71e5 100644
--- a/src/libexpr/flake/flake.hh
+++ b/src/libexpr/flake/flake.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "flakeref.hh"
@@ -17,7 +18,8 @@ struct FlakeInput;
typedef std::map<FlakeId, FlakeInput> FlakeInputs;
-/* FlakeInput is the 'Flake'-level parsed form of the "input" entries
+/**
+ * FlakeInput is the 'Flake'-level parsed form of the "input" entries
* in the flake file.
*
* A FlakeInput is normally constructed by the 'parseFlakeInput'
@@ -41,7 +43,12 @@ typedef std::map<FlakeId, FlakeInput> FlakeInputs;
struct FlakeInput
{
std::optional<FlakeRef> ref;
- bool isFlake = true; // true = process flake to get outputs, false = (fetched) static source path
+ /**
+ * true = process flake to get outputs
+ *
+ * false = (fetched) static source path
+ */
+ bool isFlake = true;
std::optional<InputPath> follows;
FlakeInputs overrides;
};
@@ -55,23 +62,42 @@ struct ConfigFile
void apply();
};
-/* The contents of a flake.nix file. */
+/**
+ * The contents of a flake.nix file.
+ */
struct Flake
{
- FlakeRef originalRef; // the original flake specification (by the user)
- FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake
- FlakeRef lockedRef; // the specific local store result of invoking the fetcher
- bool forceDirty = false; // pretend that 'lockedRef' is dirty
+ /**
+ * The original flake specification (by the user)
+ */
+ FlakeRef originalRef;
+ /**
+ * registry references and caching resolved to the specific underlying flake
+ */
+ FlakeRef resolvedRef;
+ /**
+ * the specific local store result of invoking the fetcher
+ */
+ FlakeRef lockedRef;
+ /**
+ * pretend that 'lockedRef' is dirty
+ */
+ bool forceDirty = false;
std::optional<std::string> description;
std::shared_ptr<const fetchers::Tree> sourceInfo;
FlakeInputs inputs;
- ConfigFile config; // 'nixConfig' attribute
+ /**
+ * 'nixConfig' attribute
+ */
+ ConfigFile config;
~Flake();
};
Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool allowLookup);
-/* Fingerprint of a locked flake; used as a cache key. */
+/**
+ * Fingerprint of a locked flake; used as a cache key.
+ */
typedef Hash Fingerprint;
struct LockedFlake
@@ -84,44 +110,72 @@ struct LockedFlake
struct LockFlags
{
- /* Whether to ignore the existing lock file, creating a new one
- from scratch. */
+ /**
+ * Whether to ignore the existing lock file, creating a new one
+ * from scratch.
+ */
bool recreateLockFile = false;
- /* Whether to update the lock file at all. If set to false, if any
- change to the lock file is needed (e.g. when an input has been
- added to flake.nix), you get a fatal error. */
+ /**
+ * Whether to update the lock file at all. If set to false, if any
+ * change to the lock file is needed (e.g. when an input has been
+ * added to flake.nix), you get a fatal error.
+ */
bool updateLockFile = true;
- /* Whether to write the lock file to disk. If set to true, if the
- any changes to the lock file are needed and the flake is not
- writable (i.e. is not a local Git working tree or similar), you
- get a fatal error. If set to false, Nix will use the modified
- lock file in memory only, without writing it to disk. */
+ /**
+ * Whether to write the lock file to disk. If set to true, if any
+ * changes to the lock file are needed and the flake is not
+ * writable (i.e. is not a local Git working tree or similar), you
+ * get a fatal error. If set to false, Nix will use the modified
+ * lock file in memory only, without writing it to disk.
+ */
bool writeLockFile = true;
- /* Whether to use the registries to lookup indirect flake
- references like 'nixpkgs'. */
+ /**
+ * Whether to use the registries to lookup indirect flake
+ * references like 'nixpkgs'.
+ */
std::optional<bool> useRegistries = std::nullopt;
- /* Whether to apply flake's nixConfig attribute to the configuration */
+ /**
+ * Whether to apply flake's nixConfig attribute to the configuration
+ */
bool applyNixConfig = false;
- /* Whether unlocked flake references (i.e. those without a Git
- revision or similar) without a corresponding lock are
- allowed. Unlocked flake references with a lock are always
- allowed. */
+ /**
+ * Whether unlocked flake references (i.e. those without a Git
+ * revision or similar) without a corresponding lock are
+ * allowed. Unlocked flake references with a lock are always
+ * allowed.
+ */
bool allowUnlocked = true;
- /* Whether to commit changes to flake.lock. */
+ /**
+ * Whether to commit changes to flake.lock.
+ */
bool commitLockFile = false;
- /* Flake inputs to be overridden. */
+ /**
+ * The path to a lock file to read instead of the `flake.lock` file in the top-level flake
+ */
+ std::optional<std::string> referenceLockFilePath;
+
+ /**
+ * The path to a lock file to write to instead of the `flake.lock` file in the top-level flake
+ */
+ std::optional<Path> outputLockFilePath;
+
+ /**
+ * Flake inputs to be overridden.
+ */
std::map<InputPath, FlakeRef> inputOverrides;
- /* Flake inputs to be updated. This means that any existing lock
- for those inputs will be ignored. */
+ /**
+ * Flake inputs to be updated. This means that any existing lock
+ * for those inputs will be ignored.
+ */
std::set<InputPath> inputUpdates;
};
diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh
index c4142fc20..a7c9208c0 100644
--- a/src/libexpr/flake/flakeref.hh
+++ b/src/libexpr/flake/flakeref.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "hash.hh"
@@ -13,7 +14,8 @@ class Store;
typedef std::string FlakeId;
-/* A flake reference specifies how to fetch a flake or raw source
+/**
+ * A flake reference specifies how to fetch a flake or raw source
* (e.g. from a Git repository). It is created from a URL-like syntax
* (e.g. 'github:NixOS/patchelf'), an attrset representation (e.g. '{
* type="github"; owner = "NixOS"; repo = "patchelf"; }'), or a local
@@ -32,14 +34,17 @@ typedef std::string FlakeId;
* be lazy), but the fetcher can be invoked at any time via the
* FlakeRef to ensure the store is populated with this input.
*/
-
struct FlakeRef
{
- /* Fetcher-specific representation of the input, sufficient to
- perform the fetch operation. */
+ /**
+ * Fetcher-specific representation of the input, sufficient to
+ * perform the fetch operation.
+ */
fetchers::Input input;
- /* sub-path within the fetched input that represents this input */
+ /**
+ * sub-path within the fetched input that represents this input
+ */
Path subdir;
bool operator==(const FlakeRef & other) const;
diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc
index a74e68c9c..ba2fd46f0 100644
--- a/src/libexpr/flake/lockfile.cc
+++ b/src/libexpr/flake/lockfile.cc
@@ -234,6 +234,11 @@ bool LockFile::operator ==(const LockFile & other) const
return toJSON() == other.toJSON();
}
+bool LockFile::operator !=(const LockFile & other) const
+{
+ return !(*this == other);
+}
+
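A minimal standalone sketch of the pattern used here: equality is defined via a canonical serialised form, and `operator!=` is spelled out explicitly for toolchains that do not synthesise it (as the header comment further down notes for old GCC). The `Doc` type and its serialisation are illustrative stand-ins:

```
// Standalone sketch: equality via a canonical serialised form, with
// operator!= written out by hand; Doc and its JSON-ish form are stand-ins.
#include <cassert>
#include <string>

struct Doc {
    std::string name;
    int version = 0;

    std::string toJSON() const   // canonical form used for comparison
    {
        return "{\"name\":\"" + name + "\",\"version\":" + std::to_string(version) + "}";
    }

    bool operator ==(const Doc & other) const { return toJSON() == other.toJSON(); }
    bool operator !=(const Doc & other) const { return !(*this == other); }
};

int main()
{
    Doc a{"flake.lock", 7}, b{"flake.lock", 7}, c{"flake.lock", 8};
    assert(a == b);
    assert(a != c);
}
```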
InputPath parseInputPath(std::string_view s)
{
InputPath path;
diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh
index 02e9bdfbc..ba4c0c848 100644
--- a/src/libexpr/flake/lockfile.hh
+++ b/src/libexpr/flake/lockfile.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "flakeref.hh"
@@ -15,9 +16,11 @@ typedef std::vector<FlakeId> InputPath;
struct LockedNode;
-/* A node in the lock file. It has outgoing edges to other nodes (its
- inputs). Only the root node has this type; all other nodes have
- type LockedNode. */
+/**
+ * A node in the lock file. It has outgoing edges to other nodes (its
+ * inputs). Only the root node has this type; all other nodes have
+ * type LockedNode.
+ */
struct Node : std::enable_shared_from_this<Node>
{
typedef std::variant<ref<LockedNode>, InputPath> Edge;
@@ -27,7 +30,9 @@ struct Node : std::enable_shared_from_this<Node>
virtual ~Node() { }
};
-/* A non-root node in the lock file. */
+/**
+ * A non-root node in the lock file.
+ */
struct LockedNode : Node
{
FlakeRef lockedRef, originalRef;
@@ -62,10 +67,15 @@ struct LockFile
void write(const Path & path) const;
- /* Check whether this lock file has any unlocked inputs. */
+ /**
+ * Check whether this lock file has any unlocked inputs.
+ */
std::optional<FlakeRef> isUnlocked() const;
bool operator ==(const LockFile & other) const;
+ // Needed for old gcc versions that don't synthesize it (like gcc 8.2.2
+ // that is still the default on aarch64-linux)
+ bool operator !=(const LockFile & other) const;
std::shared_ptr<Node> findInput(const InputPath & path);
@@ -73,7 +83,9 @@ struct LockFile
static std::string diff(const LockFile & oldLocks, const LockFile & newLocks);
- /* Check that every 'follows' input target exists. */
+ /**
+ * Check that every 'follows' input target exists.
+ */
void check();
};
diff --git a/src/libexpr/function-trace.hh b/src/libexpr/function-trace.hh
index e9a2526bd..91439b0aa 100644
--- a/src/libexpr/function-trace.hh
+++ b/src/libexpr/function-trace.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "eval.hh"
diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc
index 1602fbffb..506a63677 100644
--- a/src/libexpr/get-drvs.cc
+++ b/src/libexpr/get-drvs.cc
@@ -71,7 +71,7 @@ std::optional<StorePath> DrvInfo::queryDrvPath() const
{
if (!drvPath && attrs) {
Bindings::iterator i = attrs->find(state->sDrvPath);
- PathSet context;
+ NixStringContext context;
if (i == attrs->end())
drvPath = {std::nullopt};
else
@@ -93,7 +93,7 @@ StorePath DrvInfo::queryOutPath() const
{
if (!outPath && attrs) {
Bindings::iterator i = attrs->find(state->sOutPath);
- PathSet context;
+ NixStringContext context;
if (i != attrs->end())
outPath = state->coerceToStorePath(i->pos, *i->value, context, "while evaluating the output path of a derivation");
}
@@ -124,7 +124,7 @@ DrvInfo::Outputs DrvInfo::queryOutputs(bool withPaths, bool onlyOutputsToInstall
/* And evaluate its ‘outPath’ attribute. */
Bindings::iterator outPath = out->value->attrs->find(state->sOutPath);
if (outPath == out->value->attrs->end()) continue; // FIXME: throw error?
- PathSet context;
+ NixStringContext context;
outputs.emplace(output, state->coerceToStorePath(outPath->pos, *outPath->value, context, "while evaluating an output path of a derivation"));
} else
outputs.emplace(output, std::nullopt);
diff --git a/src/libexpr/get-drvs.hh b/src/libexpr/get-drvs.hh
index bbd2d3c47..584d64ac1 100644
--- a/src/libexpr/get-drvs.hh
+++ b/src/libexpr/get-drvs.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "eval.hh"
#include "path.hh"
@@ -25,7 +26,10 @@ private:
mutable std::string outputName;
Outputs outputs;
- bool failed = false; // set if we get an AssertionError
+ /**
+ * Set if we get an AssertionError
+ */
+ bool failed = false;
Bindings * attrs = nullptr, * meta = nullptr;
@@ -34,7 +38,10 @@ private:
bool checkMeta(Value & v);
public:
- std::string attrPath; /* path towards the derivation */
+ /**
+ * path towards the derivation
+ */
+ std::string attrPath;
DrvInfo(EvalState & state) : state(&state) { };
DrvInfo(EvalState & state, std::string attrPath, Bindings * attrs);
@@ -46,8 +53,10 @@ public:
StorePath requireDrvPath() const;
StorePath queryOutPath() const;
std::string queryOutputName() const;
- /** Return the unordered map of output names to (optional) output paths.
- * The "outputs to install" are determined by `meta.outputsToInstall`. */
+ /**
+ * Return the unordered map of output names to (optional) output paths.
+ * The "outputs to install" are determined by `meta.outputsToInstall`.
+ */
Outputs queryOutputs(bool withPaths = true, bool onlyOutputsToInstall = false);
StringSet queryMetaNames();
@@ -79,8 +88,10 @@ typedef std::list<DrvInfo> DrvInfos;
#endif
-/* If value `v' denotes a derivation, return a DrvInfo object
- describing it. Otherwise return nothing. */
+/**
+ * If value `v` denotes a derivation, return a DrvInfo object
+ * describing it. Otherwise return nothing.
+ */
std::optional<DrvInfo> getDerivation(EvalState & state,
Value & v, bool ignoreAssertionFailures);
diff --git a/src/libexpr/json-to-value.hh b/src/libexpr/json-to-value.hh
index 84bec4eba..3b8ec000f 100644
--- a/src/libexpr/json-to-value.hh
+++ b/src/libexpr/json-to-value.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "eval.hh"
diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk
index 2171e769b..d243b9cec 100644
--- a/src/libexpr/local.mk
+++ b/src/libexpr/local.mk
@@ -46,3 +46,5 @@ $(foreach i, $(wildcard src/libexpr/flake/*.hh), \
$(d)/primops.cc: $(d)/imported-drv-to-derivation.nix.gen.hh $(d)/primops/derivation.nix.gen.hh $(d)/fetchurl.nix.gen.hh
$(d)/flake/flake.cc: $(d)/flake/call-flake.nix.gen.hh
+
+src/libexpr/primops/fromTOML.o: ERROR_SWITCH_ENUM =
diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc
index eb6f062b4..4566a1388 100644
--- a/src/libexpr/nixexpr.cc
+++ b/src/libexpr/nixexpr.cc
@@ -3,6 +3,7 @@
#include "eval.hh"
#include "symbol-table.hh"
#include "util.hh"
+#include "print.hh"
#include <cstdlib>
@@ -31,9 +32,9 @@ struct PosAdapter : AbstractPos
// Get rid of the null terminators added by the parser.
return std::string(s.source->c_str());
},
- [](const Path & path) -> std::optional<std::string> {
+ [](const SourcePath & path) -> std::optional<std::string> {
try {
- return readFile(path);
+ return path.readFile();
} catch (Error &) {
return std::nullopt;
}
@@ -47,7 +48,7 @@ struct PosAdapter : AbstractPos
[&](const Pos::none_tag &) { out << "«none»"; },
[&](const Pos::Stdin &) { out << "«stdin»"; },
[&](const Pos::String & s) { out << "«string»"; },
- [&](const Path & path) { out << path; }
+ [&](const SourcePath & path) { out << path; }
}, origin);
}
};
@@ -60,45 +61,12 @@ Pos::operator std::shared_ptr<AbstractPos>() const
return pos;
}
-/* Displaying abstract syntax trees. */
-
-static void showString(std::ostream & str, std::string_view s)
-{
- str << '"';
- for (auto c : s)
- if (c == '"' || c == '\\' || c == '$') str << "\\" << c;
- else if (c == '\n') str << "\\n";
- else if (c == '\r') str << "\\r";
- else if (c == '\t') str << "\\t";
- else str << c;
- str << '"';
-}
-
+// FIXME: remove, because *symbols* are abstract and do not have a single
+// textual representation; see printIdentifier()
std::ostream & operator <<(std::ostream & str, const SymbolStr & symbol)
{
std::string_view s = symbol;
-
- if (s.empty())
- str << "\"\"";
- else if (s == "if") // FIXME: handle other keywords
- str << '"' << s << '"';
- else {
- char c = s[0];
- if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_')) {
- showString(str, s);
- return str;
- }
- for (auto c : s)
- if (!((c >= 'a' && c <= 'z') ||
- (c >= 'A' && c <= 'Z') ||
- (c >= '0' && c <= '9') ||
- c == '_' || c == '\'' || c == '-')) {
- showString(str, s);
- return str;
- }
- str << s;
- }
- return str;
+ return printIdentifier(str, s);
}
void Expr::show(const SymbolTable & symbols, std::ostream & str) const
@@ -118,7 +86,7 @@ void ExprFloat::show(const SymbolTable & symbols, std::ostream & str) const
void ExprString::show(const SymbolTable & symbols, std::ostream & str) const
{
- showString(str, s);
+ printLiteralString(str, s);
}
void ExprPath::show(const SymbolTable & symbols, std::ostream & str) const
diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh
index 4a81eaa47..5ca3d1fa6 100644
--- a/src/libexpr/nixexpr.hh
+++ b/src/libexpr/nixexpr.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <map>
#include <vector>
@@ -21,7 +22,9 @@ MakeError(UndefinedVarError, Error);
MakeError(MissingArgumentError, EvalError);
MakeError(RestrictedPathError, Error);
-/* Position objects. */
+/**
+ * Position objects.
+ */
struct Pos
{
uint32_t line;
@@ -31,7 +34,7 @@ struct Pos
struct Stdin { ref<std::string> source; };
struct String { ref<std::string> source; };
- typedef std::variant<none_tag, Stdin, String, Path> Origin;
+ typedef std::variant<none_tag, Stdin, String, SourcePath> Origin;
Origin origin;
@@ -132,7 +135,9 @@ class EvalState;
struct StaticEnv;
-/* An attribute path is a sequence of attribute names. */
+/**
+ * An attribute path is a sequence of attribute names.
+ */
struct AttrName
{
Symbol symbol;
@@ -212,11 +217,11 @@ struct ExprVar : Expr
or function argument) or from a "with". */
bool fromWith;
- /* In the former case, the value is obtained by going `level'
+ /* In the former case, the value is obtained by going `level`
levels up from the current environment and getting the
- `displ'th value in that environment. In the latter case, the
- value is obtained by getting the attribute named `name' from
- the set stored in the environment that is `level' levels up
+ `displ`th value in that environment. In the latter case, the
+ value is obtained by getting the attribute named `name` from
+ the set stored in the environment that is `level` levels up
from the current one.*/
Level level;
Displacement displ;
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
index dec5818fc..4d981712a 100644
--- a/src/libexpr/parser.y
+++ b/src/libexpr/parser.y
@@ -31,7 +31,7 @@ namespace nix {
EvalState & state;
SymbolTable & symbols;
Expr * result;
- Path basePath;
+ SourcePath basePath;
PosTable::Origin origin;
std::optional<ErrorInfo> error;
};
@@ -469,7 +469,7 @@ expr_simple
new ExprString(std::move(path))});
}
| URI {
- static bool noURLLiterals = settings.isExperimentalFeatureEnabled(Xp::NoUrlLiterals);
+ static bool noURLLiterals = experimentalFeatureSettings.isEnabled(Xp::NoUrlLiterals);
if (noURLLiterals)
throw ParseError({
.msg = hintfmt("URL literals are disabled"),
@@ -509,7 +509,7 @@ string_parts_interpolated
path_start
: PATH {
- Path path(absPath({$1.p, $1.l}, data->basePath));
+ Path path(absPath({$1.p, $1.l}, data->basePath.path.abs()));
/* add back in the trailing '/' to the first segment */
if ($1.p[$1.l-1] == '/' && $1.l > 1)
path += "/";
@@ -651,7 +651,7 @@ Expr * EvalState::parse(
char * text,
size_t length,
Pos::Origin origin,
- Path basePath,
+ const SourcePath & basePath,
std::shared_ptr<StaticEnv> & staticEnv)
{
yyscan_t scanner;
@@ -675,48 +675,36 @@ Expr * EvalState::parse(
}
-Path resolveExprPath(Path path)
+SourcePath resolveExprPath(const SourcePath & path)
{
- assert(path[0] == '/');
-
- unsigned int followCount = 0, maxFollow = 1024;
-
/* If `path' is a symlink, follow it. This is so that relative
path references work. */
- struct stat st;
- while (true) {
- // Basic cycle/depth limit to avoid infinite loops.
- if (++followCount >= maxFollow)
- throw Error("too many symbolic links encountered while traversing the path '%s'", path);
- st = lstat(path);
- if (!S_ISLNK(st.st_mode)) break;
- path = absPath(readLink(path), dirOf(path));
- }
+ auto path2 = path.resolveSymlinks();
/* If `path' refers to a directory, append `/default.nix'. */
- if (S_ISDIR(st.st_mode))
- path = canonPath(path + "/default.nix");
+ if (path2.lstat().type == InputAccessor::tDirectory)
+ return path2 + "default.nix";
- return path;
+ return path2;
}
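
A minimal standalone sketch of the `resolveExprPath` behaviour above, using `std::filesystem` in place of `SourcePath`/`InputAccessor`: resolve symlinks where possible, then append `default.nix` when the result is a directory. The helper name and fallback handling are illustrative:

```
// Standalone sketch of the resolveExprPath behaviour using std::filesystem
// instead of SourcePath/InputAccessor; the helper name is illustrative.
#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

fs::path resolveExprPathSketch(const fs::path & path)
{
    std::error_code ec;
    auto resolved = fs::weakly_canonical(path, ec);  // follows symlinks where possible
    if (ec) resolved = path;                         // fall back to the original path
    if (fs::is_directory(resolved))
        return resolved / "default.nix";             // a directory means <dir>/default.nix
    return resolved;
}

int main()
{
    std::cout << resolveExprPathSketch(".") << "\n"; // prints ".../default.nix" for a directory
}
```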
-Expr * EvalState::parseExprFromFile(const Path & path)
+Expr * EvalState::parseExprFromFile(const SourcePath & path)
{
return parseExprFromFile(path, staticBaseEnv);
}
-Expr * EvalState::parseExprFromFile(const Path & path, std::shared_ptr<StaticEnv> & staticEnv)
+Expr * EvalState::parseExprFromFile(const SourcePath & path, std::shared_ptr<StaticEnv> & staticEnv)
{
- auto buffer = readFile(path);
- // readFile should have left some extra space for terminators
+ auto buffer = path.readFile();
+ // readFile hopefully have left some extra space for terminators
buffer.append("\0\0", 2);
- return parse(buffer.data(), buffer.size(), path, dirOf(path), staticEnv);
+ return parse(buffer.data(), buffer.size(), Pos::Origin(path), path.parent(), staticEnv);
}
-Expr * EvalState::parseExprFromString(std::string s_, const Path & basePath, std::shared_ptr<StaticEnv> & staticEnv)
+Expr * EvalState::parseExprFromString(std::string s_, const SourcePath & basePath, std::shared_ptr<StaticEnv> & staticEnv)
{
auto s = make_ref<std::string>(std::move(s_));
s->append("\0\0", 2);
@@ -724,7 +712,7 @@ Expr * EvalState::parseExprFromString(std::string s_, const Path & basePath, std
}
-Expr * EvalState::parseExprFromString(std::string s, const Path & basePath)
+Expr * EvalState::parseExprFromString(std::string s, const SourcePath & basePath)
{
return parseExprFromString(std::move(s), basePath, staticBaseEnv);
}
@@ -732,12 +720,12 @@ Expr * EvalState::parseExprFromString(std::string s, const Path & basePath)
Expr * EvalState::parseStdin()
{
- //Activity act(*logger, lvlTalkative, format("parsing standard input"));
+ //Activity act(*logger, lvlTalkative, "parsing standard input");
auto buffer = drainFD(0);
// drainFD should have left some extra space for terminators
buffer.append("\0\0", 2);
auto s = make_ref<std::string>(std::move(buffer));
- return parse(s->data(), s->size(), Pos::Stdin{.source = s}, absPath("."), staticBaseEnv);
+ return parse(s->data(), s->size(), Pos::Stdin{.source = s}, rootPath(CanonPath::fromCwd()), staticBaseEnv);
}
@@ -757,13 +745,13 @@ void EvalState::addToSearchPath(const std::string & s)
}
-Path EvalState::findFile(const std::string_view path)
+SourcePath EvalState::findFile(const std::string_view path)
{
return findFile(searchPath, path);
}
-Path EvalState::findFile(SearchPath & searchPath, const std::string_view path, const PosIdx pos)
+SourcePath EvalState::findFile(SearchPath & searchPath, const std::string_view path, const PosIdx pos)
{
for (auto & i : searchPath) {
std::string suffix;
@@ -779,11 +767,11 @@ Path EvalState::findFile(SearchPath & searchPath, const std::string_view path, c
auto r = resolveSearchPathElem(i);
if (!r.first) continue;
Path res = r.second + suffix;
- if (pathExists(res)) return canonPath(res);
+ if (pathExists(res)) return CanonPath(canonPath(res));
}
if (hasPrefix(path, "nix/"))
- return concatStrings(corepkgsPrefix, path.substr(4));
+ return CanonPath(concatStrings(corepkgsPrefix, path.substr(4)));
debugThrow(ThrownError({
.msg = hintfmt(evalSettings.pureEval
@@ -816,7 +804,7 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
}
else if (hasPrefix(elem.second, "flake:")) {
- settings.requireExperimentalFeature(Xp::Flakes);
+ experimentalFeatureSettings.require(Xp::Flakes);
auto flakeRef = parseFlakeRef(elem.second.substr(6), {}, true, false);
debug("fetching flake search path element '%s''", elem.second);
auto storePath = flakeRef.resolve(store).fetchTree(store).first.storePath;
@@ -835,7 +823,7 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
}
}
- debug(format("resolved search path element '%s' to '%s'") % elem.second % res.second);
+ debug("resolved search path element '%s' to '%s'", elem.second, res.second);
searchPathResolved[elem.second] = res;
return res;
diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc
new file mode 100644
index 000000000..1d690b722
--- /dev/null
+++ b/src/libexpr/paths.cc
@@ -0,0 +1,10 @@
+#include "eval.hh"
+
+namespace nix {
+
+SourcePath EvalState::rootPath(CanonPath path)
+{
+ return std::move(path);
+}
+
+}
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index fb7fc3ddb..cfae1e5f8 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -1,5 +1,6 @@
#include "archive.hh"
#include "derivations.hh"
+#include "downstream-placeholder.hh"
#include "eval-inline.hh"
#include "eval.hh"
#include "globals.hh"
@@ -38,17 +39,16 @@ namespace nix {
InvalidPathError::InvalidPathError(const Path & path) :
EvalError("path '%s' is not valid", path), path(path) {}
-StringMap EvalState::realiseContext(const PathSet & context)
+StringMap EvalState::realiseContext(const NixStringContext & context)
{
std::vector<DerivedPath::Built> drvs;
StringMap res;
- for (auto & c_ : context) {
+ for (auto & c : context) {
auto ensureValid = [&](const StorePath & p) {
if (!store->isValidPath(p))
debugThrowLastTrace(InvalidPathError(store->printStorePath(p)));
};
- auto c = NixStringContextElem::parse(*store, c_);
std::visit(overloaded {
[&](const NixStringContextElem::Built & b) {
drvs.push_back(DerivedPath::Built {
@@ -88,7 +88,7 @@ StringMap EvalState::realiseContext(const PathSet & context)
auto outputs = resolveDerivedPath(*store, drv);
for (auto & [outputName, outputPath] : outputs) {
res.insert_or_assign(
- downstreamPlaceholder(*store, drv.drvPath, outputName),
+ DownstreamPlaceholder::unknownCaOutput(drv.drvPath, outputName).render(),
store->printStorePath(outputPath)
);
}
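
The loop above dispatches on the context element variant with `std::visit` and the classic `overloaded` helper. A minimal standalone sketch of that pattern, with `Opaque`/`Built`/`DrvDeep` as simplified stand-ins for the real `NixStringContextElem` alternatives:

```
// Standalone sketch of the std::visit + "overloaded" dispatch used above;
// Opaque/Built/DrvDeep are simplified stand-ins for NixStringContextElem.
#include <iostream>
#include <string>
#include <variant>
#include <vector>

struct Opaque  { std::string path; };
struct Built   { std::string drvPath; std::string output; };
struct DrvDeep { std::string drvPath; };
using ContextElem = std::variant<Opaque, Built, DrvDeep>;

// Classic helper: merge several lambdas into a single visitor.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

int main()
{
    std::vector<ContextElem> context{
        Opaque{"/nix/store/xxxx-src"},
        Built{"/nix/store/yyyy-foo.drv", "out"},
    };
    for (auto & c : context)
        std::visit(overloaded {
            [](const Opaque & o)  { std::cout << "ensure valid: " << o.path << "\n"; },
            [](const Built & b)   { std::cout << "build: " << b.drvPath << "^" << b.output << "\n"; },
            [](const DrvDeep & d) { std::cout << "deep drv dep: " << d.drvPath << "\n"; },
        }, c);
}
```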
@@ -110,16 +110,16 @@ struct RealisePathFlags {
bool checkForPureEval = true;
};
-static Path realisePath(EvalState & state, const PosIdx pos, Value & v, const RealisePathFlags flags = {})
+static SourcePath realisePath(EvalState & state, const PosIdx pos, Value & v, const RealisePathFlags flags = {})
{
- PathSet context;
+ NixStringContext context;
auto path = state.coerceToPath(noPos, v, context, "while realising the context of a path");
try {
StringMap rewrites = state.realiseContext(context);
- auto realPath = state.toRealPath(rewriteStrings(path, rewrites), context);
+ auto realPath = state.rootPath(CanonPath(state.toRealPath(rewriteStrings(path.path.abs(), rewrites), context)));
return flags.checkForPureEval
? state.checkSourcePath(realPath)
@@ -130,35 +130,31 @@ static Path realisePath(EvalState & state, const PosIdx pos, Value & v, const Re
}
}
-/* Add and attribute to the given attribute map from the output name to
- the output path, or a placeholder.
-
- Where possible the path is used, but for floating CA derivations we
- may not know it. For sake of determinism we always assume we don't
- and instead put in a place holder. In either case, however, the
- string context will contain the drv path and output name, so
- downstream derivations will have the proper dependency, and in
- addition, before building, the placeholder will be rewritten to be
- the actual path.
-
- The 'drv' and 'drvPath' outputs must correspond. */
+/**
+ * Add an attribute to the given attribute map from the output name to
+ * the output path, or a placeholder.
+ *
+ * Where possible the path is used, but for floating CA derivations we
+ * may not know it. For sake of determinism we always assume we don't
+ * and instead put in a place holder. In either case, however, the
+ * string context will contain the drv path and output name, so
+ * downstream derivations will have the proper dependency, and in
+ * addition, before building, the placeholder will be rewritten to be
+ * the actual path.
+ *
+ * The 'drv' and 'drvPath' outputs must correspond.
+ */
static void mkOutputString(
EvalState & state,
BindingsBuilder & attrs,
const StorePath & drvPath,
- const BasicDerivation & drv,
const std::pair<std::string, DerivationOutput> & o)
{
- auto optOutputPath = o.second.path(*state.store, drv.name, o.first);
- attrs.alloc(o.first).mkString(
- optOutputPath
- ? state.store->printStorePath(*optOutputPath)
- /* Downstream we would substitute this for an actual path once
- we build the floating CA derivation */
- /* FIXME: we need to depend on the basic derivation, not
- derivation */
- : downstreamPlaceholder(*state.store, drvPath, o.first),
- {"!" + o.first + "!" + state.store->printStorePath(drvPath)});
+ state.mkOutputString(
+ attrs.alloc(o.first),
+ drvPath,
+ o.first,
+ o.second.path(*state.store, Derivation::nameFromPath(drvPath), o.first));
}
/* Load and evaluate an expression from path specified by the
@@ -166,28 +162,30 @@ static void mkOutputString(
static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * vScope, Value & v)
{
auto path = realisePath(state, pos, vPath);
+ auto path2 = path.path.abs();
// FIXME
auto isValidDerivationInStore = [&]() -> std::optional<StorePath> {
- if (!state.store->isStorePath(path))
+ if (!state.store->isStorePath(path2))
return std::nullopt;
- auto storePath = state.store->parseStorePath(path);
- if (!(state.store->isValidPath(storePath) && isDerivation(path)))
+ auto storePath = state.store->parseStorePath(path2);
+ if (!(state.store->isValidPath(storePath) && isDerivation(path2)))
return std::nullopt;
return storePath;
};
- if (auto optStorePath = isValidDerivationInStore()) {
- auto storePath = *optStorePath;
- Derivation drv = state.store->readDerivation(storePath);
+ if (auto storePath = isValidDerivationInStore()) {
+ Derivation drv = state.store->readDerivation(*storePath);
auto attrs = state.buildBindings(3 + drv.outputs.size());
- attrs.alloc(state.sDrvPath).mkString(path, {"=" + path});
+ attrs.alloc(state.sDrvPath).mkString(path2, {
+ NixStringContextElem::DrvDeep { .drvPath = *storePath },
+ });
attrs.alloc(state.sName).mkString(drv.env["name"]);
auto & outputsVal = attrs.alloc(state.sOutputs);
state.mkList(outputsVal, drv.outputs.size());
for (const auto & [i, o] : enumerate(drv.outputs)) {
- mkOutputString(state, attrs, storePath, drv, o);
+ mkOutputString(state, attrs, *storePath, o);
(outputsVal.listElems()[i] = state.allocValue())->mkString(o.first);
}
@@ -198,7 +196,7 @@ static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * v
state.vImportedDrvToDerivation = allocRootValue(state.allocValue());
state.eval(state.parseExprFromString(
#include "imported-drv-to-derivation.nix.gen.hh"
- , "/"), **state.vImportedDrvToDerivation);
+ , CanonPath::root), **state.vImportedDrvToDerivation);
}
state.forceFunction(**state.vImportedDrvToDerivation, pos, "while evaluating imported-drv-to-derivation.nix.gen.hh");
@@ -206,10 +204,10 @@ static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * v
state.forceAttrs(v, pos, "while calling imported-drv-to-derivation.nix.gen.hh");
}
- else if (path == corepkgsPrefix + "fetchurl.nix") {
+ else if (path2 == corepkgsPrefix + "fetchurl.nix") {
state.eval(state.parseExprFromString(
#include "fetchurl.nix.gen.hh"
- , "/"), v);
+ , CanonPath::root), v);
}
else {
@@ -254,9 +252,16 @@ static RegisterPrimOp primop_import({
.args = {"path"},
// TODO turn "normal path values" into link below
.doc = R"(
- Load, parse and return the Nix expression in the file *path*. If
- *path* is a directory, the file ` default.nix ` in that directory
- is loaded. Evaluation aborts if the file doesn’t exist or contains
+ Load, parse and return the Nix expression in the file *path*.
+
+ The value *path* can be a path, a string, or an attribute set with an
+ `__toString` attribute or an `outPath` attribute (as derivations or flake
+ inputs typically have).
+
+ If *path* is a directory, the file `default.nix` in that directory
+ is loaded.
+
+ Evaluation aborts if the file doesn’t exist or contains
an incorrect Nix expression. `import` implements Nix’s module
system: you can put any Nix expression (such as a set or a
function) in a separate file, and use it from Nix expressions in
@@ -323,7 +328,7 @@ void prim_importNative(EvalState & state, const PosIdx pos, Value * * args, Valu
std::string sym(state.forceStringNoCtx(*args[1], pos, "while evaluating the second argument passed to builtins.importNative"));
- void *handle = dlopen(path.c_str(), RTLD_LAZY | RTLD_LOCAL);
+ void *handle = dlopen(path.path.c_str(), RTLD_LAZY | RTLD_LOCAL);
if (!handle)
state.debugThrowLastTrace(EvalError("could not open '%1%': %2%", path, dlerror()));
@@ -351,7 +356,7 @@ void prim_exec(EvalState & state, const PosIdx pos, Value * * args, Value & v)
auto count = args[0]->listSize();
if (count == 0)
state.error("at least one argument to 'exec' required").atPos(pos).debugThrow<EvalError>();
- PathSet context;
+ NixStringContext context;
auto program = state.coerceToString(pos, *elems[0], context,
"while evaluating the first element of the argument passed to builtins.exec",
false, false).toOwned();
@@ -371,7 +376,7 @@ void prim_exec(EvalState & state, const PosIdx pos, Value * * args, Value & v)
auto output = runProgram(program, true, commandArgs);
Expr * parsed;
try {
- parsed = state.parseExprFromString(std::move(output), "/");
+ parsed = state.parseExprFromString(std::move(output), state.rootPath(CanonPath::root));
} catch (Error & e) {
e.addTrace(state.positions[pos], "while parsing the output from '%1%'", program);
throw;
@@ -570,6 +575,9 @@ struct CompareValues
return v1->integer < v2->fpoint;
if (v1->type() != v2->type())
state.error("cannot compare %s with %s", showType(*v1), showType(*v2)).debugThrow<EvalError>();
+ // Allow selecting a subset of enum values
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wswitch-enum"
switch (v1->type()) {
case nInt:
return v1->integer < v2->integer;
@@ -578,7 +586,7 @@ struct CompareValues
case nString:
return strcmp(v1->string.s, v2->string.s) < 0;
case nPath:
- return strcmp(v1->path, v2->path) < 0;
+ return strcmp(v1->_path, v2->_path) < 0;
case nList:
// Lexicographic comparison
for (size_t i = 0;; i++) {
@@ -592,6 +600,7 @@ struct CompareValues
}
default:
state.error("cannot compare %s with %s; values of that type are incomparable", showType(*v1), showType(*v2)).debugThrow<EvalError>();
+ #pragma GCC diagnostic pop
}
} catch (Error & e) {
if (!errorCtx.empty())
@@ -689,12 +698,14 @@ static RegisterPrimOp primop_genericClosure(RegisterPrimOp::Info {
.arity = 1,
.doc = R"(
Take an *attrset* with values named `startSet` and `operator` in order to
- return a *list of attrsets* by starting with the `startSet`, recursively
- applying the `operator` function to each element. The *attrsets* in the
- `startSet` and produced by the `operator` must each contain value named
- `key` which are comparable to each other. The result is produced by
- repeatedly calling the operator for each element encountered with a
- unique key, terminating when no new elements are produced. For example,
+ return a *list of attrsets* by starting with the `startSet` and recursively
+ applying the `operator` function to each `item`. The *attrsets* in the
+ `startSet` and the *attrsets* produced by `operator` must contain a value
+ named `key` which is comparable. The result is produced by calling `operator`
+ for each `item` whose `key` has not been seen before, including newly
+ produced `item`s. The function terminates when no new `item`s are
+ produced. The resulting *list of attrsets* contains only *attrsets* with a
+ unique key. For example,
```
builtins.genericClosure {
@@ -757,7 +768,7 @@ static RegisterPrimOp primop_abort({
)",
.fun = [](EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
auto s = state.coerceToString(pos, *args[0], context,
"while evaluating the error message passed to builtins.abort").toOwned();
state.debugThrowLastTrace(Abort("evaluation aborted with the following error message: '%1%'", s));
@@ -776,7 +787,7 @@ static RegisterPrimOp primop_throw({
)",
.fun = [](EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
auto s = state.coerceToString(pos, *args[0], context,
"while evaluating the error message passed to builtin.throw").toOwned();
state.debugThrowLastTrace(ThrownError(s));
@@ -789,7 +800,7 @@ static void prim_addErrorContext(EvalState & state, const PosIdx pos, Value * *
state.forceValue(*args[1], pos);
v = *args[1];
} catch (Error & e) {
- PathSet context;
+ NixStringContext context;
auto message = state.coerceToString(pos, *args[0], context,
"while evaluating the error message passed to builtins.addErrorContext",
false, false).toOwned();
@@ -1075,13 +1086,13 @@ drvName, Bindings * attrs, Value & v)
Derivation drv;
drv.name = drvName;
- PathSet context;
+ NixStringContext context;
bool contentAddressed = false;
bool isImpure = false;
std::optional<std::string> outputHash;
std::string outputHashAlgo;
- std::optional<FileIngestionMethod> ingestionMethod;
+ std::optional<ContentAddressMethod> ingestionMethod;
StringSet outputs;
outputs.insert("out");
@@ -1094,7 +1105,10 @@ drvName, Bindings * attrs, Value & v)
auto handleHashMode = [&](const std::string_view s) {
if (s == "recursive") ingestionMethod = FileIngestionMethod::Recursive;
else if (s == "flat") ingestionMethod = FileIngestionMethod::Flat;
- else
+ else if (s == "text") {
+ experimentalFeatureSettings.require(Xp::DynamicDerivations);
+ ingestionMethod = TextIngestionMethod {};
+ } else
state.debugThrowLastTrace(EvalError({
.msg = hintfmt("invalid value '%s' for 'outputHashMode' attribute", s),
.errPos = state.positions[noPos]
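
A hedged sketch of the new `"text"` mode accepted above; it assumes the `dynamic-derivations` experimental feature is enabled, and the name, builder, and hash are illustrative only:

```
derivation {
  name = "hello.txt";
  system = builtins.currentSystem;
  builder = "/bin/sh";
  args = [ "-c" "printf 'hi\\n' > $out" ];
  outputHashMode = "text";   # new: text ingestion, like builtins.toFile
  outputHashAlgo = "sha256";
  outputHash = builtins.hashString "sha256" "hi\n";  # hash of the file contents
}
```
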
@@ -1141,13 +1155,13 @@ drvName, Bindings * attrs, Value & v)
if (i->name == state.sContentAddressed) {
contentAddressed = state.forceBool(*i->value, noPos, context_below);
if (contentAddressed)
- settings.requireExperimentalFeature(Xp::CaDerivations);
+ experimentalFeatureSettings.require(Xp::CaDerivations);
}
else if (i->name == state.sImpure) {
isImpure = state.forceBool(*i->value, noPos, context_below);
if (isImpure)
- settings.requireExperimentalFeature(Xp::ImpureDerivations);
+ experimentalFeatureSettings.require(Xp::ImpureDerivations);
}
/* The `args' attribute is special: it supplies the
@@ -1221,8 +1235,7 @@ drvName, Bindings * attrs, Value & v)
/* Everything in the context of the strings in the derivation
attributes should be added as dependencies of the resulting
derivation. */
- for (auto & c_ : context) {
- auto c = NixStringContextElem::parse(*state.store, c_);
+ for (auto & c : context) {
std::visit(overloaded {
/* Since this allows the builder to gain access to every
path in the dependency graph of the derivation (including
@@ -1262,11 +1275,16 @@ drvName, Bindings * attrs, Value & v)
}));
/* Check whether the derivation name is valid. */
- if (isDerivation(drvName))
+ if (isDerivation(drvName) &&
+ !(ingestionMethod == ContentAddressMethod { TextIngestionMethod { } } &&
+ outputs.size() == 1 &&
+ *(outputs.begin()) == "out"))
+ {
state.debugThrowLastTrace(EvalError({
- .msg = hintfmt("derivation names are not allowed to end in '%s'", drvExtension),
+ .msg = hintfmt("derivation names are allowed to end in '%s' only if they produce a single derivation file", drvExtension),
.errPos = state.positions[noPos]
}));
+ }
if (outputHash) {
/* Handle fixed-output derivations.
@@ -1282,15 +1300,15 @@ drvName, Bindings * attrs, Value & v)
auto h = newHashAllowEmpty(*outputHash, parseHashTypeOpt(outputHashAlgo));
auto method = ingestionMethod.value_or(FileIngestionMethod::Flat);
- auto outPath = state.store->makeFixedOutputPath(method, h, drvName);
- drv.env["out"] = state.store->printStorePath(outPath);
- drv.outputs.insert_or_assign("out",
- DerivationOutput::CAFixed {
- .hash = FixedOutputHash {
- .method = method,
- .hash = std::move(h),
- },
- });
+
+ DerivationOutput::CAFixed dof {
+ .ca = ContentAddress::fromParts(
+ std::move(method),
+ std::move(h)),
+ };
+
+ drv.env["out"] = state.store->printStorePath(dof.path(*state.store, drvName, "out"));
+ drv.outputs.insert_or_assign("out", std::move(dof));
}
else if (contentAddressed || isImpure) {
@@ -1308,13 +1326,13 @@ drvName, Bindings * attrs, Value & v)
if (isImpure)
drv.outputs.insert_or_assign(i,
DerivationOutput::Impure {
- .method = method,
+ .method = method.raw,
.hashType = ht,
});
else
drv.outputs.insert_or_assign(i,
DerivationOutput::CAFloating {
- .method = method,
+ .method = method.raw,
.hashType = ht,
});
}
@@ -1375,9 +1393,11 @@ drvName, Bindings * attrs, Value & v)
}
auto result = state.buildBindings(1 + drv.outputs.size());
- result.alloc(state.sDrvPath).mkString(drvPathS, {"=" + drvPathS});
+ result.alloc(state.sDrvPath).mkString(drvPathS, {
+ NixStringContextElem::DrvDeep { .drvPath = drvPath },
+ });
for (auto & i : drv.outputs)
- mkOutputString(state, result, drvPath, drv, i);
+ mkOutputString(state, result, drvPath, i);
v.mkAttrs(result);
}
@@ -1420,9 +1440,9 @@ static RegisterPrimOp primop_placeholder({
/* Convert the argument to a path. !!! obsolete? */
static void prim_toPath(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
- Path path = state.coerceToPath(pos, *args[0], context, "while evaluating the first argument passed to builtins.toPath");
- v.mkString(canonPath(path), context);
+ NixStringContext context;
+ auto path = state.coerceToPath(pos, *args[0], context, "while evaluating the first argument passed to builtins.toPath");
+ v.mkString(path.path.abs(), context);
}
static RegisterPrimOp primop_toPath({
@@ -1451,22 +1471,23 @@ static void prim_storePath(EvalState & state, const PosIdx pos, Value * * args,
.errPos = state.positions[pos]
}));
- PathSet context;
- Path path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context, "while evaluating the first argument passed to builtins.storePath"));
+ NixStringContext context;
+ auto path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context, "while evaluating the first argument passed to builtins.storePath")).path;
/* Resolve symlinks in ‘path’, unless ‘path’ itself is a symlink
directly in the store. The latter condition is necessary so
e.g. nix-push does the right thing. */
- if (!state.store->isStorePath(path)) path = canonPath(path, true);
- if (!state.store->isInStore(path))
+ if (!state.store->isStorePath(path.abs()))
+ path = CanonPath(canonPath(path.abs(), true));
+ if (!state.store->isInStore(path.abs()))
state.debugThrowLastTrace(EvalError({
.msg = hintfmt("path '%1%' is not in the Nix store", path),
.errPos = state.positions[pos]
}));
- auto path2 = state.store->toStorePath(path).first;
+ auto path2 = state.store->toStorePath(path.abs()).first;
if (!settings.readOnlyMode)
state.store->ensurePath(path2);
- context.insert(state.store->printStorePath(path2));
- v.mkString(path, context);
+ context.insert(NixStringContextElem::Opaque { .path = path2 });
+ v.mkString(path.abs(), context);
}
static RegisterPrimOp primop_storePath({
@@ -1497,7 +1518,7 @@ static void prim_pathExists(EvalState & state, const PosIdx pos, Value * * args,
auto path = realisePath(state, pos, *args[0], { .checkForPureEval = false });
try {
- v.mkBool(pathExists(state.checkSourcePath(path)));
+ v.mkBool(state.checkSourcePath(path).pathExists());
} catch (SysError & e) {
/* Don't give away info from errors while canonicalising
‘path’ in restricted mode. */
@@ -1521,7 +1542,7 @@ static RegisterPrimOp primop_pathExists({
following the last slash. */
static void prim_baseNameOf(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
v.mkString(baseNameOf(*state.coerceToString(pos, *args[0], context,
"while evaluating the first argument passed to builtins.baseNameOf",
false, false)), context);
@@ -1543,12 +1564,18 @@ static RegisterPrimOp primop_baseNameOf({
of the argument. */
static void prim_dirOf(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
- auto path = state.coerceToString(pos, *args[0], context,
- "while evaluating the first argument passed to builtins.dirOf",
+ state.forceValue(*args[0], pos);
+ if (args[0]->type() == nPath) {
+ auto path = args[0]->path();
+ v.mkPath(path.path.isRoot() ? path : path.parent());
+ } else {
+ NixStringContext context;
+ auto path = state.coerceToString(pos, *args[0], context,
+ "while evaluating the first argument passed to 'builtins.dirOf'",
false, false);
- auto dir = dirOf(*path);
- if (args[0]->type() == nPath) v.mkPath(dir); else v.mkString(dir, context);
+ auto dir = dirOf(*path);
+ v.mkString(dir, context);
+ }
}
static RegisterPrimOp primop_dirOf({
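
A behavior sketch for the branch added above: path arguments now stay paths (with the filesystem root being its own parent), while string arguments keep returning strings; the paths are illustrative:

```
[
  (builtins.dirOf ./foo/bar.nix)   # => ./foo    (still a path value)
  (builtins.dirOf /.)              # => /.       (root is returned unchanged)
  (builtins.dirOf "/foo/bar")      # => "/foo"   (still a string)
  (builtins.dirOf "foo/bar")       # => "foo"
]
```
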
@@ -1566,13 +1593,13 @@ static RegisterPrimOp primop_dirOf({
static void prim_readFile(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
auto path = realisePath(state, pos, *args[0]);
- auto s = readFile(path);
+ auto s = path.readFile();
if (s.find((char) 0) != std::string::npos)
state.debugThrowLastTrace(Error("the contents of the file '%1%' cannot be represented as a Nix string", path));
StorePathSet refs;
- if (state.store->isInStore(path)) {
+ if (state.store->isInStore(path.path.abs())) {
try {
- refs = state.store->queryPathInfo(state.store->toStorePath(path).first)->references;
+ refs = state.store->queryPathInfo(state.store->toStorePath(path.path.abs()).first)->references;
} catch (Error &) { // FIXME: should be InvalidPathError
}
// Re-scan references to filter down to just the ones that actually occur in the file.
@@ -1580,7 +1607,12 @@ static void prim_readFile(EvalState & state, const PosIdx pos, Value * * args, V
refsSink << s;
refs = refsSink.getResultPaths();
}
- auto context = state.store->printStorePathSet(refs);
+ NixStringContext context;
+ for (auto && p : std::move(refs)) {
+ context.insert(NixStringContextElem::Opaque {
+ .path = std::move((StorePath &&)p),
+ });
+ }
v.mkString(s, context);
}
@@ -1611,7 +1643,7 @@ static void prim_findFile(EvalState & state, const PosIdx pos, Value * * args, V
i = getAttr(state, state.sPath, v2->attrs, "in an element of the __nixPath");
- PathSet context;
+ NixStringContext context;
auto path = state.coerceToString(pos, *i->value, context,
"while evaluating the `path` attribute of an element of the list passed to builtins.findFile",
false, false).toOwned();
@@ -1653,7 +1685,7 @@ static void prim_hashFile(EvalState & state, const PosIdx pos, Value * * args, V
auto path = realisePath(state, pos, *args[1]);
- v.mkString(hashFile(*ht, path).to_string(Base16, false));
+ v.mkString(hashString(*ht, path.readFile()).to_string(Base16, false));
}
static RegisterPrimOp primop_hashFile({
@@ -1667,26 +1699,20 @@ static RegisterPrimOp primop_hashFile({
.fun = prim_hashFile,
});
-
-/* Stringize a directory entry enum. Used by `readFileType' and `readDir'. */
-static const char * dirEntTypeToString(unsigned char dtType)
+static std::string_view fileTypeToString(InputAccessor::Type type)
{
- /* Enum DT_(DIR|LNK|REG|UNKNOWN) */
- switch(dtType) {
- case DT_REG: return "regular"; break;
- case DT_DIR: return "directory"; break;
- case DT_LNK: return "symlink"; break;
- default: return "unknown"; break;
- }
- return "unknown"; /* Unreachable */
+ return
+ type == InputAccessor::Type::tRegular ? "regular" :
+ type == InputAccessor::Type::tDirectory ? "directory" :
+ type == InputAccessor::Type::tSymlink ? "symlink" :
+ "unknown";
}
-
static void prim_readFileType(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
auto path = realisePath(state, pos, *args[0]);
/* Retrieve the directory entry type and stringize it. */
- v.mkString(dirEntTypeToString(getFileType(path)));
+ v.mkString(fileTypeToString(path.lstat().type));
}
static RegisterPrimOp primop_readFileType({
@@ -1707,8 +1733,7 @@ static void prim_readDir(EvalState & state, const PosIdx pos, Value * * args, Va
// Retrieve directory entries for all nodes in a directory.
// This is similar to `getFileType` but is optimized to reduce system calls
// on many systems.
- DirEntries entries = readDirectory(path);
-
+ auto entries = path.readDirectory();
auto attrs = state.buildBindings(entries.size());
// If we hit unknown directory entry types we may need to fallback to
@@ -1717,22 +1742,21 @@ static void prim_readDir(EvalState & state, const PosIdx pos, Value * * args, Va
// `builtins.readFileType` application.
Value * readFileType = nullptr;
- for (auto & ent : entries) {
- auto & attr = attrs.alloc(ent.name);
- if (ent.type == DT_UNKNOWN) {
+ for (auto & [name, type] : entries) {
+ auto & attr = attrs.alloc(name);
+ if (!type) {
// Some filesystems or operating systems may not be able to return
// detailed node info quickly in this case we produce a thunk to
// query the file type lazily.
auto epath = state.allocValue();
- Path path2 = path + "/" + ent.name;
- epath->mkString(path2);
+ epath->mkPath(path + name);
if (!readFileType)
readFileType = &state.getBuiltin("readFileType");
attr.mkApp(readFileType, epath);
} else {
// This branch of the conditional is much more likely.
// Here we just stringize the directory entry type.
- attr.mkString(dirEntTypeToString(ent.type));
+ attr.mkString(fileTypeToString(*type));
}
}
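
The observable result of `readDir` is unchanged by the rewrite above; entries whose type the filesystem cannot report cheaply are filled in lazily through `readFileType` when forced. A sketch with illustrative entry names:

```
builtins.readDir ./.
# => { "default.nix" = "regular"; "src" = "directory"; "result" = "symlink"; }
```
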
@@ -1770,7 +1794,7 @@ static RegisterPrimOp primop_readDir({
static void prim_toXML(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
std::ostringstream out;
- PathSet context;
+ NixStringContext context;
printValueAsXML(state, true, false, *args[0], out, context, pos);
v.mkString(out.str(), context);
}
@@ -1878,7 +1902,7 @@ static RegisterPrimOp primop_toXML({
static void prim_toJSON(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
std::ostringstream out;
- PathSet context;
+ NixStringContext context;
printValueAsJSON(state, true, *args[0], pos, out, context);
v.mkString(out.str(), context);
}
@@ -1928,22 +1952,23 @@ static RegisterPrimOp primop_fromJSON({
as an input by derivations. */
static void prim_toFile(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
std::string name(state.forceStringNoCtx(*args[0], pos, "while evaluating the first argument passed to builtins.toFile"));
std::string contents(state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile"));
StorePathSet refs;
- for (auto path : context) {
- if (path.at(0) != '/')
+ for (auto c : context) {
+ if (auto p = std::get_if<NixStringContextElem::Opaque>(&c))
+ refs.insert(p->path);
+ else
state.debugThrowLastTrace(EvalError({
.msg = hintfmt(
"in 'toFile': the file named '%1%' must not contain a reference "
"to a derivation but contains (%2%)",
- name, path),
+ name, c.to_string()),
.errPos = state.positions[pos]
}));
- refs.insert(state.store->parseStorePath(path));
}
auto storePath = settings.readOnlyMode
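
A sketch of the restriction enforced above: only plain store paths may end up in the context of a `toFile` string, so interpolating an unbuilt derivation is rejected; the derivation attributes are illustrative:

```
let
  drv = derivation { name = "d"; system = builtins.currentSystem; builder = "/bin/sh"; };
in
  builtins.toFile "refs.txt" "see ${drv}"
# => error: in 'toFile': the file named 'refs.txt' must not contain a
#    reference to a derivation ...
```
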
@@ -2038,13 +2063,13 @@ static RegisterPrimOp primop_toFile({
static void addPath(
EvalState & state,
const PosIdx pos,
- const std::string & name,
+ std::string_view name,
Path path,
Value * filterFun,
FileIngestionMethod method,
const std::optional<Hash> expectedHash,
Value & v,
- const PathSet & context)
+ const NixStringContext & context)
{
try {
// FIXME: handle CA derivation outputs (where path needs to
@@ -2066,7 +2091,7 @@ static void addPath(
path = evalSettings.pureEval && expectedHash
? path
- : state.checkSourcePath(path);
+ : state.checkSourcePath(CanonPath(path)).path.abs();
PathFilter filter = filterFun ? ([&](const Path & path) {
auto st = lstat(path);
@@ -2092,7 +2117,13 @@ static void addPath(
std::optional<StorePath> expectedStorePath;
if (expectedHash)
- expectedStorePath = state.store->makeFixedOutputPath(method, *expectedHash, name);
+ expectedStorePath = state.store->makeFixedOutputPath(name, FixedOutputInfo {
+ .hash = {
+ .method = method,
+ .hash = *expectedHash,
+ },
+ .references = {},
+ });
if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) {
StorePath dstPath = settings.readOnlyMode
@@ -2112,10 +2143,11 @@ static void addPath(
static void prim_filterSource(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
- Path path = state.coerceToPath(pos, *args[1], context, "while evaluating the second argument (the path to filter) passed to builtins.filterSource");
+ NixStringContext context;
+ auto path = state.coerceToPath(pos, *args[1], context,
+ "while evaluating the second argument (the path to filter) passed to builtins.filterSource");
state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.filterSource");
- addPath(state, pos, std::string(baseNameOf(path)), path, args[0], FileIngestionMethod::Recursive, std::nullopt, v, context);
+ addPath(state, pos, path.baseName(), path.path.abs(), args[0], FileIngestionMethod::Recursive, std::nullopt, v, context);
}
static RegisterPrimOp primop_filterSource({
@@ -2175,18 +2207,19 @@ static RegisterPrimOp primop_filterSource({
static void prim_path(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- state.forceAttrs(*args[0], pos, "while evaluating the argument passed to builtins.path");
- Path path;
+ std::optional<SourcePath> path;
std::string name;
Value * filterFun = nullptr;
auto method = FileIngestionMethod::Recursive;
std::optional<Hash> expectedHash;
- PathSet context;
+ NixStringContext context;
+
+ state.forceAttrs(*args[0], pos, "while evaluating the argument passed to 'builtins.path'");
for (auto & attr : *args[0]->attrs) {
auto n = state.symbols[attr.name];
if (n == "path")
- path = state.coerceToPath(attr.pos, *attr.value, context, "while evaluating the `path` attribute passed to builtins.path");
+ path.emplace(state.coerceToPath(attr.pos, *attr.value, context, "while evaluating the 'path' attribute passed to 'builtins.path'"));
else if (attr.name == state.sName)
name = state.forceStringNoCtx(*attr.value, attr.pos, "while evaluating the `name` attribute passed to builtins.path");
else if (n == "filter")
@@ -2201,15 +2234,15 @@ static void prim_path(EvalState & state, const PosIdx pos, Value * * args, Value
.errPos = state.positions[attr.pos]
}));
}
- if (path.empty())
+ if (!path)
state.debugThrowLastTrace(EvalError({
.msg = hintfmt("missing required 'path' attribute in the first argument to builtins.path"),
.errPos = state.positions[pos]
}));
if (name.empty())
- name = baseNameOf(path);
+ name = path->baseName();
- addPath(state, pos, name, path, filterFun, method, expectedHash, v, context);
+ addPath(state, pos, name, path->path.abs(), filterFun, method, expectedHash, v, context);
}
static RegisterPrimOp primop_path({
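
A sketch of the attributes handled above (`path` is now required and tracked via the `std::optional`; `name` still defaults to the base name of the path); the filter shown is illustrative:

```
builtins.path {
  path = ./src;
  name = "my-sources";
  filter = path: type: type != "symlink";
}
```
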
@@ -3515,7 +3548,7 @@ static RegisterPrimOp primop_lessThan({
`"/nix/store/whatever..."'. */
static void prim_toString(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
auto s = state.coerceToString(pos, *args[0], context,
"while evaluating the first argument passed to builtins.toString",
true, false);
@@ -3554,7 +3587,7 @@ static void prim_substring(EvalState & state, const PosIdx pos, Value * * args,
{
int start = state.forceInt(*args[0], pos, "while evaluating the first argument (the start offset) passed to builtins.substring");
int len = state.forceInt(*args[1], pos, "while evaluating the second argument (the substring length) passed to builtins.substring");
- PathSet context;
+ NixStringContext context;
auto s = state.coerceToString(pos, *args[2], context, "while evaluating the third argument (the string) passed to builtins.substring");
if (start < 0)
@@ -3588,7 +3621,7 @@ static RegisterPrimOp primop_substring({
static void prim_stringLength(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
auto s = state.coerceToString(pos, *args[0], context, "while evaluating the argument passed to builtins.stringLength");
v.mkInt(s->size());
}
@@ -3614,7 +3647,7 @@ static void prim_hashString(EvalState & state, const PosIdx pos, Value * * args,
.errPos = state.positions[pos]
}));
- PathSet context; // discarded
+ NixStringContext context; // discarded
auto s = state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.hashString");
v.mkString(hashString(*ht, s).to_string(Base16, false));
@@ -3660,7 +3693,7 @@ void prim_match(EvalState & state, const PosIdx pos, Value * * args, Value & v)
auto regex = state.regexCache->get(re);
- PathSet context;
+ NixStringContext context;
const auto str = state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.match");
std::cmatch match;
@@ -3740,7 +3773,7 @@ void prim_split(EvalState & state, const PosIdx pos, Value * * args, Value & v)
auto regex = state.regexCache->get(re);
- PathSet context;
+ NixStringContext context;
const auto str = state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.split");
auto begin = std::cregex_iterator(str.begin(), str.end(), regex);
@@ -3837,7 +3870,7 @@ static RegisterPrimOp primop_split({
static void prim_concatStringsSep(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
auto sep = state.forceString(*args[0], context, pos, "while evaluating the first argument (the separator string) passed to builtins.concatStringsSep");
state.forceList(*args[1], pos, "while evaluating the second argument (the list of strings to concat) passed to builtins.concatStringsSep");
@@ -3877,15 +3910,15 @@ static void prim_replaceStrings(EvalState & state, const PosIdx pos, Value * * a
for (auto elem : args[0]->listItems())
from.emplace_back(state.forceString(*elem, pos, "while evaluating one of the strings to replace passed to builtins.replaceStrings"));
- std::vector<std::pair<std::string, PathSet>> to;
+ std::vector<std::pair<std::string, NixStringContext>> to;
to.reserve(args[1]->listSize());
for (auto elem : args[1]->listItems()) {
- PathSet ctx;
+ NixStringContext ctx;
auto s = state.forceString(*elem, ctx, pos, "while evaluating one of the replacement strings passed to builtins.replaceStrings");
to.emplace_back(s, std::move(ctx));
}
- PathSet context;
+ NixStringContext context;
auto s = state.forceString(*args[2], context, pos, "while evaluating the third argument passed to builtins.replaceStrings");
std::string res;
@@ -4114,7 +4147,7 @@ void EvalState::createBaseEnv()
if (RegisterPrimOp::primOps)
for (auto & primOp : *RegisterPrimOp::primOps)
if (!primOp.experimentalFeature
- || settings.isExperimentalFeatureEnabled(*primOp.experimentalFeature))
+ || experimentalFeatureSettings.isEnabled(*primOp.experimentalFeature))
{
addPrimOp({
.fun = primOp.fun,
@@ -4127,7 +4160,6 @@ void EvalState::createBaseEnv()
/* Add a wrapper around the derivation primop that computes the
`drvPath' and `outPath' attributes lazily. */
- sDerivationNix = symbols.create(derivationNixPath);
auto vDerivation = allocValue();
addConstant("derivation", vDerivation);
@@ -4144,7 +4176,7 @@ void EvalState::createBaseEnv()
// the parser needs two NUL bytes as terminators; one of them
// is implied by being a C string.
"\0";
- eval(parse(code, sizeof(code), derivationNixPath, "/", staticBaseEnv), *vDerivation);
+ eval(parse(code, sizeof(code), derivationInternal, {CanonPath::root}, staticBaseEnv), *vDerivation);
}
diff --git a/src/libexpr/primops.hh b/src/libexpr/primops.hh
index 1cfb4356b..4ae73fe1f 100644
--- a/src/libexpr/primops.hh
+++ b/src/libexpr/primops.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "eval.hh"
@@ -22,9 +23,11 @@ struct RegisterPrimOp
typedef std::vector<Info> PrimOps;
static PrimOps * primOps;
- /* You can register a constant by passing an arity of 0. fun
- will get called during EvalState initialization, so there
- may be primops not yet added and builtins is not yet sorted. */
+ /**
+ * You can register a constant by passing an arity of 0. fun
+ * will get called during EvalState initialization, so there
+ * may be primops not yet added and builtins is not yet sorted.
+ */
RegisterPrimOp(
std::string name,
size_t arity,
@@ -37,10 +40,14 @@ struct RegisterPrimOp
may wish to use them in limited contexts without globally enabling
them. */
-/* Load a ValueInitializer from a DSO and return whatever it initializes */
+/**
+ * Load a ValueInitializer from a DSO and return whatever it initializes
+ */
void prim_importNative(EvalState & state, const PosIdx pos, Value * * args, Value & v);
-/* Execute a program and parse its output */
+/**
+ * Execute a program and parse its output
+ */
void prim_exec(EvalState & state, const PosIdx pos, Value * * args, Value & v);
}
diff --git a/src/libexpr/primops/context.cc b/src/libexpr/primops/context.cc
index db43e5771..07bf400cf 100644
--- a/src/libexpr/primops/context.cc
+++ b/src/libexpr/primops/context.cc
@@ -7,7 +7,7 @@ namespace nix {
static void prim_unsafeDiscardStringContext(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
auto s = state.coerceToString(pos, *args[0], context, "while evaluating the argument passed to builtins.unsafeDiscardStringContext");
v.mkString(*s);
}
@@ -17,7 +17,7 @@ static RegisterPrimOp primop_unsafeDiscardStringContext("__unsafeDiscardStringCo
static void prim_hasContext(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
state.forceString(*args[0], context, pos, "while evaluating the argument passed to builtins.hasContext");
v.mkBool(!context.empty());
}
@@ -33,17 +33,18 @@ static RegisterPrimOp primop_hasContext("__hasContext", 1, prim_hasContext);
drv.inputDrvs. */
static void prim_unsafeDiscardOutputDependency(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
auto s = state.coerceToString(pos, *args[0], context, "while evaluating the argument passed to builtins.unsafeDiscardOutputDependency");
- PathSet context2;
- for (auto && p : context) {
- auto c = NixStringContextElem::parse(*state.store, p);
+ NixStringContext context2;
+ for (auto && c : context) {
if (auto * ptr = std::get_if<NixStringContextElem::DrvDeep>(&c)) {
- context2.emplace(state.store->printStorePath(ptr->drvPath));
+ context2.emplace(NixStringContextElem::Opaque {
+ .path = ptr->drvPath
+ });
} else {
/* Can reuse original item */
- context2.emplace(std::move(p));
+ context2.emplace(std::move(c));
}
}
@@ -79,22 +80,21 @@ static void prim_getContext(EvalState & state, const PosIdx pos, Value * * args,
bool allOutputs = false;
Strings outputs;
};
- PathSet context;
+ NixStringContext context;
state.forceString(*args[0], context, pos, "while evaluating the argument passed to builtins.getContext");
auto contextInfos = std::map<StorePath, ContextInfo>();
- for (const auto & p : context) {
- NixStringContextElem ctx = NixStringContextElem::parse(*state.store, p);
+ for (auto && i : context) {
std::visit(overloaded {
- [&](NixStringContextElem::DrvDeep & d) {
- contextInfos[d.drvPath].allOutputs = true;
+ [&](NixStringContextElem::DrvDeep && d) {
+ contextInfos[std::move(d.drvPath)].allOutputs = true;
},
- [&](NixStringContextElem::Built & b) {
- contextInfos[b.drvPath].outputs.emplace_back(std::move(b.output));
+ [&](NixStringContextElem::Built && b) {
+ contextInfos[std::move(b.drvPath)].outputs.emplace_back(std::move(b.output));
},
- [&](NixStringContextElem::Opaque & o) {
- contextInfos[o.path].path = true;
+ [&](NixStringContextElem::Opaque && o) {
+ contextInfos[std::move(o.path)].path = true;
},
- }, ctx.raw());
+ }, ((NixStringContextElem &&) i).raw());
}
auto attrs = state.buildBindings(contextInfos.size());
@@ -129,7 +129,7 @@ static RegisterPrimOp primop_getContext("__getContext", 1, prim_getContext);
*/
static void prim_appendContext(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- PathSet context;
+ NixStringContext context;
auto orig = state.forceString(*args[0], context, noPos, "while evaluating the first argument passed to builtins.appendContext");
state.forceAttrs(*args[1], pos, "while evaluating the second argument passed to builtins.appendContext");
@@ -143,13 +143,16 @@ static void prim_appendContext(EvalState & state, const PosIdx pos, Value * * ar
.msg = hintfmt("context key '%s' is not a store path", name),
.errPos = state.positions[i.pos]
});
+ auto namePath = state.store->parseStorePath(name);
if (!settings.readOnlyMode)
- state.store->ensurePath(state.store->parseStorePath(name));
+ state.store->ensurePath(namePath);
state.forceAttrs(*i.value, i.pos, "while evaluating the value of a string context");
auto iter = i.value->attrs->find(sPath);
if (iter != i.value->attrs->end()) {
if (state.forceBool(*iter->value, iter->pos, "while evaluating the `path` attribute of a string context"))
- context.emplace(name);
+ context.emplace(NixStringContextElem::Opaque {
+ .path = namePath,
+ });
}
iter = i.value->attrs->find(sAllOutputs);
@@ -161,7 +164,9 @@ static void prim_appendContext(EvalState & state, const PosIdx pos, Value * * ar
.errPos = state.positions[i.pos]
});
}
- context.insert(concatStrings("=", name));
+ context.emplace(NixStringContextElem::DrvDeep {
+ .drvPath = namePath,
+ });
}
}
@@ -176,7 +181,10 @@ static void prim_appendContext(EvalState & state, const PosIdx pos, Value * * ar
}
for (auto elem : iter->value->listItems()) {
auto outputName = state.forceStringNoCtx(*elem, iter->pos, "while evaluating an output name within a string context");
- context.insert(concatStrings("!", outputName, "!", name));
+ context.emplace(NixStringContextElem::Built {
+ .drvPath = namePath,
+ .output = std::string { outputName },
+ });
}
}
}
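
A sketch of the round trip these primops implement with the new `NixStringContext` representation: `getContext` decomposes a string's context into `path` / `allOutputs` / `outputs` entries, and `appendContext` re-attaches such an attrset; the derivation is illustrative:

```
let
  drv = derivation { name = "d"; system = builtins.currentSystem; builder = "/bin/sh"; };
  s = "${drv}";
in
  builtins.appendContext
    (builtins.unsafeDiscardStringContext s)
    (builtins.getContext s)
# evaluates to a string equal to `s`, carrying an equivalent context
```
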
diff --git a/src/libexpr/primops/fetchClosure.cc b/src/libexpr/primops/fetchClosure.cc
index 0dfa97fa3..4cf1f1e0b 100644
--- a/src/libexpr/primops/fetchClosure.cc
+++ b/src/libexpr/primops/fetchClosure.cc
@@ -18,7 +18,7 @@ static void prim_fetchClosure(EvalState & state, const PosIdx pos, Value * * arg
const auto & attrName = state.symbols[attr.name];
if (attrName == "fromPath") {
- PathSet context;
+ NixStringContext context;
fromPath = state.coerceToStorePath(attr.pos, *attr.value, context,
"while evaluating the 'fromPath' attribute passed to builtins.fetchClosure");
}
@@ -27,7 +27,7 @@ static void prim_fetchClosure(EvalState & state, const PosIdx pos, Value * * arg
state.forceValue(*attr.value, attr.pos);
toCA = true;
if (attr.value->type() != nString || attr.value->string.s != std::string("")) {
- PathSet context;
+ NixStringContext context;
toPath = state.coerceToStorePath(attr.pos, *attr.value, context,
"while evaluating the 'toPath' attribute passed to builtins.fetchClosure");
}
@@ -114,8 +114,7 @@ static void prim_fetchClosure(EvalState & state, const PosIdx pos, Value * * arg
});
}
- auto toPathS = state.store->printStorePath(*toPath);
- v.mkString(toPathS, {toPathS});
+ state.mkStorePathString(*toPath, v);
}
static RegisterPrimOp primop_fetchClosure({
diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc
index c41bd60b6..2c0d98e74 100644
--- a/src/libexpr/primops/fetchMercurial.cc
+++ b/src/libexpr/primops/fetchMercurial.cc
@@ -13,7 +13,7 @@ static void prim_fetchMercurial(EvalState & state, const PosIdx pos, Value * * a
std::optional<Hash> rev;
std::optional<std::string> ref;
std::string_view name = "source";
- PathSet context;
+ NixStringContext context;
state.forceValue(*args[0], pos);
@@ -73,8 +73,7 @@ static void prim_fetchMercurial(EvalState & state, const PosIdx pos, Value * * a
auto [tree, input2] = input.fetch(state.store);
auto attrs2 = state.buildBindings(8);
- auto storePath = state.store->printStorePath(tree.storePath);
- attrs2.alloc(state.sOutPath).mkString(storePath, {storePath});
+ state.mkStorePathString(tree.storePath, attrs2.alloc(state.sOutPath));
if (input2.getRef())
attrs2.alloc("branch").mkString(*input2.getRef());
// Backward compatibility: set 'rev' to
diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc
index c9faf3ffb..cd7039025 100644
--- a/src/libexpr/primops/fetchTree.cc
+++ b/src/libexpr/primops/fetchTree.cc
@@ -24,9 +24,8 @@ void emitTreeAttrs(
auto attrs = state.buildBindings(8);
- auto storePath = state.store->printStorePath(tree.storePath);
- attrs.alloc(state.sOutPath).mkString(storePath, {storePath});
+ state.mkStorePathString(tree.storePath, attrs.alloc(state.sOutPath));
// FIXME: support arbitrary input attributes.
@@ -107,7 +106,7 @@ static void fetchTree(
const FetchTreeParams & params = FetchTreeParams{}
) {
fetchers::Input input;
- PathSet context;
+ NixStringContext context;
state.forceValue(*args[0], pos);
@@ -190,7 +189,7 @@ static void fetchTree(
static void prim_fetchTree(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
- settings.requireExperimentalFeature(Xp::Flakes);
+ experimentalFeatureSettings.require(Xp::Flakes);
fetchTree(state, pos, args, v, std::nullopt, FetchTreeParams { .allowNameArgument = false });
}
@@ -243,10 +242,15 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v
// early exit if pinned and already in the store
if (expectedHash && expectedHash->type == htSHA256) {
- auto expectedPath =
- unpack
- ? state.store->makeFixedOutputPath(FileIngestionMethod::Recursive, *expectedHash, name, {})
- : state.store->makeFixedOutputPath(FileIngestionMethod::Flat, *expectedHash, name, {});
+ auto expectedPath = state.store->makeFixedOutputPath(
+ name,
+ FixedOutputInfo {
+ .hash = {
+ .method = unpack ? FileIngestionMethod::Recursive : FileIngestionMethod::Flat,
+ .hash = *expectedHash,
+ },
+ .references = {}
+ });
if (state.store->isValidPath(expectedPath)) {
state.allowAndSetStorePathString(expectedPath, v);
@@ -353,36 +357,44 @@ static RegisterPrimOp primop_fetchGit({
of the repo at that URL is fetched. Otherwise, it can be an
attribute with the following attributes (all except `url` optional):
- - url\
- The URL of the repo.
+ - `url`
- - name\
- The name of the directory the repo should be exported to in the
- store. Defaults to the basename of the URL.
+ The URL of the repo.
- - rev\
- The git revision to fetch. Defaults to the tip of `ref`.
+ - `name` (default: *basename of the URL*)
- - ref\
- The git ref to look for the requested revision under. This is
- often a branch or tag name. Defaults to `HEAD`.
+ The name of the directory the repo should be exported to in the store.
- By default, the `ref` value is prefixed with `refs/heads/`. As
- of Nix 2.3.0 Nix will not prefix `refs/heads/` if `ref` starts
- with `refs/`.
+ - `rev` (default: *the tip of `ref`*)
- - submodules\
- A Boolean parameter that specifies whether submodules should be
- checked out. Defaults to `false`.
+ The [Git revision] to fetch.
+ This is typically a commit hash.
- - shallow\
- A Boolean parameter that specifies whether fetching a shallow clone
- is allowed. Defaults to `false`.
+ [Git revision]: https://git-scm.com/docs/git-rev-parse#_specifying_revisions
- - allRefs\
- Whether to fetch all refs of the repository. With this argument being
- true, it's possible to load a `rev` from *any* `ref` (by default only
- `rev`s from the specified `ref` are supported).
+ - `ref` (default: `HEAD`)
+
+ The [Git reference] under which to look for the requested revision.
+ This is often a branch or tag name.
+
+ [Git reference]: https://git-scm.com/book/en/v2/Git-Internals-Git-References
+
+ By default, the `ref` value is prefixed with `refs/heads/`.
+ As of 2.3.0, Nix will not prefix `refs/heads/` if `ref` starts with `refs/`.
+
+ - `submodules` (default: `false`)
+
+ A Boolean parameter that specifies whether submodules should be checked out.
+
+ - `shallow` (default: `false`)
+
+ A Boolean parameter that specifies whether fetching a shallow clone is allowed.
+
+ - `allRefs` (default: `false`)
+
+ Whether to fetch all references of the repository.
+ When this argument is `true`, it is possible to load a `rev` from *any* `ref`
+ (by default only `rev`s from the specified `ref` are supported).
Here are some examples of how to use `fetchGit`.
@@ -473,10 +485,10 @@ static RegisterPrimOp primop_fetchGit({
builtins.fetchGit ./work-dir
```
- If the URL points to a local directory, and no `ref` or `rev` is
- given, `fetchGit` will use the current content of the checked-out
- files, even if they are not committed or added to Git's index. It will
- only consider files added to the Git repository, as listed by `git ls-files`.
+ If the URL points to a local directory, and no `ref` or `rev` is
+ given, `fetchGit` will use the current content of the checked-out
+ files, even if they are not committed or added to Git's index. It will
+ only consider files added to the Git repository, as listed by `git ls-files`.
)",
.fun = prim_fetchGit,
});
diff --git a/src/libexpr/print.cc b/src/libexpr/print.cc
new file mode 100644
index 000000000..53ba70bdd
--- /dev/null
+++ b/src/libexpr/print.cc
@@ -0,0 +1,94 @@
+#include "print.hh"
+#include <unordered_set>
+
+namespace nix {
+
+std::ostream &
+printLiteralString(std::ostream & str, const std::string_view string)
+{
+ str << "\"";
+ for (auto i = string.begin(); i != string.end(); ++i) {
+ if (*i == '\"' || *i == '\\') str << "\\" << *i;
+ else if (*i == '\n') str << "\\n";
+ else if (*i == '\r') str << "\\r";
+ else if (*i == '\t') str << "\\t";
+ else if (*i == '$' && *(i+1) == '{') str << "\\" << *i;
+ else str << *i;
+ }
+ str << "\"";
+ return str;
+}
+
+std::ostream &
+printLiteralBool(std::ostream & str, bool boolean)
+{
+ str << (boolean ? "true" : "false");
+ return str;
+}
+
+// Returns `true' if a string is a reserved keyword which requires quotation
+// when printing attribute set field names.
+//
+// This list should generally be kept in sync with `./lexer.l'.
+// You can test if a keyword needs to be added by running:
+// $ nix eval --expr '{ <KEYWORD> = 1; }'
+// For example `or' doesn't need to be quoted.
+bool isReservedKeyword(const std::string_view str)
+{
+ static const std::unordered_set<std::string_view> reservedKeywords = {
+ "if", "then", "else", "assert", "with", "let", "in", "rec", "inherit"
+ };
+ return reservedKeywords.contains(str);
+}
+
+std::ostream &
+printIdentifier(std::ostream & str, std::string_view s) {
+ if (s.empty())
+ str << "\"\"";
+ else if (isReservedKeyword(s))
+ str << '"' << s << '"';
+ else {
+ char c = s[0];
+ if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_')) {
+ printLiteralString(str, s);
+ return str;
+ }
+ for (auto c : s)
+ if (!((c >= 'a' && c <= 'z') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9') ||
+ c == '_' || c == '\'' || c == '-')) {
+ printLiteralString(str, s);
+ return str;
+ }
+ str << s;
+ }
+ return str;
+}
+
+static bool isVarName(std::string_view s)
+{
+ if (s.size() == 0) return false;
+ if (isReservedKeyword(s)) return false;
+ char c = s[0];
+ if ((c >= '0' && c <= '9') || c == '-' || c == '\'') return false;
+ for (auto & i : s)
+ if (!((i >= 'a' && i <= 'z') ||
+ (i >= 'A' && i <= 'Z') ||
+ (i >= '0' && i <= '9') ||
+ i == '_' || i == '-' || i == '\''))
+ return false;
+ return true;
+}
+
+std::ostream &
+printAttributeName(std::ostream & str, std::string_view name) {
+ if (isVarName(name))
+ str << name;
+ else
+ printLiteralString(str, name);
+ return str;
+}
+
+
+}
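
A sketch of what these helpers produce when attribute names are printed back out (for instance by `nix eval`): ordinary identifiers stay bare, while reserved keywords and non-identifier names are quoted; `or` is the example from the comment above that needs no quoting:

```
{
  plain = 1;        # printed as:  plain = 1;
  "if" = 2;         # printed as:  "if" = 2;       (reserved keyword)
  "foo bar" = 3;    # printed as:  "foo bar" = 3;  (not a valid identifier)
  "or" = 4;         # printed as:  or = 4;
}
```
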
diff --git a/src/libexpr/print.hh b/src/libexpr/print.hh
new file mode 100644
index 000000000..3b72ae201
--- /dev/null
+++ b/src/libexpr/print.hh
@@ -0,0 +1,54 @@
+#pragma once
+/**
+ * @file
+ * @brief Common printing functions for the Nix language
+ *
+ * While most types come with their own methods for printing, they share some
+ * functions that are placed here.
+ */
+
+#include <iostream>
+
+namespace nix {
+ /**
+ * Print a string as a Nix string literal.
+ *
+ * Quotes and fairly minimal escaping are added.
+ *
+ * @param s The logical string
+ */
+ std::ostream & printLiteralString(std::ostream & o, std::string_view s);
+ inline std::ostream & printLiteralString(std::ostream & o, const char * s) {
+ return printLiteralString(o, std::string_view(s));
+ }
+ inline std::ostream & printLiteralString(std::ostream & o, const std::string & s) {
+ return printLiteralString(o, std::string_view(s));
+ }
+
+ /** Print `true` or `false`. */
+ std::ostream & printLiteralBool(std::ostream & o, bool b);
+
+ /**
+ * Print a string as an attribute name in the Nix expression language syntax.
+ *
+ * Prints a quoted string if necessary.
+ */
+ std::ostream & printAttributeName(std::ostream & o, std::string_view s);
+
+ /**
+ * Returns `true' if a string is a reserved keyword which requires quotation
+ * when printing attribute set field names.
+ */
+ bool isReservedKeyword(const std::string_view str);
+
+ /**
+ * Print a string as an identifier in the Nix expression language syntax.
+ *
+ * FIXME: "identifier" is ambiguous. Identifiers do not have a single
+ * textual representation. They can be used in variable references,
+ * let bindings, left-hand sides or attribute names in a select
+ * expression, or something else entirely, like JSON. Use one of the
+ * `print*` functions instead.
+ */
+ std::ostream & printIdentifier(std::ostream & o, std::string_view s);
+}
diff --git a/src/libexpr/symbol-table.hh b/src/libexpr/symbol-table.hh
index 288c15602..967a186dd 100644
--- a/src/libexpr/symbol-table.hh
+++ b/src/libexpr/symbol-table.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <list>
#include <map>
@@ -9,15 +10,11 @@
namespace nix {
-/* Symbol table used by the parser and evaluator to represent and look
- up identifiers and attributes efficiently. SymbolTable::create()
- converts a string into a symbol. Symbols have the property that
- they can be compared efficiently (using an equality test),
- because the symbol table stores only one copy of each string. */
-
-/* This class mainly exists to give us an operator<< for ostreams. We could also
- return plain strings from SymbolTable, but then we'd have to wrap every
- instance of a symbol that is fmt()ed, which is inconvenient and error-prone. */
+/**
+ * This class mainly exists to give us an operator<< for ostreams. We could also
+ * return plain strings from SymbolTable, but then we'd have to wrap every
+ * instance of a symbol that is fmt()ed, which is inconvenient and error-prone.
+ */
class SymbolStr
{
friend class SymbolTable;
@@ -46,6 +43,11 @@ public:
friend std::ostream & operator <<(std::ostream & os, const SymbolStr & symbol);
};
+/**
+ * Symbols have the property that they can be compared efficiently
+ * (using an equality test), because the symbol table stores only one
+ * copy of each string.
+ */
class Symbol
{
friend class SymbolTable;
@@ -65,6 +67,10 @@ public:
bool operator!=(const Symbol other) const { return id != other.id; }
};
+/**
+ * Symbol table used by the parser and evaluator to represent and look
+ * up identifiers and attributes efficiently.
+ */
class SymbolTable
{
private:
@@ -73,6 +79,9 @@ private:
public:
+ /**
+ * Converts a string into a symbol.
+ */
Symbol create(std::string_view s)
{
// Most symbols are looked up more than once, so we trade off insertion performance
diff --git a/src/libexpr/tests/derived-path.cc b/src/libexpr/tests/derived-path.cc
new file mode 100644
index 000000000..8210efef2
--- /dev/null
+++ b/src/libexpr/tests/derived-path.cc
@@ -0,0 +1,65 @@
+#include <nlohmann/json.hpp>
+#include <gtest/gtest.h>
+#include <rapidcheck/gtest.h>
+
+#include "tests/derived-path.hh"
+#include "tests/libexpr.hh"
+
+namespace nix {
+
+// Testing of trivial expressions
+class DerivedPathExpressionTest : public LibExprTest {};
+
+// FIXME: `RC_GTEST_FIXTURE_PROP` isn't calling `SetUpTestSuite` because it is
+// not a real fixture.
+//
+// See https://github.com/emil-e/rapidcheck/blob/master/doc/gtest.md#rc_gtest_fixture_propfixture-name-args
+TEST_F(DerivedPathExpressionTest, force_init)
+{
+}
+
+RC_GTEST_FIXTURE_PROP(
+ DerivedPathExpressionTest,
+ prop_opaque_path_round_trip,
+ (const DerivedPath::Opaque & o))
+{
+ auto * v = state.allocValue();
+ state.mkStorePathString(o.path, *v);
+ auto d = state.coerceToDerivedPath(noPos, *v, "");
+ RC_ASSERT(DerivedPath { o } == d);
+}
+
+// TODO use DerivedPath::Built for parameter once it supports a single output
+// path only.
+
+RC_GTEST_FIXTURE_PROP(
+ DerivedPathExpressionTest,
+ prop_built_path_placeholder_round_trip,
+ (const StorePath & drvPath, const StorePathName & outputName))
+{
+ auto * v = state.allocValue();
+ state.mkOutputString(*v, drvPath, outputName.name, std::nullopt);
+ auto [d, _] = state.coerceToDerivedPathUnchecked(noPos, *v, "");
+ DerivedPath::Built b {
+ .drvPath = drvPath,
+ .outputs = OutputsSpec::Names { outputName.name },
+ };
+ RC_ASSERT(DerivedPath { b } == d);
+}
+
+RC_GTEST_FIXTURE_PROP(
+ DerivedPathExpressionTest,
+ prop_built_path_out_path_round_trip,
+ (const StorePath & drvPath, const StorePathName & outputName, const StorePath & outPath))
+{
+ auto * v = state.allocValue();
+ state.mkOutputString(*v, drvPath, outputName.name, outPath);
+ auto [d, _] = state.coerceToDerivedPathUnchecked(noPos, *v, "");
+ DerivedPath::Built b {
+ .drvPath = drvPath,
+ .outputs = OutputsSpec::Names { outputName.name },
+ };
+ RC_ASSERT(DerivedPath { b } == d);
+}
+
+} /* namespace nix */
diff --git a/src/libexpr/tests/json.cc b/src/libexpr/tests/json.cc
index 411bc0ac3..7586bdd9b 100644
--- a/src/libexpr/tests/json.cc
+++ b/src/libexpr/tests/json.cc
@@ -8,7 +8,7 @@ namespace nix {
protected:
std::string getJSONValue(Value& value) {
std::stringstream ss;
- PathSet ps;
+ NixStringContext ps;
printValueAsJSON(state, true, value, noPos, ss, ps);
return ss.str();
}
diff --git a/src/libexpr/tests/libexpr.hh b/src/libexpr/tests/libexpr.hh
index 8534d9567..b8e65aafe 100644
--- a/src/libexpr/tests/libexpr.hh
+++ b/src/libexpr/tests/libexpr.hh
@@ -1,3 +1,6 @@
+#pragma once
+///@file
+
#include <gtest/gtest.h>
#include <gmock/gmock.h>
@@ -25,7 +28,7 @@ namespace nix {
}
Value eval(std::string input, bool forceValue = true) {
Value v;
- Expr * e = state.parseExprFromString(input, "");
+ Expr * e = state.parseExprFromString(input, state.rootPath(CanonPath::root));
assert(e);
state.eval(e, v);
if (forceValue)
diff --git a/src/libexpr/tests/local.mk b/src/libexpr/tests/local.mk
index 3e5504f71..331a5ead6 100644
--- a/src/libexpr/tests/local.mk
+++ b/src/libexpr/tests/local.mk
@@ -12,7 +12,7 @@ libexpr-tests_SOURCES := \
$(wildcard $(d)/*.cc) \
$(wildcard $(d)/value/*.cc)
-libexpr-tests_CXXFLAGS += -I src/libexpr -I src/libutil -I src/libstore -I src/libexpr/tests
+libexpr-tests_CXXFLAGS += -I src/libexpr -I src/libutil -I src/libstore -I src/libexpr/tests -I src/libfetchers
libexpr-tests_LIBS = libstore-tests libutils-tests libexpr libutil libstore libfetchers
diff --git a/src/libexpr/tests/primops.cc b/src/libexpr/tests/primops.cc
index e1d3ac503..ce3b5d11f 100644
--- a/src/libexpr/tests/primops.cc
+++ b/src/libexpr/tests/primops.cc
@@ -15,8 +15,8 @@ namespace nix {
return oss.str();
}
- void log(Verbosity lvl, const FormatOrString & fs) override {
- oss << fs.s << std::endl;
+ void log(Verbosity lvl, std::string_view s) override {
+ oss << s << std::endl;
}
void logEI(const ErrorInfo & ei) override {
diff --git a/src/libexpr/tests/value/context.cc b/src/libexpr/tests/value/context.cc
index 083359b7a..0d9381577 100644
--- a/src/libexpr/tests/value/context.cc
+++ b/src/libexpr/tests/value/context.cc
@@ -8,69 +8,62 @@
namespace nix {
-// Testing of trivial expressions
-struct NixStringContextElemTest : public LibExprTest {
- const Store & store() const {
- return *LibExprTest::store;
- }
-};
-
-TEST_F(NixStringContextElemTest, empty_invalid) {
+TEST(NixStringContextElemTest, empty_invalid) {
EXPECT_THROW(
- NixStringContextElem::parse(store(), ""),
+ NixStringContextElem::parse(""),
BadNixStringContextElem);
}
-TEST_F(NixStringContextElemTest, single_bang_invalid) {
+TEST(NixStringContextElemTest, single_bang_invalid) {
EXPECT_THROW(
- NixStringContextElem::parse(store(), "!"),
+ NixStringContextElem::parse("!"),
BadNixStringContextElem);
}
-TEST_F(NixStringContextElemTest, double_bang_invalid) {
+TEST(NixStringContextElemTest, double_bang_invalid) {
EXPECT_THROW(
- NixStringContextElem::parse(store(), "!!/"),
+ NixStringContextElem::parse("!!/"),
BadStorePath);
}
-TEST_F(NixStringContextElemTest, eq_slash_invalid) {
+TEST(NixStringContextElemTest, eq_slash_invalid) {
EXPECT_THROW(
- NixStringContextElem::parse(store(), "=/"),
+ NixStringContextElem::parse("=/"),
BadStorePath);
}
-TEST_F(NixStringContextElemTest, slash_invalid) {
+TEST(NixStringContextElemTest, slash_invalid) {
EXPECT_THROW(
- NixStringContextElem::parse(store(), "/"),
+ NixStringContextElem::parse("/"),
BadStorePath);
}
-TEST_F(NixStringContextElemTest, opaque) {
- std::string_view opaque = "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x";
- auto elem = NixStringContextElem::parse(store(), opaque);
+TEST(NixStringContextElemTest, opaque) {
+ std::string_view opaque = "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x";
+ auto elem = NixStringContextElem::parse(opaque);
auto * p = std::get_if<NixStringContextElem::Opaque>(&elem);
ASSERT_TRUE(p);
- ASSERT_EQ(p->path, store().parseStorePath(opaque));
- ASSERT_EQ(elem.to_string(store()), opaque);
+ ASSERT_EQ(p->path, StorePath { opaque });
+ ASSERT_EQ(elem.to_string(), opaque);
}
-TEST_F(NixStringContextElemTest, drvDeep) {
- std::string_view drvDeep = "=/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv";
- auto elem = NixStringContextElem::parse(store(), drvDeep);
+TEST(NixStringContextElemTest, drvDeep) {
+ std::string_view drvDeep = "=g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv";
+ auto elem = NixStringContextElem::parse(drvDeep);
auto * p = std::get_if<NixStringContextElem::DrvDeep>(&elem);
ASSERT_TRUE(p);
- ASSERT_EQ(p->drvPath, store().parseStorePath(drvDeep.substr(1)));
- ASSERT_EQ(elem.to_string(store()), drvDeep);
+ ASSERT_EQ(p->drvPath, StorePath { drvDeep.substr(1) });
+ ASSERT_EQ(elem.to_string(), drvDeep);
}
-TEST_F(NixStringContextElemTest, built) {
- std::string_view built = "!foo!/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv";
- auto elem = NixStringContextElem::parse(store(), built);
+TEST(NixStringContextElemTest, built) {
+ std::string_view built = "!foo!g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv";
+ auto elem = NixStringContextElem::parse(built);
auto * p = std::get_if<NixStringContextElem::Built>(&elem);
ASSERT_TRUE(p);
ASSERT_EQ(p->output, "foo");
- ASSERT_EQ(p->drvPath, store().parseStorePath(built.substr(5)));
- ASSERT_EQ(elem.to_string(store()), built);
+ ASSERT_EQ(p->drvPath, StorePath { built.substr(5) });
+ ASSERT_EQ(elem.to_string(), built);
}
}
@@ -102,13 +95,15 @@ Gen<NixStringContextElem::Built> Arbitrary<NixStringContextElem::Built>::arbitra
Gen<NixStringContextElem> Arbitrary<NixStringContextElem>::arbitrary()
{
- switch (*gen::inRange<uint8_t>(0, 2)) {
+ switch (*gen::inRange<uint8_t>(0, std::variant_size_v<NixStringContextElem::Raw>)) {
case 0:
return gen::just<NixStringContextElem>(*gen::arbitrary<NixStringContextElem::Opaque>());
case 1:
return gen::just<NixStringContextElem>(*gen::arbitrary<NixStringContextElem::DrvDeep>());
- default:
+ case 2:
return gen::just<NixStringContextElem>(*gen::arbitrary<NixStringContextElem::Built>());
+ default:
+ assert(false);
}
}
@@ -116,12 +111,12 @@ Gen<NixStringContextElem> Arbitrary<NixStringContextElem>::arbitrary()
namespace nix {
-RC_GTEST_FIXTURE_PROP(
+RC_GTEST_PROP(
NixStringContextElemTest,
prop_round_rip,
(const NixStringContextElem & o))
{
- RC_ASSERT(o == NixStringContextElem::parse(store(), o.to_string(store())));
+ RC_ASSERT(o == NixStringContextElem::parse(o.to_string()));
}
}
diff --git a/src/libexpr/tests/value/context.hh b/src/libexpr/tests/value/context.hh
index 54d21760e..c0bc97ba3 100644
--- a/src/libexpr/tests/value/context.hh
+++ b/src/libexpr/tests/value/context.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <rapidcheck/gen/Arbitrary.h>
diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc
index c35c876e3..4996a5bde 100644
--- a/src/libexpr/value-to-json.cc
+++ b/src/libexpr/value-to-json.cc
@@ -11,7 +11,7 @@
namespace nix {
using json = nlohmann::json;
json printValueAsJSON(EvalState & state, bool strict,
- Value & v, const PosIdx pos, PathSet & context, bool copyToStore)
+ Value & v, const PosIdx pos, NixStringContext & context, bool copyToStore)
{
checkInterrupt();
@@ -36,9 +36,10 @@ json printValueAsJSON(EvalState & state, bool strict,
case nPath:
if (copyToStore)
- out = state.store->printStorePath(state.copyPathToStore(context, v.path));
+ out = state.store->printStorePath(
+ state.copyPathToStore(context, v.path()));
else
- out = v.path;
+ out = v.path().path.abs();
break;
case nNull:
@@ -94,13 +95,13 @@ json printValueAsJSON(EvalState & state, bool strict,
}
void printValueAsJSON(EvalState & state, bool strict,
- Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore)
+ Value & v, const PosIdx pos, std::ostream & str, NixStringContext & context, bool copyToStore)
{
str << printValueAsJSON(state, strict, v, pos, context, copyToStore);
}
json ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
- PathSet & context, bool copyToStore) const
+ NixStringContext & context, bool copyToStore) const
{
state.debugThrowLastTrace(TypeError("cannot convert %1% to JSON", showType()));
}
diff --git a/src/libexpr/value-to-json.hh b/src/libexpr/value-to-json.hh
index 22f26b790..47ac90313 100644
--- a/src/libexpr/value-to-json.hh
+++ b/src/libexpr/value-to-json.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "nixexpr.hh"
#include "eval.hh"
@@ -10,9 +11,9 @@
namespace nix {
nlohmann::json printValueAsJSON(EvalState & state, bool strict,
- Value & v, const PosIdx pos, PathSet & context, bool copyToStore = true);
+ Value & v, const PosIdx pos, NixStringContext & context, bool copyToStore = true);
void printValueAsJSON(EvalState & state, bool strict,
- Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore = true);
+ Value & v, const PosIdx pos, std::ostream & str, NixStringContext & context, bool copyToStore = true);
}
diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc
index 3f6222768..2539ad1c1 100644
--- a/src/libexpr/value-to-xml.cc
+++ b/src/libexpr/value-to-xml.cc
@@ -18,21 +18,21 @@ static XMLAttrs singletonAttrs(const std::string & name, const std::string & val
static void printValueAsXML(EvalState & state, bool strict, bool location,
- Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
+ Value & v, XMLWriter & doc, NixStringContext & context, PathSet & drvsSeen,
const PosIdx pos);
static void posToXML(EvalState & state, XMLAttrs & xmlAttrs, const Pos & pos)
{
- if (auto path = std::get_if<Path>(&pos.origin))
- xmlAttrs["path"] = *path;
- xmlAttrs["line"] = (format("%1%") % pos.line).str();
- xmlAttrs["column"] = (format("%1%") % pos.column).str();
+ if (auto path = std::get_if<SourcePath>(&pos.origin))
+ xmlAttrs["path"] = path->path.abs();
+ xmlAttrs["line"] = fmt("%1%", pos.line);
+ xmlAttrs["column"] = fmt("%1%", pos.column);
}
static void showAttrs(EvalState & state, bool strict, bool location,
- Bindings & attrs, XMLWriter & doc, PathSet & context, PathSet & drvsSeen)
+ Bindings & attrs, XMLWriter & doc, NixStringContext & context, PathSet & drvsSeen)
{
StringSet names;
@@ -54,7 +54,7 @@ static void showAttrs(EvalState & state, bool strict, bool location,
static void printValueAsXML(EvalState & state, bool strict, bool location,
- Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
+ Value & v, XMLWriter & doc, NixStringContext & context, PathSet & drvsSeen,
const PosIdx pos)
{
checkInterrupt();
@@ -64,7 +64,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
switch (v.type()) {
case nInt:
- doc.writeEmptyElement("int", singletonAttrs("value", (format("%1%") % v.integer).str()));
+ doc.writeEmptyElement("int", singletonAttrs("value", fmt("%1%", v.integer)));
break;
case nBool:
@@ -78,7 +78,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
break;
case nPath:
- doc.writeEmptyElement("path", singletonAttrs("value", v.path));
+ doc.writeEmptyElement("path", singletonAttrs("value", v.path().to_string()));
break;
case nNull:
@@ -156,7 +156,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
break;
case nFloat:
- doc.writeEmptyElement("float", singletonAttrs("value", (format("%1%") % v.fpoint).str()));
+ doc.writeEmptyElement("float", singletonAttrs("value", fmt("%1%", v.fpoint)));
break;
case nThunk:
@@ -166,7 +166,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
void ExternalValueBase::printValueAsXML(EvalState & state, bool strict,
- bool location, XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
+ bool location, XMLWriter & doc, NixStringContext & context, PathSet & drvsSeen,
const PosIdx pos) const
{
doc.writeEmptyElement("unevaluated");
@@ -174,7 +174,7 @@ void ExternalValueBase::printValueAsXML(EvalState & state, bool strict,
void printValueAsXML(EvalState & state, bool strict, bool location,
- Value & v, std::ostream & out, PathSet & context, const PosIdx pos)
+ Value & v, std::ostream & out, NixStringContext & context, const PosIdx pos)
{
XMLWriter doc(true, out);
XMLOpenElement root(doc, "expr");
diff --git a/src/libexpr/value-to-xml.hh b/src/libexpr/value-to-xml.hh
index 506f32b6b..6d702c0f2 100644
--- a/src/libexpr/value-to-xml.hh
+++ b/src/libexpr/value-to-xml.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "nixexpr.hh"
#include "eval.hh"
@@ -9,6 +10,6 @@
namespace nix {
void printValueAsXML(EvalState & state, bool strict, bool location,
- Value & v, std::ostream & out, PathSet & context, const PosIdx pos);
+ Value & v, std::ostream & out, NixStringContext & context, const PosIdx pos);
}
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
index 508dbe218..89c0c36fd 100644
--- a/src/libexpr/value.hh
+++ b/src/libexpr/value.hh
@@ -1,9 +1,11 @@
#pragma once
+///@file
#include <cassert>
#include "symbol-table.hh"
#include "value/context.hh"
+#include "input-accessor.hh"
#if HAVE_BOEHMGC
#include <gc/gc_allocator.h>
@@ -35,9 +37,11 @@ typedef enum {
tFloat
} InternalType;
-// This type abstracts over all actual value types in the language,
-// grouping together implementation details like tList*, different function
-// types, and types in non-normal form (so thunks and co.)
+/**
+ * This type abstracts over all actual value types in the language,
+ * grouping together implementation details like tList*, different function
+ * types, and types in non-normal form (so thunks and co.)
+ */
typedef enum {
nThunk,
nInt,
@@ -69,40 +73,53 @@ class XMLWriter;
typedef int64_t NixInt;
typedef double NixFloat;
-/* External values must descend from ExternalValueBase, so that
+/**
+ * External values must descend from ExternalValueBase, so that
* type-agnostic nix functions (e.g. showType) can be implemented
*/
class ExternalValueBase
{
friend std::ostream & operator << (std::ostream & str, const ExternalValueBase & v);
protected:
- /* Print out the value */
+ /**
+ * Print out the value
+ */
virtual std::ostream & print(std::ostream & str) const = 0;
public:
- /* Return a simple string describing the type */
+ /**
+ * Return a simple string describing the type
+ */
virtual std::string showType() const = 0;
- /* Return a string to be used in builtins.typeOf */
+ /**
+ * Return a string to be used in builtins.typeOf
+ */
virtual std::string typeOf() const = 0;
- /* Coerce the value to a string. Defaults to uncoercable, i.e. throws an
+ /**
+ * Coerce the value to a string. Defaults to uncoercible, i.e. throws an
* error.
*/
- virtual std::string coerceToString(const Pos & pos, PathSet & context, bool copyMore, bool copyToStore) const;
+ virtual std::string coerceToString(const Pos & pos, NixStringContext & context, bool copyMore, bool copyToStore) const;
- /* Compare to another value of the same type. Defaults to uncomparable,
+ /**
+ * Compare to another value of the same type. Defaults to uncomparable,
* i.e. always false.
*/
virtual bool operator ==(const ExternalValueBase & b) const;
- /* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */
+ /**
+ * Print the value as JSON. Defaults to unconvertible, i.e. throws an error
+ */
virtual nlohmann::json printValueAsJSON(EvalState & state, bool strict,
- PathSet & context, bool copyToStore = true) const;
+ NixStringContext & context, bool copyToStore = true) const;
- /* Print the value as XML. Defaults to unevaluated */
+ /**
+ * Print the value as XML. Defaults to unevaluated
+ */
virtual void printValueAsXML(EvalState & state, bool strict, bool location,
- XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
+ XMLWriter & doc, NixStringContext & context, PathSet & drvsSeen,
const PosIdx pos) const;
virtual ~ExternalValueBase()
@@ -145,32 +162,34 @@ public:
NixInt integer;
bool boolean;
- /* Strings in the evaluator carry a so-called `context' which
- is a list of strings representing store paths. This is to
- allow users to write things like
+ /**
+ * Strings in the evaluator carry a so-called `context` which
+ * is a list of strings representing store paths. This is to
+ * allow users to write things like
- "--with-freetype2-library=" + freetype + "/lib"
+ * "--with-freetype2-library=" + freetype + "/lib"
- where `freetype' is a derivation (or a source to be copied
- to the store). If we just concatenated the strings without
- keeping track of the referenced store paths, then if the
- string is used as a derivation attribute, the derivation
- will not have the correct dependencies in its inputDrvs and
- inputSrcs.
+ * where `freetype` is a derivation (or a source to be copied
+ * to the store). If we just concatenated the strings without
+ * keeping track of the referenced store paths, then if the
+ * string is used as a derivation attribute, the derivation
+ * will not have the correct dependencies in its inputDrvs and
+ * inputSrcs.
- The semantics of the context is as follows: when a string
- with context C is used as a derivation attribute, then the
- derivations in C will be added to the inputDrvs of the
- derivation, and the other store paths in C will be added to
- the inputSrcs of the derivations.
+ * The semantics of the context is as follows: when a string
+ * with context C is used as a derivation attribute, then the
+ * derivations in C will be added to the inputDrvs of the
+ * derivation, and the other store paths in C will be added to
+ * the inputSrcs of the derivations.
- For canonicity, the store paths should be in sorted order. */
+ * For canonicity, the store paths should be in sorted order.
+ */
struct {
const char * s;
const char * * context; // must be in sorted order
} string;
- const char * path;
+ const char * _path;
Bindings * attrs;
struct {
size_t size;
@@ -196,8 +215,10 @@ public:
NixFloat fpoint;
};
- // Returns the normal type of a Value. This only returns nThunk if the
- // Value hasn't been forceValue'd
+ /**
+ * Returns the normal type of a Value. This only returns nThunk if
+ * the Value hasn't been forceValue'd
+ */
inline ValueType type() const
{
switch (internalType) {
@@ -216,8 +237,10 @@ public:
abort();
}
- /* After overwriting an app node, be sure to clear pointers in the
- Value to ensure that the target isn't kept alive unnecessarily. */
+ /**
+ * After overwriting an app node, be sure to clear pointers in the
+ * Value to ensure that the target isn't kept alive unnecessarily.
+ */
inline void clearValue()
{
app.left = app.right = 0;
@@ -246,19 +269,24 @@ public:
void mkString(std::string_view s);
- void mkString(std::string_view s, const PathSet & context);
+ void mkString(std::string_view s, const NixStringContext & context);
+
+ void mkStringMove(const char * s, const NixStringContext & context);
+
+ inline void mkString(const Symbol & s)
+ {
+ mkString(((const std::string &) s).c_str());
+ }
- void mkStringMove(const char * s, const PathSet & context);
+ void mkPath(const SourcePath & path);
- inline void mkPath(const char * s)
+ inline void mkPath(const char * path)
{
clearValue();
internalType = tPath;
- path = s;
+ _path = path;
}
- void mkPath(std::string_view s);
-
inline void mkNull()
{
clearValue();
@@ -365,13 +393,13 @@ public:
PosIdx determinePos(const PosIdx pos) const;
- /* Check whether forcing this value requires a trivial amount of
- computation. In particular, function applications are
- non-trivial. */
+ /**
+ * Check whether forcing this value requires a trivial amount of
+ * computation. In particular, function applications are
+ * non-trivial.
+ */
bool isTrivial() const;
- NixStringContext getContext(const Store &);
-
auto listItems()
{
struct ListIterable
@@ -399,6 +427,18 @@ public:
auto begin = listElems();
return ConstListIterable { begin, begin + listSize() };
}
+
+ SourcePath path() const
+ {
+ assert(internalType == tPath);
+ return SourcePath{CanonPath(_path)};
+ }
+
+ std::string_view str() const
+ {
+ assert(internalType == tString);
+ return std::string_view(string.s);
+ }
};
@@ -413,7 +453,9 @@ typedef std::map<Symbol, ValueVector> ValueVectorMap;
#endif
-/* A value allocated in traceable memory. */
+/**
+ * A value allocated in traceable memory.
+ */
typedef std::shared_ptr<Value *> RootValue;
RootValue allocRootValue(Value * v);
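For illustration only, not part of the patch: with these changes a path value carries a SourcePath rather than a raw string, and the new `path()`/`str()` accessors assert the expected internal type. A minimal sketch, assuming the usual libexpr headers:

    #include "value.hh"

    using namespace nix;

    void sketch()
    {
        Value v;
        v.mkPath(SourcePath{CanonPath("/etc/hosts")});   // stored internally as tPath
        assert(v.type() == nPath);
        SourcePath p = v.path();                         // asserts internalType == tPath
        std::string abs = p.to_string();                 // "/etc/hosts"

        Value s;
        s.mkString("hello");
        assert(s.str() == "hello");                      // asserts internalType == tString
    }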
diff --git a/src/libexpr/value/context.cc b/src/libexpr/value/context.cc
index 61d9c53df..f76fc76e4 100644
--- a/src/libexpr/value/context.cc
+++ b/src/libexpr/value/context.cc
@@ -1,11 +1,10 @@
#include "value/context.hh"
-#include "store-api.hh"
#include <optional>
namespace nix {
-NixStringContextElem NixStringContextElem::parse(const Store & store, std::string_view s0)
+NixStringContextElem NixStringContextElem::parse(std::string_view s0)
{
std::string_view s = s0;
@@ -25,41 +24,41 @@ NixStringContextElem NixStringContextElem::parse(const Store & store, std::strin
"String content element beginning with '!' should have a second '!'");
}
return NixStringContextElem::Built {
- .drvPath = store.parseStorePath(s.substr(index + 1)),
+ .drvPath = StorePath { s.substr(index + 1) },
.output = std::string(s.substr(0, index)),
};
}
case '=': {
return NixStringContextElem::DrvDeep {
- .drvPath = store.parseStorePath(s.substr(1)),
+ .drvPath = StorePath { s.substr(1) },
};
}
default: {
return NixStringContextElem::Opaque {
- .path = store.parseStorePath(s),
+ .path = StorePath { s },
};
}
}
}
-std::string NixStringContextElem::to_string(const Store & store) const {
+std::string NixStringContextElem::to_string() const {
return std::visit(overloaded {
[&](const NixStringContextElem::Built & b) {
std::string res;
res += '!';
res += b.output;
res += '!';
- res += store.printStorePath(b.drvPath);
+ res += b.drvPath.to_string();
return res;
},
[&](const NixStringContextElem::DrvDeep & d) {
std::string res;
res += '=';
- res += store.printStorePath(d.drvPath);
+ res += d.drvPath.to_string();
return res;
},
[&](const NixStringContextElem::Opaque & o) {
- return store.printStorePath(o.path);
+ return std::string { o.path.to_string() };
},
}, raw());
}
diff --git a/src/libexpr/value/context.hh b/src/libexpr/value/context.hh
index 721563cba..287ae08a9 100644
--- a/src/libexpr/value/context.hh
+++ b/src/libexpr/value/context.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "util.hh"
#include "comparator.hh"
@@ -25,36 +26,37 @@ public:
}
};
-class Store;
-
-/* Plain opaque path to some store object.
-
- Encoded as just the path: ‘<path>’.
-*/
+/**
+ * Plain opaque path to some store object.
+ *
+ * Encoded as just the path: ‘<path>’.
+ */
struct NixStringContextElem_Opaque {
StorePath path;
GENERATE_CMP(NixStringContextElem_Opaque, me->path);
};
-/* Path to a derivation and its entire build closure.
-
- The path doesn't just refer to derivation itself and its closure, but
- also all outputs of all derivations in that closure (including the
- root derivation).
-
- Encoded in the form ‘=<drvPath>’.
-*/
+/**
+ * Path to a derivation and its entire build closure.
+ *
+ * The path doesn't just refer to the derivation itself and its closure, but
+ * also all outputs of all derivations in that closure (including the
+ * root derivation).
+ *
+ * Encoded in the form ‘=<drvPath>’.
+ */
struct NixStringContextElem_DrvDeep {
StorePath drvPath;
GENERATE_CMP(NixStringContextElem_DrvDeep, me->drvPath);
};
-/* Derivation output.
-
- Encoded in the form ‘!<output>!<drvPath>’.
-*/
+/**
+ * Derivation output.
+ *
+ * Encoded in the form ‘!<output>!<drvPath>’.
+ */
struct NixStringContextElem_Built {
StorePath drvPath;
std::string output;
@@ -76,22 +78,26 @@ struct NixStringContextElem : _NixStringContextElem_Raw {
using DrvDeep = NixStringContextElem_DrvDeep;
using Built = NixStringContextElem_Built;
- inline const Raw & raw() const {
+ inline const Raw & raw() const & {
return static_cast<const Raw &>(*this);
}
- inline Raw & raw() {
+ inline Raw & raw() & {
return static_cast<Raw &>(*this);
}
+ inline Raw && raw() && {
+ return static_cast<Raw &&>(*this);
+ }
- /* Decode a context string, one of:
- - ‘<path>’
- - ‘=<path>’
- - ‘!<name>!<path>’
- */
- static NixStringContextElem parse(const Store & store, std::string_view s);
- std::string to_string(const Store & store) const;
+ /**
+ * Decode a context string, one of:
+ * - ‘<path>’
+ * - ‘=<path>’
+ * - ‘!<name>!<path>’
+ */
+ static NixStringContextElem parse(std::string_view s);
+ std::string to_string() const;
};
-typedef std::vector<NixStringContextElem> NixStringContext;
+typedef std::set<NixStringContextElem> NixStringContext;
}
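For illustration only, not part of the patch: with the Store parameter gone, context strings are decoded and re-encoded purely syntactically. A minimal sketch using a made-up (but syntactically valid) store path base name:

    #include "value/context.hh"
    #include <cassert>

    using namespace nix;

    void sketch()
    {
        for (const char * s : {
            "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo",          // Opaque:  <path>
            "=g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv",     // DrvDeep: =<drvPath>
            "!out!g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv", // Built:   !<output>!<drvPath>
        }) {
            auto elem = NixStringContextElem::parse(s);
            assert(elem.to_string() == s);                   // round-trips without a Store
        }
    }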
diff --git a/src/libfetchers/attrs.hh b/src/libfetchers/attrs.hh
index e41037633..1a14bb023 100644
--- a/src/libfetchers/attrs.hh
+++ b/src/libfetchers/attrs.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
diff --git a/src/libfetchers/cache.hh b/src/libfetchers/cache.hh
index 3763ee2a6..ae398d040 100644
--- a/src/libfetchers/cache.hh
+++ b/src/libfetchers/cache.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "fetchers.hh"
diff --git a/src/libfetchers/fetch-settings.hh b/src/libfetchers/fetch-settings.hh
index 7049dea30..6108a179c 100644
--- a/src/libfetchers/fetch-settings.hh
+++ b/src/libfetchers/fetch-settings.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "config.hh"
@@ -75,21 +76,25 @@ struct FetchSettings : public Config
Path or URI of the global flake registry.
When empty, disables the global flake registry.
- )"};
+ )",
+ {}, true, Xp::Flakes};
Setting<bool> useRegistries{this, true, "use-registries",
- "Whether to use flake registries to resolve flake references."};
+ "Whether to use flake registries to resolve flake references.",
+ {}, true, Xp::Flakes};
Setting<bool> acceptFlakeConfig{this, false, "accept-flake-config",
- "Whether to accept nix configuration from a flake without prompting."};
+ "Whether to accept nix configuration from a flake without prompting.",
+ {}, true, Xp::Flakes};
Setting<std::string> commitLockFileSummary{
this, "", "commit-lockfile-summary",
R"(
The commit summary to use when committing changed flake lock files. If
empty, the summary is generated based on the action performed.
- )"};
+ )",
+ {}, true, Xp::Flakes};
};
// FIXME: don't use a global variable.
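For illustration only, not part of the patch: the trailing arguments added to these Setting constructors appear to be the setting's aliases, whether to document its default value, and an experimental-feature gate, roughly:

    // Hypothetical sketch; parameter meanings inferred from the calls above.
    Setting<bool> exampleFlakeSetting{this, false, "example-flake-setting",
        "A setting that only takes effect with the 'flakes' experimental feature.",
        {},          // aliases
        true,        // document the default value
        Xp::Flakes}; // experimental-feature gate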
diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc
index c767e72e5..91db3a9eb 100644
--- a/src/libfetchers/fetchers.cc
+++ b/src/libfetchers/fetchers.cc
@@ -210,7 +210,13 @@ StorePath Input::computeStorePath(Store & store) const
auto narHash = getNarHash();
if (!narHash)
throw Error("cannot compute store path for unlocked input '%s'", to_string());
- return store.makeFixedOutputPath(FileIngestionMethod::Recursive, *narHash, getName());
+ return store.makeFixedOutputPath(getName(), FixedOutputInfo {
+ .hash = {
+ .method = FileIngestionMethod::Recursive,
+ .hash = *narHash,
+ },
+ .references = {},
+ });
}
std::string Input::getType() const
diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh
index 95c0f5974..498ad7e4d 100644
--- a/src/libfetchers/fetchers.hh
+++ b/src/libfetchers/fetchers.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "hash.hh"
@@ -20,14 +21,14 @@ struct Tree
struct InputScheme;
-/* The Input object is generated by a specific fetcher, based on the
+/**
+ * The Input object is generated by a specific fetcher, based on the
* user-supplied input attribute in the flake.nix file, and contains
* the information that the specific fetcher needs to perform the
* actual fetch. The Input object is most commonly created via the
* "fromURL()" or "fromAttrs()" static functions which are provided
* the url or attrset specified in the flake file.
*/
-
struct Input
{
friend struct InputScheme;
@@ -37,7 +38,9 @@ struct Input
bool locked = false;
bool direct = true;
- /* path of the parent of this input, used for relative path resolution */
+ /**
+ * path of the parent of this input, used for relative path resolution
+ */
std::optional<Path> parent;
public:
@@ -55,27 +58,35 @@ public:
Attrs toAttrs() const;
- /* Check whether this is a "direct" input, that is, not
- one that goes through a registry. */
+ /**
+ * Check whether this is a "direct" input, that is, not
+ * one that goes through a registry.
+ */
bool isDirect() const { return direct; }
- /* Check whether this is a "locked" input, that is,
- one that contains a commit hash or content hash. */
+ /**
+ * Check whether this is a "locked" input, that is,
+ * one that contains a commit hash or content hash.
+ */
bool isLocked() const { return locked; }
- /* Check whether the input carries all necessary info required
- for cache insertion and substitution.
- These fields are used to uniquely identify cached trees
- within the "tarball TTL" window without necessarily
- indicating that the input's origin is unchanged. */
+ /**
+ * Check whether the input carries all necessary info required
+ * for cache insertion and substitution.
+ * These fields are used to uniquely identify cached trees
+ * within the "tarball TTL" window without necessarily
+ * indicating that the input's origin is unchanged.
+ */
bool hasAllInfo() const;
bool operator ==(const Input & other) const;
bool contains(const Input & other) const;
- /* Fetch the input into the Nix store, returning the location in
- the Nix store and the locked input. */
+ /**
+ * Fetch the input into the Nix store, returning the location in
+ * the Nix store and the locked input.
+ */
std::pair<Tree, Input> fetch(ref<Store> store) const;
Input applyOverrides(
@@ -104,7 +115,8 @@ public:
};
-/* The InputScheme represents a type of fetcher. Each fetcher
+/**
+ * The InputScheme represents a type of fetcher. Each fetcher
* registers with nix at startup time. When processing an input for a
* flake, each scheme is given an opportunity to "recognize" that
* input from the url or attributes in the flake file's specification
diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc
index 309a143f5..47282f6c4 100644
--- a/src/libfetchers/git.cc
+++ b/src/libfetchers/git.cc
@@ -62,6 +62,7 @@ std::optional<std::string> readHead(const Path & path)
.program = "git",
// FIXME: use 'HEAD' to avoid returning all refs
.args = {"ls-remote", "--symref", path},
+ .isInteractive = true,
});
if (status != 0) return std::nullopt;
@@ -266,7 +267,7 @@ struct GitInputScheme : InputScheme
for (auto & [name, value] : url.query) {
if (name == "rev" || name == "ref")
attrs.emplace(name, value);
- else if (name == "shallow" || name == "submodules")
+ else if (name == "shallow" || name == "submodules" || name == "allRefs")
attrs.emplace(name, Explicit<bool> { value == "1" });
else
url2.query.emplace(name, value);
@@ -350,7 +351,7 @@ struct GitInputScheme : InputScheme
args.push_back(destDir);
- runProgram("git", true, args);
+ runProgram("git", true, args, {}, true);
}
std::optional<Path> getSourcePath(const Input & input) override
@@ -555,7 +556,7 @@ struct GitInputScheme : InputScheme
: ref == "HEAD"
? *ref
: "refs/heads/" + *ref;
- runProgram("git", true, { "-C", repoDir, "--git-dir", gitDir, "fetch", "--quiet", "--force", "--", actualUrl, fmt("%s:%s", fetchRef, fetchRef) });
+ runProgram("git", true, { "-C", repoDir, "--git-dir", gitDir, "fetch", "--quiet", "--force", "--", actualUrl, fmt("%s:%s", fetchRef, fetchRef) }, {}, true);
} catch (Error & e) {
if (!pathExists(localRefFile)) throw;
warn("could not update local clone of Git repository '%s'; continuing with the most recent version", actualUrl);
@@ -622,7 +623,7 @@ struct GitInputScheme : InputScheme
// everything to ensure we get the rev.
Activity act(*logger, lvlTalkative, actUnknown, fmt("making temporary clone of '%s'", repoDir));
runProgram("git", true, { "-C", tmpDir, "fetch", "--quiet", "--force",
- "--update-head-ok", "--", repoDir, "refs/*:refs/*" });
+ "--update-head-ok", "--", repoDir, "refs/*:refs/*" }, {}, true);
}
runProgram("git", true, { "-C", tmpDir, "checkout", "--quiet", input.getRev()->gitRev() });
@@ -649,7 +650,7 @@ struct GitInputScheme : InputScheme
{
Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching submodules of '%s'", actualUrl));
- runProgram("git", true, { "-C", tmpDir, "submodule", "--quiet", "update", "--init", "--recursive" });
+ runProgram("git", true, { "-C", tmpDir, "submodule", "--quiet", "update", "--init", "--recursive" }, {}, true);
}
filter = isNotDotGitDirectory;
diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc
index 1ed09d30d..6c1d573ce 100644
--- a/src/libfetchers/github.cc
+++ b/src/libfetchers/github.cc
@@ -21,7 +21,7 @@ struct DownloadUrl
};
// A github, gitlab, or sourcehut host
-const static std::string hostRegexS = "[a-zA-Z0-9.]*"; // FIXME: check
+const static std::string hostRegexS = "[a-zA-Z0-9.-]*"; // FIXME: check
std::regex hostRegex(hostRegexS, std::regex::ECMAScript);
struct GitArchiveInputScheme : InputScheme
diff --git a/src/libfetchers/input-accessor.cc b/src/libfetchers/input-accessor.cc
new file mode 100644
index 000000000..f9909c218
--- /dev/null
+++ b/src/libfetchers/input-accessor.cc
@@ -0,0 +1,100 @@
+#include "input-accessor.hh"
+#include "store-api.hh"
+
+namespace nix {
+
+std::ostream & operator << (std::ostream & str, const SourcePath & path)
+{
+ str << path.to_string();
+ return str;
+}
+
+std::string_view SourcePath::baseName() const
+{
+ return path.baseName().value_or("source");
+}
+
+SourcePath SourcePath::parent() const
+{
+ auto p = path.parent();
+ assert(p);
+ return std::move(*p);
+}
+
+InputAccessor::Stat SourcePath::lstat() const
+{
+ auto st = nix::lstat(path.abs());
+ return InputAccessor::Stat {
+ .type =
+ S_ISREG(st.st_mode) ? InputAccessor::tRegular :
+ S_ISDIR(st.st_mode) ? InputAccessor::tDirectory :
+ S_ISLNK(st.st_mode) ? InputAccessor::tSymlink :
+ InputAccessor::tMisc,
+ .isExecutable = S_ISREG(st.st_mode) && st.st_mode & S_IXUSR
+ };
+}
+
+std::optional<InputAccessor::Stat> SourcePath::maybeLstat() const
+{
+ // FIXME: merge these into one operation.
+ if (!pathExists())
+ return {};
+ return lstat();
+}
+
+InputAccessor::DirEntries SourcePath::readDirectory() const
+{
+ InputAccessor::DirEntries res;
+ for (auto & entry : nix::readDirectory(path.abs())) {
+ std::optional<InputAccessor::Type> type;
+ switch (entry.type) {
+ case DT_REG: type = InputAccessor::Type::tRegular; break;
+ case DT_LNK: type = InputAccessor::Type::tSymlink; break;
+ case DT_DIR: type = InputAccessor::Type::tDirectory; break;
+ }
+ res.emplace(entry.name, type);
+ }
+ return res;
+}
+
+StorePath SourcePath::fetchToStore(
+ ref<Store> store,
+ std::string_view name,
+ PathFilter * filter,
+ RepairFlag repair) const
+{
+ return
+ settings.readOnlyMode
+ ? store->computeStorePathForPath(name, path.abs(), FileIngestionMethod::Recursive, htSHA256, filter ? *filter : defaultPathFilter).first
+ : store->addToStore(name, path.abs(), FileIngestionMethod::Recursive, htSHA256, filter ? *filter : defaultPathFilter, repair);
+}
+
+SourcePath SourcePath::resolveSymlinks() const
+{
+ SourcePath res(CanonPath::root);
+
+ int linksAllowed = 1024;
+
+ for (auto & component : path) {
+ res.path.push(component);
+ while (true) {
+ if (auto st = res.maybeLstat()) {
+ if (!linksAllowed--)
+ throw Error("infinite symlink recursion in path '%s'", path);
+ if (st->type != InputAccessor::tSymlink) break;
+ auto target = res.readLink();
+ if (hasPrefix(target, "/"))
+ res = CanonPath(target);
+ else {
+ res.path.pop();
+ res.path.extend(CanonPath(target));
+ }
+ } else
+ break;
+ }
+ }
+
+ return res;
+}
+
+}
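For illustration only, not part of the patch: a rough sketch of calling SourcePath::fetchToStore, assuming a ref<Store> is already in scope:

    #include "input-accessor.hh"
    #include "store-api.hh"

    using namespace nix;

    StorePath sketch(ref<Store> store)
    {
        SourcePath src { CanonPath("/home/alice/project") };   // hypothetical source tree
        // In read-only mode this only computes the store path; otherwise it copies the tree.
        return src.fetchToStore(store, src.baseName());
    }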
diff --git a/src/libfetchers/input-accessor.hh b/src/libfetchers/input-accessor.hh
new file mode 100644
index 000000000..5a2f17f62
--- /dev/null
+++ b/src/libfetchers/input-accessor.hh
@@ -0,0 +1,167 @@
+#pragma once
+
+#include "ref.hh"
+#include "types.hh"
+#include "archive.hh"
+#include "canon-path.hh"
+#include "repair-flag.hh"
+
+namespace nix {
+
+class StorePath;
+class Store;
+
+struct InputAccessor
+{
+ enum Type {
+ tRegular, tSymlink, tDirectory,
+ /**
+ Any other node types that may be encountered on the file system, such as device nodes, sockets, named pipes, and possibly even more exotic things.
+
+ Responsible for `"unknown"` from `builtins.readFileType "/dev/null"`.
+
+ Unlike `DT_UNKNOWN`, this must not be used for deferring the lookup of types.
+ */
+ tMisc
+ };
+
+ struct Stat
+ {
+ Type type = tMisc;
+ //uint64_t fileSize = 0; // regular files only
+ bool isExecutable = false; // regular files only
+ };
+
+ typedef std::optional<Type> DirEntry;
+
+ typedef std::map<std::string, DirEntry> DirEntries;
+};
+
+/**
+ * An abstraction for accessing source files during
+ * evaluation. Currently, it's just a wrapper around `CanonPath` that
+ * accesses files in the regular filesystem, but in the future it will
+ * support fetching files in other ways.
+ */
+struct SourcePath
+{
+ CanonPath path;
+
+ SourcePath(CanonPath path)
+ : path(std::move(path))
+ { }
+
+ std::string_view baseName() const;
+
+ /**
+ * Construct the parent of this `SourcePath`. Aborts if `this`
+ * denotes the root.
+ */
+ SourcePath parent() const;
+
+ /**
+ * If this `SourcePath` denotes a regular file (not a symlink),
+ * return its contents; otherwise throw an error.
+ */
+ std::string readFile() const
+ { return nix::readFile(path.abs()); }
+
+ /**
+ * Return whether this `SourcePath` denotes a file (of any type)
+ * that exists
+ */
+ bool pathExists() const
+ { return nix::pathExists(path.abs()); }
+
+ /**
+ * Return stats about this `SourcePath`, or throw an exception if
+ * it doesn't exist.
+ */
+ InputAccessor::Stat lstat() const;
+
+ /**
+ * Return stats about this `SourcePath`, or std::nullopt if it
+ * doesn't exist.
+ */
+ std::optional<InputAccessor::Stat> maybeLstat() const;
+
+ /**
+ * If this `SourcePath` denotes a directory (not a symlink),
+ * return its directory entries; otherwise throw an error.
+ */
+ InputAccessor::DirEntries readDirectory() const;
+
+ /**
+ * If this `SourcePath` denotes a symlink, return its target;
+ * otherwise throw an error.
+ */
+ std::string readLink() const
+ { return nix::readLink(path.abs()); }
+
+ /**
+ * Dump this `SourcePath` to `sink` as a NAR archive.
+ */
+ void dumpPath(
+ Sink & sink,
+ PathFilter & filter = defaultPathFilter) const
+ { return nix::dumpPath(path.abs(), sink, filter); }
+
+ /**
+ * Copy this `SourcePath` to the Nix store.
+ */
+ StorePath fetchToStore(
+ ref<Store> store,
+ std::string_view name = "source",
+ PathFilter * filter = nullptr,
+ RepairFlag repair = NoRepair) const;
+
+ /**
+ * Return the location of this path in the "real" filesystem, if
+ * it has a physical location.
+ */
+ std::optional<CanonPath> getPhysicalPath() const
+ { return path; }
+
+ std::string to_string() const
+ { return path.abs(); }
+
+ /**
+ * Append a `CanonPath` to this path.
+ */
+ SourcePath operator + (const CanonPath & x) const
+ { return {path + x}; }
+
+ /**
+ * Append a single component `c` to this path. `c` must not
+ * contain a slash. A slash is implicitly added between this path
+ * and `c`.
+ */
+ SourcePath operator + (std::string_view c) const
+ { return {path + c}; }
+
+ bool operator == (const SourcePath & x) const
+ {
+ return path == x.path;
+ }
+
+ bool operator != (const SourcePath & x) const
+ {
+ return path != x.path;
+ }
+
+ bool operator < (const SourcePath & x) const
+ {
+ return path < x.path;
+ }
+
+ /**
+ * Resolve any symlinks in this `SourcePath` (including its
+ * parents). The result is a `SourcePath` in which no element is a
+ * symlink.
+ */
+ SourcePath resolveSymlinks() const;
+};
+
+std::ostream & operator << (std::ostream & str, const SourcePath & path);
+
+}
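For illustration only, not part of the patch: a minimal sketch of navigating a SourcePath with the operations declared above:

    #include "input-accessor.hh"
    #include <iostream>

    using namespace nix;

    void sketch()
    {
        SourcePath etc { CanonPath("/etc") };
        SourcePath hosts = etc + "hosts";           // appends a single path component
        if (auto st = hosts.maybeLstat(); st && st->type == InputAccessor::tRegular)
            std::cout << hosts.readFile();          // throws if hosts is not a regular file
        for (auto & [name, type] : etc.readDirectory())
            std::cout << name << "\n";              // type is std::optional<InputAccessor::Type>
    }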
diff --git a/src/libfetchers/registry.hh b/src/libfetchers/registry.hh
index 260a2c460..f57ab1e6b 100644
--- a/src/libfetchers/registry.hh
+++ b/src/libfetchers/registry.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "fetchers.hh"
diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc
index e9686262a..96fe5faca 100644
--- a/src/libfetchers/tarball.cc
+++ b/src/libfetchers/tarball.cc
@@ -71,15 +71,19 @@ DownloadFileResult downloadFile(
dumpString(res.data, sink);
auto hash = hashString(htSHA256, res.data);
ValidPathInfo info {
- store->makeFixedOutputPath(FileIngestionMethod::Flat, hash, name),
+ *store,
+ name,
+ FixedOutputInfo {
+ .hash = {
+ .method = FileIngestionMethod::Flat,
+ .hash = hash,
+ },
+ .references = {},
+ },
hashString(htSHA256, sink.s),
};
info.narSize = sink.s.size();
- info.ca = FixedOutputHash {
- .method = FileIngestionMethod::Flat,
- .hash = hash,
- };
- auto source = StringSource(sink.s);
+ auto source = StringSource { sink.s };
store->addToStore(info, source, NoRepair, NoCheckSigs);
storePath = std::move(info.path);
}
diff --git a/src/libmain/common-args.hh b/src/libmain/common-args.hh
index f180d83ce..c35406c3b 100644
--- a/src/libmain/common-args.hh
+++ b/src/libmain/common-args.hh
@@ -1,6 +1,8 @@
#pragma once
+///@file
#include "args.hh"
+#include "repair-flag.hh"
namespace nix {
@@ -48,4 +50,21 @@ struct MixJSON : virtual Args
}
};
+struct MixRepair : virtual Args
+{
+ RepairFlag repair = NoRepair;
+
+ MixRepair()
+ {
+ addFlag({
+ .longName = "repair",
+ .description =
+ "During evaluation, rewrite missing or corrupted files in the Nix store. "
+ "During building, rebuild missing or corrupted store paths.",
+ .category = miscCategory,
+ .handler = {&repair, Repair},
+ });
+ }
+};
+
}
diff --git a/src/libmain/loggers.hh b/src/libmain/loggers.hh
index f3c759193..e5721420c 100644
--- a/src/libmain/loggers.hh
+++ b/src/libmain/loggers.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc
index e9205a5e5..6600ec177 100644
--- a/src/libmain/progress-bar.cc
+++ b/src/libmain/progress-bar.cc
@@ -72,6 +72,7 @@ private:
uint64_t corruptedPaths = 0, untrustedPaths = 0;
bool active = true;
+ bool paused = false;
bool haveUpdate = true;
};
@@ -120,16 +121,28 @@ public:
updateThread.join();
}
+ void pause() override {
+ state_.lock()->paused = true;
+ writeToStderr("\r\e[K");
+ }
+
+ void resume() override {
+ state_.lock()->paused = false;
+ writeToStderr("\r\e[K");
+ state_.lock()->haveUpdate = true;
+ updateCV.notify_one();
+ }
+
bool isVerbose() override
{
return printBuildLogs;
}
- void log(Verbosity lvl, const FormatOrString & fs) override
+ void log(Verbosity lvl, std::string_view s) override
{
if (lvl > verbosity) return;
auto state(state_.lock());
- log(*state, lvl, fs.s);
+ log(*state, lvl, s);
}
void logEI(const ErrorInfo & ei) override
@@ -142,7 +155,7 @@ public:
log(*state, ei.level, oss.str());
}
- void log(State & state, Verbosity lvl, const std::string & s)
+ void log(State & state, Verbosity lvl, std::string_view s)
{
if (state.active) {
writeToStderr("\r\e[K" + filterANSIEscapes(s, !isTTY) + ANSI_NORMAL "\n");
@@ -339,7 +352,7 @@ public:
auto nextWakeup = std::chrono::milliseconds::max();
state.haveUpdate = false;
- if (!state.active) return nextWakeup;
+ if (state.paused || !state.active) return nextWakeup;
std::string line;
diff --git a/src/libmain/progress-bar.hh b/src/libmain/progress-bar.hh
index 3a76f8448..c3c6e3833 100644
--- a/src/libmain/progress-bar.hh
+++ b/src/libmain/progress-bar.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "logging.hh"
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index d4871a8e2..56f47a4ac 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -10,7 +10,6 @@
#include <cctype>
#include <exception>
#include <iostream>
-#include <mutex>
#include <cstdlib>
#include <sys/time.h>
@@ -20,16 +19,9 @@
#ifdef __linux__
#include <features.h>
#endif
-#ifdef __GLIBC__
-#include <gnu/lib-names.h>
-#include <nss.h>
-#include <dlfcn.h>
-#endif
#include <openssl/crypto.h>
-#include <sodium.h>
-
namespace nix {
@@ -84,8 +76,18 @@ void printMissing(ref<Store> store, const StorePathSet & willBuild,
downloadSizeMiB,
narSizeMiB);
}
- for (auto & i : willSubstitute)
- printMsg(lvl, " %s", store->printStorePath(i));
+ std::vector<const StorePath *> willSubstituteSorted = {};
+ std::for_each(willSubstitute.begin(), willSubstitute.end(),
+ [&](const StorePath &p) { willSubstituteSorted.push_back(&p); });
+ std::sort(willSubstituteSorted.begin(), willSubstituteSorted.end(),
+ [](const StorePath *lhs, const StorePath *rhs) {
+ if (lhs->name() == rhs->name())
+ return lhs->to_string() < rhs->to_string();
+ else
+ return lhs->name() < rhs->name();
+ });
+ for (auto p : willSubstituteSorted)
+ printMsg(lvl, " %s", store->printStorePath(*p));
}
if (!unknown.empty()) {
@@ -105,57 +107,6 @@ std::string getArg(const std::string & opt,
return *i;
}
-
-#if OPENSSL_VERSION_NUMBER < 0x10101000L
-/* OpenSSL is not thread-safe by default - it will randomly crash
- unless the user supplies a mutex locking function. So let's do
- that. */
-static std::vector<std::mutex> opensslLocks;
-
-static void opensslLockCallback(int mode, int type, const char * file, int line)
-{
- if (mode & CRYPTO_LOCK)
- opensslLocks[type].lock();
- else
- opensslLocks[type].unlock();
-}
-#endif
-
-static std::once_flag dns_resolve_flag;
-
-static void preloadNSS() {
- /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
- one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
- been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
- load its lookup libraries in the parent before any child gets a chance to. */
- std::call_once(dns_resolve_flag, []() {
-#ifdef __GLIBC__
- /* On linux, glibc will run every lookup through the nss layer.
- * That means every lookup goes, by default, through nscd, which acts as a local
- * cache.
- * Because we run builds in a sandbox, we also remove access to nscd otherwise
- * lookups would leak into the sandbox.
- *
- * But now we have a new problem, we need to make sure the nss_dns backend that
- * does the dns lookups when nscd is not available is loaded or available.
- *
- * We can't make it available without leaking nix's environment, so instead we'll
- * load the backend, and configure nss so it does not try to run dns lookups
- * through nscd.
- *
- * This is technically only used for builtins:fetch* functions so we only care
- * about dns.
- *
- * All other platforms are unaffected.
- */
- if (!dlopen(LIBNSS_DNS_SO, RTLD_NOW))
- warn("unable to load nss_dns backend");
- // FIXME: get hosts entry from nsswitch.conf.
- __nss_configure_lookup("hosts", "files dns");
-#endif
- });
-}
-
static void sigHandler(int signo) { }
@@ -167,16 +118,7 @@ void initNix()
std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf));
#endif
-#if OPENSSL_VERSION_NUMBER < 0x10101000L
- /* Initialise OpenSSL locking. */
- opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks());
- CRYPTO_set_locking_callback(opensslLockCallback);
-#endif
-
- if (sodium_init() == -1)
- throw Error("could not initialise libsodium");
-
- loadConfFile();
+ initLibStore();
startSignalHandlerThread();
@@ -213,7 +155,10 @@ void initNix()
if (sigaction(SIGTRAP, &act, 0)) throw SysError("handling SIGTRAP");
#endif
- /* Register a SIGSEGV handler to detect stack overflows. */
+ /* Register a SIGSEGV handler to detect stack overflows.
+ Why not initLibExpr()? initGC() is essentially that, but
+ detectStackOverflow is not an instance of the init function concept, as
+ it may have to be invoked more than once per process. */
detectStackOverflow();
/* There is no privacy in the Nix system ;-) At least not for
@@ -226,16 +171,6 @@ void initNix()
gettimeofday(&tv, 0);
srandom(tv.tv_usec);
- /* On macOS, don't use the per-session TMPDIR (as set e.g. by
- sshd). This breaks build users because they don't have access
- to the TMPDIR, in particular in ‘nix-store --serve’. */
-#if __APPLE__
- if (hasPrefix(getEnv("TMPDIR").value_or("/tmp"), "/var/folders/"))
- unsetenv("TMPDIR");
-#endif
-
- preloadNSS();
- initLibStore();
}
@@ -347,7 +282,7 @@ void parseCmdLine(const std::string & programName, const Strings & args,
void printVersion(const std::string & programName)
{
- std::cout << format("%1% (Nix) %2%") % programName % nixVersion << std::endl;
+ std::cout << fmt("%1% (Nix) %2%", programName, nixVersion) << std::endl;
if (verbosity > lvlInfo) {
Strings cfg;
#if HAVE_BOEHMGC
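For illustration only, not part of the patch: after this change an embedding program still just calls initNix(), which now calls initLibStore() in place of the inlined OpenSSL/libsodium/NSS setup removed above:

    #include "shared.hh"

    int main()
    {
        nix::initNix();   // signal handling, stack-overflow detection, initLibStore(), ...
        // ... use the Nix libraries ...
        return 0;
    }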
diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh
index 1715374a6..7a9e83c6c 100644
--- a/src/libmain/shared.hh
+++ b/src/libmain/shared.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "util.hh"
#include "args.hh"
@@ -24,7 +25,9 @@ public:
int handleExceptions(const std::string & programName, std::function<void()> fun);
-/* Don't forget to call initPlugins() after settings are initialized! */
+/**
+ * Don't forget to call initPlugins() after settings are initialized!
+ */
void initNix();
void parseCmdLine(int argc, char * * argv,
@@ -35,7 +38,9 @@ void parseCmdLine(const std::string & programName, const Strings & args,
void printVersion(const std::string & programName);
-/* Ugh. No better place to put this. */
+/**
+ * Ugh. No better place to put this.
+ */
void printGCWarning();
class Store;
@@ -74,11 +79,15 @@ struct LegacyArgs : public MixCommonArgs
};
-/* Show the manual page for the specified program. */
+/**
+ * Show the manual page for the specified program.
+ */
void showManPage(const std::string & name);
-/* The constructor of this class starts a pager if stdout is a
- terminal and $PAGER is set. Stdout is redirected to the pager. */
+/**
+ * The constructor of this class starts a pager if stdout is a
+ * terminal and $PAGER is set. Stdout is redirected to the pager.
+ */
class RunPager
{
public:
@@ -109,28 +118,34 @@ struct PrintFreed
};
-/* Install a SIGSEGV handler to detect stack overflows. */
+/**
+ * Install a SIGSEGV handler to detect stack overflows.
+ */
void detectStackOverflow();
-/* Pluggable behavior to run in case of a stack overflow.
-
- Default value: defaultStackOverflowHandler.
-
- This is called by the handler installed by detectStackOverflow().
-
- This gives Nix library consumers a limit opportunity to report the error
- condition. The handler should exit the process.
- See defaultStackOverflowHandler() for a reference implementation.
-
- NOTE: Use with diligence, because this runs in the signal handler, with very
- limited stack space and a potentially a corrupted heap, all while the failed
- thread is blocked indefinitely. All functions called must be reentrant. */
+/**
+ * Pluggable behavior to run in case of a stack overflow.
+ *
+ * Default value: defaultStackOverflowHandler.
+ *
+ * This is called by the handler installed by detectStackOverflow().
+ *
+ * This gives Nix library consumers a limited opportunity to report the error
+ * condition. The handler should exit the process.
+ * See defaultStackOverflowHandler() for a reference implementation.
+ *
+ * NOTE: Use with diligence, because this runs in the signal handler, with very
+ * limited stack space and potentially a corrupted heap, all while the failed
+ * thread is blocked indefinitely. All functions called must be reentrant.
+ */
extern std::function<void(siginfo_t * info, void * ctx)> stackOverflowHandler;
-/* The default, robust implementation of stackOverflowHandler.
-
- Prints an error message directly to stderr using a syscall instead of the
- logger. Exits the process immediately after. */
+/**
+ * The default, robust implementation of stackOverflowHandler.
+ *
+ * Prints an error message directly to stderr using a syscall instead of the
+ * logger. Exits the process immediately after.
+ */
void defaultStackOverflowHandler(siginfo_t * info, void * ctx);
}
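For illustration only, not part of the patch: a library consumer can install its own overflow behaviour, keeping the reentrancy caveat above in mind:

    #include "shared.hh"
    #include <csignal>

    void installHandler()
    {
        nix::stackOverflowHandler = [](siginfo_t * info, void * ctx) {
            // Do only minimal, reentrant work here, then terminate the process,
            // e.g. by delegating to the default implementation.
            nix::defaultStackOverflowHandler(info, ctx);
        };
    }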
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 751cf8c30..fcd763a9d 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -306,11 +306,22 @@ StorePath BinaryCacheStore::addToStoreFromDump(Source & dump, std::string_view n
unsupported("addToStoreFromDump");
return addToStoreCommon(dump, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
- makeFixedOutputPath(method, nar.first, name, references),
+ *this,
+ name,
+ FixedOutputInfo {
+ .hash = {
+ .method = method,
+ .hash = nar.first,
+ },
+ .references = {
+ .others = references,
+ // caller is not capable of creating a self-reference, because this is content-addressed without modulus
+ .self = false,
+ },
+ },
nar.first,
};
info.narSize = nar.second;
- info.references = references;
return info;
})->path;
}
@@ -414,15 +425,22 @@ StorePath BinaryCacheStore::addToStore(
});
return addToStoreCommon(*source, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
- makeFixedOutputPath(method, h, name, references),
+ *this,
+ name,
+ FixedOutputInfo {
+ .hash = {
+ .method = method,
+ .hash = h,
+ },
+ .references = {
+ .others = references,
+ // caller is not capable of creating a self-reference, because this is content-addressed without modulus
+ .self = false,
+ },
+ },
nar.first,
};
info.narSize = nar.second;
- info.references = references;
- info.ca = FixedOutputHash {
- .method = method,
- .hash = h,
- };
return info;
})->path;
}
@@ -434,7 +452,7 @@ StorePath BinaryCacheStore::addTextToStore(
RepairFlag repair)
{
auto textHash = hashString(htSHA256, s);
- auto path = makeTextPath(name, textHash, references);
+ auto path = makeTextPath(name, TextInfo { { textHash }, references });
if (!repair && isValidPath(path))
return path;
@@ -443,10 +461,16 @@ StorePath BinaryCacheStore::addTextToStore(
dumpString(s, sink);
StringSource source(sink.s);
return addToStoreCommon(source, repair, CheckSigs, [&](HashResult nar) {
- ValidPathInfo info { path, nar.first };
+ ValidPathInfo info {
+ *this,
+ std::string { name },
+ TextInfo {
+ { .hash = textHash },
+ references,
+ },
+ nar.first,
+ };
info.narSize = nar.second;
- info.ca = TextHash { textHash };
- info.references = references;
return info;
})->path;
}
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index abd92a83c..49f271d24 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "crypto.hh"
#include "store-api.hh"
@@ -16,19 +17,40 @@ struct BinaryCacheStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
- const Setting<std::string> compression{(StoreConfig*) this, "xz", "compression", "NAR compression method ('xz', 'bzip2', 'gzip', 'zstd', or 'none')"};
- const Setting<bool> writeNARListing{(StoreConfig*) this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
- const Setting<bool> writeDebugInfo{(StoreConfig*) this, false, "index-debug-info", "whether to index DWARF debug info files by build ID"};
- const Setting<Path> secretKeyFile{(StoreConfig*) this, "", "secret-key", "path to secret key used to sign the binary cache"};
- const Setting<Path> localNarCache{(StoreConfig*) this, "", "local-nar-cache", "path to a local cache of NARs"};
+ const Setting<std::string> compression{(StoreConfig*) this, "xz", "compression",
+ "NAR compression method (`xz`, `bzip2`, `gzip`, `zstd`, or `none`)."};
+
+ const Setting<bool> writeNARListing{(StoreConfig*) this, false, "write-nar-listing",
+ "Whether to write a JSON file that lists the files in each NAR."};
+
+ const Setting<bool> writeDebugInfo{(StoreConfig*) this, false, "index-debug-info",
+ R"(
+ Whether to index DWARF debug info files by build ID. This allows [`dwarffs`](https://github.com/edolstra/dwarffs) to
+ fetch debug info on demand
+ )"};
+
+ const Setting<Path> secretKeyFile{(StoreConfig*) this, "", "secret-key",
+ "Path to the secret key used to sign the binary cache."};
+
+ const Setting<Path> localNarCache{(StoreConfig*) this, "", "local-nar-cache",
+ "Path to a local cache of NARs fetched from this binary cache, used by commands such as `nix store cat`."};
+
const Setting<bool> parallelCompression{(StoreConfig*) this, false, "parallel-compression",
- "enable multi-threading compression for NARs, available for xz and zstd only currently"};
+ "Enable multi-threaded compression of NARs. This is currently only available for `xz` and `zstd`."};
+
const Setting<int> compressionLevel{(StoreConfig*) this, -1, "compression-level",
- "specify 'preset level' of compression to be used with NARs: "
- "meaning and accepted range of values depends on compression method selected, "
- "other than -1 which we reserve to indicate Nix defaults should be used"};
+ R"(
+ The *preset level* to be used when compressing NARs.
+ The meaning and accepted values depend on the compression method selected.
+ `-1` specifies that the default compression level should be used.
+ )"};
};
+
+/**
+ * @note subclasses must implement at least one of the two
+ * virtual getFile() methods.
+ */
class BinaryCacheStore : public virtual BinaryCacheStoreConfig,
public virtual Store,
public virtual LogStore
@@ -58,14 +80,15 @@ public:
std::string && data,
const std::string & mimeType);
- /* Note: subclasses must implement at least one of the two
- following getFile() methods. */
-
- /* Dump the contents of the specified file to a sink. */
+ /**
+ * Dump the contents of the specified file to a sink.
+ */
virtual void getFile(const std::string & path, Sink & sink);
- /* Fetch the specified file and call the specified callback with
- the result. A subclass may implement this asynchronously. */
+ /**
+ * Fetch the specified file and call the specified callback with
+ * the result. A subclass may implement this asynchronously.
+ */
virtual void getFile(
const std::string & path,
Callback<std::optional<std::string>> callback) noexcept;
diff --git a/src/libstore/build-result.hh b/src/libstore/build-result.hh
index a5749cf33..b7a56e791 100644
--- a/src/libstore/build-result.hh
+++ b/src/libstore/build-result.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "realisation.hh"
#include "derived-path.hh"
@@ -11,9 +12,12 @@ namespace nix {
struct BuildResult
{
- /* Note: don't remove status codes, and only add new status codes
- at the end of the list, to prevent client/server
- incompatibilities in the nix-store --serve protocol. */
+ /**
+ * @note This is directly used in the nix-store --serve protocol.
+ * That means we need to worry about compatibility across versions.
+ * Therefore, don't remove status codes, and only add new status
+ * codes at the end of the list.
+ */
enum Status {
Built = 0,
Substituted,
@@ -21,8 +25,10 @@ struct BuildResult
PermanentFailure,
InputRejected,
OutputRejected,
- TransientFailure, // possibly transient
- CachedFailure, // no longer used
+ /// possibly transient
+ TransientFailure,
+ /// no longer used
+ CachedFailure,
TimedOut,
MiscFailure,
DependencyFailed,
@@ -32,7 +38,12 @@ struct BuildResult
NoSubstituters,
} status = MiscFailure;
- // FIXME: include entire ErrorInfo object.
+ /**
+ * Information about the error if the build failed.
+ *
+ * @todo This should be an entire ErrorInfo object, not just a
+ * string, for richer information.
+ */
std::string errorMsg;
std::string toString() const {
@@ -52,33 +63,41 @@ struct BuildResult
case LogLimitExceeded: return "LogLimitExceeded";
case NotDeterministic: return "NotDeterministic";
case ResolvesToAlreadyValid: return "ResolvesToAlreadyValid";
+ case NoSubstituters: return "NoSubstituters";
default: return "Unknown";
};
}();
return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg);
}
- /* How many times this build was performed. */
+ /**
+ * How many times this build was performed.
+ */
unsigned int timesBuilt = 0;
- /* If timesBuilt > 1, whether some builds did not produce the same
- result. (Note that 'isNonDeterministic = false' does not mean
- the build is deterministic, just that we don't have evidence of
- non-determinism.) */
+ /**
+ * If timesBuilt > 1, whether some builds did not produce the same
+ * result. (Note that 'isNonDeterministic = false' does not mean
+ * the build is deterministic, just that we don't have evidence of
+ * non-determinism.)
+ */
bool isNonDeterministic = false;
- /* The derivation we built or the store path we substituted. */
- DerivedPath path;
-
- /* For derivations, a mapping from the names of the wanted outputs
- to actual paths. */
- DrvOutputs builtOutputs;
+ /**
+ * For derivations, a mapping from the names of the wanted outputs
+ * to actual paths.
+ */
+ SingleDrvOutputs builtOutputs;
- /* The start/stop times of the build (or one of the rounds, if it
- was repeated). */
+ /**
+ * The start/stop times of the build (or one of the rounds, if it
+ * was repeated).
+ */
time_t startTime = 0, stopTime = 0;
- /* User and system CPU time the build took. */
+ /**
+ * User and system CPU time the build took.
+ */
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
bool success()
@@ -92,4 +111,15 @@ struct BuildResult
}
};
+/**
+ * A `BuildResult` together with its "primary key".
+ */
+struct KeyedBuildResult : BuildResult
+{
+ /**
+ * The derivation we built or the store path we substituted.
+ */
+ DerivedPath path;
+};
+
}
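For illustration only, not part of the patch: builtOutputs is now keyed by output name and the derived path lives on KeyedBuildResult. A consumer might report a result roughly like this, assuming SingleDrvOutputs maps output names to Realisations, as the emplace calls elsewhere in this patch suggest:

    #include "build-result.hh"
    #include "store-api.hh"
    #include <iostream>

    using namespace nix;

    void report(Store & store, KeyedBuildResult & res)
    {
        if (!res.success()) return;                    // success() comes from BuildResult
        for (auto & [outputName, realisation] : res.builtOutputs)
            std::cout << outputName << " -> "
                      << store.printStorePath(realisation.outPath) << "\n";
    }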
diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc
index 2021d0023..df7d21e54 100644
--- a/src/libstore/build/derivation-goal.cc
+++ b/src/libstore/build/derivation-goal.cc
@@ -145,8 +145,20 @@ void DerivationGoal::work()
void DerivationGoal::addWantedOutputs(const OutputsSpec & outputs)
{
auto newWanted = wantedOutputs.union_(outputs);
- if (!newWanted.isSubsetOf(wantedOutputs))
- needRestart = true;
+ switch (needRestart) {
+ case NeedRestartForMoreOutputs::OutputsUnmodifedDontNeed:
+ if (!newWanted.isSubsetOf(wantedOutputs))
+ needRestart = NeedRestartForMoreOutputs::OutputsAddedDoNeed;
+ break;
+ case NeedRestartForMoreOutputs::OutputsAddedDoNeed:
+ /* No need to check whether we added more outputs, because a
+ restart is already queued up. */
+ break;
+ case NeedRestartForMoreOutputs::BuildInProgressWillNotNeed:
+ /* We are already building all outputs, so it doesn't matter if
+ we now want more. */
+ break;
+ };
wantedOutputs = newWanted;
}
@@ -199,10 +211,10 @@ void DerivationGoal::haveDerivation()
parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *drv);
if (!drv->type().hasKnownOutputPaths())
- settings.requireExperimentalFeature(Xp::CaDerivations);
+ experimentalFeatureSettings.require(Xp::CaDerivations);
if (!drv->type().isPure()) {
- settings.requireExperimentalFeature(Xp::ImpureDerivations);
+ experimentalFeatureSettings.require(Xp::ImpureDerivations);
for (auto & [outputName, output] : drv->outputs) {
auto randomPath = StorePath::random(outputPathName(drv->name, outputName));
@@ -262,11 +274,13 @@ void DerivationGoal::haveDerivation()
)
)
);
- else
+ else {
+ auto * cap = getDerivationCA(*drv);
addWaitee(upcast_goal(worker.makePathSubstitutionGoal(
status.known->path,
buildMode == bmRepair ? Repair : NoRepair,
- getDerivationCA(*drv))));
+ cap ? std::optional { *cap } : std::nullopt)));
+ }
}
if (waitees.empty()) /* to prevent hang (no wake-up event) */
@@ -297,12 +311,29 @@ void DerivationGoal::outputsSubstitutionTried()
In particular, it may be the case that the hole in the closure is
an output of the current derivation, which causes a loop if retried.
*/
- if (nrIncompleteClosure > 0 && nrIncompleteClosure == nrFailed) retrySubstitution = true;
+ {
+ bool substitutionFailed =
+ nrIncompleteClosure > 0 &&
+ nrIncompleteClosure == nrFailed;
+ switch (retrySubstitution) {
+ case RetrySubstitution::NoNeed:
+ if (substitutionFailed)
+ retrySubstitution = RetrySubstitution::YesNeed;
+ break;
+ case RetrySubstitution::YesNeed:
+ // Should not be able to reach this state from here.
+ assert(false);
+ break;
+ case RetrySubstitution::AlreadyRetried:
+ debug("substitution failed again, but we already retried once. Not retrying again.");
+ break;
+ }
+ }
nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
- if (needRestart) {
- needRestart = false;
+ if (needRestart == NeedRestartForMoreOutputs::OutputsAddedDoNeed) {
+ needRestart = NeedRestartForMoreOutputs::OutputsUnmodifedDontNeed;
haveDerivation();
return;
}
@@ -330,13 +361,17 @@ void DerivationGoal::outputsSubstitutionTried()
produced using a substitute. So we have to build instead. */
void DerivationGoal::gaveUpOnSubstitution()
{
+ /* At this point we are building all outputs, so if more are wanted there
+ is no need to restart. */
+ needRestart = NeedRestartForMoreOutputs::BuildInProgressWillNotNeed;
+
/* The inputs must be built before we can build this goal. */
inputDrvOutputs.clear();
if (useDerivation)
for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs) {
/* Ensure that pure, non-fixed-output derivations don't
depend on impure derivations. */
- if (settings.isExperimentalFeatureEnabled(Xp::ImpureDerivations) && drv->type().isPure() && !drv->type().isFixed()) {
+ if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && drv->type().isPure() && !drv->type().isFixed()) {
auto inputDrv = worker.evalStore.readDerivation(i.first);
if (!inputDrv.type().isPure())
throw Error("pure derivation '%s' depends on impure derivation '%s'",
@@ -451,8 +486,8 @@ void DerivationGoal::inputsRealised()
return;
}
- if (retrySubstitution && !retriedSubstitution) {
- retriedSubstitution = true;
+ if (retrySubstitution == RetrySubstitution::YesNeed) {
+ retrySubstitution = RetrySubstitution::AlreadyRetried;
haveDerivation();
return;
}
@@ -477,7 +512,7 @@ void DerivationGoal::inputsRealised()
ca.fixed
/* Can optionally resolve if fixed, which is good
for avoiding unnecessary rebuilds. */
- ? settings.isExperimentalFeatureEnabled(Xp::CaDerivations)
+ ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations)
/* Must resolve if floating and there are any inputs
drvs. */
: true);
@@ -488,7 +523,7 @@ void DerivationGoal::inputsRealised()
}, drvType.raw());
if (resolveDrv && !fullDrv.inputDrvs.empty()) {
- settings.requireExperimentalFeature(Xp::CaDerivations);
+ experimentalFeatureSettings.require(Xp::CaDerivations);
/* We are be able to resolve this derivation based on the
now-known results of dependencies. If so, we become a
@@ -570,8 +605,6 @@ void DerivationGoal::inputsRealised()
build hook. */
state = &DerivationGoal::tryToBuild;
worker.wakeUp(shared_from_this());
-
- buildResult = BuildResult { .path = buildResult.path };
}
void DerivationGoal::started()
@@ -732,7 +765,7 @@ void replaceValidPath(const Path & storePath, const Path & tmpPath)
tmpPath (the replacement), so we have to move it out of the
way first. We'd better not be interrupted here, because if
we're repairing (say) Glibc, we end up with a broken system. */
- Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % random()).str();
+ Path oldPath = fmt("%1%.old-%2%-%3%", storePath, getpid(), random());
if (pathExists(storePath))
movePath(storePath, oldPath);
@@ -911,7 +944,11 @@ void DerivationGoal::buildDone()
msg += line;
msg += "\n";
}
- msg += fmt("For full logs, run '" ANSI_BOLD "nix log %s" ANSI_NORMAL "'.",
+ auto nixLogCommand = experimentalFeatureSettings.isEnabled(Xp::NixCommand)
+ ? "nix log"
+ : "nix-store -l";
+ msg += fmt("For full logs, run '" ANSI_BOLD "%s %s" ANSI_NORMAL "'.",
+ nixLogCommand,
worker.store.printStorePath(drvPath));
}
@@ -978,50 +1015,40 @@ void DerivationGoal::resolvedFinished()
auto resolvedDrv = *resolvedDrvGoal->drv;
auto & resolvedResult = resolvedDrvGoal->buildResult;
- DrvOutputs builtOutputs;
+ SingleDrvOutputs builtOutputs;
if (resolvedResult.success()) {
auto resolvedHashes = staticOutputHashes(worker.store, resolvedDrv);
StorePathSet outputPaths;
- // `wantedOutputs` might merely indicate “all the outputs”
- auto realWantedOutputs = std::visit(overloaded {
- [&](const OutputsSpec::All &) {
- return resolvedDrv.outputNames();
- },
- [&](const OutputsSpec::Names & names) {
- return static_cast<std::set<std::string>>(names);
- },
- }, wantedOutputs.raw());
-
- for (auto & wantedOutput : realWantedOutputs) {
- auto initialOutput = get(initialOutputs, wantedOutput);
- auto resolvedHash = get(resolvedHashes, wantedOutput);
+ for (auto & outputName : resolvedDrv.outputNames()) {
+ auto initialOutput = get(initialOutputs, outputName);
+ auto resolvedHash = get(resolvedHashes, outputName);
if ((!initialOutput) || (!resolvedHash))
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,resolve)",
- worker.store.printStorePath(drvPath), wantedOutput);
+ worker.store.printStorePath(drvPath), outputName);
auto realisation = [&]{
- auto take1 = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
+ auto take1 = get(resolvedResult.builtOutputs, outputName);
if (take1) return *take1;
/* The above `get` should work. But sateful tracking of
outputs in resolvedResult, this can get out of sync with the
store, which is our actual source of truth. For now we just
check the store directly if it fails. */
- auto take2 = worker.evalStore.queryRealisation(DrvOutput { *resolvedHash, wantedOutput });
+ auto take2 = worker.evalStore.queryRealisation(DrvOutput { *resolvedHash, outputName });
if (take2) return *take2;
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
- worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
+ worker.store.printStorePath(resolvedDrvGoal->drvPath), outputName);
}();
if (drv->type().isPure()) {
auto newRealisation = realisation;
- newRealisation.id = DrvOutput { initialOutput->outputHash, wantedOutput };
+ newRealisation.id = DrvOutput { initialOutput->outputHash, outputName };
newRealisation.signatures.clear();
if (!drv->type().isFixed())
newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation.outPath);
@@ -1029,7 +1056,7 @@ void DerivationGoal::resolvedFinished()
worker.store.registerDrvOutput(newRealisation);
}
outputPaths.insert(realisation.outPath);
- builtOutputs.emplace(realisation.id, realisation);
+ builtOutputs.emplace(outputName, realisation);
}
runPostBuildHook(
@@ -1125,7 +1152,7 @@ HookReply DerivationGoal::tryBuildHook()
/* Tell the hook all the inputs that have to be copied to the
remote system. */
- worker_proto::write(worker.store, hook->sink, inputPaths);
+ workerProtoWrite(worker.store, hook->sink, inputPaths);
/* Tell the hooks the missing outputs that have to be copied back
from the remote system. */
@@ -1136,7 +1163,7 @@ HookReply DerivationGoal::tryBuildHook()
if (buildMode != bmCheck && status.known && status.known->isValid()) continue;
missingOutputs.insert(outputName);
}
- worker_proto::write(worker.store, hook->sink, missingOutputs);
+ workerProtoWrite(worker.store, hook->sink, missingOutputs);
}
hook->sink = FdSink();
@@ -1154,7 +1181,7 @@ HookReply DerivationGoal::tryBuildHook()
}
-DrvOutputs DerivationGoal::registerOutputs()
+SingleDrvOutputs DerivationGoal::registerOutputs()
{
/* When using a build hook, the build hook can register the output
as valid (by doing `nix-store --import'). If so we don't have
@@ -1316,7 +1343,7 @@ OutputPathMap DerivationGoal::queryDerivationOutputMap()
}
-std::pair<bool, DrvOutputs> DerivationGoal::checkPathValidity()
+std::pair<bool, SingleDrvOutputs> DerivationGoal::checkPathValidity()
{
if (!drv->type().isPure()) return { false, {} };
@@ -1329,7 +1356,7 @@ std::pair<bool, DrvOutputs> DerivationGoal::checkPathValidity()
return static_cast<StringSet>(names);
},
}, wantedOutputs.raw());
- DrvOutputs validOutputs;
+ SingleDrvOutputs validOutputs;
for (auto & i : queryPartialDerivationOutputMap()) {
auto initialOutput = get(initialOutputs, i.first);
@@ -1352,7 +1379,7 @@ std::pair<bool, DrvOutputs> DerivationGoal::checkPathValidity()
};
}
auto drvOutput = DrvOutput{info.outputHash, i.first};
- if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
if (auto real = worker.store.queryRealisation(drvOutput)) {
info.known = {
.path = real->outPath,
@@ -1371,8 +1398,8 @@ std::pair<bool, DrvOutputs> DerivationGoal::checkPathValidity()
);
}
}
- if (info.wanted && info.known && info.known->isValid())
- validOutputs.emplace(drvOutput, Realisation { drvOutput, info.known->path });
+ if (info.known && info.known->isValid())
+ validOutputs.emplace(i.first, Realisation { drvOutput, info.known->path });
}
// If we requested all the outputs, we are always fine.
@@ -1396,7 +1423,7 @@ std::pair<bool, DrvOutputs> DerivationGoal::checkPathValidity()
}
-DrvOutputs DerivationGoal::assertPathValidity()
+SingleDrvOutputs DerivationGoal::assertPathValidity()
{
auto [allValid, validOutputs] = checkPathValidity();
if (!allValid)
@@ -1407,7 +1434,7 @@ DrvOutputs DerivationGoal::assertPathValidity()
void DerivationGoal::done(
BuildResult::Status status,
- DrvOutputs builtOutputs,
+ SingleDrvOutputs builtOutputs,
std::optional<Error> ex)
{
buildResult.status = status;
@@ -1422,8 +1449,9 @@ void DerivationGoal::done(
mcRunningBuilds.reset();
if (buildResult.success()) {
- assert(!builtOutputs.empty());
- buildResult.builtOutputs = std::move(builtOutputs);
+ auto wantedBuiltOutputs = filterDrvOutputs(wantedOutputs, std::move(builtOutputs));
+ assert(!wantedBuiltOutputs.empty());
+ buildResult.builtOutputs = std::move(wantedBuiltOutputs);
if (status == BuildResult::Built)
worker.doneBuilds++;
} else {
@@ -1448,12 +1476,28 @@ void DerivationGoal::waiteeDone(GoalPtr waitee, ExitCode result)
{
Goal::waiteeDone(waitee, result);
- if (waitee->buildResult.success())
- if (auto bfd = std::get_if<DerivedPath::Built>(&waitee->buildResult.path))
- for (auto & [output, realisation] : waitee->buildResult.builtOutputs)
+ if (!useDerivation) return;
+ auto & fullDrv = *dynamic_cast<Derivation *>(drv.get());
+
+ auto * dg = dynamic_cast<DerivationGoal *>(&*waitee);
+ if (!dg) return;
+
+ auto outputs = fullDrv.inputDrvs.find(dg->drvPath);
+ if (outputs == fullDrv.inputDrvs.end()) return;
+
+ for (auto & outputName : outputs->second) {
+ auto buildResult = dg->getBuildResult(DerivedPath::Built {
+ .drvPath = dg->drvPath,
+ .outputs = OutputsSpec::Names { outputName },
+ });
+ if (buildResult.success()) {
+ auto i = buildResult.builtOutputs.find(outputName);
+ if (i != buildResult.builtOutputs.end())
inputDrvOutputs.insert_or_assign(
- { bfd->drvPath, output.outputName },
- realisation.outPath);
+ { dg->drvPath, outputName },
+ i->second.outPath);
+ }
+ }
}
}
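
The reworked waiteeDone() above no longer copies the waitee's raw buildResult; it asks the dependency goal for a per-output result and records only the outputs this derivation actually consumes. A rough sketch of that bookkeeping, using plain standard-library types rather than the Nix classes (the store paths and output names below are made up):

    // Sketch only: std::string stands in for StorePath, and the dependency's
    // built outputs are given directly instead of via getBuildResult().
    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <utility>

    int main()
    {
        // Pretend the dependency goal reported these built outputs.
        std::map<std::string, std::string> depBuiltOutputs = {
            {"out", "/nix/store/aaaa-dep"},
            {"dev", "/nix/store/bbbb-dep-dev"},
        };
        std::string depDrvPath = "/nix/store/cccc-dep.drv";

        // Outputs of that derivation that our derivation refers to (inputDrvs).
        std::set<std::string> referencedOutputs = {"out"};

        // Keyed by (input drv path, output name), as in inputDrvOutputs.
        std::map<std::pair<std::string, std::string>, std::string> inputDrvOutputs;
        for (auto & outputName : referencedOutputs) {
            auto i = depBuiltOutputs.find(outputName);
            if (i != depBuiltOutputs.end())
                inputDrvOutputs.insert_or_assign({depDrvPath, outputName}, i->second);
        }

        std::cout << inputDrvOutputs.size() << " input output(s) recorded\n"; // prints 1
    }
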
diff --git a/src/libstore/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh
index 707e38b4b..ee8f06f25 100644
--- a/src/libstore/build/derivation-goal.hh
+++ b/src/libstore/build/derivation-goal.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "parsed-derivations.hh"
#include "lock.hh"
@@ -15,8 +16,10 @@ struct HookInstance;
typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
-/* Unless we are repairing, we don't both to test validity and just assume it,
- so the choices are `Absent` or `Valid`. */
+/**
+ * Unless we are repairing, we don't bother to test validity and just assume it,
+ * so the choices are `Absent` or `Valid`.
+ */
enum struct PathStatus {
Corrupt,
Absent,
@@ -26,11 +29,15 @@ enum struct PathStatus {
struct InitialOutputStatus {
StorePath path;
PathStatus status;
- /* Valid in the store, and additionally non-corrupt if we are repairing */
+ /**
+ * Valid in the store, and additionally non-corrupt if we are repairing
+ */
bool isValid() const {
return status == PathStatus::Valid;
}
- /* Merely present, allowed to be corrupt */
+ /**
+ * Merely present, allowed to be corrupt
+ */
bool isPresent() const {
return status == PathStatus::Corrupt
|| status == PathStatus::Valid;
@@ -45,59 +52,123 @@ struct InitialOutput {
struct DerivationGoal : public Goal
{
- /* Whether to use an on-disk .drv file. */
+ /**
+ * Whether to use an on-disk .drv file.
+ */
bool useDerivation;
- /* The path of the derivation. */
+ /** The path of the derivation. */
StorePath drvPath;
- /* The goal for the corresponding resolved derivation */
+ /**
+ * The goal for the corresponding resolved derivation
+ */
std::shared_ptr<DerivationGoal> resolvedDrvGoal;
- /* The specific outputs that we need to build. Empty means all of
- them. */
+ /**
+ * The specific outputs that we need to build. Empty means all of
+ * them.
+ */
OutputsSpec wantedOutputs;
- /* Mapping from input derivations + output names to actual store
- paths. This is filled in by waiteeDone() as each dependency
- finishes, before inputsRealised() is reached, */
+ /**
+ * Mapping from input derivations + output names to actual store
+ * paths. This is filled in by waiteeDone() as each dependency
+ * finishes, before inputsRealised() is reached.
+ */
std::map<std::pair<StorePath, std::string>, StorePath> inputDrvOutputs;
- /* Whether additional wanted outputs have been added. */
- bool needRestart = false;
-
- /* Whether to retry substituting the outputs after building the
- inputs. This is done in case of an incomplete closure. */
- bool retrySubstitution = false;
-
- /* Whether we've retried substitution, in which case we won't try
- again. */
- bool retriedSubstitution = false;
-
- /* The derivation stored at drvPath. */
+ /**
+ * See `needRestart`; just for that field.
+ */
+ enum struct NeedRestartForMoreOutputs {
+ /**
+ * The goal state machine is progressing based on the current value of
+         * `wantedOutputs`. No actions are needed.
+ */
+ OutputsUnmodifedDontNeed,
+ /**
+ * `wantedOutputs` has been extended, but the state machine is
+ * proceeding according to its old value, so we need to restart.
+ */
+ OutputsAddedDoNeed,
+ /**
+ * The goal state machine has progressed to the point of doing a build,
+ * in which case all outputs will be produced, so extensions to
+ * `wantedOutputs` no longer require a restart.
+ */
+ BuildInProgressWillNotNeed,
+ };
+
+ /**
+ * Whether additional wanted outputs have been added.
+ */
+ NeedRestartForMoreOutputs needRestart = NeedRestartForMoreOutputs::OutputsUnmodifedDontNeed;
+
+ /**
+ * See `retrySubstitution`; just for that field.
+ */
+ enum RetrySubstitution {
+ /**
+         * No issues have yet arisen; no need to restart.
+ */
+ NoNeed,
+ /**
+ * Something failed and there is an incomplete closure. Let's retry
+ * substituting.
+ */
+ YesNeed,
+ /**
+         * We are currently retrying or have already retried substitution;
+         * whether or not something goes wrong, we will not retry again.
+ */
+ AlreadyRetried,
+ };
+
+ /**
+ * Whether to retry substituting the outputs after building the
+ * inputs. This is done in case of an incomplete closure.
+ */
+ RetrySubstitution retrySubstitution = RetrySubstitution::NoNeed;
+
+ /**
+ * The derivation stored at drvPath.
+ */
std::unique_ptr<Derivation> drv;
std::unique_ptr<ParsedDerivation> parsedDrv;
- /* The remainder is state held during the build. */
+ /**
+ * The remainder is state held during the build.
+ */
- /* Locks on (fixed) output paths. */
+ /**
+ * Locks on (fixed) output paths.
+ */
PathLocks outputLocks;
- /* All input paths (that is, the union of FS closures of the
- immediate input paths). */
+ /**
+ * All input paths (that is, the union of FS closures of the
+ * immediate input paths).
+ */
StorePathSet inputPaths;
std::map<std::string, InitialOutput> initialOutputs;
- /* File descriptor for the log file. */
+ /**
+ * File descriptor for the log file.
+ */
AutoCloseFD fdLogFile;
std::shared_ptr<BufferedSink> logFileSink, logSink;
- /* Number of bytes received from the builder's stdout/stderr. */
+ /**
+ * Number of bytes received from the builder's stdout/stderr.
+ */
unsigned long logSize;
- /* The most recent log lines. */
+ /**
+ * The most recent log lines.
+ */
std::list<std::string> logTail;
std::string currentLogLine;
@@ -105,10 +176,14 @@ struct DerivationGoal : public Goal
std::string currentHookLine;
- /* The build hook. */
+ /**
+ * The build hook.
+ */
std::unique_ptr<HookInstance> hook;
- /* The sort of derivation we are building. */
+ /**
+ * The sort of derivation we are building.
+ */
DerivationType derivationType;
typedef void (DerivationGoal::*GoalState)();
@@ -120,12 +195,16 @@ struct DerivationGoal : public Goal
std::unique_ptr<Activity> act;
- /* Activity that denotes waiting for a lock. */
+ /**
+ * Activity that denotes waiting for a lock.
+ */
std::unique_ptr<Activity> actLock;
std::map<ActivityId, Activity> builderActivities;
- /* The remote machine on which we're building. */
+ /**
+ * The remote machine on which we're building.
+ */
std::string machineName;
DerivationGoal(const StorePath & drvPath,
@@ -142,10 +221,14 @@ struct DerivationGoal : public Goal
void work() override;
- /* Add wanted outputs to an already existing derivation goal. */
+ /**
+ * Add wanted outputs to an already existing derivation goal.
+ */
void addWantedOutputs(const OutputsSpec & outputs);
- /* The states. */
+ /**
+ * The states.
+ */
void getDerivation();
void loadDerivation();
void haveDerivation();
@@ -159,28 +242,42 @@ struct DerivationGoal : public Goal
void resolvedFinished();
- /* Is the build hook willing to perform the build? */
+ /**
+ * Is the build hook willing to perform the build?
+ */
HookReply tryBuildHook();
virtual int getChildStatus();
- /* Check that the derivation outputs all exist and register them
- as valid. */
- virtual DrvOutputs registerOutputs();
+ /**
+ * Check that the derivation outputs all exist and register them
+ * as valid.
+ */
+ virtual SingleDrvOutputs registerOutputs();
- /* Open a log file and a pipe to it. */
+ /**
+ * Open a log file and a pipe to it.
+ */
Path openLogFile();
- /* Sign the newly built realisation if the store allows it */
+ /**
+ * Sign the newly built realisation if the store allows it
+ */
virtual void signRealisation(Realisation&) {}
- /* Close the log file. */
+ /**
+ * Close the log file.
+ */
void closeLogFile();
- /* Close the read side of the logger pipe. */
+ /**
+ * Close the read side of the logger pipe.
+ */
virtual void closeReadPipes();
- /* Cleanup hooks for buildDone() */
+ /**
+ * Cleanup hooks for buildDone()
+ */
virtual void cleanupHookFinally();
virtual void cleanupPreChildKill();
virtual void cleanupPostChildKill();
@@ -190,30 +287,38 @@ struct DerivationGoal : public Goal
virtual bool isReadDesc(int fd);
- /* Callback used by the worker to write to the log. */
+ /**
+ * Callback used by the worker to write to the log.
+ */
void handleChildOutput(int fd, std::string_view data) override;
void handleEOF(int fd) override;
void flushLine();
- /* Wrappers around the corresponding Store methods that first consult the
- derivation. This is currently needed because when there is no drv file
- there also is no DB entry. */
+ /**
+ * Wrappers around the corresponding Store methods that first consult the
+ * derivation. This is currently needed because when there is no drv file
+ * there also is no DB entry.
+ */
std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap();
OutputPathMap queryDerivationOutputMap();
- /* Update 'initialOutputs' to determine the current status of the
- outputs of the derivation. Also returns a Boolean denoting
- whether all outputs are valid and non-corrupt, and a
- 'DrvOutputs' structure containing the valid and wanted
- outputs. */
- std::pair<bool, DrvOutputs> checkPathValidity();
-
- /* Aborts if any output is not valid or corrupt, and otherwise
- returns a 'DrvOutputs' structure containing the wanted
- outputs. */
- DrvOutputs assertPathValidity();
-
- /* Forcibly kill the child process, if any. */
+ /**
+ * Update 'initialOutputs' to determine the current status of the
+ * outputs of the derivation. Also returns a Boolean denoting
+ * whether all outputs are valid and non-corrupt, and a
+ * 'SingleDrvOutputs' structure containing the valid outputs.
+ */
+ std::pair<bool, SingleDrvOutputs> checkPathValidity();
+
+ /**
+ * Aborts if any output is not valid or corrupt, and otherwise
+ * returns a 'SingleDrvOutputs' structure containing all outputs.
+ */
+ SingleDrvOutputs assertPathValidity();
+
+ /**
+ * Forcibly kill the child process, if any.
+ */
virtual void killChild();
void repairClosure();
@@ -222,12 +327,14 @@ struct DerivationGoal : public Goal
void done(
BuildResult::Status status,
- DrvOutputs builtOutputs = {},
+ SingleDrvOutputs builtOutputs = {},
std::optional<Error> ex = {});
void waiteeDone(GoalPtr waitee, ExitCode result) override;
StorePathSet exportReferences(const StorePathSet & storePaths);
+
+ JobCategory jobCategory() override { return JobCategory::Build; };
};
MakeError(NotDeterministic, BuildError);
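
The two new enums replace what were plain booleans (needRestart, retrySubstitution/retriedSubstitution) with explicit three-state machines. A minimal sketch, not the actual DerivationGoal logic, of why the third state matters for needRestart: once the build itself has started, newly added wanted outputs no longer force a restart.

    #include <cassert>

    // Same states as in the header above; the surrounding GoalSketch class is invented.
    enum struct NeedRestartForMoreOutputs {
        OutputsUnmodifedDontNeed,   // state machine matches `wantedOutputs`
        OutputsAddedDoNeed,         // `wantedOutputs` grew; must restart
        BuildInProgressWillNotNeed, // building everything anyway; never restart
    };

    struct GoalSketch {
        NeedRestartForMoreOutputs needRestart =
            NeedRestartForMoreOutputs::OutputsUnmodifedDontNeed;

        void addWantedOutputs() {
            // Only request a restart if the build has not started yet.
            if (needRestart != NeedRestartForMoreOutputs::BuildInProgressWillNotNeed)
                needRestart = NeedRestartForMoreOutputs::OutputsAddedDoNeed;
        }

        void startBuild() {
            needRestart = NeedRestartForMoreOutputs::BuildInProgressWillNotNeed;
        }
    };

    int main()
    {
        GoalSketch g;
        g.addWantedOutputs();
        assert(g.needRestart == NeedRestartForMoreOutputs::OutputsAddedDoNeed);
        g.startBuild();
        g.addWantedOutputs(); // no effect once the build is running
        assert(g.needRestart == NeedRestartForMoreOutputs::BuildInProgressWillNotNeed);
    }
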
diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc
index b7f7b5ab1..b30957c84 100644
--- a/src/libstore/build/drv-output-substitution-goal.cc
+++ b/src/libstore/build/drv-output-substitution-goal.cc
@@ -61,20 +61,25 @@ void DrvOutputSubstitutionGoal::tryNext()
// FIXME: Make async
// outputInfo = sub->queryRealisation(id);
- outPipe.create();
- promise = decltype(promise)();
+
+ /* The callback of the curl download below can outlive `this` (if
+ some other error occurs), so it must not touch `this`. So put
+ the shared state in a separate refcounted object. */
+ downloadState = std::make_shared<DownloadState>();
+ downloadState->outPipe.create();
sub->queryRealisation(
- id, { [&](std::future<std::shared_ptr<const Realisation>> res) {
+ id,
+ { [downloadState(downloadState)](std::future<std::shared_ptr<const Realisation>> res) {
try {
- Finally updateStats([this]() { outPipe.writeSide.close(); });
- promise.set_value(res.get());
+ Finally updateStats([&]() { downloadState->outPipe.writeSide.close(); });
+ downloadState->promise.set_value(res.get());
} catch (...) {
- promise.set_exception(std::current_exception());
+ downloadState->promise.set_exception(std::current_exception());
}
} });
- worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
+ worker.childStarted(shared_from_this(), {downloadState->outPipe.readSide.get()}, true, false);
state = &DrvOutputSubstitutionGoal::realisationFetched;
}
@@ -84,7 +89,7 @@ void DrvOutputSubstitutionGoal::realisationFetched()
worker.childTerminated(this);
try {
- outputInfo = promise.get_future().get();
+ outputInfo = downloadState->promise.get_future().get();
} catch (std::exception & e) {
printError(e.what());
substituterFailed = true;
@@ -155,7 +160,7 @@ void DrvOutputSubstitutionGoal::work()
void DrvOutputSubstitutionGoal::handleEOF(int fd)
{
- if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
+ if (fd == downloadState->outPipe.readSide.get()) worker.wakeUp(shared_from_this());
}
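
The DownloadState change above is a lifetime fix: the queryRealisation callback may run after the goal is gone, so everything it touches lives in a separately refcounted object that the callback captures by value. A stripped-down sketch of the same pattern using only standard-library types (the DownloadState/startQuery names mirror the diff; the rest is illustrative):

    #include <functional>
    #include <future>
    #include <iostream>
    #include <memory>

    struct DownloadState {
        std::promise<int> promise;   // stands in for the realisation promise
    };

    struct GoalSketch {
        std::shared_ptr<DownloadState> downloadState;

        std::function<void(int)> startQuery() {
            downloadState = std::make_shared<DownloadState>();
            // Capture the shared_ptr by value; deliberately do NOT capture `this`.
            return [state = downloadState](int result) {
                state->promise.set_value(result);
            };
        }
    };

    int main()
    {
        std::function<void(int)> callback;
        std::future<int> fut;
        {
            GoalSketch goal;
            callback = goal.startQuery();
            fut = goal.downloadState->promise.get_future();
            // `goal` is destroyed here; the lambda's shared_ptr keeps the state alive.
        }
        callback(42);                   // safe: does not touch the destroyed goal
        std::cout << fut.get() << "\n"; // prints 42
    }
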
diff --git a/src/libstore/build/drv-output-substitution-goal.hh b/src/libstore/build/drv-output-substitution-goal.hh
index 948dbda8f..5d1253a71 100644
--- a/src/libstore/build/drv-output-substitution-goal.hh
+++ b/src/libstore/build/drv-output-substitution-goal.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "store-api.hh"
#include "goal.hh"
@@ -10,31 +11,47 @@ namespace nix {
class Worker;
-// Substitution of a derivation output.
-// This is done in three steps:
-// 1. Fetch the output info from a substituter
-// 2. Substitute the corresponding output path
-// 3. Register the output info
+/**
+ * Substitution of a derivation output.
+ * This is done in three steps:
+ * 1. Fetch the output info from a substituter
+ * 2. Substitute the corresponding output path
+ * 3. Register the output info
+ */
class DrvOutputSubstitutionGoal : public Goal {
-private:
- // The drv output we're trying to substitue
+
+ /**
+ * The drv output we're trying to substitute
+ */
DrvOutput id;
- // The realisation corresponding to the given output id.
- // Will be filled once we can get it.
+ /**
+ * The realisation corresponding to the given output id.
+ * Will be filled once we can get it.
+ */
std::shared_ptr<const Realisation> outputInfo;
- /* The remaining substituters. */
+ /**
+ * The remaining substituters.
+ */
std::list<ref<Store>> subs;
- /* The current substituter. */
+ /**
+ * The current substituter.
+ */
std::shared_ptr<Store> sub;
- Pipe outPipe;
- std::thread thr;
- std::promise<std::shared_ptr<const Realisation>> promise;
+ struct DownloadState
+ {
+ Pipe outPipe;
+ std::promise<std::shared_ptr<const Realisation>> promise;
+ };
+
+ std::shared_ptr<DownloadState> downloadState;
- /* Whether a substituter failed. */
+ /**
+ * Whether a substituter failed.
+ */
bool substituterFailed = false;
public:
@@ -55,6 +72,8 @@ public:
void work() override;
void handleEOF(int fd) override;
+
+ JobCategory jobCategory() override { return JobCategory::Substitution; };
};
}
diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc
index 2925fe3ca..edd6cb6d2 100644
--- a/src/libstore/build/entry-points.cc
+++ b/src/libstore/build/entry-points.cc
@@ -10,16 +10,8 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
Worker worker(*this, evalStore ? *evalStore : *this);
Goals goals;
- for (const auto & br : reqs) {
- std::visit(overloaded {
- [&](const DerivedPath::Built & bfd) {
- goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode));
- },
- [&](const DerivedPath::Opaque & bo) {
- goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair));
- },
- }, br.raw());
- }
+ for (auto & br : reqs)
+ goals.insert(worker.makeGoal(br, buildMode));
worker.run(goals);
@@ -47,7 +39,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
}
}
-std::vector<BuildResult> Store::buildPathsWithResults(
+std::vector<KeyedBuildResult> Store::buildPathsWithResults(
const std::vector<DerivedPath> & reqs,
BuildMode buildMode,
std::shared_ptr<Store> evalStore)
@@ -55,23 +47,23 @@ std::vector<BuildResult> Store::buildPathsWithResults(
Worker worker(*this, evalStore ? *evalStore : *this);
Goals goals;
- for (const auto & br : reqs) {
- std::visit(overloaded {
- [&](const DerivedPath::Built & bfd) {
- goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode));
- },
- [&](const DerivedPath::Opaque & bo) {
- goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair));
- },
- }, br.raw());
+ std::vector<std::pair<const DerivedPath &, GoalPtr>> state;
+
+ for (const auto & req : reqs) {
+ auto goal = worker.makeGoal(req, buildMode);
+ goals.insert(goal);
+ state.push_back({req, goal});
}
worker.run(goals);
- std::vector<BuildResult> results;
+ std::vector<KeyedBuildResult> results;
- for (auto & i : goals)
- results.push_back(i->buildResult);
+ for (auto & [req, goalPtr] : state)
+ results.emplace_back(KeyedBuildResult {
+ goalPtr->getBuildResult(req),
+ /* .path = */ req,
+ });
return results;
}
@@ -84,15 +76,14 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat
try {
worker.run(Goals{goal});
- return goal->buildResult;
+ return goal->getBuildResult(DerivedPath::Built {
+ .drvPath = drvPath,
+ .outputs = OutputsSpec::All {},
+ });
} catch (Error & e) {
return BuildResult {
.status = BuildResult::MiscFailure,
.errorMsg = e.msg(),
- .path = DerivedPath::Built {
- .drvPath = drvPath,
- .outputs = OutputsSpec::All { },
- },
};
};
}
@@ -119,7 +110,7 @@ void Store::ensurePath(const StorePath & path)
}
-void LocalStore::repairPath(const StorePath & path)
+void Store::repairPath(const StorePath & path)
{
Worker worker(*this, *this);
GoalPtr goal = worker.makePathSubstitutionGoal(path, Repair);
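
buildPathsWithResults() now records an explicit request-to-goal pairing instead of walking the goal set afterwards, because the worker deduplicates goals and the set's ordering does not match the caller's request order. A simplified illustration of that distinction, with plain strings standing in for derived paths and goals:

    #include <iostream>
    #include <map>
    #include <memory>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    using Goal = std::string;                    // stands in for a goal object
    using GoalPtr = std::shared_ptr<Goal>;

    int main()
    {
        // Two requests happen to name the same derivation output.
        std::vector<std::string> reqs = {"drvA!out", "drvA!out", "drvB!out"};

        std::map<std::string, GoalPtr> cache;    // the worker deduplicates goals
        std::set<GoalPtr> goals;
        std::vector<std::pair<std::string, GoalPtr>> state; // request -> goal

        for (auto & req : reqs) {
            auto & g = cache[req];
            if (!g) g = std::make_shared<Goal>(req);
            goals.insert(g);
            state.push_back({req, g});
        }

        std::cout << "distinct goals: " << goals.size()     // 2 (deduplicated)
                  << ", results returned: " << state.size() // 3, one per request
                  << "\n";
    }
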
diff --git a/src/libstore/build/goal.cc b/src/libstore/build/goal.cc
index 58e805f55..ca7097a68 100644
--- a/src/libstore/build/goal.cc
+++ b/src/libstore/build/goal.cc
@@ -11,6 +11,29 @@ bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const {
}
+BuildResult Goal::getBuildResult(const DerivedPath & req) {
+ BuildResult res { buildResult };
+
+ if (auto pbp = std::get_if<DerivedPath::Built>(&req)) {
+ auto & bp = *pbp;
+
+ /* Because goals are in general shared between derived paths
+ that share the same derivation, we need to filter their
+ results to get back just the results we care about.
+ */
+
+ for (auto it = res.builtOutputs.begin(); it != res.builtOutputs.end();) {
+ if (bp.outputs.contains(it->first))
+ ++it;
+ else
+ it = res.builtOutputs.erase(it);
+ }
+ }
+
+ return res;
+}
+
+
void addToWeakGoals(WeakGoals & goals, GoalPtr p)
{
if (goals.find(p) != goals.end())
@@ -78,9 +101,9 @@ void Goal::amDone(ExitCode result, std::optional<Error> ex)
}
-void Goal::trace(const FormatOrString & fs)
+void Goal::trace(std::string_view s)
{
- debug("%1%: %2%", name, fs.s);
+ debug("%1%: %2%", name, s);
}
}
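
Goal::getBuildResult() above projects the stored BuildResult down to the outputs named by one request. A minimal sketch of the same erase-loop filtering with ordinary maps and sets (the store paths are placeholders):

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>

    // name -> store path, filtered down to the requested output names.
    std::map<std::string, std::string> filterOutputs(
        std::map<std::string, std::string> builtOutputs,
        const std::set<std::string> & wanted)
    {
        for (auto it = builtOutputs.begin(); it != builtOutputs.end();) {
            if (wanted.count(it->first))
                ++it;
            else
                it = builtOutputs.erase(it);
        }
        return builtOutputs;
    }

    int main()
    {
        auto res = filterOutputs(
            {{"out", "/nix/store/...-hello"}, {"dev", "/nix/store/...-hello-dev"}},
            {"out"});
        assert(res.size() == 1 && res.count("out") == 1);
    }
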
diff --git a/src/libstore/build/goal.hh b/src/libstore/build/goal.hh
index 35121c5d9..a313bf22c 100644
--- a/src/libstore/build/goal.hh
+++ b/src/libstore/build/goal.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "store-api.hh"
@@ -6,11 +7,15 @@
namespace nix {
-/* Forward definition. */
+/**
+ * Forward definition.
+ */
struct Goal;
class Worker;
-/* A pointer to a goal. */
+/**
+ * A pointer to a goal.
+ */
typedef std::shared_ptr<Goal> GoalPtr;
typedef std::weak_ptr<Goal> WeakGoalPtr;
@@ -18,53 +23,102 @@ struct CompareGoalPtrs {
bool operator() (const GoalPtr & a, const GoalPtr & b) const;
};
-/* Set of goals. */
+/**
+ * Set of goals.
+ */
typedef std::set<GoalPtr, CompareGoalPtrs> Goals;
typedef std::set<WeakGoalPtr, std::owner_less<WeakGoalPtr>> WeakGoals;
-/* A map of paths to goals (and the other way around). */
+/**
+ * A map of paths to goals (and the other way around).
+ */
typedef std::map<StorePath, WeakGoalPtr> WeakGoalMap;
+/**
+ * Used as a hint to the worker on how to schedule a particular goal. For example,
+ * builds are typically CPU- and memory-bound, while substitutions are I/O bound.
+ * Using this information, the worker might decide to schedule more or fewer goals
+ * of each category in parallel.
+ */
+enum struct JobCategory {
+ Build,
+ Substitution,
+};
+
struct Goal : public std::enable_shared_from_this<Goal>
{
typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode;
- /* Backlink to the worker. */
+ /**
+ * Backlink to the worker.
+ */
Worker & worker;
- /* Goals that this goal is waiting for. */
+ /**
+ * Goals that this goal is waiting for.
+ */
Goals waitees;
- /* Goals waiting for this one to finish. Must use weak pointers
- here to prevent cycles. */
+ /**
+ * Goals waiting for this one to finish. Must use weak pointers
+ * here to prevent cycles.
+ */
WeakGoals waiters;
- /* Number of goals we are/were waiting for that have failed. */
+ /**
+ * Number of goals we are/were waiting for that have failed.
+ */
size_t nrFailed = 0;
- /* Number of substitution goals we are/were waiting for that
- failed because there are no substituters. */
+ /**
+ * Number of substitution goals we are/were waiting for that
+ * failed because there are no substituters.
+ */
size_t nrNoSubstituters = 0;
- /* Number of substitution goals we are/were waiting for that
- failed because they had unsubstitutable references. */
+ /**
+ * Number of substitution goals we are/were waiting for that
+ * failed because they had unsubstitutable references.
+ */
size_t nrIncompleteClosure = 0;
- /* Name of this goal for debugging purposes. */
+ /**
+ * Name of this goal for debugging purposes.
+ */
std::string name;
- /* Whether the goal is finished. */
+ /**
+ * Whether the goal is finished.
+ */
ExitCode exitCode = ecBusy;
- /* Build result. */
+protected:
+ /**
+ * Build result.
+ */
BuildResult buildResult;
- /* Exception containing an error message, if any. */
+public:
+
+ /**
+ * Project a `BuildResult` with just the information that pertains
+ * to the given request.
+ *
+ * In general, goals may be aliased between multiple requests, and
+ * the stored `BuildResult` has information for the union of all
+     * requests. We don't want to leak what the other requests are, for the
+     * sake of both privacy and determinism, and this "safe accessor"
+ * ensures we don't.
+ */
+ BuildResult getBuildResult(const DerivedPath &);
+
+ /**
+ * Exception containing an error message, if any.
+ */
std::optional<Error> ex;
Goal(Worker & worker, DerivedPath path)
: worker(worker)
- , buildResult { .path = std::move(path) }
{ }
virtual ~Goal()
@@ -88,16 +142,18 @@ struct Goal : public std::enable_shared_from_this<Goal>
abort();
}
- void trace(const FormatOrString & fs);
+ void trace(std::string_view s);
std::string getName()
{
return name;
}
- /* Callback in case of a timeout. It should wake up its waiters,
- get rid of any running child processes that are being monitored
- by the worker (important!), etc. */
+ /**
+ * Callback in case of a timeout. It should wake up its waiters,
+ * get rid of any running child processes that are being monitored
+ * by the worker (important!), etc.
+ */
virtual void timedOut(Error && ex) = 0;
virtual std::string key() = 0;
@@ -105,6 +161,8 @@ struct Goal : public std::enable_shared_from_this<Goal>
void amDone(ExitCode result, std::optional<Error> ex = {});
virtual void cleanup() { }
+
+ virtual JobCategory jobCategory() = 0;
};
void addToWeakGoals(WeakGoals & goals, GoalPtr p);
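
JobCategory is only a scheduling hint; it is up to the worker what to do with it. A hedged sketch of one possible use, counting running goals per category against separate limits (the limits and bookkeeping here are invented for illustration and are not the actual Worker scheduler):

    #include <algorithm>
    #include <iostream>
    #include <memory>
    #include <vector>

    enum struct JobCategory { Build, Substitution };

    struct Goal {
        virtual ~Goal() = default;
        virtual JobCategory jobCategory() = 0;
    };

    struct BuildGoal : Goal {
        JobCategory jobCategory() override { return JobCategory::Build; }
    };

    struct SubstitutionGoal : Goal {
        JobCategory jobCategory() override { return JobCategory::Substitution; }
    };

    int main()
    {
        unsigned maxBuildJobs = 2, maxSubstitutionJobs = 16; // hypothetical limits

        std::vector<std::unique_ptr<Goal>> running;
        running.push_back(std::make_unique<BuildGoal>());
        running.push_back(std::make_unique<SubstitutionGoal>());

        unsigned builds = 0, substitutions = 0;
        for (auto & g : running)
            (g->jobCategory() == JobCategory::Build ? builds : substitutions)++;

        std::cout << std::boolalpha
                  << "can start a build: " << (builds < maxBuildJobs) << "\n"
                  << "can start a substitution: "
                  << (substitutions < std::max(1u, maxSubstitutionJobs)) << "\n";
    }
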
diff --git a/src/libstore/build/hook-instance.cc b/src/libstore/build/hook-instance.cc
index cb58a1f02..075ad554f 100644
--- a/src/libstore/build/hook-instance.cc
+++ b/src/libstore/build/hook-instance.cc
@@ -35,7 +35,10 @@ HookInstance::HookInstance()
/* Fork the hook. */
pid = startProcess([&]() {
- commonChildInit(fromHook);
+ if (dup2(fromHook.writeSide.get(), STDERR_FILENO) == -1)
+ throw SysError("cannot pipe standard error into log file");
+
+ commonChildInit();
if (chdir("/") == -1) throw SysError("changing into /");
diff --git a/src/libstore/build/hook-instance.hh b/src/libstore/build/hook-instance.hh
index 9e8cff128..d84f62877 100644
--- a/src/libstore/build/hook-instance.hh
+++ b/src/libstore/build/hook-instance.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "logging.hh"
#include "serialise.hh"
@@ -7,16 +8,24 @@ namespace nix {
struct HookInstance
{
- /* Pipes for talking to the build hook. */
+ /**
+ * Pipes for talking to the build hook.
+ */
Pipe toHook;
- /* Pipe for the hook's standard output/error. */
+ /**
+ * Pipe for the hook's standard output/error.
+ */
Pipe fromHook;
- /* Pipe for the builder's standard output/error. */
+ /**
+ * Pipe for the builder's standard output/error.
+ */
Pipe builderOut;
- /* The process ID of the hook. */
+ /**
+ * The process ID of the hook.
+ */
Pid pid;
FdSink sink;
diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc
index a961d8eed..05d6685da 100644
--- a/src/libstore/build/local-derivation-goal.cc
+++ b/src/libstore/build/local-derivation-goal.cc
@@ -292,7 +292,7 @@ void LocalDerivationGoal::closeReadPipes()
if (hook) {
DerivationGoal::closeReadPipes();
} else
- builderOut.readSide = -1;
+ builderOut.close();
}
@@ -413,7 +413,7 @@ void LocalDerivationGoal::startBuilder()
)
{
#if __linux__
- settings.requireExperimentalFeature(Xp::Cgroups);
+ experimentalFeatureSettings.require(Xp::Cgroups);
auto cgroupFS = getCgroupFS();
if (!cgroupFS)
@@ -650,7 +650,7 @@ void LocalDerivationGoal::startBuilder()
/* Clean up the chroot directory automatically. */
autoDelChroot = std::make_shared<AutoDelete>(chrootRootDir);
- printMsg(lvlChatty, format("setting up chroot environment in '%1%'") % chrootRootDir);
+ printMsg(lvlChatty, "setting up chroot environment in '%1%'", chrootRootDir);
// FIXME: make this 0700
if (mkdir(chrootRootDir.c_str(), buildUser && buildUser->getUIDCount() != 1 ? 0755 : 0750) == -1)
@@ -753,8 +753,7 @@ void LocalDerivationGoal::startBuilder()
throw Error("home directory '%1%' exists; please remove it to assure purity of builds without sandboxing", homeDir);
if (useChroot && settings.preBuildHook != "" && dynamic_cast<Derivation *>(drv.get())) {
- printMsg(lvlChatty, format("executing pre-build hook '%1%'")
- % settings.preBuildHook);
+ printMsg(lvlChatty, "executing pre-build hook '%1%'", settings.preBuildHook);
auto args = useChroot ? Strings({worker.store.printStorePath(drvPath), chrootRootDir}) :
Strings({ worker.store.printStorePath(drvPath) });
enum BuildHookState {
@@ -803,15 +802,13 @@ void LocalDerivationGoal::startBuilder()
/* Create the log file. */
Path logFile = openLogFile();
- /* Create a pipe to get the output of the builder. */
- //builderOut.create();
-
- builderOut.readSide = posix_openpt(O_RDWR | O_NOCTTY);
- if (!builderOut.readSide)
+ /* Create a pseudoterminal to get the output of the builder. */
+ builderOut = posix_openpt(O_RDWR | O_NOCTTY);
+ if (!builderOut)
throw SysError("opening pseudoterminal master");
// FIXME: not thread-safe, use ptsname_r
- std::string slaveName(ptsname(builderOut.readSide.get()));
+ std::string slaveName = ptsname(builderOut.get());
if (buildUser) {
if (chmod(slaveName.c_str(), 0600))
@@ -822,34 +819,34 @@ void LocalDerivationGoal::startBuilder()
}
#if __APPLE__
else {
- if (grantpt(builderOut.readSide.get()))
+ if (grantpt(builderOut.get()))
throw SysError("granting access to pseudoterminal slave");
}
#endif
- #if 0
- // Mount the pt in the sandbox so that the "tty" command works.
- // FIXME: this doesn't work with the new devpts in the sandbox.
- if (useChroot)
- dirsInChroot[slaveName] = {slaveName, false};
- #endif
-
- if (unlockpt(builderOut.readSide.get()))
+ if (unlockpt(builderOut.get()))
throw SysError("unlocking pseudoterminal");
- builderOut.writeSide = open(slaveName.c_str(), O_RDWR | O_NOCTTY);
- if (!builderOut.writeSide)
- throw SysError("opening pseudoterminal slave");
+ /* Open the slave side of the pseudoterminal and use it as stderr. */
+ auto openSlave = [&]()
+ {
+ AutoCloseFD builderOut = open(slaveName.c_str(), O_RDWR | O_NOCTTY);
+ if (!builderOut)
+ throw SysError("opening pseudoterminal slave");
- // Put the pt into raw mode to prevent \n -> \r\n translation.
- struct termios term;
- if (tcgetattr(builderOut.writeSide.get(), &term))
- throw SysError("getting pseudoterminal attributes");
+ // Put the pt into raw mode to prevent \n -> \r\n translation.
+ struct termios term;
+ if (tcgetattr(builderOut.get(), &term))
+ throw SysError("getting pseudoterminal attributes");
- cfmakeraw(&term);
+ cfmakeraw(&term);
- if (tcsetattr(builderOut.writeSide.get(), TCSANOW, &term))
- throw SysError("putting pseudoterminal into raw mode");
+ if (tcsetattr(builderOut.get(), TCSANOW, &term))
+ throw SysError("putting pseudoterminal into raw mode");
+
+ if (dup2(builderOut.get(), STDERR_FILENO) == -1)
+ throw SysError("cannot pipe standard error into log file");
+ };
buildResult.startTime = time(0);
@@ -898,7 +895,16 @@ void LocalDerivationGoal::startBuilder()
usingUserNamespace = userNamespacesSupported();
+ Pipe sendPid;
+ sendPid.create();
+
Pid helper = startProcess([&]() {
+ sendPid.readSide.close();
+
+ /* We need to open the slave early, before
+ CLONE_NEWUSER. Otherwise we get EPERM when running as
+ root. */
+ openSlave();
/* Drop additional groups here because we can't do it
after we've created the new user namespace. FIXME:
@@ -920,11 +926,12 @@ void LocalDerivationGoal::startBuilder()
pid_t child = startProcess([&]() { runChild(); }, options);
- writeFull(builderOut.writeSide.get(),
- fmt("%d %d\n", usingUserNamespace, child));
+ writeFull(sendPid.writeSide.get(), fmt("%d\n", child));
_exit(0);
});
+ sendPid.writeSide.close();
+
if (helper.wait() != 0)
throw Error("unable to start build process");
@@ -936,10 +943,9 @@ void LocalDerivationGoal::startBuilder()
userNamespaceSync.writeSide = -1;
});
- auto ss = tokenizeString<std::vector<std::string>>(readLine(builderOut.readSide.get()));
- assert(ss.size() == 2);
- usingUserNamespace = ss[0] == "1";
- pid = string2Int<pid_t>(ss[1]).value();
+ auto ss = tokenizeString<std::vector<std::string>>(readLine(sendPid.readSide.get()));
+ assert(ss.size() == 1);
+ pid = string2Int<pid_t>(ss[0]).value();
if (usingUserNamespace) {
/* Set the UID/GID mapping of the builder's user namespace
@@ -994,21 +1000,21 @@ void LocalDerivationGoal::startBuilder()
#endif
{
pid = startProcess([&]() {
+ openSlave();
runChild();
});
}
/* parent */
pid.setSeparatePG(true);
- builderOut.writeSide = -1;
- worker.childStarted(shared_from_this(), {builderOut.readSide.get()}, true, true);
+ worker.childStarted(shared_from_this(), {builderOut.get()}, true, true);
/* Check if setting up the build environment failed. */
std::vector<std::string> msgs;
while (true) {
std::string msg = [&]() {
try {
- return readLine(builderOut.readSide.get());
+ return readLine(builderOut.get());
} catch (Error & e) {
auto status = pid.wait();
e.addTrace({}, "while waiting for the build environment for '%s' to initialize (%s, previous messages: %s)",
@@ -1020,7 +1026,7 @@ void LocalDerivationGoal::startBuilder()
}();
if (msg.substr(0, 1) == "\2") break;
if (msg.substr(0, 1) == "\1") {
- FdSource source(builderOut.readSide.get());
+ FdSource source(builderOut.get());
auto ex = readError(source);
ex.addTrace({}, "while setting up the build environment");
throw ex;
@@ -1104,7 +1110,7 @@ void LocalDerivationGoal::initEnv()
env["NIX_STORE"] = worker.store.storeDir;
/* The maximum number of cores to utilize for parallel building. */
- env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str();
+ env["NIX_BUILD_CORES"] = fmt("%d", settings.buildCores);
initTmpDir();
@@ -1155,10 +1161,10 @@ void LocalDerivationGoal::writeStructuredAttrs()
writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites));
chownToBuilder(tmpDir + "/.attrs.sh");
- env["NIX_ATTRS_SH_FILE"] = tmpDir + "/.attrs.sh";
+ env["NIX_ATTRS_SH_FILE"] = tmpDirInSandbox + "/.attrs.sh";
writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites));
chownToBuilder(tmpDir + "/.attrs.json");
- env["NIX_ATTRS_JSON_FILE"] = tmpDir + "/.attrs.json";
+ env["NIX_ATTRS_JSON_FILE"] = tmpDirInSandbox + "/.attrs.json";
}
}
@@ -1329,7 +1335,7 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
result.rethrow();
}
- std::vector<BuildResult> buildPathsWithResults(
+ std::vector<KeyedBuildResult> buildPathsWithResults(
const std::vector<DerivedPath> & paths,
BuildMode buildMode = bmNormal,
std::shared_ptr<Store> evalStore = nullptr) override
@@ -1409,12 +1415,15 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
virtual void addBuildLog(const StorePath & path, std::string_view log) override
{ unsupported("addBuildLog"); }
+
+ std::optional<TrustedFlag> isTrustedClient() override
+ { return NotTrusted; }
};
void LocalDerivationGoal::startDaemon()
{
- settings.requireExperimentalFeature(Xp::RecursiveNix);
+ experimentalFeatureSettings.require(Xp::RecursiveNix);
Store::Params params;
params["path-info-cache-size"] = "0";
@@ -1461,7 +1470,7 @@ void LocalDerivationGoal::startDaemon()
FdSink to(remote.get());
try {
daemon::processConnection(store, from, to,
- daemon::NotTrusted, daemon::Recursive);
+ NotTrusted, daemon::Recursive);
debug("terminated daemon connection");
} catch (SysError &) {
ignoreException();
@@ -1650,7 +1659,7 @@ void LocalDerivationGoal::runChild()
try { /* child */
- commonChildInit(builderOut);
+ commonChildInit();
try {
setupSeccomp();
@@ -1767,6 +1776,9 @@ void LocalDerivationGoal::runChild()
for (auto & path : { "/etc/resolv.conf", "/etc/services", "/etc/hosts" })
if (pathExists(path))
ss.push_back(path);
+
+ if (settings.caFile != "")
+ dirsInChroot.try_emplace("/etc/ssl/certs/ca-certificates.crt", settings.caFile, true);
}
for (auto & i : ss) dirsInChroot.emplace(i, i);
@@ -2063,7 +2075,7 @@ void LocalDerivationGoal::runChild()
/* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms
to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */
- Path globalTmpDir = canonPath(getEnv("TMPDIR").value_or("/tmp"), true);
+ Path globalTmpDir = canonPath(getEnvNonEmpty("TMPDIR").value_or("/tmp"), true);
/* They don't like trailing slashes on subpath directives */
if (globalTmpDir.back() == '/') globalTmpDir.pop_back();
@@ -2165,7 +2177,7 @@ void LocalDerivationGoal::runChild()
}
-DrvOutputs LocalDerivationGoal::registerOutputs()
+SingleDrvOutputs LocalDerivationGoal::registerOutputs()
{
/* When using a build hook, the build hook can register the output
as valid (by doing `nix-store --import'). If so we don't have
@@ -2274,7 +2286,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
bool discardReferences = false;
if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) {
if (auto udr = get(*structuredAttrs, "unsafeDiscardReferences")) {
- settings.requireExperimentalFeature(Xp::DiscardReferences);
+ experimentalFeatureSettings.require(Xp::DiscardReferences);
if (auto output = get(*udr, outputName)) {
if (!output->is_boolean())
throw Error("attribute 'unsafeDiscardReferences.\"%s\"' of derivation '%s' must be a Boolean", outputName, drvPath.to_string());
@@ -2386,27 +2398,26 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
}
};
- auto rewriteRefs = [&]() -> std::pair<bool, StorePathSet> {
+ auto rewriteRefs = [&]() -> StoreReferences {
/* In the CA case, we need the rewritten refs to calculate the
final path, therefore we look for a *non-rewritten
            self-reference, and use a bool rather than try to solve the
computationally intractable fixed point. */
- std::pair<bool, StorePathSet> res {
- false,
- {},
+ StoreReferences res {
+ .self = false,
};
for (auto & r : references) {
auto name = r.name();
auto origHash = std::string { r.hashPart() };
if (r == *scratchPath) {
- res.first = true;
+ res.self = true;
} else if (auto outputRewrite = get(outputRewrites, origHash)) {
std::string newRef = *outputRewrite;
newRef += '-';
newRef += name;
- res.second.insert(StorePath { newRef });
+ res.others.insert(StorePath { newRef });
} else {
- res.second.insert(r);
+ res.others.insert(r);
}
}
return res;
@@ -2418,39 +2429,57 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
throw BuildError(
"output path %1% without valid stats info",
actualPath);
- if (outputHash.method == FileIngestionMethod::Flat) {
+ if (outputHash.method == ContentAddressMethod { FileIngestionMethod::Flat } ||
+ outputHash.method == ContentAddressMethod { TextIngestionMethod {} })
+ {
/* The output path should be a regular file without execute permission. */
if (!S_ISREG(st->st_mode) || (st->st_mode & S_IXUSR) != 0)
throw BuildError(
"output path '%1%' should be a non-executable regular file "
- "since recursive hashing is not enabled (outputHashMode=flat)",
+ "since recursive hashing is not enabled (one of outputHashMode={flat,text} is true)",
actualPath);
}
rewriteOutput();
/* FIXME optimize and deduplicate with addToStore */
std::string oldHashPart { scratchPath->hashPart() };
HashModuloSink caSink { outputHash.hashType, oldHashPart };
- switch (outputHash.method) {
- case FileIngestionMethod::Recursive:
- dumpPath(actualPath, caSink);
- break;
- case FileIngestionMethod::Flat:
- readFile(actualPath, caSink);
- break;
- }
+ std::visit(overloaded {
+ [&](const TextIngestionMethod &) {
+ readFile(actualPath, caSink);
+ },
+ [&](const FileIngestionMethod & m2) {
+ switch (m2) {
+ case FileIngestionMethod::Recursive:
+ dumpPath(actualPath, caSink);
+ break;
+ case FileIngestionMethod::Flat:
+ readFile(actualPath, caSink);
+ break;
+ }
+ },
+ }, outputHash.method.raw);
auto got = caSink.finish().first;
- auto refs = rewriteRefs();
-
- auto finalPath = worker.store.makeFixedOutputPath(
- outputHash.method,
- got,
- outputPathName(drv->name, outputName),
- refs.second,
- refs.first);
- if (*scratchPath != finalPath) {
+
+ auto optCA = ContentAddressWithReferences::fromPartsOpt(
+ outputHash.method,
+ std::move(got),
+ rewriteRefs());
+ if (!optCA) {
+ // TODO track distinct failure modes separately (at the time of
+ // writing there is just one but `nullopt` is unclear) so this
+ // message can't get out of sync.
+                    throw BuildError("output path '%s' has illegal content address, probably a spurious self-reference with text hashing", actualPath);
+ }
+ ValidPathInfo newInfo0 {
+ worker.store,
+ outputPathName(drv->name, outputName),
+ *std::move(optCA),
+ Hash::dummy,
+ };
+ if (*scratchPath != newInfo0.path) {
// Also rewrite the output path
auto source = sinkToSource([&](Sink & nextSink) {
- RewritingSink rsink2(oldHashPart, std::string(finalPath.hashPart()), nextSink);
+ RewritingSink rsink2(oldHashPart, std::string(newInfo0.path.hashPart()), nextSink);
dumpPath(actualPath, rsink2);
rsink2.flush();
});
@@ -2461,19 +2490,8 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
}
HashResult narHashAndSize = hashPath(htSHA256, actualPath);
- ValidPathInfo newInfo0 {
- finalPath,
- narHashAndSize.first,
- };
-
+ newInfo0.narHash = narHashAndSize.first;
newInfo0.narSize = narHashAndSize.second;
- newInfo0.ca = FixedOutputHash {
- .method = outputHash.method,
- .hash = got,
- };
- newInfo0.references = refs.second;
- if (refs.first)
- newInfo0.references.insert(newInfo0.path);
assert(newInfo0.ca);
return newInfo0;
@@ -2495,22 +2513,23 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
ValidPathInfo newInfo0 { requiredFinalPath, narHashAndSize.first };
newInfo0.narSize = narHashAndSize.second;
auto refs = rewriteRefs();
- newInfo0.references = refs.second;
- if (refs.first)
+ newInfo0.references = std::move(refs.others);
+ if (refs.self)
newInfo0.references.insert(newInfo0.path);
return newInfo0;
},
[&](const DerivationOutput::CAFixed & dof) {
+ auto wanted = dof.ca.getHash();
+
auto newInfo0 = newInfoFromCA(DerivationOutput::CAFloating {
- .method = dof.hash.method,
- .hashType = dof.hash.hash.type,
+ .method = dof.ca.getMethod(),
+ .hashType = wanted.type,
});
/* Check wanted hash */
- const Hash & wanted = dof.hash.hash;
assert(newInfo0.ca);
- auto got = getContentAddressHash(*newInfo0.ca);
+ auto got = newInfo0.ca->getHash();
if (wanted != got) {
/* Throw an error after registering the path as
valid. */
@@ -2521,6 +2540,11 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
wanted.to_string(SRI, true),
got.to_string(SRI, true)));
}
+ if (!newInfo0.references.empty())
+ delayedException = std::make_exception_ptr(
+ BuildError("illegal path references in fixed-output derivation '%s'",
+ worker.store.printStorePath(drvPath)));
+
return newInfo0;
},
@@ -2682,7 +2706,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
means it's safe to link the derivation to the output hash. We must do
that for floating CA derivations, which otherwise couldn't be cached,
but it's fine to do in all cases. */
- DrvOutputs builtOutputs;
+ SingleDrvOutputs builtOutputs;
for (auto & [outputName, newInfo] : infos) {
auto oldinfo = get(initialOutputs, outputName);
@@ -2694,14 +2718,13 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
},
.outPath = newInfo.path
};
- if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)
&& drv->type().isPure())
{
signRealisation(thisRealisation);
worker.store.registerDrvOutput(thisRealisation);
}
- if (wantedOutputs.contains(outputName))
- builtOutputs.emplace(thisRealisation.id, thisRealisation);
+ builtOutputs.emplace(outputName, thisRealisation);
}
return builtOutputs;
@@ -2892,7 +2915,7 @@ void LocalDerivationGoal::deleteTmpDir(bool force)
bool LocalDerivationGoal::isReadDesc(int fd)
{
return (hook && DerivationGoal::isReadDesc(fd)) ||
- (!hook && fd == builderOut.readSide.get());
+ (!hook && fd == builderOut.get());
}
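
The builder's output now comes from a pseudoterminal: the parent keeps the master side (builderOut) and reads the log from it, while the child opens the slave side, puts it into raw mode, and dup2()s it onto stderr. A condensed, self-contained sketch of that plumbing; error handling, sandboxing and every Nix-specific detail are omitted, so this is an illustration rather than the actual builder setup:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <termios.h>
    #include <unistd.h>

    int main()
    {
        int master = posix_openpt(O_RDWR | O_NOCTTY);
        if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0) return 1;

        // Like the code above, this uses ptsname(), which is not thread-safe.
        const char * slaveName = ptsname(master);
        if (!slaveName) return 1;

        pid_t pid = fork();
        if (pid == 0) {                          // child: stands in for the builder
            int slave = open(slaveName, O_RDWR | O_NOCTTY);
            struct termios term;
            tcgetattr(slave, &term);
            cfmakeraw(&term);                    // prevent \n -> \r\n translation
            tcsetattr(slave, TCSANOW, &term);
            dup2(slave, STDERR_FILENO);          // builder stderr goes to the pty
            fprintf(stderr, "hello from the build\n");
            _exit(0);
        }

        char buf[256];                           // parent: read the "build log"
        ssize_t n = read(master, buf, sizeof buf - 1);
        if (n > 0) { buf[n] = '\0'; printf("log: %s", buf); }
        waitpid(pid, NULL, 0);
        return 0;
    }
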
diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh
index 34c4e9187..9acd7593d 100644
--- a/src/libstore/build/local-derivation-goal.hh
+++ b/src/libstore/build/local-derivation-goal.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "derivation-goal.hh"
#include "local-store.hh"
@@ -9,48 +10,75 @@ struct LocalDerivationGoal : public DerivationGoal
{
LocalStore & getLocalStore();
- /* User selected for running the builder. */
+ /**
+ * User selected for running the builder.
+ */
std::unique_ptr<UserLock> buildUser;
- /* The process ID of the builder. */
+ /**
+ * The process ID of the builder.
+ */
Pid pid;
- /* The cgroup of the builder, if any. */
+ /**
+ * The cgroup of the builder, if any.
+ */
std::optional<Path> cgroup;
- /* The temporary directory. */
+ /**
+ * The temporary directory.
+ */
Path tmpDir;
- /* The path of the temporary directory in the sandbox. */
+ /**
+ * The path of the temporary directory in the sandbox.
+ */
Path tmpDirInSandbox;
- /* Pipe for the builder's standard output/error. */
- Pipe builderOut;
+ /**
+ * Master side of the pseudoterminal used for the builder's
+ * standard output/error.
+ */
+ AutoCloseFD builderOut;
- /* Pipe for synchronising updates to the builder namespaces. */
+ /**
+ * Pipe for synchronising updates to the builder namespaces.
+ */
Pipe userNamespaceSync;
- /* The mount namespace and user namespace of the builder, used to add additional
- paths to the sandbox as a result of recursive Nix calls. */
+ /**
+ * The mount namespace and user namespace of the builder, used to add additional
+ * paths to the sandbox as a result of recursive Nix calls.
+ */
AutoCloseFD sandboxMountNamespace;
AutoCloseFD sandboxUserNamespace;
- /* On Linux, whether we're doing the build in its own user
- namespace. */
+ /**
+ * On Linux, whether we're doing the build in its own user
+ * namespace.
+ */
bool usingUserNamespace = true;
- /* Whether we're currently doing a chroot build. */
+ /**
+ * Whether we're currently doing a chroot build.
+ */
bool useChroot = false;
Path chrootRootDir;
- /* RAII object to delete the chroot directory. */
+ /**
+ * RAII object to delete the chroot directory.
+ */
std::shared_ptr<AutoDelete> autoDelChroot;
- /* Whether to run the build in a private network namespace. */
+ /**
+ * Whether to run the build in a private network namespace.
+ */
bool privateNetwork = false;
- /* Stuff we need to pass to initChild(). */
+ /**
+ * Stuff we need to pass to initChild().
+ */
struct ChrootPath {
Path source;
bool optional;
@@ -69,30 +97,35 @@ struct LocalDerivationGoal : public DerivationGoal
SandboxProfile additionalSandboxProfile;
#endif
- /* Hash rewriting. */
+ /**
+ * Hash rewriting.
+ */
StringMap inputRewrites, outputRewrites;
typedef map<StorePath, StorePath> RedirectedOutputs;
RedirectedOutputs redirectedOutputs;
- /* The outputs paths used during the build.
-
- - Input-addressed derivations or fixed content-addressed outputs are
- sometimes built when some of their outputs already exist, and can not
- be hidden via sandboxing. We use temporary locations instead and
- rewrite after the build. Otherwise the regular predetermined paths are
- put here.
-
- - Floating content-addressed derivations do not know their final build
- output paths until the outputs are hashed, so random locations are
- used, and then renamed. The randomness helps guard against hidden
- self-references.
+ /**
+     * The output paths used during the build.
+ *
+ * - Input-addressed derivations or fixed content-addressed outputs are
+ * sometimes built when some of their outputs already exist, and can not
+ * be hidden via sandboxing. We use temporary locations instead and
+ * rewrite after the build. Otherwise the regular predetermined paths are
+ * put here.
+ *
+ * - Floating content-addressed derivations do not know their final build
+ * output paths until the outputs are hashed, so random locations are
+ * used, and then renamed. The randomness helps guard against hidden
+ * self-references.
*/
OutputPathMap scratchOutputs;
- /* Path registration info from the previous round, if we're
- building multiple times. Since this contains the hash, it
- allows us to compare whether two rounds produced the same
- result. */
+ /**
+ * Path registration info from the previous round, if we're
+ * building multiple times. Since this contains the hash, it
+ * allows us to compare whether two rounds produced the same
+ * result.
+ */
std::map<Path, ValidPathInfo> prevInfos;
uid_t sandboxUid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 1000 : 0) : buildUser->getUID(); }
@@ -100,25 +133,37 @@ struct LocalDerivationGoal : public DerivationGoal
const static Path homeDir;
- /* The recursive Nix daemon socket. */
+ /**
+ * The recursive Nix daemon socket.
+ */
AutoCloseFD daemonSocket;
- /* The daemon main thread. */
+ /**
+ * The daemon main thread.
+ */
std::thread daemonThread;
- /* The daemon worker threads. */
+ /**
+ * The daemon worker threads.
+ */
std::vector<std::thread> daemonWorkerThreads;
- /* Paths that were added via recursive Nix calls. */
+ /**
+ * Paths that were added via recursive Nix calls.
+ */
StorePathSet addedPaths;
- /* Realisations that were added via recursive Nix calls. */
+ /**
+ * Realisations that were added via recursive Nix calls.
+ */
std::set<DrvOutput> addedDrvOutputs;
- /* Recursive Nix calls are only allowed to build or realize paths
- in the original input closure or added via a recursive Nix call
- (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
- /nix/store/<bla> is some arbitrary path in a binary cache). */
+ /**
+ * Recursive Nix calls are only allowed to build or realize paths
+ * in the original input closure or added via a recursive Nix call
+ * (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
+ * /nix/store/<bla> is some arbitrary path in a binary cache).
+ */
bool isAllowed(const StorePath & path)
{
return inputPaths.count(path) || addedPaths.count(path);
@@ -136,55 +181,81 @@ struct LocalDerivationGoal : public DerivationGoal
virtual ~LocalDerivationGoal() override;
- /* Whether we need to perform hash rewriting if there are valid output paths. */
+ /**
+ * Whether we need to perform hash rewriting if there are valid output paths.
+ */
bool needsHashRewrite();
- /* The additional states. */
+ /**
+ * The additional states.
+ */
void tryLocalBuild() override;
- /* Start building a derivation. */
+ /**
+ * Start building a derivation.
+ */
void startBuilder();
- /* Fill in the environment for the builder. */
+ /**
+ * Fill in the environment for the builder.
+ */
void initEnv();
- /* Setup tmp dir location. */
+ /**
+ * Setup tmp dir location.
+ */
void initTmpDir();
- /* Write a JSON file containing the derivation attributes. */
+ /**
+ * Write a JSON file containing the derivation attributes.
+ */
void writeStructuredAttrs();
void startDaemon();
void stopDaemon();
- /* Add 'path' to the set of paths that may be referenced by the
- outputs, and make it appear in the sandbox. */
+ /**
+ * Add 'path' to the set of paths that may be referenced by the
+ * outputs, and make it appear in the sandbox.
+ */
void addDependency(const StorePath & path);
- /* Make a file owned by the builder. */
+ /**
+ * Make a file owned by the builder.
+ */
void chownToBuilder(const Path & path);
int getChildStatus() override;
- /* Run the builder's process. */
+ /**
+ * Run the builder's process.
+ */
void runChild();
- /* Check that the derivation outputs all exist and register them
- as valid. */
- DrvOutputs registerOutputs() override;
+ /**
+ * Check that the derivation outputs all exist and register them
+ * as valid.
+ */
+ SingleDrvOutputs registerOutputs() override;
void signRealisation(Realisation &) override;
- /* Check that an output meets the requirements specified by the
- 'outputChecks' attribute (or the legacy
- '{allowed,disallowed}{References,Requisites}' attributes). */
+ /**
+ * Check that an output meets the requirements specified by the
+ * 'outputChecks' attribute (or the legacy
+ * '{allowed,disallowed}{References,Requisites}' attributes).
+ */
void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
- /* Close the read side of the logger pipe. */
+ /**
+ * Close the read side of the logger pipe.
+ */
void closeReadPipes() override;
- /* Cleanup hooks for buildDone() */
+ /**
+ * Cleanup hooks for buildDone()
+ */
void cleanupHookFinally() override;
void cleanupPreChildKill() override;
void cleanupPostChildKill() override;
@@ -194,24 +265,36 @@ struct LocalDerivationGoal : public DerivationGoal
bool isReadDesc(int fd) override;
- /* Delete the temporary directory, if we have one. */
+ /**
+ * Delete the temporary directory, if we have one.
+ */
void deleteTmpDir(bool force);
- /* Forcibly kill the child process, if any. */
+ /**
+ * Forcibly kill the child process, if any.
+ */
void killChild() override;
- /* Kill any processes running under the build user UID or in the
- cgroup of the build. */
+ /**
+ * Kill any processes running under the build user UID or in the
+ * cgroup of the build.
+ */
void killSandbox(bool getStats);
- /* Create alternative path calculated from but distinct from the
- input, so we can avoid overwriting outputs (or other store paths)
- that already exist. */
+ /**
+ * Create alternative path calculated from but distinct from the
+ * input, so we can avoid overwriting outputs (or other store paths)
+ * that already exist.
+ */
StorePath makeFallbackPath(const StorePath & path);
- /* Make a path to another based on the output name along with the
- derivation hash. */
- /* FIXME add option to randomize, so we can audit whether our
- rewrites caught everything */
+
+ /**
+ * Make a path to another based on the output name along with the
+ * derivation hash.
+ *
+ * @todo Add option to randomize, so we can audit whether our
+ * rewrites caught everything
+ */
StorePath makeFallbackPath(std::string_view outputName);
};
diff --git a/src/libstore/build/personality.hh b/src/libstore/build/personality.hh
index 30e4f4062..91b730fab 100644
--- a/src/libstore/build/personality.hh
+++ b/src/libstore/build/personality.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <string>
diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc
index 2af105b4d..93867007d 100644
--- a/src/libstore/build/substitution-goal.cc
+++ b/src/libstore/build/substitution-goal.cc
@@ -95,7 +95,9 @@ void PathSubstitutionGoal::tryNext()
subs.pop_front();
if (ca) {
- subPath = sub->makeFixedOutputPathFromCA(storePath.name(), *ca);
+ subPath = sub->makeFixedOutputPathFromCA(
+ std::string { storePath.name() },
+ ContentAddressWithReferences::withoutRefs(*ca));
if (sub->storeDir == worker.store.storeDir)
assert(subPath == storePath);
} else if (sub->storeDir != worker.store.storeDir) {
@@ -198,11 +200,10 @@ void PathSubstitutionGoal::tryToRun()
{
trace("trying to run");
- /* Make sure that we are allowed to start a build. Note that even
- if maxBuildJobs == 0 (no local builds allowed), we still allow
- a substituter to run. This is because substitutions cannot be
- distributed to another machine via the build hook. */
- if (worker.getNrLocalBuilds() >= std::max(1U, (unsigned int) settings.maxBuildJobs)) {
+ /* Make sure that we are allowed to start a substitution. Note that even
+ if maxSubstitutionJobs == 0, we still allow a substituter to run. This
+ prevents infinite waiting. */
+ if (worker.getNrSubstitutions() >= std::max(1U, (unsigned int) settings.maxSubstitutionJobs)) {
worker.waitForBuildSlot(shared_from_this());
return;
}
diff --git a/src/libstore/build/substitution-goal.hh b/src/libstore/build/substitution-goal.hh
index a73f8e666..9fc041920 100644
--- a/src/libstore/build/substitution-goal.hh
+++ b/src/libstore/build/substitution-goal.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "lock.hh"
#include "store-api.hh"
@@ -10,38 +11,58 @@ class Worker;
struct PathSubstitutionGoal : public Goal
{
- /* The store path that should be realised through a substitute. */
+ /**
+ * The store path that should be realised through a substitute.
+ */
StorePath storePath;
- /* The path the substituter refers to the path as. This will be
- different when the stores have different names. */
+ /**
+ * The path the substituter refers to the path as. This will be
+ * different when the stores have different names.
+ */
std::optional<StorePath> subPath;
- /* The remaining substituters. */
+ /**
+ * The remaining substituters.
+ */
std::list<ref<Store>> subs;
- /* The current substituter. */
+ /**
+ * The current substituter.
+ */
std::shared_ptr<Store> sub;
- /* Whether a substituter failed. */
+ /**
+ * Whether a substituter failed.
+ */
bool substituterFailed = false;
- /* Path info returned by the substituter's query info operation. */
+ /**
+ * Path info returned by the substituter's query info operation.
+ */
std::shared_ptr<const ValidPathInfo> info;
- /* Pipe for the substituter's standard output. */
+ /**
+ * Pipe for the substituter's standard output.
+ */
Pipe outPipe;
- /* The substituter thread. */
+ /**
+ * The substituter thread.
+ */
std::thread thr;
std::promise<void> promise;
- /* Whether to try to repair a valid path. */
+ /**
+ * Whether to try to repair a valid path.
+ */
RepairFlag repair;
- /* Location where we're downloading the substitute. Differs from
- storePath when doing a repair. */
+ /**
+ * Location where we're downloading the substitute. Differs from
+ * storePath when doing a repair.
+ */
Path destPath;
std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions,
@@ -50,7 +71,9 @@ struct PathSubstitutionGoal : public Goal
typedef void (PathSubstitutionGoal::*GoalState)();
GoalState state;
- /* Content address for recomputing store path */
+ /**
+ * Content address for recomputing store path
+ */
std::optional<ContentAddress> ca;
void done(
@@ -64,16 +87,20 @@ public:
void timedOut(Error && ex) override { abort(); };
+ /**
+ * We prepend "a$" to the key name to ensure substitution goals
+ * happen before derivation goals.
+ */
std::string key() override
{
- /* "a$" ensures substitution goals happen before derivation
- goals. */
return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath);
}
void work() override;
- /* The states. */
+ /**
+ * The states.
+ */
void init();
void tryNext();
void gotInfo();
@@ -81,11 +108,15 @@ public:
void tryToRun();
void finished();
- /* Callback used by the worker to write to the log. */
+ /**
+ * Callback used by the worker to write to the log.
+ */
void handleChildOutput(int fd, std::string_view data) override;
void handleEOF(int fd) override;
void cleanup() override;
+
+ JobCategory jobCategory() override { return JobCategory::Substitution; };
};
}
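
To illustrate the ordering trick documented above for `PathSubstitutionGoal::key()`, here is a minimal standalone sketch (not part of the patch). It assumes derivation goals use a key beginning with `b$`, which is not shown in this hunk:

```c++
// Minimal sketch: why the "a$" key prefix sorts substitution goals before
// derivation goals when goals are ordered by key. The "b$" derivation-goal
// prefix and the store paths are assumptions for this example.
#include <cassert>
#include <set>
#include <string>

int main()
{
    std::set<std::string> keys {
        "b$hello$/nix/store/...-hello.drv",   // hypothetical derivation goal key
        "a$hello$/nix/store/...-hello",       // hypothetical substitution goal key
    };
    // std::set orders its elements lexicographically, so the "a$" key comes first.
    assert(*keys.begin() == "a$hello$/nix/store/...-hello");
    return 0;
}
```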
diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc
index f775f8486..ee334d54a 100644
--- a/src/libstore/build/worker.cc
+++ b/src/libstore/build/worker.cc
@@ -18,6 +18,7 @@ Worker::Worker(Store & store, Store & evalStore)
{
/* Debugging: prevent recursive workers. */
nrLocalBuilds = 0;
+ nrSubstitutions = 0;
lastWokenUp = steady_time_point::min();
permanentFailure = false;
timedOut = false;
@@ -92,6 +93,7 @@ std::shared_ptr<PathSubstitutionGoal> Worker::makePathSubstitutionGoal(const Sto
return goal;
}
+
std::shared_ptr<DrvOutputSubstitutionGoal> Worker::makeDrvOutputSubstitutionGoal(const DrvOutput& id, RepairFlag repair, std::optional<ContentAddress> ca)
{
std::weak_ptr<DrvOutputSubstitutionGoal> & goal_weak = drvOutputSubstitutionGoals[id];
@@ -104,6 +106,20 @@ std::shared_ptr<DrvOutputSubstitutionGoal> Worker::makeDrvOutputSubstitutionGoal
return goal;
}
+
+GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode)
+{
+ return std::visit(overloaded {
+ [&](const DerivedPath::Built & bfd) -> GoalPtr {
+ return makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode);
+ },
+ [&](const DerivedPath::Opaque & bo) -> GoalPtr {
+ return makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair);
+ },
+ }, req.raw());
+}
+
+
template<typename K, typename G>
static void removeGoal(std::shared_ptr<G> goal, std::map<K, std::weak_ptr<G>> & goalMap)
{
@@ -161,6 +177,12 @@ unsigned Worker::getNrLocalBuilds()
}
+unsigned Worker::getNrSubstitutions()
+{
+ return nrSubstitutions;
+}
+
+
void Worker::childStarted(GoalPtr goal, const std::set<int> & fds,
bool inBuildSlot, bool respectTimeouts)
{
@@ -172,7 +194,10 @@ void Worker::childStarted(GoalPtr goal, const std::set<int> & fds,
child.inBuildSlot = inBuildSlot;
child.respectTimeouts = respectTimeouts;
children.emplace_back(child);
- if (inBuildSlot) nrLocalBuilds++;
+ if (inBuildSlot) {
+ if (goal->jobCategory() == JobCategory::Substitution) nrSubstitutions++;
+ else nrLocalBuilds++;
+ }
}
@@ -183,8 +208,13 @@ void Worker::childTerminated(Goal * goal, bool wakeSleepers)
if (i == children.end()) return;
if (i->inBuildSlot) {
- assert(nrLocalBuilds > 0);
- nrLocalBuilds--;
+ if (goal->jobCategory() == JobCategory::Substitution) {
+ assert(nrSubstitutions > 0);
+ nrSubstitutions--;
+ } else {
+ assert(nrLocalBuilds > 0);
+ nrLocalBuilds--;
+ }
}
children.erase(i);
@@ -205,7 +235,9 @@ void Worker::childTerminated(Goal * goal, bool wakeSleepers)
void Worker::waitForBuildSlot(GoalPtr goal)
{
debug("wait for build slot");
- if (getNrLocalBuilds() < settings.maxBuildJobs)
+ bool isSubstitutionGoal = goal->jobCategory() == JobCategory::Substitution;
+ if ((!isSubstitutionGoal && getNrLocalBuilds() < settings.maxBuildJobs) ||
+ (isSubstitutionGoal && getNrSubstitutions() < settings.maxSubstitutionJobs))
wakeUp(goal); /* we can do it right away */
else
addToWeakGoals(wantingToBuild, goal);
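
The accounting change above keeps two independent counters so substitutions no longer compete with local builds for slots. A reduced, free-standing sketch of that bookkeeping (names mirror the diff, but this is not the real `Worker` class; the `Build` enumerator is an assumption, since only `JobCategory::Substitution` appears in this hunk):

```c++
// Illustrative sketch of the separate slot accounting introduced by the patch.
#include <cassert>

enum class JobCategory { Build, Substitution };   // "Build" name assumed

struct SlotCounter
{
    unsigned int nrLocalBuilds = 0;
    unsigned int nrSubstitutions = 0;

    // Mirrors Worker::childStarted(): bump the counter matching the goal's category.
    void childStarted(JobCategory cat)
    {
        if (cat == JobCategory::Substitution) nrSubstitutions++;
        else nrLocalBuilds++;
    }

    // Mirrors Worker::childTerminated(): decrement the matching counter.
    void childTerminated(JobCategory cat)
    {
        if (cat == JobCategory::Substitution) { assert(nrSubstitutions > 0); nrSubstitutions--; }
        else { assert(nrLocalBuilds > 0); nrLocalBuilds--; }
    }
};

int main()
{
    SlotCounter c;
    c.childStarted(JobCategory::Substitution);
    c.childStarted(JobCategory::Build);
    assert(c.nrSubstitutions == 1 && c.nrLocalBuilds == 1);
    c.childTerminated(JobCategory::Substitution);
    c.childTerminated(JobCategory::Build);
    assert(c.nrSubstitutions == 0 && c.nrLocalBuilds == 0);
    return 0;
}
```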
diff --git a/src/libstore/build/worker.hh b/src/libstore/build/worker.hh
index 6d68d3cf1..63624d910 100644
--- a/src/libstore/build/worker.hh
+++ b/src/libstore/build/worker.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "lock.hh"
@@ -16,24 +17,29 @@ struct DerivationGoal;
struct PathSubstitutionGoal;
class DrvOutputSubstitutionGoal;
-/* Workaround for not being able to declare a something like
-
- class PathSubstitutionGoal : public Goal;
-
- even when Goal is a complete type.
-
- This is still a static cast. The purpose of exporting it is to define it in
- a place where `PathSubstitutionGoal` is concrete, and use it in a place where it
- is opaque. */
+/**
+ * Workaround for not being able to declare something like
+ *
+ * ```c++
+ * class PathSubstitutionGoal : public Goal;
+ * ```
+ * even when Goal is a complete type.
+ *
+ * This is still a static cast. The purpose of exporting it is to define it in
+ * a place where `PathSubstitutionGoal` is concrete, and use it in a place where it
+ * is opaque.
+ */
GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal);
GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal);
typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;
-/* A mapping used to remember for each child process to what goal it
- belongs, and file descriptors for receiving log data and output
- path creation commands. */
+/**
+ * A mapping used to remember for each child process to what goal it
+ * belongs, and file descriptors for receiving log data and output
+ * path creation commands.
+ */
struct Child
{
WeakGoalPtr goal;
@@ -41,14 +47,19 @@ struct Child
std::set<int> fds;
bool respectTimeouts;
bool inBuildSlot;
- steady_time_point lastOutput; /* time we last got output on stdout/stderr */
+ /**
+ * Time we last got output on stdout/stderr
+ */
+ steady_time_point lastOutput;
steady_time_point timeStarted;
};
/* Forward definition. */
struct HookInstance;
-/* The worker class. */
+/**
+ * The worker class.
+ */
class Worker
{
private:
@@ -56,38 +67,63 @@ private:
/* Note: the worker should only have strong pointers to the
top-level goals. */
- /* The top-level goals of the worker. */
+ /**
+ * The top-level goals of the worker.
+ */
Goals topGoals;
- /* Goals that are ready to do some work. */
+ /**
+ * Goals that are ready to do some work.
+ */
WeakGoals awake;
- /* Goals waiting for a build slot. */
+ /**
+ * Goals waiting for a build slot.
+ */
WeakGoals wantingToBuild;
- /* Child processes currently running. */
+ /**
+ * Child processes currently running.
+ */
std::list<Child> children;
- /* Number of build slots occupied. This includes local builds and
- substitutions but not remote builds via the build hook. */
+ /**
+ * Number of build slots occupied. This includes local builds but does not
+ * include substitutions or remote builds via the build hook.
+ */
unsigned int nrLocalBuilds;
- /* Maps used to prevent multiple instantiations of a goal for the
- same derivation / path. */
+ /**
+ * Number of substitution slots occupied.
+ */
+ unsigned int nrSubstitutions;
+
+ /**
+ * Maps used to prevent multiple instantiations of a goal for the
+ * same derivation / path.
+ */
std::map<StorePath, std::weak_ptr<DerivationGoal>> derivationGoals;
std::map<StorePath, std::weak_ptr<PathSubstitutionGoal>> substitutionGoals;
std::map<DrvOutput, std::weak_ptr<DrvOutputSubstitutionGoal>> drvOutputSubstitutionGoals;
- /* Goals waiting for busy paths to be unlocked. */
+ /**
+ * Goals waiting for busy paths to be unlocked.
+ */
WeakGoals waitingForAnyGoal;
- /* Goals sleeping for a few seconds (polling a lock). */
+ /**
+ * Goals sleeping for a few seconds (polling a lock).
+ */
WeakGoals waitingForAWhile;
- /* Last time the goals in `waitingForAWhile' where woken up. */
+ /**
+ * Last time the goals in `waitingForAWhile` were woken up.
+ */
steady_time_point lastWokenUp;
- /* Cache for pathContentsGood(). */
+ /**
+ * Cache for pathContentsGood().
+ */
std::map<StorePath, bool> pathContentsGoodCache;
public:
@@ -96,17 +132,25 @@ public:
const Activity actDerivations;
const Activity actSubstitutions;
- /* Set if at least one derivation had a BuildError (i.e. permanent
- failure). */
+ /**
+ * Set if at least one derivation had a BuildError (i.e. permanent
+ * failure).
+ */
bool permanentFailure;
- /* Set if at least one derivation had a timeout. */
+ /**
+ * Set if at least one derivation had a timeout.
+ */
bool timedOut;
- /* Set if at least one derivation fails with a hash mismatch. */
+ /**
+ * Set if at least one derivation fails with a hash mismatch.
+ */
bool hashMismatch;
- /* Set if at least one derivation is not deterministic in check mode. */
+ /**
+ * Set if at least one derivation is not deterministic in check mode.
+ */
bool checkMismatch;
Store & store;
@@ -128,16 +172,22 @@ public:
uint64_t expectedNarSize = 0;
uint64_t doneNarSize = 0;
- /* Whether to ask the build hook if it can build a derivation. If
- it answers with "decline-permanently", we don't try again. */
+ /**
+ * Whether to ask the build hook if it can build a derivation. If
+ * it answers with "decline-permanently", we don't try again.
+ */
bool tryBuildHook = true;
Worker(Store & store, Store & evalStore);
~Worker();
- /* Make a goal (with caching). */
+ /**
+ * Make a goal (with caching).
+ */
- /* derivation goal */
+ /**
+ * @ref DerivationGoal "derivation goal"
+ */
private:
std::shared_ptr<DerivationGoal> makeDerivationGoalCommon(
const StorePath & drvPath, const OutputsSpec & wantedOutputs,
@@ -150,56 +200,92 @@ public:
const StorePath & drvPath, const BasicDerivation & drv,
const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal);
- /* substitution goal */
+ /**
+ * @ref SubstitutionGoal "substitution goal"
+ */
std::shared_ptr<PathSubstitutionGoal> makePathSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
std::shared_ptr<DrvOutputSubstitutionGoal> makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
- /* Remove a dead goal. */
+ /**
+ * Make a goal corresponding to the `DerivedPath`.
+ *
+ * It will be a `DerivationGoal` for a `DerivedPath::Built` or
+ * a `SubstitutionGoal` for a `DerivedPath::Opaque`.
+ */
+ GoalPtr makeGoal(const DerivedPath & req, BuildMode buildMode = bmNormal);
+
+ /**
+ * Remove a dead goal.
+ */
void removeGoal(GoalPtr goal);
- /* Wake up a goal (i.e., there is something for it to do). */
+ /**
+ * Wake up a goal (i.e., there is something for it to do).
+ */
void wakeUp(GoalPtr goal);
- /* Return the number of local build and substitution processes
- currently running (but not remote builds via the build
- hook). */
+ /**
+ * Return the number of local build processes currently running (but not
+ * remote builds via the build hook).
+ */
unsigned int getNrLocalBuilds();
- /* Registers a running child process. `inBuildSlot' means that
- the process counts towards the jobs limit. */
+ /**
+ * Return the number of substitution processes currently running.
+ */
+ unsigned int getNrSubstitutions();
+
+ /**
+ * Registers a running child process. `inBuildSlot` means that
+ * the process counts towards the jobs limit.
+ */
void childStarted(GoalPtr goal, const std::set<int> & fds,
bool inBuildSlot, bool respectTimeouts);
- /* Unregisters a running child process. `wakeSleepers' should be
- false if there is no sense in waking up goals that are sleeping
- because they can't run yet (e.g., there is no free build slot,
- or the hook would still say `postpone'). */
+ /**
+ * Unregisters a running child process. `wakeSleepers` should be
+ * false if there is no sense in waking up goals that are sleeping
+ * because they can't run yet (e.g., there is no free build slot,
+ * or the hook would still say `postpone`).
+ */
void childTerminated(Goal * goal, bool wakeSleepers = true);
- /* Put `goal' to sleep until a build slot becomes available (which
- might be right away). */
+ /**
+ * Put `goal` to sleep until a build slot becomes available (which
+ * might be right away).
+ */
void waitForBuildSlot(GoalPtr goal);
- /* Wait for any goal to finish. Pretty indiscriminate way to
- wait for some resource that some other goal is holding. */
+ /**
+ * Wait for any goal to finish. Pretty indiscriminate way to
+ * wait for some resource that some other goal is holding.
+ */
void waitForAnyGoal(GoalPtr goal);
- /* Wait for a few seconds and then retry this goal. Used when
- waiting for a lock held by another process. This kind of
- polling is inefficient, but POSIX doesn't really provide a way
- to wait for multiple locks in the main select() loop. */
+ /**
+ * Wait for a few seconds and then retry this goal. Used when
+ * waiting for a lock held by another process. This kind of
+ * polling is inefficient, but POSIX doesn't really provide a way
+ * to wait for multiple locks in the main select() loop.
+ */
void waitForAWhile(GoalPtr goal);
- /* Loop until the specified top-level goals have finished. */
+ /**
+ * Loop until the specified top-level goals have finished.
+ */
void run(const Goals & topGoals);
- /* Wait for input to become available. */
+ /**
+ * Wait for input to become available.
+ */
void waitForInput();
unsigned int exitStatus();
- /* Check whether the given valid path exists and has the right
- contents. */
+ /**
+ * Check whether the given valid path exists and has the right
+ * contents.
+ */
bool pathContentsGood(const StorePath & path);
void markContentsGood(const StorePath & path);
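
The new `Worker::makeGoal()` dispatches on the `DerivedPath` variant with `std::visit` and the `overloaded` helper. A self-contained sketch of that dispatch pattern, using stand-in types rather than the real Nix classes:

```c++
// Sketch of the std::visit + overloaded dispatch used by Worker::makeGoal().
// Built/Opaque/DerivedPath here are stand-ins, not the Nix types.
#include <iostream>
#include <string>
#include <variant>

template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

struct Built  { std::string drvPath; };   // stand-in for DerivedPath::Built
struct Opaque { std::string path; };      // stand-in for DerivedPath::Opaque

using DerivedPath = std::variant<Built, Opaque>;

std::string makeGoal(const DerivedPath & req)
{
    return std::visit(overloaded {
        [](const Built & b)  { return "derivation goal for " + b.drvPath; },
        [](const Opaque & o) { return "substitution goal for " + o.path; },
    }, req);
}

int main()
{
    std::cout << makeGoal(Built { "hello.drv" }) << "\n";
    std::cout << makeGoal(Opaque { "hello" }) << "\n";
    return 0;
}
```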
diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh
index 66597e456..d201fb3ac 100644
--- a/src/libstore/builtins.hh
+++ b/src/libstore/builtins.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "derivations.hh"
diff --git a/src/libstore/builtins/buildenv.hh b/src/libstore/builtins/buildenv.hh
index a018de3af..0923c2adb 100644
--- a/src/libstore/builtins/buildenv.hh
+++ b/src/libstore/builtins/buildenv.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "derivations.hh"
#include "store-api.hh"
diff --git a/src/libstore/content-address.cc b/src/libstore/content-address.cc
index cf32ccdc4..04f7ac214 100644
--- a/src/libstore/content-address.cc
+++ b/src/libstore/content-address.cc
@@ -9,7 +9,7 @@ std::string FixedOutputHash::printMethodAlgo() const
return makeFileIngestionPrefix(method) + printHashType(hash.type);
}
-std::string makeFileIngestionPrefix(const FileIngestionMethod m)
+std::string makeFileIngestionPrefix(FileIngestionMethod m)
{
switch (m) {
case FileIngestionMethod::Flat:
@@ -21,41 +21,58 @@ std::string makeFileIngestionPrefix(const FileIngestionMethod m)
}
}
-std::string makeFixedOutputCA(FileIngestionMethod method, const Hash & hash)
+std::string ContentAddressMethod::renderPrefix() const
{
- return "fixed:"
- + makeFileIngestionPrefix(method)
- + hash.to_string(Base32, true);
+ return std::visit(overloaded {
+ [](TextIngestionMethod) -> std::string { return "text:"; },
+ [](FileIngestionMethod m2) {
+ /* Not prefixed for back compat with things that couldn't produce text before. */
+ return makeFileIngestionPrefix(m2);
+ },
+ }, raw);
+}
+
+ContentAddressMethod ContentAddressMethod::parsePrefix(std::string_view & m)
+{
+ ContentAddressMethod method = FileIngestionMethod::Flat;
+ if (splitPrefix(m, "r:"))
+ method = FileIngestionMethod::Recursive;
+ else if (splitPrefix(m, "text:"))
+ method = TextIngestionMethod {};
+ return method;
}
-std::string renderContentAddress(ContentAddress ca)
+std::string ContentAddress::render() const
{
return std::visit(overloaded {
- [](TextHash & th) {
- return "text:" + th.hash.to_string(Base32, true);
+ [](const TextHash & th) {
+ return "text:"
+ + th.hash.to_string(Base32, true);
},
- [](FixedOutputHash & fsh) {
- return makeFixedOutputCA(fsh.method, fsh.hash);
+ [](const FixedOutputHash & fsh) {
+ return "fixed:"
+ + makeFileIngestionPrefix(fsh.method)
+ + fsh.hash.to_string(Base32, true);
}
- }, ca);
+ }, raw);
}
-std::string renderContentAddressMethod(ContentAddressMethod cam)
+std::string ContentAddressMethod::render(HashType ht) const
{
return std::visit(overloaded {
- [](TextHashMethod & th) {
- return std::string{"text:"} + printHashType(htSHA256);
+ [&](const TextIngestionMethod & th) {
+ return std::string{"text:"} + printHashType(ht);
},
- [](FixedOutputHashMethod & fshm) {
- return "fixed:" + makeFileIngestionPrefix(fshm.fileIngestionMethod) + printHashType(fshm.hashType);
+ [&](const FileIngestionMethod & fim) {
+ return "fixed:" + makeFileIngestionPrefix(fim) + printHashType(ht);
}
- }, cam);
+ }, raw);
}
-/*
- Parses content address strings up to the hash.
+/**
+ * Parses content address strings up to the hash.
*/
-static ContentAddressMethod parseContentAddressMethodPrefix(std::string_view & rest)
+static std::pair<ContentAddressMethod, HashType> parseContentAddressMethodPrefix(std::string_view & rest)
{
std::string_view wholeInput { rest };
@@ -79,46 +96,47 @@ static ContentAddressMethod parseContentAddressMethodPrefix(std::string_view & r
if (prefix == "text") {
// No parsing of the ingestion method, "text" only support flat.
HashType hashType = parseHashType_();
- if (hashType != htSHA256)
- throw Error("text content address hash should use %s, but instead uses %s",
- printHashType(htSHA256), printHashType(hashType));
- return TextHashMethod {};
+ return {
+ TextIngestionMethod {},
+ std::move(hashType),
+ };
} else if (prefix == "fixed") {
// Parse method
auto method = FileIngestionMethod::Flat;
if (splitPrefix(rest, "r:"))
method = FileIngestionMethod::Recursive;
HashType hashType = parseHashType_();
- return FixedOutputHashMethod {
- .fileIngestionMethod = method,
- .hashType = std::move(hashType),
+ return {
+ std::move(method),
+ std::move(hashType),
};
} else
throw UsageError("content address prefix '%s' is unrecognized. Recogonized prefixes are 'text' or 'fixed'", prefix);
}
-ContentAddress parseContentAddress(std::string_view rawCa) {
+ContentAddress ContentAddress::parse(std::string_view rawCa)
+{
auto rest = rawCa;
- ContentAddressMethod caMethod = parseContentAddressMethodPrefix(rest);
+ auto [caMethod, hashType_] = parseContentAddressMethodPrefix(rest);
+ auto hashType = hashType_; // work around clang bug
- return std::visit(
- overloaded {
- [&](TextHashMethod & thm) {
- return ContentAddress(TextHash {
- .hash = Hash::parseNonSRIUnprefixed(rest, htSHA256)
- });
- },
- [&](FixedOutputHashMethod & fohMethod) {
- return ContentAddress(FixedOutputHash {
- .method = fohMethod.fileIngestionMethod,
- .hash = Hash::parseNonSRIUnprefixed(rest, std::move(fohMethod.hashType)),
- });
- },
- }, caMethod);
+ return std::visit(overloaded {
+ [&](TextIngestionMethod &) {
+ return ContentAddress(TextHash {
+ .hash = Hash::parseNonSRIUnprefixed(rest, hashType)
+ });
+ },
+ [&](FileIngestionMethod & fim) {
+ return ContentAddress(FixedOutputHash {
+ .method = fim,
+ .hash = Hash::parseNonSRIUnprefixed(rest, hashType),
+ });
+ },
+ }, caMethod.raw);
}
-ContentAddressMethod parseContentAddressMethod(std::string_view caMethod)
+std::pair<ContentAddressMethod, HashType> ContentAddressMethod::parse(std::string_view caMethod)
{
std::string asPrefix = std::string{caMethod} + ":";
// parseContentAddressMethodPrefix takes its argument by reference
@@ -126,26 +144,144 @@ ContentAddressMethod parseContentAddressMethod(std::string_view caMethod)
return parseContentAddressMethodPrefix(asPrefixView);
}
-std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt)
+std::optional<ContentAddress> ContentAddress::parseOpt(std::string_view rawCaOpt)
{
- return rawCaOpt == "" ? std::optional<ContentAddress>() : parseContentAddress(rawCaOpt);
+ return rawCaOpt == ""
+ ? std::nullopt
+ : std::optional { ContentAddress::parse(rawCaOpt) };
};
std::string renderContentAddress(std::optional<ContentAddress> ca)
{
- return ca ? renderContentAddress(*ca) : "";
+ return ca ? ca->render() : "";
}
-Hash getContentAddressHash(const ContentAddress & ca)
+ContentAddress ContentAddress::fromParts(
+ ContentAddressMethod method, Hash hash) noexcept
{
return std::visit(overloaded {
- [](const TextHash & th) {
+ [&](TextIngestionMethod _) -> ContentAddress {
+ return TextHash {
+ .hash = std::move(hash),
+ };
+ },
+ [&](FileIngestionMethod m2) -> ContentAddress {
+ return FixedOutputHash {
+ .method = std::move(m2),
+ .hash = std::move(hash),
+ };
+ },
+ }, method.raw);
+}
+
+ContentAddressMethod ContentAddress::getMethod() const
+{
+ return std::visit(overloaded {
+ [](const TextHash & th) -> ContentAddressMethod {
+ return TextIngestionMethod {};
+ },
+ [](const FixedOutputHash & fsh) -> ContentAddressMethod {
+ return fsh.method;
+ },
+ }, raw);
+}
+
+const Hash & ContentAddress::getHash() const
+{
+ return std::visit(overloaded {
+ [](const TextHash & th) -> auto & {
return th.hash;
},
- [](const FixedOutputHash & fsh) {
+ [](const FixedOutputHash & fsh) -> auto & {
return fsh.hash;
- }
- }, ca);
+ },
+ }, raw);
+}
+
+std::string ContentAddress::printMethodAlgo() const
+{
+ return getMethod().renderPrefix()
+ + printHashType(getHash().type);
+}
+
+bool StoreReferences::empty() const
+{
+ return !self && others.empty();
+}
+
+size_t StoreReferences::size() const
+{
+ return (self ? 1 : 0) + others.size();
+}
+
+ContentAddressWithReferences ContentAddressWithReferences::withoutRefs(const ContentAddress & ca) noexcept
+{
+ return std::visit(overloaded {
+ [&](const TextHash & h) -> ContentAddressWithReferences {
+ return TextInfo {
+ .hash = h,
+ .references = {},
+ };
+ },
+ [&](const FixedOutputHash & h) -> ContentAddressWithReferences {
+ return FixedOutputInfo {
+ .hash = h,
+ .references = {},
+ };
+ },
+ }, ca.raw);
+}
+
+std::optional<ContentAddressWithReferences> ContentAddressWithReferences::fromPartsOpt(
+ ContentAddressMethod method, Hash hash, StoreReferences refs) noexcept
+{
+ return std::visit(overloaded {
+ [&](TextIngestionMethod _) -> std::optional<ContentAddressWithReferences> {
+ if (refs.self)
+ return std::nullopt;
+ return ContentAddressWithReferences {
+ TextInfo {
+ .hash = { .hash = std::move(hash) },
+ .references = std::move(refs.others),
+ }
+ };
+ },
+ [&](FileIngestionMethod m2) -> std::optional<ContentAddressWithReferences> {
+ return ContentAddressWithReferences {
+ FixedOutputInfo {
+ .hash = {
+ .method = m2,
+ .hash = std::move(hash),
+ },
+ .references = std::move(refs),
+ }
+ };
+ },
+ }, method.raw);
+}
+
+ContentAddressMethod ContentAddressWithReferences::getMethod() const
+{
+ return std::visit(overloaded {
+ [](const TextInfo & th) -> ContentAddressMethod {
+ return TextIngestionMethod {};
+ },
+ [](const FixedOutputInfo & fsh) -> ContentAddressMethod {
+ return fsh.hash.method;
+ },
+ }, raw);
+}
+
+Hash ContentAddressWithReferences::getHash() const
+{
+ return std::visit(overloaded {
+ [](const TextInfo & th) {
+ return th.hash.hash;
+ },
+ [](const FixedOutputInfo & fsh) {
+ return fsh.hash.hash;
+ },
+ }, raw);
}
}
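
The prefix grammar handled by `ContentAddressMethod::parsePrefix()` / `renderPrefix()` above is small: `text:` for text ingestion, `r:` for recursive (NAR) ingestion, and no prefix for flat ingestion. A standalone illustration of that grammar (assumed behaviour of `splitPrefix`, not the Nix implementation):

```c++
// Standalone sketch of the content-address method prefix grammar.
#include <cassert>
#include <string_view>
#include <variant>

struct TextIngestionMethod {};
enum class FileIngestionMethod { Flat, Recursive };
using Method = std::variant<TextIngestionMethod, FileIngestionMethod>;

// Strip `prefix` from the front of `s` if present; report whether it matched.
static bool splitPrefix(std::string_view & s, std::string_view prefix)
{
    if (s.substr(0, prefix.size()) != prefix) return false;
    s.remove_prefix(prefix.size());
    return true;
}

// "r:..." -> recursive, "text:..." -> text, anything else -> flat.
static Method parsePrefix(std::string_view & m)
{
    if (splitPrefix(m, "r:")) return FileIngestionMethod::Recursive;
    if (splitPrefix(m, "text:")) return TextIngestionMethod {};
    return FileIngestionMethod::Flat;
}

int main()
{
    std::string_view a = "r:sha256";
    assert(std::get<FileIngestionMethod>(parsePrefix(a)) == FileIngestionMethod::Recursive && a == "sha256");

    std::string_view b = "text:sha256";
    assert(std::holds_alternative<TextIngestionMethod>(parsePrefix(b)) && b == "sha256");

    std::string_view c = "sha256";
    assert(std::get<FileIngestionMethod>(parsePrefix(c)) == FileIngestionMethod::Flat && c == "sha256");
    return 0;
}
```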
diff --git a/src/libstore/content-address.hh b/src/libstore/content-address.hh
index f6a6f5140..e1e80448b 100644
--- a/src/libstore/content-address.hh
+++ b/src/libstore/content-address.hh
@@ -1,77 +1,317 @@
#pragma once
+///@file
#include <variant>
#include "hash.hh"
+#include "path.hh"
+#include "comparator.hh"
namespace nix {
+/*
+ * Content addressing method
+ */
+
+/* We only have one way to hash text with references, so this is a single-value
+ type, mainly useful with std::variant.
+*/
+
+/**
+ * The single way we can serialize "text" file system objects.
+ *
+ * Somewhat obscure, used by \ref Derivation derivations and
+ * `builtins.toFile` currently.
+ *
+ * TextIngestionMethod is identical to FileIngestionMethod::Fixed except that
+ * the former may not have self-references and is tagged `text:${algo}:${hash}`
+ * rather than `fixed:${algo}:${hash}`. The contents of the store path are
+ * ingested and hashed identically, aside from the slightly different tag and
+ * restriction on self-references.
+ */
+struct TextIngestionMethod : std::monostate { };
+
+/**
+ * An enumeration of the main ways we can serialize file system
+ * objects.
+ */
enum struct FileIngestionMethod : uint8_t {
+ /**
+ * Flat-file hashing. Directly ingest the contents of a single file
+ */
Flat = false,
+ /**
+ * Recursive (or NAR) hashing. Serializes the file-system object in Nix
+ * Archive format and ingests that
+ */
Recursive = true
};
+/**
+ * Compute the prefix to the hash algorithm which indicates how the
+ * files were ingested.
+ */
+std::string makeFileIngestionPrefix(FileIngestionMethod m);
+
+/**
+ * An enumeration of all the ways we can serialize file system objects.
+ *
+ * Just the type of a content address. Combine with the hash itself, and
+ * we have a `ContentAddress` as defined below. Combine that, in turn,
+ * with info on references, and we have `ContentAddressWithReferences`,
+ * as defined further below.
+ */
+struct ContentAddressMethod
+{
+ typedef std::variant<
+ TextIngestionMethod,
+ FileIngestionMethod
+ > Raw;
+
+ Raw raw;
+
+ GENERATE_CMP(ContentAddressMethod, me->raw);
+
+ /* The moral equivalent of `using Raw::Raw;` */
+ ContentAddressMethod(auto &&... arg)
+ : raw(std::forward<decltype(arg)>(arg)...)
+ { }
+
+
+ /**
+ * Parse the prefix tag which indicates how the files
+ * were ingested, with the fixed output case not prefixed for back
+ * compat.
+ *
+ * @param [in] m A string that should begin with the prefix.
+ * @param [out] m The remainder of the string after the prefix.
+ */
+ static ContentAddressMethod parsePrefix(std::string_view & m);
+
+ /**
+ * Render the prefix tag which indicates how the files were ingested.
+ *
+ * The rough inverse of `parsePrefix()`.
+ */
+ std::string renderPrefix() const;
+
+ /**
+ * Parse a content addressing method and hash type.
+ */
+ static std::pair<ContentAddressMethod, HashType> parse(std::string_view rawCaMethod);
+
+ /**
+ * Render a content addressing method and hash type in a
+ * nicer way, prefixing both cases.
+ *
+ * The rough inverse of `parse()`.
+ */
+ std::string render(HashType ht) const;
+};
+
+
+/*
+ * Mini content address
+ */
+
+/**
+ * Somewhat obscure, used by \ref Derivation derivations and
+ * `builtins.toFile` currently.
+ */
struct TextHash {
+ /**
+ * Hash of the contents of the text/file.
+ */
Hash hash;
+
+ GENERATE_CMP(TextHash, me->hash);
};
-/// Pair of a hash, and how the file system was ingested
+/**
+ * Used by most store objects that are content-addressed.
+ */
struct FixedOutputHash {
+ /**
+ * How the file system objects are serialized
+ */
FileIngestionMethod method;
+ /**
+ * Hash of that serialization
+ */
Hash hash;
+
std::string printMethodAlgo() const;
+
+ GENERATE_CMP(FixedOutputHash, me->method, me->hash);
};
-/*
- We've accumulated several types of content-addressed paths over the years;
- fixed-output derivations support multiple hash algorithms and serialisation
- methods (flat file vs NAR). Thus, ‘ca’ has one of the following forms:
+/**
+ * We've accumulated several types of content-addressed paths over the
+ * years; fixed-output derivations support multiple hash algorithms and
+ * serialisation methods (flat file vs NAR). Thus, ‘ca’ has one of the
+ * following forms:
+ *
+ * - ‘text:sha256:<sha256 hash of file contents>’: For paths
+ * computed by Store::makeTextPath() / Store::addTextToStore().
+ *
+ * - ‘fixed:<r?>:<ht>:<h>’: For paths computed by
+ * Store::makeFixedOutputPath() / Store::addToStore().
+ */
+struct ContentAddress
+{
+ typedef std::variant<
+ TextHash,
+ FixedOutputHash
+ > Raw;
- * ‘text:sha256:<sha256 hash of file contents>’: For paths
- computed by makeTextPath() / addTextToStore().
+ Raw raw;
- * ‘fixed:<r?>:<ht>:<h>’: For paths computed by
- makeFixedOutputPath() / addToStore().
-*/
-typedef std::variant<
- TextHash, // for paths computed by makeTextPath() / addTextToStore
- FixedOutputHash // for path computed by makeFixedOutputPath
-> ContentAddress;
+ GENERATE_CMP(ContentAddress, me->raw);
-/* Compute the prefix to the hash algorithm which indicates how the files were
- ingested. */
-std::string makeFileIngestionPrefix(const FileIngestionMethod m);
+ /* The moral equivalent of `using Raw::Raw;` */
+ ContentAddress(auto &&... arg)
+ : raw(std::forward<decltype(arg)>(arg)...)
+ { }
-/* Compute the content-addressability assertion (ValidPathInfo::ca)
- for paths created by makeFixedOutputPath() / addToStore(). */
-std::string makeFixedOutputCA(FileIngestionMethod method, const Hash & hash);
+ /**
+ * Compute the content-addressability assertion
+ * (`ValidPathInfo::ca`) for paths created by
+ * `Store::makeFixedOutputPath()` / `Store::addToStore()`.
+ */
+ std::string render() const;
-std::string renderContentAddress(ContentAddress ca);
+ static ContentAddress parse(std::string_view rawCa);
-std::string renderContentAddress(std::optional<ContentAddress> ca);
+ static std::optional<ContentAddress> parseOpt(std::string_view rawCaOpt);
-ContentAddress parseContentAddress(std::string_view rawCa);
+ /**
+ * Create a `ContentAddress` from 2 parts:
+ *
+ * @param method Way ingesting the file system data.
+ *
+ * @param hash Hash of ingested file system data.
+ */
+ static ContentAddress fromParts(
+ ContentAddressMethod method, Hash hash) noexcept;
-std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt);
+ ContentAddressMethod getMethod() const;
+
+ const Hash & getHash() const;
+
+ std::string printMethodAlgo() const;
+};
+
+/**
+ * Render the `ContentAddress` if it exists to a string, return empty
+ * string otherwise.
+ */
+std::string renderContentAddress(std::optional<ContentAddress> ca);
-Hash getContentAddressHash(const ContentAddress & ca);
/*
- We only have one way to hash text with references, so this is single-value
- type is only useful in std::variant.
-*/
-struct TextHashMethod { };
-struct FixedOutputHashMethod {
- FileIngestionMethod fileIngestionMethod;
- HashType hashType;
+ * Full content address
+ *
+ * See the schema for store paths in store-api.cc
+ */
+
+/**
+ * A set of references to other store objects.
+ *
+ * References to other store objects are tracked with store paths, self
+ * references however are tracked with a boolean.
+ */
+struct StoreReferences {
+ /**
+ * References to other store objects
+ */
+ StorePathSet others;
+
+ /**
+ * Reference to this store object
+ */
+ bool self = false;
+
+ /**
+ * @return true iff no references, i.e. others is empty and self is
+ * false.
+ */
+ bool empty() const;
+
+ /**
+ * Returns the number of references, i.e. the size of others, plus 1
+ * iff self is true.
+ */
+ size_t size() const;
+
+ GENERATE_CMP(StoreReferences, me->self, me->others);
};
-typedef std::variant<
- TextHashMethod,
- FixedOutputHashMethod
- > ContentAddressMethod;
+// This matches the additional info that we need for makeTextPath
+struct TextInfo {
+ TextHash hash;
+ /**
+ * References to other store objects only; self references
+ * disallowed
+ */
+ StorePathSet references;
-ContentAddressMethod parseContentAddressMethod(std::string_view rawCaMethod);
+ GENERATE_CMP(TextInfo, me->hash, me->references);
+};
+
+struct FixedOutputInfo {
+ FixedOutputHash hash;
+ /**
+ * References to other store objects or this one.
+ */
+ StoreReferences references;
+
+ GENERATE_CMP(FixedOutputInfo, me->hash, me->references);
+};
+
+/**
+ * Ways of content addressing but not a complete ContentAddress.
+ *
+ * A ContentAddress without a Hash.
+ */
+struct ContentAddressWithReferences
+{
+ typedef std::variant<
+ TextInfo,
+ FixedOutputInfo
+ > Raw;
+
+ Raw raw;
-std::string renderContentAddressMethod(ContentAddressMethod caMethod);
+ GENERATE_CMP(ContentAddressWithReferences, me->raw);
+
+ /* The moral equivalent of `using Raw::Raw;` */
+ ContentAddressWithReferences(auto &&... arg)
+ : raw(std::forward<decltype(arg)>(arg)...)
+ { }
+
+ /**
+ * Create a `ContentAddressWithReferences` from a mere
+ * `ContentAddress`, by claiming no references.
+ */
+ static ContentAddressWithReferences withoutRefs(const ContentAddress &) noexcept;
+
+ /**
+ * Create a `ContentAddressWithReferences` from 3 parts:
+ *
+ * @param method Way ingesting the file system data.
+ *
+ * @param hash Hash of ingested file system data.
+ *
+ * @param refs References to other store objects or oneself.
+ *
+ * Do note that not all combinations are supported; `nullopt` is
+ * returned for invalid combinations.
+ */
+ static std::optional<ContentAddressWithReferences> fromPartsOpt(
+ ContentAddressMethod method, Hash hash, StoreReferences refs) noexcept;
+
+ ContentAddressMethod getMethod() const;
+
+ Hash getHash() const;
+};
}
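
The validity rule behind `ContentAddressWithReferences::fromPartsOpt()` returning `nullopt` is that text ingestion cannot express a self-reference, while fixed-output objects can. A tiny sketch of that rule with stand-in types (not the Nix API):

```c++
// Sketch of the combination check performed by fromPartsOpt(): text + self
// reference is the one unrepresentable case. Stand-in types only.
#include <cassert>
#include <set>
#include <string>
#include <variant>

struct TextIngestionMethod {};
enum class FileIngestionMethod { Flat, Recursive };
using Method = std::variant<TextIngestionMethod, FileIngestionMethod>;

struct StoreReferences {
    std::set<std::string> others;   // references to other store objects
    bool self = false;              // reference to this store object
};

// Returns true iff the method can represent the given references.
static bool validCombination(const Method & method, const StoreReferences & refs)
{
    return !(std::holds_alternative<TextIngestionMethod>(method) && refs.self);
}

int main()
{
    assert(!validCombination(TextIngestionMethod {}, StoreReferences { .self = true }));
    assert( validCombination(FileIngestionMethod::Recursive, StoreReferences { .self = true }));
    assert( validCombination(TextIngestionMethod {}, StoreReferences { .others = { "dep" } }));
    return 0;
}
```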
diff --git a/src/libstore/crypto.hh b/src/libstore/crypto.hh
index 03f85c103..35216d470 100644
--- a/src/libstore/crypto.hh
+++ b/src/libstore/crypto.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
@@ -11,8 +12,10 @@ struct Key
std::string name;
std::string key;
- /* Construct Key from a string in the format
- ‘<name>:<key-in-base64>’. */
+ /**
+ * Construct Key from a string in the format
+ * ‘<name>:<key-in-base64>’.
+ */
Key(std::string_view s);
std::string to_string() const;
@@ -28,7 +31,9 @@ struct SecretKey : Key
{
SecretKey(std::string_view s);
- /* Return a detached signature of the given string. */
+ /**
+ * Return a detached signature of the given string.
+ */
std::string signDetached(std::string_view s) const;
PublicKey toPublicKey() const;
@@ -52,8 +57,10 @@ private:
typedef std::map<std::string, PublicKey> PublicKeys;
-/* Return true iff ‘sig’ is a correct signature over ‘data’ using one
- of the given public keys. */
+/**
+ * @return true iff ‘sig’ is a correct signature over ‘data’ using one
+ * of the given public keys.
+ */
bool verifyDetached(const std::string & data, const std::string & sig,
const PublicKeys & publicKeys);
diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc
index 5e6fd011f..b6dd83684 100644
--- a/src/libstore/daemon.cc
+++ b/src/libstore/daemon.cc
@@ -67,12 +67,12 @@ struct TunnelLogger : public Logger
state->pendingMsgs.push_back(s);
}
- void log(Verbosity lvl, const FormatOrString & fs) override
+ void log(Verbosity lvl, std::string_view s) override
{
if (lvl > verbosity) return;
StringSink buf;
- buf << STDERR_NEXT << (fs.s + "\n");
+ buf << STDERR_NEXT << (s + "\n");
enqueueMsg(buf.s);
}
@@ -231,10 +231,10 @@ struct ClientSettings
try {
if (name == "ssh-auth-sock") // obsolete
;
- else if (name == settings.experimentalFeatures.name) {
+ else if (name == experimentalFeatureSettings.experimentalFeatures.name) {
// We don’t want to forward the experimental features to
// the daemon, as that could cause some pretty weird stuff
- if (parseFeatures(tokenizeString<StringSet>(value)) != settings.experimentalFeatures.get())
+ if (parseFeatures(tokenizeString<StringSet>(value)) != experimentalFeatureSettings.experimentalFeatures.get())
debug("Ignoring the client-specified experimental features");
} else if (name == settings.pluginFiles.name) {
if (tokenizeString<Paths>(value) != settings.pluginFiles.get())
@@ -263,7 +263,7 @@ static std::vector<DerivedPath> readDerivedPaths(Store & store, unsigned int cli
{
std::vector<DerivedPath> reqs;
if (GET_PROTOCOL_MINOR(clientVersion) >= 30) {
- reqs = worker_proto::read(store, from, Phantom<std::vector<DerivedPath>> {});
+ reqs = WorkerProto<std::vector<DerivedPath>>::read(store, from);
} else {
for (auto & s : readStrings<Strings>(from))
reqs.push_back(parsePathWithOutputs(store, s).toDerivedPath());
@@ -287,7 +287,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case wopQueryValidPaths: {
- auto paths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
+ auto paths = WorkerProto<StorePathSet>::read(*store, from);
SubstituteFlag substitute = NoSubstitute;
if (GET_PROTOCOL_MINOR(clientVersion) >= 27) {
@@ -300,7 +300,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
auto res = store->queryValidPaths(paths, substitute);
logger->stopWork();
- worker_proto::write(*store, to, res);
+ workerProtoWrite(*store, to, res);
break;
}
@@ -316,11 +316,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case wopQuerySubstitutablePaths: {
- auto paths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
+ auto paths = WorkerProto<StorePathSet>::read(*store, from);
logger->startWork();
auto res = store->querySubstitutablePaths(paths);
logger->stopWork();
- worker_proto::write(*store, to, res);
+ workerProtoWrite(*store, to, res);
break;
}
@@ -349,7 +349,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
paths = store->queryValidDerivers(path);
else paths = store->queryDerivationOutputs(path);
logger->stopWork();
- worker_proto::write(*store, to, paths);
+ workerProtoWrite(*store, to, paths);
break;
}
@@ -367,7 +367,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
logger->startWork();
auto outputs = store->queryPartialDerivationOutputMap(path);
logger->stopWork();
- worker_proto::write(*store, to, outputs);
+ workerProtoWrite(*store, to, outputs);
break;
}
@@ -393,7 +393,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
if (GET_PROTOCOL_MINOR(clientVersion) >= 25) {
auto name = readString(from);
auto camStr = readString(from);
- auto refs = worker_proto::read(*store, from, Phantom<StorePathSet> {});
+ auto refs = WorkerProto<StorePathSet>::read(*store, from);
bool repairBool;
from >> repairBool;
auto repair = RepairFlag{repairBool};
@@ -401,21 +401,25 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
logger->startWork();
auto pathInfo = [&]() {
// NB: FramedSource must be out of scope before logger->stopWork();
- ContentAddressMethod contentAddressMethod = parseContentAddressMethod(camStr);
+ auto [contentAddressMethod, hashType_] = ContentAddressMethod::parse(camStr);
+ auto hashType = hashType_; // work around clang bug
FramedSource source(from);
// TODO this is essentially RemoteStore::addCAToStore. Move it up to Store.
return std::visit(overloaded {
- [&](TextHashMethod &) {
+ [&](const TextIngestionMethod &) {
+ if (hashType != htSHA256)
+ throw UnimplementedError("When adding text-hashed data called '%s', only SHA-256 is supported but '%s' was given",
+ name, printHashType(hashType));
// We could stream this by changing Store
std::string contents = source.drain();
auto path = store->addTextToStore(name, contents, refs, repair);
return store->queryPathInfo(path);
},
- [&](FixedOutputHashMethod & fohm) {
- auto path = store->addToStoreFromDump(source, name, fohm.fileIngestionMethod, fohm.hashType, repair, refs);
+ [&](const FileIngestionMethod & fim) {
+ auto path = store->addToStoreFromDump(source, name, fim, hashType, repair, refs);
return store->queryPathInfo(path);
},
- }, contentAddressMethod);
+ }, contentAddressMethod.raw);
}();
logger->stopWork();
@@ -491,7 +495,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
case wopAddTextToStore: {
std::string suffix = readString(from);
std::string s = readString(from);
- auto refs = worker_proto::read(*store, from, Phantom<StorePathSet> {});
+ auto refs = WorkerProto<StorePathSet>::read(*store, from);
logger->startWork();
auto path = store->addTextToStore(suffix, s, refs, NoRepair);
logger->stopWork();
@@ -563,7 +567,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
auto results = store->buildPathsWithResults(drvs, mode);
logger->stopWork();
- worker_proto::write(*store, to, results);
+ workerProtoWrite(*store, to, results);
break;
}
@@ -637,7 +641,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
to << res.timesBuilt << res.isNonDeterministic << res.startTime << res.stopTime;
}
if (GET_PROTOCOL_MINOR(clientVersion) >= 28) {
- worker_proto::write(*store, to, res.builtOutputs);
+ DrvOutputs builtOutputs;
+ for (auto & [output, realisation] : res.builtOutputs)
+ builtOutputs.insert_or_assign(realisation.id, realisation);
+ workerProtoWrite(*store, to, builtOutputs);
}
break;
}
@@ -702,7 +709,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
case wopCollectGarbage: {
GCOptions options;
options.action = (GCOptions::GCAction) readInt(from);
- options.pathsToDelete = worker_proto::read(*store, from, Phantom<StorePathSet> {});
+ options.pathsToDelete = WorkerProto<StorePathSet>::read(*store, from);
from >> options.ignoreLiveness >> options.maxFreed;
// obsolete fields
readInt(from);
@@ -772,7 +779,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
else {
to << 1
<< (i->second.deriver ? store->printStorePath(*i->second.deriver) : "");
- worker_proto::write(*store, to, i->second.references);
+ workerProtoWrite(*store, to, i->second.references);
to << i->second.downloadSize
<< i->second.narSize;
}
@@ -783,11 +790,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
SubstitutablePathInfos infos;
StorePathCAMap pathsMap = {};
if (GET_PROTOCOL_MINOR(clientVersion) < 22) {
- auto paths = worker_proto::read(*store, from, Phantom<StorePathSet> {});
+ auto paths = WorkerProto<StorePathSet>::read(*store, from);
for (auto & path : paths)
pathsMap.emplace(path, std::nullopt);
} else
- pathsMap = worker_proto::read(*store, from, Phantom<StorePathCAMap> {});
+ pathsMap = WorkerProto<StorePathCAMap>::read(*store, from);
logger->startWork();
store->querySubstitutablePathInfos(pathsMap, infos);
logger->stopWork();
@@ -795,7 +802,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
for (auto & i : infos) {
to << store->printStorePath(i.first)
<< (i.second.deriver ? store->printStorePath(*i.second.deriver) : "");
- worker_proto::write(*store, to, i.second.references);
+ workerProtoWrite(*store, to, i.second.references);
to << i.second.downloadSize << i.second.narSize;
}
break;
@@ -805,7 +812,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
logger->startWork();
auto paths = store->queryAllValidPaths();
logger->stopWork();
- worker_proto::write(*store, to, paths);
+ workerProtoWrite(*store, to, paths);
break;
}
@@ -877,10 +884,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
ValidPathInfo info { path, narHash };
if (deriver != "")
info.deriver = store->parseStorePath(deriver);
- info.references = worker_proto::read(*store, from, Phantom<StorePathSet> {});
+ info.references = WorkerProto<StorePathSet>::read(*store, from);
from >> info.registrationTime >> info.narSize >> info.ultimate;
info.sigs = readStrings<StringSet>(from);
- info.ca = parseContentAddressOpt(readString(from));
+ info.ca = ContentAddress::parseOpt(readString(from));
from >> repair >> dontCheckSigs;
if (!trusted && dontCheckSigs)
dontCheckSigs = false;
@@ -928,9 +935,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
uint64_t downloadSize, narSize;
store->queryMissing(targets, willBuild, willSubstitute, unknown, downloadSize, narSize);
logger->stopWork();
- worker_proto::write(*store, to, willBuild);
- worker_proto::write(*store, to, willSubstitute);
- worker_proto::write(*store, to, unknown);
+ workerProtoWrite(*store, to, willBuild);
+ workerProtoWrite(*store, to, willSubstitute);
+ workerProtoWrite(*store, to, unknown);
to << downloadSize << narSize;
break;
}
@@ -943,7 +950,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
store->registerDrvOutput(Realisation{
.id = outputId, .outPath = outputPath});
} else {
- auto realisation = worker_proto::read(*store, from, Phantom<Realisation>());
+ auto realisation = WorkerProto<Realisation>::read(*store, from);
store->registerDrvOutput(realisation);
}
logger->stopWork();
@@ -958,11 +965,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
std::set<StorePath> outPaths;
if (info) outPaths.insert(info->outPath);
- worker_proto::write(*store, to, outPaths);
+ workerProtoWrite(*store, to, outPaths);
} else {
std::set<Realisation> realisations;
if (info) realisations.insert(*info);
- worker_proto::write(*store, to, realisations);
+ workerProtoWrite(*store, to, realisations);
}
break;
}
@@ -1032,6 +1039,15 @@ void processConnection(
if (GET_PROTOCOL_MINOR(clientVersion) >= 33)
to << nixVersion;
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 35) {
+ // We and the underlying store both need to trust the client for
+ // it to be trusted.
+ auto temp = trusted
+ ? store->isTrustedClient()
+ : std::optional { NotTrusted };
+ workerProtoWrite(*store, to, temp);
+ }
+
/* Send startup error messages to the client. */
tunnelLogger->startWork();
@@ -1055,6 +1071,8 @@ void processConnection(
opCount++;
+ debug("performing daemon worker op: %d", op);
+
try {
performOp(tunnelLogger, store, trusted, recursive, clientVersion, from, to, op);
} catch (Error & e) {
diff --git a/src/libstore/daemon.hh b/src/libstore/daemon.hh
index 8c765615c..1964c0d99 100644
--- a/src/libstore/daemon.hh
+++ b/src/libstore/daemon.hh
@@ -1,11 +1,11 @@
#pragma once
+///@file
#include "serialise.hh"
#include "store-api.hh"
namespace nix::daemon {
-enum TrustedFlag : bool { NotTrusted = false, Trusted = true };
enum RecursiveFlag : bool { NotRecursive = false, Recursive = true };
void processConnection(
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index 05dc9a3cc..ccb165d68 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -1,7 +1,9 @@
#include "derivations.hh"
+#include "downstream-placeholder.hh"
#include "store-api.hh"
#include "globals.hh"
#include "util.hh"
+#include "split.hh"
#include "worker-protocol.hh"
#include "fs-accessor.hh"
#include <boost/container/small_vector.hpp>
@@ -35,9 +37,9 @@ std::optional<StorePath> DerivationOutput::path(const Store & store, std::string
StorePath DerivationOutput::CAFixed::path(const Store & store, std::string_view drvName, std::string_view outputName) const
{
- return store.makeFixedOutputPath(
- hash.method, hash.hash,
- outputPathName(drvName, outputName));
+ return store.makeFixedOutputPathFromCA(
+ outputPathName(drvName, outputName),
+ ContentAddressWithReferences::withoutRefs(ca));
}
@@ -211,32 +213,30 @@ static StringSet parseStrings(std::istream & str, bool arePaths)
static DerivationOutput parseDerivationOutput(const Store & store,
- std::string_view pathS, std::string_view hashAlgo, std::string_view hash)
+ std::string_view pathS, std::string_view hashAlgo, std::string_view hashS)
{
if (hashAlgo != "") {
- auto method = FileIngestionMethod::Flat;
- if (hashAlgo.substr(0, 2) == "r:") {
- method = FileIngestionMethod::Recursive;
- hashAlgo = hashAlgo.substr(2);
- }
+ ContentAddressMethod method = ContentAddressMethod::parsePrefix(hashAlgo);
+ if (method == TextIngestionMethod {})
+ experimentalFeatureSettings.require(Xp::DynamicDerivations);
const auto hashType = parseHashType(hashAlgo);
- if (hash == "impure") {
- settings.requireExperimentalFeature(Xp::ImpureDerivations);
+ if (hashS == "impure") {
+ experimentalFeatureSettings.require(Xp::ImpureDerivations);
assert(pathS == "");
return DerivationOutput::Impure {
.method = std::move(method),
.hashType = std::move(hashType),
};
- } else if (hash != "") {
+ } else if (hashS != "") {
validatePath(pathS);
+ auto hash = Hash::parseNonSRIUnprefixed(hashS, hashType);
return DerivationOutput::CAFixed {
- .hash = FixedOutputHash {
- .method = std::move(method),
- .hash = Hash::parseNonSRIUnprefixed(hash, hashType),
- },
+ .ca = ContentAddress::fromParts(
+ std::move(method),
+ std::move(hash)),
};
} else {
- settings.requireExperimentalFeature(Xp::CaDerivations);
+ experimentalFeatureSettings.require(Xp::CaDerivations);
assert(pathS == "");
return DerivationOutput::CAFloating {
.method = std::move(method),
@@ -313,6 +313,15 @@ Derivation parseDerivation(const Store & store, std::string && s, std::string_vi
}
+/**
+ * Print a derivation string literal to an `std::string`.
+ *
+ * This syntax does not generalize to the expression language, which needs to
+ * escape `$`.
+ *
+ * @param res Where to print to
+ * @param s Which logical string to print
+ */
static void printString(std::string & res, std::string_view s)
{
boost::container::small_vector<char, 64 * 1024> buffer;
@@ -384,12 +393,12 @@ std::string Derivation::unparse(const Store & store, bool maskOutputs,
},
[&](const DerivationOutput::CAFixed & dof) {
s += ','; printUnquotedString(s, maskOutputs ? "" : store.printStorePath(dof.path(store, name, i.first)));
- s += ','; printUnquotedString(s, dof.hash.printMethodAlgo());
- s += ','; printUnquotedString(s, dof.hash.hash.to_string(Base16, false));
+ s += ','; printUnquotedString(s, dof.ca.printMethodAlgo());
+ s += ','; printUnquotedString(s, dof.ca.getHash().to_string(Base16, false));
},
[&](const DerivationOutput::CAFloating & dof) {
s += ','; printUnquotedString(s, "");
- s += ','; printUnquotedString(s, makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
+ s += ','; printUnquotedString(s, dof.method.renderPrefix() + printHashType(dof.hashType));
s += ','; printUnquotedString(s, "");
},
[&](const DerivationOutput::Deferred &) {
@@ -400,7 +409,7 @@ std::string Derivation::unparse(const Store & store, bool maskOutputs,
[&](const DerivationOutputImpure & doi) {
// FIXME
s += ','; printUnquotedString(s, "");
- s += ','; printUnquotedString(s, makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType));
+ s += ','; printUnquotedString(s, doi.method.renderPrefix() + printHashType(doi.hashType));
s += ','; printUnquotedString(s, "impure");
}
}, i.second.raw());
@@ -617,8 +626,8 @@ DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOut
for (const auto & i : drv.outputs) {
auto & dof = std::get<DerivationOutput::CAFixed>(i.second.raw());
auto hash = hashString(htSHA256, "fixed:out:"
- + dof.hash.printMethodAlgo() + ":"
- + dof.hash.hash.to_string(Base16, false) + ":"
+ + dof.ca.printMethodAlgo() + ":"
+ + dof.ca.getHash().to_string(Base16, false) + ":"
+ store.printStorePath(dof.path(store, drv.name, i.first)));
outputHashes.insert_or_assign(i.first, std::move(hash));
}
@@ -740,7 +749,7 @@ Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv,
drv.outputs.emplace(std::move(name), std::move(output));
}
- drv.inputSrcs = worker_proto::read(store, in, Phantom<StorePathSet> {});
+ drv.inputSrcs = WorkerProto<StorePathSet>::read(store, in);
in >> drv.platform >> drv.builder;
drv.args = readStrings<Strings>(in);
@@ -768,12 +777,12 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr
},
[&](const DerivationOutput::CAFixed & dof) {
out << store.printStorePath(dof.path(store, drv.name, i.first))
- << dof.hash.printMethodAlgo()
- << dof.hash.hash.to_string(Base16, false);
+ << dof.ca.printMethodAlgo()
+ << dof.ca.getHash().to_string(Base16, false);
},
[&](const DerivationOutput::CAFloating & dof) {
out << ""
- << (makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType))
+ << (dof.method.renderPrefix() + printHashType(dof.hashType))
<< "";
},
[&](const DerivationOutput::Deferred &) {
@@ -783,12 +792,12 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr
},
[&](const DerivationOutput::Impure & doi) {
out << ""
- << (makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType))
+ << (doi.method.renderPrefix() + printHashType(doi.hashType))
<< "impure";
},
}, i.second.raw());
}
- worker_proto::write(store, out, drv.inputSrcs);
+ workerProtoWrite(store, out, drv.inputSrcs);
out << drv.platform << drv.builder << drv.args;
out << drv.env.size();
for (auto & i : drv.env)
@@ -802,13 +811,7 @@ std::string hashPlaceholder(const std::string_view outputName)
return "/" + hashString(htSHA256, concatStrings("nix-output:", outputName)).to_string(Base32, false);
}
-std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath, std::string_view outputName)
-{
- auto drvNameWithExtension = drvPath.name();
- auto drvName = drvNameWithExtension.substr(0, drvNameWithExtension.size() - 4);
- auto clearText = "nix-upstream-output:" + std::string { drvPath.hashPart() } + ":" + outputPathName(drvName, outputName);
- return "/" + hashString(htSHA256, clearText).to_string(Base32, false);
-}
+
static void rewriteDerivation(Store & store, BasicDerivation & drv, const StringMap & rewrites)
@@ -872,7 +875,7 @@ std::optional<BasicDerivation> Derivation::tryResolve(
for (auto & outputName : inputOutputs) {
if (auto actualPath = get(inputDrvOutputs, { inputDrv, outputName })) {
inputRewrites.emplace(
- downstreamPlaceholder(store, inputDrv, outputName),
+ DownstreamPlaceholder::unknownCaOutput(inputDrv, outputName).render(),
store.printStorePath(*actualPath));
resolved.inputSrcs.insert(*actualPath);
} else {
@@ -889,6 +892,67 @@ std::optional<BasicDerivation> Derivation::tryResolve(
return resolved;
}
+
+void Derivation::checkInvariants(Store & store, const StorePath & drvPath) const
+{
+ assert(drvPath.isDerivation());
+ std::string drvName(drvPath.name());
+ drvName = drvName.substr(0, drvName.size() - drvExtension.size());
+
+ if (drvName != name) {
+ throw Error("Derivation '%s' has name '%s' which does not match its path", store.printStorePath(drvPath), name);
+ }
+
+ auto envHasRightPath = [&](const StorePath & actual, const std::string & varName)
+ {
+ auto j = env.find(varName);
+ if (j == env.end() || store.parseStorePath(j->second) != actual)
+ throw Error("derivation '%s' has incorrect environment variable '%s', should be '%s'",
+ store.printStorePath(drvPath), varName, store.printStorePath(actual));
+ };
+
+
+ // Don't need the answer, but do this anyway to assert it is a proper
+ // combination. The code below is more general and naturally allows
+ // combinations that are currently prohibited.
+ type();
+
+ std::optional<DrvHash> hashesModulo;
+ for (auto & i : outputs) {
+ std::visit(overloaded {
+ [&](const DerivationOutput::InputAddressed & doia) {
+ if (!hashesModulo) {
+ // somewhat expensive so we do lazily
+ hashesModulo = hashDerivationModulo(store, *this, true);
+ }
+ auto currentOutputHash = get(hashesModulo->hashes, i.first);
+ if (!currentOutputHash)
+ throw Error("derivation '%s' has unexpected output '%s' (local-store / hashesModulo) named '%s'",
+ store.printStorePath(drvPath), store.printStorePath(doia.path), i.first);
+ StorePath recomputed = store.makeOutputPath(i.first, *currentOutputHash, drvName);
+ if (doia.path != recomputed)
+ throw Error("derivation '%s' has incorrect output '%s', should be '%s'",
+ store.printStorePath(drvPath), store.printStorePath(doia.path), store.printStorePath(recomputed));
+ envHasRightPath(doia.path, i.first);
+ },
+ [&](const DerivationOutput::CAFixed & dof) {
+ auto path = dof.path(store, drvName, i.first);
+ envHasRightPath(path, i.first);
+ },
+ [&](const DerivationOutput::CAFloating &) {
+ /* Nothing to check */
+ },
+ [&](const DerivationOutput::Deferred &) {
+ /* Nothing to check */
+ },
+ [&](const DerivationOutput::Impure &) {
+ /* Nothing to check */
+ },
+ }, i.second.raw());
+ }
+}
+
+
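As a minimal sketch (not part of the patch), the new invariant check might be exercised like this, assuming an existing `Store` and the path of a `.drv` already in it:

```cpp
// Illustrative sketch, not part of the patch: read a derivation back from the
// store and run the new invariant checks on it. `store` and `drvPath` are
// assumed to exist already.
#include "derivations.hh"
#include "store-api.hh"

using namespace nix;

void verifyDerivation(Store & store, const StorePath & drvPath)
{
    auto drv = store.readDerivation(drvPath);
    // Throws if the name, the input-addressed output paths, or the
    // corresponding environment variables are inconsistent.
    drv.checkInvariants(store, drvPath);
}
```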
const Hash impureOutputHash = hashString(htSHA256, "impure");
nlohmann::json DerivationOutput::toJSON(
@@ -901,25 +965,97 @@ nlohmann::json DerivationOutput::toJSON(
},
[&](const DerivationOutput::CAFixed & dof) {
res["path"] = store.printStorePath(dof.path(store, drvName, outputName));
- res["hashAlgo"] = dof.hash.printMethodAlgo();
- res["hash"] = dof.hash.hash.to_string(Base16, false);
+ res["hashAlgo"] = dof.ca.printMethodAlgo();
+ res["hash"] = dof.ca.getHash().to_string(Base16, false);
+ // FIXME print refs?
},
[&](const DerivationOutput::CAFloating & dof) {
- res["hashAlgo"] = makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType);
+ res["hashAlgo"] = dof.method.renderPrefix() + printHashType(dof.hashType);
},
[&](const DerivationOutput::Deferred &) {},
[&](const DerivationOutput::Impure & doi) {
- res["hashAlgo"] = makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType);
+ res["hashAlgo"] = doi.method.renderPrefix() + printHashType(doi.hashType);
res["impure"] = true;
},
}, raw());
return res;
}
+
+DerivationOutput DerivationOutput::fromJSON(
+ const Store & store, std::string_view drvName, std::string_view outputName,
+ const nlohmann::json & _json,
+ const ExperimentalFeatureSettings & xpSettings)
+{
+ std::set<std::string_view> keys;
+ auto json = (std::map<std::string, nlohmann::json>) _json;
+
+ for (const auto & [key, _] : json)
+ keys.insert(key);
+
+ auto methodAlgo = [&]() -> std::pair<ContentAddressMethod, HashType> {
+ std::string hashAlgo = json["hashAlgo"];
+ // remaining to parse, will be mutated by parsers
+ std::string_view s = hashAlgo;
+ ContentAddressMethod method = ContentAddressMethod::parsePrefix(s);
+ if (method == TextIngestionMethod {})
+ xpSettings.require(Xp::DynamicDerivations);
+ auto hashType = parseHashType(s);
+ return { std::move(method), std::move(hashType) };
+ };
+
+ if (keys == (std::set<std::string_view> { "path" })) {
+ return DerivationOutput::InputAddressed {
+ .path = store.parseStorePath((std::string) json["path"]),
+ };
+ }
+
+ else if (keys == (std::set<std::string_view> { "path", "hashAlgo", "hash" })) {
+ auto [method, hashType] = methodAlgo();
+ auto dof = DerivationOutput::CAFixed {
+ .ca = ContentAddress::fromParts(
+ std::move(method),
+ Hash::parseNonSRIUnprefixed((std::string) json["hash"], hashType)),
+ };
+ if (dof.path(store, drvName, outputName) != store.parseStorePath((std::string) json["path"]))
+ throw Error("Path doesn't match derivation output");
+ return dof;
+ }
+
+ else if (keys == (std::set<std::string_view> { "hashAlgo" })) {
+ xpSettings.require(Xp::CaDerivations);
+ auto [method, hashType] = methodAlgo();
+ return DerivationOutput::CAFloating {
+ .method = std::move(method),
+ .hashType = std::move(hashType),
+ };
+ }
+
+ else if (keys == (std::set<std::string_view> { })) {
+ return DerivationOutput::Deferred {};
+ }
+
+ else if (keys == (std::set<std::string_view> { "hashAlgo", "impure" })) {
+ xpSettings.require(Xp::ImpureDerivations);
+ auto [method, hashType] = methodAlgo();
+ return DerivationOutput::Impure {
+ .method = std::move(method),
+ .hashType = hashType,
+ };
+ }
+
+ else {
+ throw Error("invalid JSON for derivation output");
+ }
+}
+
+
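For orientation, a hedged sketch (not part of the patch) of the key-set dispatch above: a floating content-addressed output is recognised purely by the key set `{ "hashAlgo" }`. The store, derivation name, and output name here are hypothetical, and the `ca-derivations` experimental feature must be enabled for this branch to parse.

```cpp
// Illustrative sketch, not part of the patch.
#include <nlohmann/json.hpp>
#include "derivations.hh"
#include "store-api.hh"

using namespace nix;

DerivationOutput parseFloatingOutput(const Store & store)
{
    // "r:" selects recursive (NAR) ingestion; a bare "sha256" would mean flat.
    auto json = nlohmann::json::parse(R"({ "hashAlgo": "r:sha256" })");
    return DerivationOutput::fromJSON(store, "example", "out", json);
}
```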
nlohmann::json Derivation::toJSON(const Store & store) const
{
nlohmann::json res = nlohmann::json::object();
+ res["name"] = name;
+
{
nlohmann::json & outputsObj = res["outputs"];
outputsObj = nlohmann::json::object();
@@ -950,4 +1086,43 @@ nlohmann::json Derivation::toJSON(const Store & store) const
return res;
}
+
+Derivation Derivation::fromJSON(
+ const Store & store,
+ const nlohmann::json & json)
+{
+ Derivation res;
+
+ res.name = json["name"];
+
+ {
+ auto & outputsObj = json["outputs"];
+ for (auto & [outputName, output] : outputsObj.items()) {
+ res.outputs.insert_or_assign(
+ outputName,
+ DerivationOutput::fromJSON(store, res.name, outputName, output));
+ }
+ }
+
+ {
+ auto & inputsList = json["inputSrcs"];
+ for (auto & input : inputsList)
+ res.inputSrcs.insert(store.parseStorePath(static_cast<const std::string &>(input)));
+ }
+
+ {
+ auto & inputDrvsObj = json["inputDrvs"];
+ for (auto & [inputDrvPath, inputOutputs] : inputDrvsObj.items())
+ res.inputDrvs[store.parseStorePath(inputDrvPath)] =
+ static_cast<const StringSet &>(inputOutputs);
+ }
+
+ res.platform = json["system"];
+ res.builder = json["builder"];
+ res.args = json["args"];
+ res.env = json["env"];
+
+ return res;
+}
+
}
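A short sketch (not part of the patch) of how the two new entry points compose: `toJSON` and `fromJSON` should round-trip a well-formed derivation, with equality provided by the `GENERATE_CMP` instances added to the headers below.

```cpp
// Illustrative sketch, not part of the patch. `store` and `drv` are assumed given.
#include "derivations.hh"
#include "store-api.hh"

using namespace nix;

bool roundTrips(const Store & store, const Derivation & drv)
{
    auto json = drv.toJSON(store);
    return Derivation::fromJSON(store, json) == drv;
}
```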
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index 8456b29e7..fa79f77fd 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -1,11 +1,14 @@
#pragma once
+///@file
#include "path.hh"
#include "types.hh"
#include "hash.hh"
#include "content-address.hh"
#include "repair-flag.hh"
+#include "derived-path.hh"
#include "sync.hh"
+#include "comparator.hh"
#include <map>
#include <variant>
@@ -17,43 +20,85 @@ class Store;
/* Abstract syntax of derivations. */
-/* The traditional non-fixed-output derivation type. */
+/**
+ * The traditional non-fixed-output derivation type.
+ */
struct DerivationOutputInputAddressed
{
StorePath path;
+
+ GENERATE_CMP(DerivationOutputInputAddressed, me->path);
};
-/* Fixed-output derivations, whose output paths are content addressed
- according to that fixed output. */
+/**
+ * Fixed-output derivations, whose output paths are content
+ * addressed according to that fixed output.
+ */
struct DerivationOutputCAFixed
{
- FixedOutputHash hash; /* hash used for expected hash computation */
+ /**
+ * Method and hash used for expected hash computation.
+ *
+ * References are not allowed by fiat.
+ */
+ ContentAddress ca;
+
+ /**
+ * Return the \ref StorePath "store path" corresponding to this output
+ *
+ * @param drvName The name of the derivation this is an output of, without the `.drv`.
+ * @param outputName The name of this output.
+ */
StorePath path(const Store & store, std::string_view drvName, std::string_view outputName) const;
+
+ GENERATE_CMP(DerivationOutputCAFixed, me->ca);
};
-/* Floating-output derivations, whose output paths are content addressed, but
- not fixed, and so are dynamically calculated from whatever the output ends
- up being. */
+/**
+ * Floating-output derivations, whose output paths are content
+ * addressed, but not fixed, and so are dynamically calculated from
+ * whatever the output ends up being.
+ */
struct DerivationOutputCAFloating
{
- /* information used for expected hash computation */
- FileIngestionMethod method;
+ /**
+ * How the file system objects will be serialized for hashing
+ */
+ ContentAddressMethod method;
+
+ /**
+ * How the serialization will be hashed
+ */
HashType hashType;
+
+ GENERATE_CMP(DerivationOutputCAFloating, me->method, me->hashType);
};
-/* Input-addressed output which depends on a (CA) derivation whose hash isn't
- * known yet.
+/**
+ * Input-addressed output which depends on a (CA) derivation whose hash
+ * isn't known yet.
*/
-struct DerivationOutputDeferred {};
+struct DerivationOutputDeferred {
+ GENERATE_CMP(DerivationOutputDeferred);
+};
-/* Impure output which is moved to a content-addressed location (like
- CAFloating) but isn't registered as a realization.
+/**
+ * Impure output which is moved to a content-addressed location (like
+ * CAFloating) but isn't registered as a realization.
*/
struct DerivationOutputImpure
{
- /* information used for expected hash computation */
- FileIngestionMethod method;
+ /**
+ * How the file system objects will be serialized for hashing
+ */
+ ContentAddressMethod method;
+
+ /**
+ * How the serialization will be hashed
+ */
HashType hashType;
+
+ GENERATE_CMP(DerivationOutputImpure, me->method, me->hashType);
};
typedef std::variant<
@@ -64,6 +109,9 @@ typedef std::variant<
DerivationOutputImpure
> _DerivationOutputRaw;
+/**
+ * A single output of a BasicDerivation (and Derivation).
+ */
struct DerivationOutput : _DerivationOutputRaw
{
using Raw = _DerivationOutputRaw;
@@ -75,9 +123,12 @@ struct DerivationOutput : _DerivationOutputRaw
using Deferred = DerivationOutputDeferred;
using Impure = DerivationOutputImpure;
- /* Note, when you use this function you should make sure that you're passing
- the right derivation name. When in doubt, you should use the safer
- interface provided by BasicDerivation::outputsAndOptPaths */
+ /**
+ * \note when you use this function you should make sure that you're
+ * passing the right derivation name. When in doubt, you should use
+ * the safer interface provided by
+ * BasicDerivation::outputsAndOptPaths
+ */
std::optional<StorePath> path(const Store & store, std::string_view drvName, std::string_view outputName) const;
inline const Raw & raw() const {
@@ -88,30 +139,74 @@ struct DerivationOutput : _DerivationOutputRaw
const Store & store,
std::string_view drvName,
std::string_view outputName) const;
+ /**
+ * @param xpSettings Stop-gap to avoid globals during unit tests.
+ */
+ static DerivationOutput fromJSON(
+ const Store & store,
+ std::string_view drvName,
+ std::string_view outputName,
+ const nlohmann::json & json,
+ const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
};
typedef std::map<std::string, DerivationOutput> DerivationOutputs;
-/* These are analogues to the previous DerivationOutputs data type, but they
- also contains, for each output, the (optional) store path in which it would
- be written. To calculate values of these types, see the corresponding
- functions in BasicDerivation */
+/**
+ * These are analogues to the previous DerivationOutputs data type,
+ * but they also contain, for each output, the (optional) store
+ * path in which it would be written. To calculate values of these
+ * types, see the corresponding functions in BasicDerivation.
+ */
typedef std::map<std::string, std::pair<DerivationOutput, std::optional<StorePath>>>
DerivationOutputsAndOptPaths;
-/* For inputs that are sub-derivations, we specify exactly which
- output IDs we are interested in. */
+/**
+ * For inputs that are sub-derivations, we specify exactly which
+ * output IDs we are interested in.
+ */
typedef std::map<StorePath, StringSet> DerivationInputs;
+/**
+ * Input-addressed derivation types
+ */
struct DerivationType_InputAddressed {
+ /**
+ * True iff the derivation type can't be determined statically,
+ * for instance because it (transitively) depends on a content-addressed
+ * derivation.
+ */
bool deferred;
};
+/**
+ * Content-addressed derivation types
+ */
struct DerivationType_ContentAddressed {
+ /**
+ * Whether the derivation should be built safely inside a sandbox.
+ */
bool sandboxed;
+ /**
+ * Whether the derivation's outputs' content-addresses are "fixed"
+ * or "floating.
+ *
+ * - Fixed: content-addresses are written down as part of the
+ * derivation itself. If the outputs don't end up matching, the
+ * build fails.
+ *
+ * - Floating: content-addresses are not written down, we do not
+ * know them until we perform the build.
+ */
bool fixed;
};
+/**
+ * Impure derivation type
+ *
+ * This is similar at build time to the content-addressed, not sandboxed, not fixed
+ * type, but has some restrictions on its usage.
+ */
struct DerivationType_Impure {
};
@@ -128,30 +223,38 @@ struct DerivationType : _DerivationTypeRaw {
using ContentAddressed = DerivationType_ContentAddressed;
using Impure = DerivationType_Impure;
- /* Do the outputs of the derivation have paths calculated from their content,
- or from the derivation itself? */
+ /**
+ * Do the outputs of the derivation have paths calculated from their
+ * content, or from the derivation itself?
+ */
bool isCA() const;
- /* Is the content of the outputs fixed a-priori via a hash? Never true for
- non-CA derivations. */
+ /**
+ * Is the content of the outputs fixed <em>a priori</em> via a hash?
+ * Never true for non-CA derivations.
+ */
bool isFixed() const;
- /* Whether the derivation is fully sandboxed. If false, the
- sandbox is opened up, e.g. the derivation has access to the
- network. Note that whether or not we actually sandbox the
- derivation is controlled separately. Always true for non-CA
- derivations. */
+ /**
+ * Whether the derivation is fully sandboxed. If false, the sandbox
+ * is opened up, e.g. the derivation has access to the network. Note
+ * that whether or not we actually sandbox the derivation is
+ * controlled separately. Always true for non-CA derivations.
+ */
bool isSandboxed() const;
- /* Whether the derivation is expected to produce the same result
- every time, and therefore it only needs to be built once. This
- is only false for derivations that have the attribute '__impure
- = true'. */
+ /**
+ * Whether the derivation is expected to produce the same result
+ * every time, and therefore it only needs to be built once. This is
+ * only false for derivations that have the attribute '__impure =
+ * true'.
+ */
bool isPure() const;
- /* Does the derivation knows its own output paths?
- Only true when there's no floating-ca derivation involved in the
- closure, or if fixed output.
+ /**
+ * Does the derivation know its own output paths?
+ * Only true when there's no floating-ca derivation involved in the
+ * closure, or if fixed output.
*/
bool hasKnownOutputPaths() const;
@@ -162,8 +265,14 @@ struct DerivationType : _DerivationTypeRaw {
struct BasicDerivation
{
- DerivationOutputs outputs; /* keyed on symbolic IDs */
- StorePathSet inputSrcs; /* inputs that are sources */
+ /**
+ * keyed on symbolic IDs
+ */
+ DerivationOutputs outputs;
+ /**
+ * inputs that are sources
+ */
+ StorePathSet inputSrcs;
std::string platform;
Path builder;
Strings args;
@@ -175,128 +284,197 @@ struct BasicDerivation
bool isBuiltin() const;
- /* Return true iff this is a fixed-output derivation. */
+ /**
+ * Return true iff this is a fixed-output derivation.
+ */
DerivationType type() const;
- /* Return the output names of a derivation. */
+ /**
+ * Return the output names of a derivation.
+ */
StringSet outputNames() const;
- /* Calculates the maps that contains all the DerivationOutputs, but
- augmented with knowledge of the Store paths they would be written
- into. */
+ /**
+ * Calculates the maps that contain all the DerivationOutputs, but
+ * augmented with knowledge of the Store paths they would be written
+ * into.
+ */
DerivationOutputsAndOptPaths outputsAndOptPaths(const Store & store) const;
static std::string_view nameFromPath(const StorePath & storePath);
+
+ GENERATE_CMP(BasicDerivation,
+ me->outputs,
+ me->inputSrcs,
+ me->platform,
+ me->builder,
+ me->args,
+ me->env,
+ me->name);
};
struct Derivation : BasicDerivation
{
- DerivationInputs inputDrvs; /* inputs that are sub-derivations */
+ /**
+ * inputs that are sub-derivations
+ */
+ DerivationInputs inputDrvs;
- /* Print a derivation. */
+ /**
+ * Print a derivation.
+ */
std::string unparse(const Store & store, bool maskOutputs,
std::map<std::string, StringSet> * actualInputs = nullptr) const;
- /* Return the underlying basic derivation but with these changes:
-
- 1. Input drvs are emptied, but the outputs of them that were used are
- added directly to input sources.
-
- 2. Input placeholders are replaced with realized input store paths. */
+ /**
+ * Return the underlying basic derivation but with these changes:
+ *
+ * 1. Input drvs are emptied, but the outputs of them that were used
+ * are added directly to input sources.
+ *
+ * 2. Input placeholders are replaced with realized input store
+ * paths.
+ */
std::optional<BasicDerivation> tryResolve(Store & store) const;
- /* Like the above, but instead of querying the Nix database for
- realisations, uses a given mapping from input derivation paths
- + output names to actual output store paths. */
+ /**
+ * Like the above, but instead of querying the Nix database for
+ * realisations, uses a given mapping from input derivation paths +
+ * output names to actual output store paths.
+ */
std::optional<BasicDerivation> tryResolve(
Store & store,
const std::map<std::pair<StorePath, std::string>, StorePath> & inputDrvOutputs) const;
+ /**
+ * Check that the derivation is valid and does not present any
+ * illegal states.
+ *
+ * This is mainly a matter of checking the outputs, where our C++
+ * representation supports all sorts of combinations we do not yet
+ * allow.
+ */
+ void checkInvariants(Store & store, const StorePath & drvPath) const;
+
Derivation() = default;
Derivation(const BasicDerivation & bd) : BasicDerivation(bd) { }
Derivation(BasicDerivation && bd) : BasicDerivation(std::move(bd)) { }
nlohmann::json toJSON(const Store & store) const;
+ static Derivation fromJSON(
+ const Store & store,
+ const nlohmann::json & json);
+
+ GENERATE_CMP(Derivation,
+ static_cast<const BasicDerivation &>(*me),
+ me->inputDrvs);
};
class Store;
-/* Write a derivation to the Nix store, and return its path. */
+/**
+ * Write a derivation to the Nix store, and return its path.
+ */
StorePath writeDerivation(Store & store,
const Derivation & drv,
RepairFlag repair = NoRepair,
bool readOnly = false);
-/* Read a derivation from a file. */
+/**
+ * Read a derivation from a file.
+ */
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name);
-// FIXME: remove
+/**
+ * \todo Remove.
+ *
+ * Use Path::isDerivation instead.
+ */
bool isDerivation(std::string_view fileName);
-/* Calculate the name that will be used for the store path for this
- output.
-
- This is usually <drv-name>-<output-name>, but is just <drv-name> when
- the output name is "out". */
+/**
+ * Calculate the name that will be used for the store path for this
+ * output.
+ *
+ * This is usually <drv-name>-<output-name>, but is just <drv-name> when
+ * the output name is "out".
+ */
std::string outputPathName(std::string_view drvName, std::string_view outputName);
-// The hashes modulo of a derivation.
-//
-// Each output is given a hash, although in practice only the content-addressed
-// derivations (fixed-output or not) will have a different hash for each
-// output.
+/**
+ * The hashes modulo of a derivation.
+ *
+ * Each output is given a hash, although in practice only the content-addressed
+ * derivations (fixed-output or not) will have a different hash for each
+ * output.
+ */
struct DrvHash {
+ /**
+ * Map from output names to hashes
+ */
std::map<std::string, Hash> hashes;
enum struct Kind : bool {
- // Statically determined derivations.
- // This hash will be directly used to compute the output paths
+ /**
+ * Statically determined derivations.
+ * This hash will be directly used to compute the output paths
+ */
Regular,
- // Floating-output derivations (and their reverse dependencies).
+
+ /**
+ * Floating-output derivations (and their reverse dependencies).
+ */
Deferred,
};
+ /**
+ * The kind of derivation this is, simplified for just "derivation hash
+ * modulo" purposes.
+ */
Kind kind;
};
void operator |= (DrvHash::Kind & self, const DrvHash::Kind & other) noexcept;
-/* Returns hashes with the details of fixed-output subderivations
- expunged.
-
- A fixed-output derivation is a derivation whose outputs have a
- specified content hash and hash algorithm. (Currently they must have
- exactly one output (`out'), which is specified using the `outputHash'
- and `outputHashAlgo' attributes, but the algorithm doesn't assume
- this.) We don't want changes to such derivations to propagate upwards
- through the dependency graph, changing output paths everywhere.
-
- For instance, if we change the url in a call to the `fetchurl'
- function, we do not want to rebuild everything depending on it---after
- all, (the hash of) the file being downloaded is unchanged. So the
- *output paths* should not change. On the other hand, the *derivation
- paths* should change to reflect the new dependency graph.
-
- For fixed-output derivations, this returns a map from the name of
- each output to its hash, unique up to the output's contents.
-
- For regular derivations, it returns a single hash of the derivation
- ATerm, after subderivations have been likewise expunged from that
- derivation.
+/**
+ * Returns hashes with the details of fixed-output subderivations
+ * expunged.
+ *
+ * A fixed-output derivation is a derivation whose outputs have a
+ * specified content hash and hash algorithm. (Currently they must have
+ * exactly one output (`out`), which is specified using the `outputHash`
+ * and `outputHashAlgo` attributes, but the algorithm doesn't assume
+ * this.) We don't want changes to such derivations to propagate upwards
+ * through the dependency graph, changing output paths everywhere.
+ *
+ * For instance, if we change the url in a call to the `fetchurl`
+ * function, we do not want to rebuild everything depending on it---after
+ * all, (the hash of) the file being downloaded is unchanged. So the
+ * *output paths* should not change. On the other hand, the *derivation
+ * paths* should change to reflect the new dependency graph.
+ *
+ * For fixed-output derivations, this returns a map from the name of
+ * each output to its hash, unique up to the output's contents.
+ *
+ * For regular derivations, it returns a single hash of the derivation
+ * ATerm, after subderivations have been likewise expunged from that
+ * derivation.
*/
DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs);
-/*
- Return a map associating each output to a hash that uniquely identifies its
- derivation (modulo the self-references).
-
- FIXME: what is the Hash in this map?
+/**
+ * Return a map associating each output to a hash that uniquely identifies its
+ * derivation (modulo the self-references).
+ *
+ * \todo What is the Hash in this map?
*/
std::map<std::string, Hash> staticOutputHashes(Store & store, const Derivation & drv);
-/* Memoisation of hashDerivationModulo(). */
+/**
+ * Memoisation of hashDerivationModulo().
+ */
typedef std::map<StorePath, DrvHash> DrvHashes;
// FIXME: global, though at least thread-safe.
@@ -308,23 +486,16 @@ struct Sink;
Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv, std::string_view name);
void writeDerivation(Sink & out, const Store & store, const BasicDerivation & drv);
-/* This creates an opaque and almost certainly unique string
- deterministically from the output name.
-
- It is used as a placeholder to allow derivations to refer to their
- own outputs without needing to use the hash of a derivation in
- itself, making the hash near-impossible to calculate. */
+/**
+ * This creates an opaque and almost certainly unique string
+ * deterministically from the output name.
+ *
+ * It is used as a placeholder to allow derivations to refer to their
+ * own outputs without needing to use the hash of a derivation in
+ * itself, making the hash near-impossible to calculate.
+ */
std::string hashPlaceholder(const std::string_view outputName);
-/* This creates an opaque and almost certainly unique string
- deterministically from a derivation path and output name.
-
- It is used as a placeholder to allow derivations to refer to
- content-addressed paths whose content --- and thus the path
- themselves --- isn't yet known. This occurs when a derivation has a
- dependency which is a CA derivation. */
-std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath, std::string_view outputName);
-
extern const Hash impureOutputHash;
}
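A tiny usage sketch (not part of the patch) for the placeholder helper retained above, assuming the conventional `out` output name:

```cpp
// Illustrative sketch, not part of the patch: hashPlaceholder() yields the
// opaque token a derivation can embed to refer to its own, not yet built,
// "out" output; the builder later sees it rewritten to the real path.
#include "derivations.hh"

using namespace nix;

std::string selfOutputToken()
{
    return hashPlaceholder("out");   // "/" + base-32 SHA-256 of "nix-output:out"
}
```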
diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc
index e0d86a42f..9a2ffda39 100644
--- a/src/libstore/derived-path.cc
+++ b/src/libstore/derived-path.cc
@@ -62,15 +62,31 @@ std::string DerivedPath::Opaque::to_string(const Store & store) const
std::string DerivedPath::Built::to_string(const Store & store) const
{
return store.printStorePath(drvPath)
- + "!"
+ + '^'
+ + outputs.to_string();
+}
+
+std::string DerivedPath::Built::to_string_legacy(const Store & store) const
+{
+ return store.printStorePath(drvPath)
+ + '!'
+ + outputs.to_string();
}
std::string DerivedPath::to_string(const Store & store) const
{
- return std::visit(
- [&](const auto & req) { return req.to_string(store); },
- this->raw());
+ return std::visit(overloaded {
+ [&](const DerivedPath::Built & req) { return req.to_string(store); },
+ [&](const DerivedPath::Opaque & req) { return req.to_string(store); },
+ }, this->raw());
+}
+
+std::string DerivedPath::to_string_legacy(const Store & store) const
+{
+ return std::visit(overloaded {
+ [&](const DerivedPath::Built & req) { return req.to_string_legacy(store); },
+ [&](const DerivedPath::Opaque & req) { return req.to_string(store); },
+ }, this->raw());
}
@@ -87,14 +103,24 @@ DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_vi
};
}
-DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
+static inline DerivedPath parseWith(const Store & store, std::string_view s, std::string_view separator)
{
- size_t n = s.find("!");
+ size_t n = s.find(separator);
return n == s.npos
? (DerivedPath) DerivedPath::Opaque::parse(store, s)
: (DerivedPath) DerivedPath::Built::parse(store, s.substr(0, n), s.substr(n + 1));
}
+DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
+{
+ return parseWith(store, s, "^");
+}
+
+DerivedPath DerivedPath::parseLegacy(const Store & store, std::string_view s)
+{
+ return parseWith(store, s, "!");
+}
+
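A hedged sketch (not part of the patch) of the two syntaxes side by side; `drvPath` stands for the printed store path of some existing `.drv` file and its concrete value is hypothetical:

```cpp
// Illustrative sketch, not part of the patch.
#include <string>
#include <utility>
#include "derived-path.hh"
#include "store-api.hh"

using namespace nix;

std::pair<DerivedPath, DerivedPath> parseBothSyntaxes(
    const Store & store, const std::string & drvPath)
{
    // New syntax: <drv path>^<outputs>
    auto modern = DerivedPath::parse(store, drvPath + "^out");
    // Old syntax, still accepted via the legacy entry point: <drv path>!<outputs>
    auto legacy = DerivedPath::parseLegacy(store, drvPath + "!out");
    return {modern, legacy};
}
```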
RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
{
RealisedPath::Set res;
@@ -105,7 +131,7 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
auto drvHashes =
staticOutputHashes(store, store.readDerivation(p.drvPath));
for (auto& [outputName, outputPath] : p.outputs) {
- if (settings.isExperimentalFeatureEnabled(
+ if (experimentalFeatureSettings.isEnabled(
Xp::CaDerivations)) {
auto drvOutput = get(drvHashes, outputName);
if (!drvOutput)
diff --git a/src/libstore/derived-path.hh b/src/libstore/derived-path.hh
index 9e0cce377..5f7acbebc 100644
--- a/src/libstore/derived-path.hh
+++ b/src/libstore/derived-path.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "util.hh"
#include "path.hh"
@@ -47,8 +48,18 @@ struct DerivedPathBuilt {
StorePath drvPath;
OutputsSpec outputs;
+ /**
+ * Uses `^` as the separator
+ */
std::string to_string(const Store & store) const;
- static DerivedPathBuilt parse(const Store & store, std::string_view, std::string_view);
+ /**
+ * Uses `!` as the separator
+ */
+ std::string to_string_legacy(const Store & store) const;
+ /**
+ * The caller splits on the separator, so it works for both variants.
+ */
+ static DerivedPathBuilt parse(const Store & store, std::string_view drvPath, std::string_view outputs);
nlohmann::json toJSON(ref<Store> store) const;
GENERATE_CMP(DerivedPathBuilt, me->drvPath, me->outputs);
@@ -80,8 +91,22 @@ struct DerivedPath : _DerivedPathRaw {
return static_cast<const Raw &>(*this);
}
+ /**
+ * Uses `^` as the separator
+ */
std::string to_string(const Store & store) const;
+ /**
+ * Uses `!` as the separator
+ */
+ std::string to_string_legacy(const Store & store) const;
+ /**
+ * Uses `^` as the separator
+ */
static DerivedPath parse(const Store & store, std::string_view);
+ /**
+ * Uses `!` as the separator
+ */
+ static DerivedPath parseLegacy(const Store & store, std::string_view);
};
/**
@@ -105,7 +130,7 @@ using _BuiltPathRaw = std::variant<
>;
/**
- * A built path. Similar to a `DerivedPath`, but enriched with the corresponding
+ * A built path. Similar to a DerivedPath, but enriched with the corresponding
* output path(s).
*/
struct BuiltPath : _BuiltPathRaw {
diff --git a/src/libstore/downstream-placeholder.cc b/src/libstore/downstream-placeholder.cc
new file mode 100644
index 000000000..1752738f2
--- /dev/null
+++ b/src/libstore/downstream-placeholder.cc
@@ -0,0 +1,39 @@
+#include "downstream-placeholder.hh"
+#include "derivations.hh"
+
+namespace nix {
+
+std::string DownstreamPlaceholder::render() const
+{
+ return "/" + hash.to_string(Base32, false);
+}
+
+
+DownstreamPlaceholder DownstreamPlaceholder::unknownCaOutput(
+ const StorePath & drvPath,
+ std::string_view outputName)
+{
+ auto drvNameWithExtension = drvPath.name();
+ auto drvName = drvNameWithExtension.substr(0, drvNameWithExtension.size() - 4);
+ auto clearText = "nix-upstream-output:" + std::string { drvPath.hashPart() } + ":" + outputPathName(drvName, outputName);
+ return DownstreamPlaceholder {
+ hashString(htSHA256, clearText)
+ };
+}
+
+DownstreamPlaceholder DownstreamPlaceholder::unknownDerivation(
+ const DownstreamPlaceholder & placeholder,
+ std::string_view outputName,
+ const ExperimentalFeatureSettings & xpSettings)
+{
+ xpSettings.require(Xp::DynamicDerivations);
+ auto compressed = compressHash(placeholder.hash, 20);
+ auto clearText = "nix-computed-output:"
+ + compressed.to_string(Base32, false)
+ + ":" + std::string { outputName };
+ return DownstreamPlaceholder {
+ hashString(htSHA256, clearText)
+ };
+}
+
+}
diff --git a/src/libstore/downstream-placeholder.hh b/src/libstore/downstream-placeholder.hh
new file mode 100644
index 000000000..f0c0dee77
--- /dev/null
+++ b/src/libstore/downstream-placeholder.hh
@@ -0,0 +1,75 @@
+#pragma once
+///@file
+
+#include "hash.hh"
+#include "path.hh"
+
+namespace nix {
+
+/**
+ * Downstream Placeholders are opaque and almost certainly unique values
+ * used to allow derivations to refer to store objects which are yet to
+ * be built and for which we do not yet have store paths.
+ *
+ * They correspond to `DerivedPaths` that are not `DerivedPath::Opaque`,
+ * except for the cases involving input addressing or fixed outputs
+ * where we do know a store path for the derivation output in advance.
+ *
+ * Unlike `DerivedPath`, however, `DownstreamPlaceholder` is
+ * purposefully opaque and obfuscated. This is so they are hard to
+ * create by accident, and so substituting them (once we know what the
+ * path to the store object is) is unlikely to capture other stuff it
+ * shouldn't.
+ *
+ * We use them with `Derivation`: the `render()` method is called to
+ * render an opaque string which can be used in the derivation, and the
+ * resolving logic can substitute those strings for store paths when
+ * resolving `Derivation.inputDrvs` to `BasicDerivation.inputSrcs`.
+ */
+class DownstreamPlaceholder
+{
+ /**
+ * `DownstreamPlaceholder` is just a newtype of `Hash`.
+ * This is its only field.
+ */
+ Hash hash;
+
+ /**
+ * Newtype constructor
+ */
+ DownstreamPlaceholder(Hash hash) : hash(hash) { }
+
+public:
+ /**
+ * This creates an opaque and almost certainly unique string
+ * deterministically from the placeholder.
+ */
+ std::string render() const;
+
+ /**
+ * Create a placeholder for an unknown output of a content-addressed
+ * derivation.
+ *
+ * The derivation itself is known (we have a store path for it), but
+ * the output doesn't yet have a known store path.
+ */
+ static DownstreamPlaceholder unknownCaOutput(
+ const StorePath & drvPath,
+ std::string_view outputName);
+
+ /**
+ * Create a placeholder for the output of an unknown derivation.
+ *
+ * The derivation is not yet known because it is a dynamic
+ * derivation --- it is itself an output of another derivation ---
+ * and we just have (another) placeholder for it.
+ *
+ * @param xpSettings Stop-gap to avoid globals during unit tests.
+ */
+ static DownstreamPlaceholder unknownDerivation(
+ const DownstreamPlaceholder & drvPlaceholder,
+ std::string_view outputName,
+ const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings);
+};
+
+}
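A brief usage sketch (not part of the patch) of the new class, assuming the `.drv` path of a content-addressed derivation is already known:

```cpp
// Illustrative sketch, not part of the patch: render the placeholder for the
// "out" output of a content-addressed derivation whose .drv path we have.
#include "downstream-placeholder.hh"

using namespace nix;

std::string placeholderForOut(const StorePath & drvPath)
{
    auto placeholder = DownstreamPlaceholder::unknownCaOutput(drvPath, "out");
    return placeholder.render();   // "/" + base-32 SHA-256, never a real path
}
```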
diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc
index b4fbe0b70..74d6ed3b5 100644
--- a/src/libstore/dummy-store.cc
+++ b/src/libstore/dummy-store.cc
@@ -7,6 +7,13 @@ struct DummyStoreConfig : virtual StoreConfig {
using StoreConfig::StoreConfig;
const std::string name() override { return "Dummy Store"; }
+
+ std::string doc() override
+ {
+ return
+ #include "dummy-store.md"
+ ;
+ }
};
struct DummyStore : public virtual DummyStoreConfig, public virtual Store
@@ -32,6 +39,14 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store
callback(nullptr);
}
+ /**
+ * The dummy store is incapable of *not* trusting! :)
+ */
+ virtual std::optional<TrustedFlag> isTrustedClient() override
+ {
+ return Trusted;
+ }
+
static std::set<std::string> uriSchemes() {
return {"dummy"};
}
@@ -56,6 +71,9 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store
void queryRealisationUncached(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept override
{ callback(nullptr); }
+
+ virtual ref<FSAccessor> getFSAccessor() override
+ { unsupported("getFSAccessor"); }
};
static RegisterStoreImplementation<DummyStore, DummyStoreConfig> regDummyStore;
diff --git a/src/libstore/dummy-store.md b/src/libstore/dummy-store.md
new file mode 100644
index 000000000..eb7b4ba0d
--- /dev/null
+++ b/src/libstore/dummy-store.md
@@ -0,0 +1,13 @@
+R"(
+
+**Store URL format**: `dummy://`
+
+This store type represents a store that contains no store paths and
+cannot be written to. It's useful when you want to use the Nix
+evaluator when no actual Nix store exists, e.g.
+
+```console
+# nix eval --store dummy:// --expr '1 + 2'
+```
+
+)"
diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc
index 9875da909..5ea263a86 100644
--- a/src/libstore/export-import.cc
+++ b/src/libstore/export-import.cc
@@ -16,7 +16,7 @@ void Store::exportPaths(const StorePathSet & paths, Sink & sink)
//logger->incExpected(doneLabel, sorted.size());
for (auto & path : sorted) {
- //Activity act(*logger, lvlInfo, format("exporting path '%s'") % path);
+ //Activity act(*logger, lvlInfo, "exporting path '%s'", path);
sink << 1;
exportPath(path, sink);
//logger->incProgress(doneLabel);
@@ -45,7 +45,7 @@ void Store::exportPath(const StorePath & path, Sink & sink)
teeSink
<< exportMagic
<< printStorePath(path);
- worker_proto::write(*this, teeSink, info->references);
+ workerProtoWrite(*this, teeSink, info->references);
teeSink
<< (info->deriver ? printStorePath(*info->deriver) : "")
<< 0;
@@ -71,9 +71,9 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
auto path = parseStorePath(readString(source));
- //Activity act(*logger, lvlInfo, format("importing path '%s'") % info.path);
+ //Activity act(*logger, lvlInfo, "importing path '%s'", info.path);
- auto references = worker_proto::read(*this, source, Phantom<StorePathSet> {});
+ auto references = WorkerProto<StorePathSet>::read(*this, source);
auto deriver = readString(source);
auto narHash = hashString(htSHA256, saved.s);
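The serializer rename above follows a pattern used throughout the patch; a minimal sketch (not part of the patch), assuming an existing store, source, and sink:

```cpp
// Illustrative sketch, not part of the patch: the templated WorkerProto reader
// and the workerProtoWrite helper that replace the old worker_proto::read /
// worker_proto::write free functions.
#include "serialise.hh"
#include "store-api.hh"
#include "worker-protocol.hh"

using namespace nix;

void echoStorePathSet(const Store & store, Source & in, Sink & out)
{
    auto paths = WorkerProto<StorePathSet>::read(store, in);
    workerProtoWrite(store, out, paths);
}
```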
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index 6a4778d1f..2346accbe 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -185,7 +185,7 @@ struct curlFileTransfer : public FileTransfer
{
size_t realSize = size * nmemb;
std::string line((char *) contents, realSize);
- printMsg(lvlVomit, format("got header for '%s': %s") % request.uri % trim(line));
+ printMsg(lvlVomit, "got header for '%s': %s", request.uri, trim(line));
static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)", std::regex::extended | std::regex::icase);
std::smatch match;
if (std::regex_match(line, match, statusLine)) {
@@ -209,7 +209,7 @@ struct curlFileTransfer : public FileTransfer
long httpStatus = 0;
curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);
if (result.etag == request.expectedETag && httpStatus == 200) {
- debug(format("shutting down on 200 HTTP response with expected ETag"));
+ debug("shutting down on 200 HTTP response with expected ETag");
return 0;
}
} else if (name == "content-encoding")
@@ -318,7 +318,7 @@ struct curlFileTransfer : public FileTransfer
if (request.verifyTLS) {
if (settings.caFile != "")
- curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
+ curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.get().c_str());
} else {
curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0);
curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
@@ -407,6 +407,10 @@ struct curlFileTransfer : public FileTransfer
err = Misc;
} else {
// Don't bother retrying on certain cURL errors either
+
+ // Allow selecting a subset of enum values
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wswitch-enum"
switch (code) {
case CURLE_FAILED_INIT:
case CURLE_URL_MALFORMAT:
@@ -427,6 +431,7 @@ struct curlFileTransfer : public FileTransfer
default: // Shut up warnings
break;
}
+ #pragma GCC diagnostic pop
}
attempt++;
diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh
index 07d58f53a..378c6ff78 100644
--- a/src/libstore/filetransfer.hh
+++ b/src/libstore/filetransfer.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "hash.hh"
@@ -87,39 +88,56 @@ struct FileTransfer
{
virtual ~FileTransfer() { }
- /* Enqueue a data transfer request, returning a future to the result of
- the download. The future may throw a FileTransferError
- exception. */
+ /**
+ * Enqueue a data transfer request, returning a future to the result of
+ * the download. The future may throw a FileTransferError
+ * exception.
+ */
virtual void enqueueFileTransfer(const FileTransferRequest & request,
Callback<FileTransferResult> callback) = 0;
std::future<FileTransferResult> enqueueFileTransfer(const FileTransferRequest & request);
- /* Synchronously download a file. */
+ /**
+ * Synchronously download a file.
+ */
FileTransferResult download(const FileTransferRequest & request);
- /* Synchronously upload a file. */
+ /**
+ * Synchronously upload a file.
+ */
FileTransferResult upload(const FileTransferRequest & request);
- /* Download a file, writing its data to a sink. The sink will be
- invoked on the thread of the caller. */
+ /**
+ * Download a file, writing its data to a sink. The sink will be
+ * invoked on the thread of the caller.
+ */
void download(FileTransferRequest && request, Sink & sink);
enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
};
-/* Return a shared FileTransfer object. Using this object is preferred
- because it enables connection reuse and HTTP/2 multiplexing. */
+/**
+ * @return a shared FileTransfer object.
+ *
+ * Using this object is preferred because it enables connection reuse
+ * and HTTP/2 multiplexing.
+ */
ref<FileTransfer> getFileTransfer();
-/* Return a new FileTransfer object. */
+/**
+ * @return a new FileTransfer object
+ *
+ * Prefer getFileTransfer() to this; see its docs for why.
+ */
ref<FileTransfer> makeFileTransfer();
class FileTransferError : public Error
{
public:
FileTransfer::Error error;
- std::optional<std::string> response; // intentionally optional
+ /// intentionally optional
+ std::optional<std::string> response;
template<typename... Args>
FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args);
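A small sketch (not part of the patch) of the preferred call path described above; the URL and the request construction details are assumptions, not taken from this diff:

```cpp
// Illustrative sketch, not part of the patch: a one-off synchronous download
// through the shared FileTransfer object, which reuses connections and
// multiplexes over HTTP/2 (preferred over makeFileTransfer()).
#include "filetransfer.hh"

using namespace nix;

std::string fetchBody(const std::string & url)
{
    FileTransferRequest request(url);
    auto result = getFileTransfer()->download(request);
    return result.data;
}
```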
diff --git a/src/libstore/fs-accessor.hh b/src/libstore/fs-accessor.hh
index c825e84f2..1df19e647 100644
--- a/src/libstore/fs-accessor.hh
+++ b/src/libstore/fs-accessor.hh
@@ -1,11 +1,14 @@
#pragma once
+///@file
#include "types.hh"
namespace nix {
-/* An abstract class for accessing a filesystem-like structure, such
- as a (possibly remote) Nix store or the contents of a NAR file. */
+/**
+ * An abstract class for accessing a filesystem-like structure, such
+ * as a (possibly remote) Nix store or the contents of a NAR file.
+ */
class FSAccessor
{
public:
@@ -14,8 +17,17 @@ public:
struct Stat
{
Type type = tMissing;
- uint64_t fileSize = 0; // regular files only
+ /**
+ * regular files only
+ */
+ uint64_t fileSize = 0;
+ /**
+ * regular files only
+ */
bool isExecutable = false; // regular files only
+ /**
+ * regular files only
+ */
uint64_t narOffset = 0; // regular files only
};
diff --git a/src/libstore/gc-store.hh b/src/libstore/gc-store.hh
index b3cbbad74..2c26c65c4 100644
--- a/src/libstore/gc-store.hh
+++ b/src/libstore/gc-store.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "store-api.hh"
@@ -11,19 +12,20 @@ typedef std::unordered_map<StorePath, std::unordered_set<std::string>> Roots;
struct GCOptions
{
- /* Garbage collector operation:
-
- - `gcReturnLive': return the set of paths reachable from
- (i.e. in the closure of) the roots.
-
- - `gcReturnDead': return the set of paths not reachable from
- the roots.
-
- - `gcDeleteDead': actually delete the latter set.
-
- - `gcDeleteSpecific': delete the paths listed in
- `pathsToDelete', insofar as they are not reachable.
- */
+ /**
+ * Garbage collector operation:
+ *
+ * - `gcReturnLive`: return the set of paths reachable from
+ * (i.e. in the closure of) the roots.
+ *
+ * - `gcReturnDead`: return the set of paths not reachable from
+ * the roots.
+ *
+ * - `gcDeleteDead`: actually delete the latter set.
+ *
+ * - `gcDeleteSpecific`: delete the paths listed in
+ * `pathsToDelete`, insofar as they are not reachable.
+ */
typedef enum {
gcReturnLive,
gcReturnDead,
@@ -33,28 +35,38 @@ struct GCOptions
GCAction action{gcDeleteDead};
- /* If `ignoreLiveness' is set, then reachability from the roots is
- ignored (dangerous!). However, the paths must still be
- unreferenced *within* the store (i.e., there can be no other
- store paths that depend on them). */
+ /**
+ * If `ignoreLiveness` is set, then reachability from the roots is
+ * ignored (dangerous!). However, the paths must still be
+ * unreferenced *within* the store (i.e., there can be no other
+ * store paths that depend on them).
+ */
bool ignoreLiveness{false};
- /* For `gcDeleteSpecific', the paths to delete. */
+ /**
+ * For `gcDeleteSpecific`, the paths to delete.
+ */
StorePathSet pathsToDelete;
- /* Stop after at least `maxFreed' bytes have been freed. */
+ /**
+ * Stop after at least `maxFreed` bytes have been freed.
+ */
uint64_t maxFreed{std::numeric_limits<uint64_t>::max()};
};
struct GCResults
{
- /* Depending on the action, the GC roots, or the paths that would
- be or have been deleted. */
+ /**
+ * Depending on the action, the GC roots, or the paths that would
+ * be or have been deleted.
+ */
PathSet paths;
- /* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the
- number of bytes that would be or was freed. */
+ /**
+ * For `gcReturnDead`, `gcDeleteDead` and `gcDeleteSpecific`, the
+ * number of bytes that would be or was freed.
+ */
uint64_t bytesFreed = 0;
};
@@ -63,21 +75,27 @@ struct GcStore : public virtual Store
{
inline static std::string operationName = "Garbage collection";
- /* Add an indirect root, which is merely a symlink to `path' from
- /nix/var/nix/gcroots/auto/<hash of `path'>. `path' is supposed
- to be a symlink to a store path. The garbage collector will
- automatically remove the indirect root when it finds that
- `path' has disappeared. */
+ /**
+ * Add an indirect root, which is merely a symlink to `path` from
+ * `/nix/var/nix/gcroots/auto/<hash of path>`. `path` is supposed
+ * to be a symlink to a store path. The garbage collector will
+ * automatically remove the indirect root when it finds that
+ * `path` has disappeared.
+ */
virtual void addIndirectRoot(const Path & path) = 0;
- /* Find the roots of the garbage collector. Each root is a pair
- (link, storepath) where `link' is the path of the symlink
- outside of the Nix store that point to `storePath'. If
- 'censor' is true, privacy-sensitive information about roots
- found in /proc is censored. */
+ /**
+ * Find the roots of the garbage collector. Each root is a pair
+ * `(link, storepath)` where `link` is the path of the symlink
+ * outside of the Nix store that points to `storePath`. If
+ * `censor` is true, privacy-sensitive information about roots
+ * found in `/proc` is censored.
+ */
virtual Roots findRoots(bool censor) = 0;
- /* Perform a garbage collection. */
+ /**
+ * Perform a garbage collection.
+ */
virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
};
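To make the `GCOptions`/`GCResults` interaction above concrete, a hedged sketch (not part of the patch); the `dynamic_cast` merely stands in for however a caller obtains a GC-capable store:

```cpp
// Illustrative sketch, not part of the patch: ask a GC-capable store which
// paths are dead without deleting anything. `store` is assumed to be e.g. a
// LocalStore.
#include "gc-store.hh"

using namespace nix;

PathSet listDeadPaths(Store & store)
{
    auto * gcStore = dynamic_cast<GcStore *>(&store);
    if (!gcStore)
        throw Error("store does not support garbage collection");

    GCOptions options;
    options.action = GCOptions::gcReturnDead;   // report only, do not delete
    GCResults results;
    gcStore->collectGarbage(options, results);
    return results.paths;
}
```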
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index 996f26a95..0038ec802 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -34,8 +34,7 @@ static void makeSymlink(const Path & link, const Path & target)
createDirs(dirOf(link));
/* Create the new symlink. */
- Path tempLink = (format("%1%.tmp-%2%-%3%")
- % link % getpid() % random()).str();
+ Path tempLink = fmt("%1%.tmp-%2%-%3%", link, getpid(), random());
createSymlink(target, tempLink);
/* Atomically replace the old one. */
@@ -197,7 +196,7 @@ void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
pid_t pid = std::stoi(i.name);
- debug(format("reading temporary root file '%1%'") % path);
+ debug("reading temporary root file '%1%'", path);
AutoCloseFD fd(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666));
if (!fd) {
/* It's okay if the file has disappeared. */
@@ -263,7 +262,7 @@ void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
target = absPath(target, dirOf(path));
if (!pathExists(target)) {
if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) {
- printInfo(format("removing stale link from '%1%' to '%2%'") % path % target);
+ printInfo("removing stale link from '%1%' to '%2%'", path, target);
unlink(path.c_str());
}
} else {
@@ -372,29 +371,29 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
while (errno = 0, ent = readdir(procDir.get())) {
checkInterrupt();
if (std::regex_match(ent->d_name, digitsRegex)) {
- readProcLink(fmt("/proc/%s/exe" ,ent->d_name), unchecked);
- readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked);
-
- auto fdStr = fmt("/proc/%s/fd", ent->d_name);
- auto fdDir = AutoCloseDir(opendir(fdStr.c_str()));
- if (!fdDir) {
- if (errno == ENOENT || errno == EACCES)
- continue;
- throw SysError("opening %1%", fdStr);
- }
- struct dirent * fd_ent;
- while (errno = 0, fd_ent = readdir(fdDir.get())) {
- if (fd_ent->d_name[0] != '.')
- readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked);
- }
- if (errno) {
- if (errno == ESRCH)
- continue;
- throw SysError("iterating /proc/%1%/fd", ent->d_name);
- }
- fdDir.reset();
-
try {
+ readProcLink(fmt("/proc/%s/exe" ,ent->d_name), unchecked);
+ readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked);
+
+ auto fdStr = fmt("/proc/%s/fd", ent->d_name);
+ auto fdDir = AutoCloseDir(opendir(fdStr.c_str()));
+ if (!fdDir) {
+ if (errno == ENOENT || errno == EACCES)
+ continue;
+ throw SysError("opening %1%", fdStr);
+ }
+ struct dirent * fd_ent;
+ while (errno = 0, fd_ent = readdir(fdDir.get())) {
+ if (fd_ent->d_name[0] != '.')
+ readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked);
+ }
+ if (errno) {
+ if (errno == ESRCH)
+ continue;
+ throw SysError("iterating /proc/%1%/fd", ent->d_name);
+ }
+ fdDir.reset();
+
auto mapFile = fmt("/proc/%s/maps", ent->d_name);
auto mapLines = tokenizeString<std::vector<std::string>>(readFile(mapFile), "\n");
for (const auto & line : mapLines) {
@@ -863,7 +862,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
continue;
}
- printMsg(lvlTalkative, format("deleting unused link '%1%'") % path);
+ printMsg(lvlTalkative, "deleting unused link '%1%'", path);
if (unlink(path.c_str()) == -1)
throw SysError("deleting '%1%'", path);
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 8e33a3dec..4c66d08ee 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -7,12 +7,23 @@
#include <algorithm>
#include <map>
+#include <mutex>
#include <thread>
#include <dlfcn.h>
#include <sys/utsname.h>
#include <nlohmann/json.hpp>
+#include <sodium/core.h>
+
+#ifdef __GLIBC__
+#include <gnu/lib-names.h>
+#include <nss.h>
+#include <dlfcn.h>
+#endif
+
+#include "config-impl.hh"
+
namespace nix {
@@ -30,28 +41,22 @@ static GlobalConfig::Register rSettings(&settings);
Settings::Settings()
: nixPrefix(NIX_PREFIX)
- , nixStore(canonPath(getEnv("NIX_STORE_DIR").value_or(getEnv("NIX_STORE").value_or(NIX_STORE_DIR))))
- , nixDataDir(canonPath(getEnv("NIX_DATA_DIR").value_or(NIX_DATA_DIR)))
- , nixLogDir(canonPath(getEnv("NIX_LOG_DIR").value_or(NIX_LOG_DIR)))
- , nixStateDir(canonPath(getEnv("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
- , nixConfDir(canonPath(getEnv("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
+ , nixStore(canonPath(getEnvNonEmpty("NIX_STORE_DIR").value_or(getEnvNonEmpty("NIX_STORE").value_or(NIX_STORE_DIR))))
+ , nixDataDir(canonPath(getEnvNonEmpty("NIX_DATA_DIR").value_or(NIX_DATA_DIR)))
+ , nixLogDir(canonPath(getEnvNonEmpty("NIX_LOG_DIR").value_or(NIX_LOG_DIR)))
+ , nixStateDir(canonPath(getEnvNonEmpty("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
+ , nixConfDir(canonPath(getEnvNonEmpty("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
, nixUserConfFiles(getUserConfigFiles())
- , nixBinDir(canonPath(getEnv("NIX_BIN_DIR").value_or(NIX_BIN_DIR)))
+ , nixBinDir(canonPath(getEnvNonEmpty("NIX_BIN_DIR").value_or(NIX_BIN_DIR)))
, nixManDir(canonPath(NIX_MAN_DIR))
- , nixDaemonSocketFile(canonPath(getEnv("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
+ , nixDaemonSocketFile(canonPath(getEnvNonEmpty("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH)))
{
buildUsersGroup = getuid() == 0 ? "nixbld" : "";
- lockCPU = getEnv("NIX_AFFINITY_HACK") == "1";
allowSymlinkedStore = getEnv("NIX_IGNORE_SYMLINK_STORE") == "1";
- caFile = getEnv("NIX_SSL_CERT_FILE").value_or(getEnv("SSL_CERT_FILE").value_or(""));
- if (caFile == "") {
- for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
- if (pathExists(fn)) {
- caFile = fn;
- break;
- }
- }
+ auto sslOverride = getEnv("NIX_SSL_CERT_FILE").value_or(getEnv("SSL_CERT_FILE").value_or(""));
+ if (sslOverride != "")
+ caFile = sslOverride;
/* Backwards compatibility. */
auto s = getEnv("NIX_REMOTE_SYSTEMS");
@@ -166,18 +171,6 @@ StringSet Settings::getDefaultExtraPlatforms()
return extraPlatforms;
}
-bool Settings::isExperimentalFeatureEnabled(const ExperimentalFeature & feature)
-{
- auto & f = experimentalFeatures.get();
- return std::find(f.begin(), f.end(), feature) != f.end();
-}
-
-void Settings::requireExperimentalFeature(const ExperimentalFeature & feature)
-{
- if (!isExperimentalFeatureEnabled(feature))
- throw MissingExperimentalFeature(feature);
-}
-
bool Settings::isWSL1()
{
struct utsname utsbuf;
@@ -187,6 +180,13 @@ bool Settings::isWSL1()
return hasSuffix(utsbuf.release, "-Microsoft");
}
+Path Settings::getDefaultSSLCertFile()
+{
+ for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
+ if (pathExists(fn)) return fn;
+ return "";
+}
+
const std::string nixVersion = PACKAGE_VERSION;
NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, {
@@ -195,18 +195,18 @@ NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, {
{SandboxMode::smDisabled, false},
});
-template<> void BaseSetting<SandboxMode>::set(const std::string & str, bool append)
+template<> SandboxMode BaseSetting<SandboxMode>::parse(const std::string & str) const
{
- if (str == "true") value = smEnabled;
- else if (str == "relaxed") value = smRelaxed;
- else if (str == "false") value = smDisabled;
+ if (str == "true") return smEnabled;
+ else if (str == "relaxed") return smRelaxed;
+ else if (str == "false") return smDisabled;
else throw UsageError("option '%s' has invalid value '%s'", name, str);
}
-template<> bool BaseSetting<SandboxMode>::isAppendable()
+template<> struct BaseSetting<SandboxMode>::trait
{
- return false;
-}
+ static constexpr bool appendable = false;
+};
template<> std::string BaseSetting<SandboxMode>::to_string() const
{
@@ -238,23 +238,23 @@ template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::s
});
}
-void MaxBuildJobsSetting::set(const std::string & str, bool append)
+unsigned int MaxBuildJobsSetting::parse(const std::string & str) const
{
- if (str == "auto") value = std::max(1U, std::thread::hardware_concurrency());
+ if (str == "auto") return std::max(1U, std::thread::hardware_concurrency());
else {
if (auto n = string2Int<decltype(value)>(str))
- value = *n;
+ return *n;
else
throw UsageError("configuration setting '%s' should be 'auto' or an integer", name);
}
}
-void PluginFilesSetting::set(const std::string & str, bool append)
+Paths PluginFilesSetting::parse(const std::string & str) const
{
if (pluginsLoaded)
throw UsageError("plugin-files set after plugins were loaded, you may need to move the flag before the subcommand");
- BaseSetting<Paths>::set(str, append);
+ return BaseSetting<Paths>::parse(str);
}
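The two conversions above illustrate the new settings interface: subclasses now implement a `const` `parse()` that returns the value instead of mutating it in `set()`. A hedged sketch (not part of the patch) with a hypothetical setting type:

```cpp
// Illustrative sketch, not part of the patch: "PercentSetting" is a
// hypothetical custom setting following the new parse() interface.
#include "config.hh"
#include "util.hh"

namespace nix {

struct PercentSetting : public BaseSetting<unsigned int>
{
    using BaseSetting<unsigned int>::BaseSetting;

    unsigned int parse(const std::string & str) const override
    {
        auto n = string2Int<unsigned int>(str);
        if (!n || *n > 100)
            throw UsageError("setting '%s' should be a percentage (0-100)", name);
        return *n;
    }
};

}
```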
@@ -291,6 +291,42 @@ void initPlugins()
settings.pluginFiles.pluginsLoaded = true;
}
+static void preloadNSS()
+{
+ /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
+ one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
+ been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
+ load its lookup libraries in the parent before any child gets a chance to. */
+ static std::once_flag dns_resolve_flag;
+
+ std::call_once(dns_resolve_flag, []() {
+#ifdef __GLIBC__
+ /* On linux, glibc will run every lookup through the nss layer.
+ * That means every lookup goes, by default, through nscd, which acts as a local
+ * cache.
+ * Because we run builds in a sandbox, we also remove access to nscd otherwise
+ * lookups would leak into the sandbox.
+ *
+ * But now we have a new problem: we need to make sure the nss_dns backend
+ * that does the DNS lookups when nscd is not available is loaded or available.
+ *
+ * We can't make it available without leaking nix's environment, so instead we'll
+ * load the backend, and configure nss so it does not try to run dns lookups
+ * through nscd.
+ *
+ * This is technically only used for builtins:fetch* functions so we only care
+ * about dns.
+ *
+ * All other platforms are unaffected.
+ */
+ if (!dlopen(LIBNSS_DNS_SO, RTLD_NOW))
+ warn("unable to load nss_dns backend");
+ // FIXME: get hosts entry from nsswitch.conf.
+ __nss_configure_lookup("hosts", "files dns");
+#endif
+ });
+}
+
static bool initLibStoreDone = false;
void assertLibStoreInitialized() {
@@ -301,6 +337,24 @@ void assertLibStoreInitialized() {
}
void initLibStore() {
+
+ initLibUtil();
+
+ if (sodium_init() == -1)
+ throw Error("could not initialise libsodium");
+
+ loadConfFile();
+
+ preloadNSS();
+
+ /* On macOS, don't use the per-session TMPDIR (as set e.g. by
+ sshd). This breaks build users because they don't have access
+ to the TMPDIR, in particular in ‘nix-store --serve’. */
+#if __APPLE__
+ if (hasPrefix(getEnv("TMPDIR").value_or("/tmp"), "/var/folders/"))
+ unsetenv("TMPDIR");
+#endif
+
initLibStoreDone = true;
}
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 93086eaf8..31dfe5b4e 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -1,9 +1,9 @@
#pragma once
+///@file
#include "types.hh"
#include "config.hh"
#include "util.hh"
-#include "experimental-features.hh"
#include <map>
#include <limits>
@@ -26,7 +26,7 @@ struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
options->addSetting(this);
}
- void set(const std::string & str, bool append = false) override;
+ unsigned int parse(const std::string & str) const override;
};
struct PluginFilesSetting : public BaseSetting<Paths>
@@ -43,7 +43,7 @@ struct PluginFilesSetting : public BaseSetting<Paths>
options->addSetting(this);
}
- void set(const std::string & str, bool append = false) override;
+ Paths parse(const std::string & str) const override;
};
const uint32_t maxIdsPerBuild =
@@ -64,40 +64,63 @@ class Settings : public Config {
bool isWSL1();
+ Path getDefaultSSLCertFile();
+
public:
Settings();
Path nixPrefix;
- /* The directory where we store sources and derived files. */
+ /**
+ * The directory where we store sources and derived files.
+ */
Path nixStore;
Path nixDataDir; /* !!! fix */
- /* The directory where we log various operations. */
+ /**
+ * The directory where we log various operations.
+ */
Path nixLogDir;
- /* The directory where state is stored. */
+ /**
+ * The directory where state is stored.
+ */
Path nixStateDir;
- /* The directory where system configuration files are stored. */
+ /**
+ * The directory where system configuration files are stored.
+ */
Path nixConfDir;
- /* A list of user configuration files to load. */
+ /**
+ * A list of user configuration files to load.
+ */
std::vector<Path> nixUserConfFiles;
- /* The directory where the main programs are stored. */
+ /**
+ * The directory where the main programs are stored.
+ */
Path nixBinDir;
- /* The directory where the man pages are stored. */
+ /**
+ * The directory where the man pages are stored.
+ */
Path nixManDir;
- /* File name of the socket the daemon listens to. */
+ /**
+ * File name of the socket the daemon listens to.
+ */
Path nixDaemonSocketFile;
Setting<std::string> storeUri{this, getEnv("NIX_REMOTE").value_or("auto"), "store",
- "The default Nix store to use."};
+ R"(
+ The [URL of the Nix store](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
+ to use for most operations.
+ See [`nix help-stores`](@docroot@/command-ref/new-cli/nix3-help-stores.md)
+ for supported store types and settings.
+ )"};
Setting<bool> keepFailed{this, false, "keep-failed",
"Whether to keep temporary directories of failed builds."};
@@ -114,7 +137,9 @@ public:
)",
{"build-fallback"}};
- /* Whether to show build log output in real time. */
+ /**
+ * Whether to show build log output in real time.
+ */
bool verboseBuild = true;
Setting<size_t> logLines{this, 10, "log-lines",
@@ -134,6 +159,15 @@ public:
)",
{"build-max-jobs"}};
+ Setting<unsigned int> maxSubstitutionJobs{
+ this, 16, "max-substitution-jobs",
+ R"(
+        This option defines the maximum number of substitution jobs that Nix
+        will try to run in parallel. The default is `16`. The minimum value is
+        `1`; lower values are interpreted as `1`.
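+
+        For example, to raise the limit, set the following in `nix.conf`
+        (the value `32` here is only an illustration):
+
+        ```
+        max-substitution-jobs = 32
+        ```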
+ )",
+ {"substitution-max-jobs"}};
+
Setting<unsigned int> buildCores{
this,
getDefaultCores(),
@@ -150,8 +184,10 @@ public:
)",
{"build-cores"}, false};
- /* Read-only mode. Don't copy stuff to the store, don't change
- the database. */
+ /**
+ * Read-only mode. Don't copy stuff to the store, don't change
+ * the database.
+ */
bool readOnlyMode = false;
Setting<std::string> thisSystem{
@@ -301,16 +337,6 @@ public:
users in `build-users-group`.
UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS.
-
- > **Warning**
- > This is an experimental feature.
-
- To enable it, add the following to [`nix.conf`](#):
-
- ```
- extra-experimental-features = auto-allocate-uids
- auto-allocate-uids = true
- ```
)"};
Setting<uint32_t> startId{this,
@@ -340,16 +366,6 @@ public:
Cgroups are required and enabled automatically for derivations
that require the `uid-range` system feature.
-
- > **Warning**
- > This is an experimental feature.
-
- To enable it, add the following to [`nix.conf`](#):
-
- ```
- extra-experimental-features = cgroups
- use-cgroups = true
- ```
)"};
#endif
@@ -451,9 +467,6 @@ public:
)",
{"env-keep-derivations"}};
- /* Whether to lock the Nix client and worker to the same CPU. */
- bool lockCPU;
-
Setting<SandboxMode> sandboxMode{
this,
#if __linux__
@@ -678,8 +691,9 @@ public:
Strings{"https://cache.nixos.org/"},
"substituters",
R"(
- A list of URLs of substituters, separated by whitespace. Substituters
- are tried based on their Priority value, which each substituter can set
+ A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
+ to be used as substituters, separated by whitespace.
+ Substituters are tried based on their Priority value, which each substituter can set
independently. Lower value means higher priority.
The default is `https://cache.nixos.org`, with a Priority of 40.
@@ -697,7 +711,8 @@ public:
Setting<StringSet> trustedSubstituters{
this, {}, "trusted-substituters",
R"(
- A list of URLs of substituters, separated by whitespace. These are
+ A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format),
+ separated by whitespace. These are
not used by default, but can be enabled by users of the Nix daemon
by specifying `--option substituters urls` on the command
line. Unprivileged users are only allowed to pass a subset of the
@@ -826,8 +841,22 @@ public:
> `.netrc`.
)"};
- /* Path to the SSL CA file used */
- Path caFile;
+ Setting<Path> caFile{
+ this, getDefaultSSLCertFile(), "ssl-cert-file",
+ R"(
+ The path of a file containing CA certificates used to
+ authenticate `https://` downloads. Nix by default will use
+ the first of the following files that exists:
+
+ 1. `/etc/ssl/certs/ca-certificates.crt`
+ 2. `/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt`
+
+ The path can be overridden by the following environment
+ variables, in order of precedence:
+
+ 1. `NIX_SSL_CERT_FILE`
+ 2. `SSL_CERT_FILE`
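+
+      For example, a single invocation can point Nix at a different CA bundle
+      via the environment (the bundle path here is only an illustration):
+
+      ```console
+      $ NIX_SSL_CERT_FILE=/etc/ssl/my-ca-bundle.crt nix build nixpkgs#hello
+      ```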
+ )"};
#if __linux__
Setting<bool> filterSyscalls{
@@ -932,13 +961,6 @@ public:
are loaded as plugins (non-recursively).
)"};
- Setting<std::set<ExperimentalFeature>> experimentalFeatures{this, {}, "experimental-features",
- "Experimental Nix features to enable."};
-
- bool isExperimentalFeatureEnabled(const ExperimentalFeature &);
-
- void requireExperimentalFeature(const ExperimentalFeature &);
-
Setting<size_t> narBufferSize{this, 32 * 1024 * 1024, "nar-buffer-size",
"Maximum size of NARs before spilling them to disk."};
@@ -959,7 +981,7 @@ public:
this, false, "use-xdg-base-directories",
R"(
If set to `true`, Nix will conform to the [XDG Base Directory Specification] for files in `$HOME`.
- The environment variables used to implement this are documented in the [Environment Variables section](@docroot@/installation/env-variables.md).
+ The environment variables used to implement this are documented in the [Environment Variables section](@docroot@/command-ref/env-common.md).
[XDG Base Directory Specification]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
@@ -981,8 +1003,10 @@ public:
// FIXME: don't use a global variable.
extern Settings settings;
-/* This should be called after settings are initialized, but before
- anything else */
+/**
+ * This should be called after settings are initialized, but before
+ * anything else.
+ */
void initPlugins();
void loadConfFile();
@@ -992,12 +1016,16 @@ std::vector<Path> getUserConfigFiles();
extern const std::string nixVersion;
-/* NB: This is not sufficient. You need to call initNix() */
+/**
+ * NB: This is not sufficient. You need to call initNix()
+ */
void initLibStore();
-/* It's important to initialize before doing _anything_, which is why we
- call upon the programmer to handle this correctly. However, we only add
- this in a key locations, so as not to litter the code. */
+/**
+ * It's important to initialize before doing _anything_, which is why we
+ * call upon the programmer to handle this correctly. However, we only add
+ * this in key locations, so as not to litter the code.
+ */
void assertLibStoreInitialized();
}
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index 1479822a9..85c5eed4c 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -12,7 +12,14 @@ struct HttpBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
- const std::string name() override { return "Http Binary Cache Store"; }
+ const std::string name() override { return "HTTP Binary Cache Store"; }
+
+ std::string doc() override
+ {
+ return
+ #include "http-binary-cache-store.md"
+ ;
+ }
};
class HttpBinaryCacheStore : public virtual HttpBinaryCacheStoreConfig, public virtual BinaryCacheStore
@@ -187,6 +194,18 @@ protected:
}});
}
+ /**
+     * This isn't necessarily read-only. We support "upsert" now, so we
+     * have a notion of authentication via HTTP POST/PUT.
+ *
+ * For now, we conservatively say we don't know.
+ *
+ * \todo try to expose our HTTP authentication status.
+ */
+ std::optional<TrustedFlag> isTrustedClient() override
+ {
+ return std::nullopt;
+ }
};
static RegisterStoreImplementation<HttpBinaryCacheStore, HttpBinaryCacheStoreConfig> regHttpBinaryCacheStore;
diff --git a/src/libstore/http-binary-cache-store.md b/src/libstore/http-binary-cache-store.md
new file mode 100644
index 000000000..20c26d0c2
--- /dev/null
+++ b/src/libstore/http-binary-cache-store.md
@@ -0,0 +1,8 @@
+R"(
+
+**Store URL format**: `http://...`, `https://...`
+
+This store allows a binary cache to be accessed via the HTTP
+protocol.
+
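+For example, the following copies `nixpkgs#hello` to a binary cache served
+over HTTPS (the cache URL is a placeholder, and the server must accept
+uploads for this to succeed):
+
+```console
+# nix copy --to https://cache.example.org nixpkgs#hello
+```
+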
+)"
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index 2c9dd2680..2b7bebe9d 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -1,3 +1,4 @@
+#include "ssh-store-config.hh"
#include "archive.hh"
#include "pool.hh"
#include "remote-store.hh"
@@ -12,17 +13,24 @@
namespace nix {
-struct LegacySSHStoreConfig : virtual StoreConfig
+struct LegacySSHStoreConfig : virtual CommonSSHStoreConfig
{
- using StoreConfig::StoreConfig;
- const Setting<int> maxConnections{(StoreConfig*) this, 1, "max-connections", "maximum number of concurrent SSH connections"};
- const Setting<Path> sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"};
- const Setting<std::string> sshPublicHostKey{(StoreConfig*) this, "", "base64-ssh-public-host-key", "The public half of the host's SSH key"};
- const Setting<bool> compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"};
- const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"};
- const Setting<std::string> remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"};
-
- const std::string name() override { return "Legacy SSH Store"; }
+ using CommonSSHStoreConfig::CommonSSHStoreConfig;
+
+ const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-store", "remote-program",
+ "Path to the `nix-store` executable on the remote machine."};
+
+ const Setting<int> maxConnections{(StoreConfig*) this, 1, "max-connections",
+ "Maximum number of concurrent SSH connections."};
+
+ const std::string name() override { return "SSH Store"; }
+
+ std::string doc() override
+ {
+ return
+ #include "legacy-ssh-store.md"
+ ;
+ }
};
struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Store
@@ -51,6 +59,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
LegacySSHStore(const std::string & scheme, const std::string & host, const Params & params)
: StoreConfig(params)
+ , CommonSSHStoreConfig(params)
, LegacySSHStoreConfig(params)
, Store(params)
, host(host)
@@ -137,7 +146,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
auto deriver = readString(conn->from);
if (deriver != "")
info->deriver = parseStorePath(deriver);
- info->references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ info->references = WorkerProto<StorePathSet>::read(*this, conn->from);
readLongLong(conn->from); // download size
info->narSize = readLongLong(conn->from);
@@ -147,7 +156,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
throw Error("NAR hash is now mandatory");
info->narHash = Hash::parseAnyPrefixed(s);
}
- info->ca = parseContentAddressOpt(readString(conn->from));
+ info->ca = ContentAddress::parseOpt(readString(conn->from));
info->sigs = readStrings<StringSet>(conn->from);
auto s = readString(conn->from);
@@ -171,7 +180,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(Base16, false);
- worker_proto::write(*this, conn->to, info.references);
+ workerProtoWrite(*this, conn->to, info.references);
conn->to
<< info.registrationTime
<< info.narSize
@@ -200,7 +209,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
conn->to
<< exportMagic
<< printStorePath(info.path);
- worker_proto::write(*this, conn->to, info.references);
+ workerProtoWrite(*this, conn->to, info.references);
conn->to
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0
@@ -278,19 +287,18 @@ public:
conn->to.flush();
- BuildResult status {
- .path = DerivedPath::Built {
- .drvPath = drvPath,
- .outputs = OutputsSpec::All { },
- },
- };
+ BuildResult status;
status.status = (BuildResult::Status) readInt(conn->from);
conn->from >> status.errorMsg;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
conn->from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 6) {
- status.builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
+ auto builtOutputs = WorkerProto<DrvOutputs>::read(*this, conn->from);
+ for (auto && [output, realisation] : builtOutputs)
+ status.builtOutputs.insert_or_assign(
+ std::move(output.outputName),
+ std::move(realisation));
}
return status;
}
@@ -321,7 +329,7 @@ public:
conn->to.flush();
- BuildResult result { .path = DerivedPath::Opaque { StorePath::dummy } };
+ BuildResult result;
result.status = (BuildResult::Status) readInt(conn->from);
if (!result.success()) {
@@ -333,6 +341,20 @@ public:
void ensurePath(const StorePath & path) override
{ unsupported("ensurePath"); }
+ virtual ref<FSAccessor> getFSAccessor() override
+ { unsupported("getFSAccessor"); }
+
+ /**
+ * The default instance would schedule the work on the client side, but
+ * for consistency with `buildPaths` and `buildDerivation` it should happen
+ * on the remote side.
+ *
+     * We make this fail for now so we can implement this properly later
+ * without it being a breaking change.
+ */
+ void repairPath(const StorePath & path) override
+ { unsupported("repairPath"); }
+
void computeFSClosure(const StorePathSet & paths,
StorePathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false) override
@@ -347,10 +369,10 @@ public:
conn->to
<< cmdQueryClosure
<< includeOutputs;
- worker_proto::write(*this, conn->to, paths);
+ workerProtoWrite(*this, conn->to, paths);
conn->to.flush();
- for (auto & i : worker_proto::read(*this, conn->from, Phantom<StorePathSet> {}))
+ for (auto & i : WorkerProto<StorePathSet>::read(*this, conn->from))
out.insert(i);
}
@@ -363,10 +385,10 @@ public:
<< cmdQueryValidPaths
<< false // lock
<< maybeSubstitute;
- worker_proto::write(*this, conn->to, paths);
+ workerProtoWrite(*this, conn->to, paths);
conn->to.flush();
- return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ return WorkerProto<StorePathSet>::read(*this, conn->from);
}
void connect() override
@@ -380,6 +402,15 @@ public:
return conn->remoteVersion;
}
+ /**
+     * The legacy SSH protocol doesn't support checking for trusted-user.
+     * Try using `ssh-ng://` instead if you want to know.
+ */
+ std::optional<TrustedFlag> isTrustedClient() override
+ {
+ return std::nullopt;
+ }
+
void queryRealisationUncached(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept override
// TODO: Implement
diff --git a/src/libstore/legacy-ssh-store.md b/src/libstore/legacy-ssh-store.md
new file mode 100644
index 000000000..043acebd6
--- /dev/null
+++ b/src/libstore/legacy-ssh-store.md
@@ -0,0 +1,8 @@
+R"(
+
+**Store URL format**: `ssh://[username@]hostname`
+
+This store type allows limited access to a remote store on another
+machine via SSH.
+
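+For example, the following copies `nixpkgs#hello` to the store on a remote
+machine (the user and host name are placeholders):
+
+```console
+# nix copy --to ssh://alice@remote.example.org nixpkgs#hello
+```
+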
+)"
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
index f20b1fa02..5481dd762 100644
--- a/src/libstore/local-binary-cache-store.cc
+++ b/src/libstore/local-binary-cache-store.cc
@@ -11,6 +11,13 @@ struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
const std::string name() override { return "Local Binary Cache Store"; }
+
+ std::string doc() override
+ {
+ return
+ #include "local-binary-cache-store.md"
+ ;
+ }
};
class LocalBinaryCacheStore : public virtual LocalBinaryCacheStoreConfig, public virtual BinaryCacheStore
@@ -88,6 +95,10 @@ protected:
return paths;
}
+ std::optional<TrustedFlag> isTrustedClient() override
+ {
+ return Trusted;
+ }
};
void LocalBinaryCacheStore::init()
diff --git a/src/libstore/local-binary-cache-store.md b/src/libstore/local-binary-cache-store.md
new file mode 100644
index 000000000..93fddc840
--- /dev/null
+++ b/src/libstore/local-binary-cache-store.md
@@ -0,0 +1,16 @@
+R"(
+
+**Store URL format**: `file://`*path*
+
+This store allows reading and writing a binary cache stored in *path*
+in the local filesystem. If *path* does not exist, it will be created.
+
+For example, the following builds or downloads `nixpkgs#hello` into
+the local store and then copies it to the binary cache in
+`/tmp/binary-cache`:
+
+```
+# nix copy --to file:///tmp/binary-cache nixpkgs#hello
+```
+
+)"
diff --git a/src/libstore/local-fs-store.hh b/src/libstore/local-fs-store.hh
index 947707341..a03bb88f5 100644
--- a/src/libstore/local-fs-store.hh
+++ b/src/libstore/local-fs-store.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "store-api.hh"
#include "gc-store.hh"
@@ -9,20 +10,28 @@ namespace nix {
struct LocalFSStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
+
// FIXME: the (StoreConfig*) cast works around a bug in gcc that causes
// it to omit the call to the Setting constructor. Clang works fine
// either way.
+
const PathSetting rootDir{(StoreConfig*) this, true, "",
- "root", "directory prefixed to all other paths"};
+ "root",
+ "Directory prefixed to all other paths."};
+
const PathSetting stateDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir,
- "state", "directory where Nix will store state"};
+ "state",
+ "Directory where Nix will store state."};
+
const PathSetting logDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
- "log", "directory where Nix will store state"};
+ "log",
+        "Directory where Nix will store log files."};
+
const PathSetting realStoreDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
- "physical path to the Nix store"};
+ "Physical path of the Nix store."};
};
class LocalFSStore : public virtual LocalFSStoreConfig,
@@ -39,7 +48,9 @@ public:
void narFromPath(const StorePath & path, Sink & sink) override;
ref<FSAccessor> getFSAccessor() override;
- /* Register a permanent GC root. */
+ /**
+ * Register a permanent GC root.
+ */
Path addPermRoot(const StorePath & storePath, const Path & gcRoot);
virtual Path getRealStoreDir() { return realStoreDir; }
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 82edaa9bf..7fb312c37 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -44,6 +44,13 @@
namespace nix {
+std::string LocalStoreConfig::doc()
+{
+ return
+ #include "local-store.md"
+ ;
+}
+
struct LocalStore::State::Stmts {
/* Some precompiled SQLite statements. */
SQLiteStmt RegisterValidPath;
@@ -280,7 +287,7 @@ LocalStore::LocalStore(const Params & params)
else if (curSchema == 0) { /* new store */
curSchema = nixSchemaVersion;
openDB(*state, true);
- writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str(), 0666, true);
+ writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true);
}
else if (curSchema < nixSchemaVersion) {
@@ -329,14 +336,14 @@ LocalStore::LocalStore(const Params & params)
txn.commit();
}
- writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str(), 0666, true);
+ writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true);
lockFile(globalLock.get(), ltRead, true);
}
else openDB(*state, false);
- if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
migrateCASchema(state->db, dbDir + "/ca-schema", globalLock);
}
@@ -366,7 +373,7 @@ LocalStore::LocalStore(const Params & params)
state->stmts->QueryPathFromHashPart.create(state->db,
"select path from ValidPaths where path >= ? limit 1;");
state->stmts->QueryValidPaths.create(state->db, "select path from ValidPaths");
- if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
state->stmts->RegisterRealisedOutput.create(state->db,
R"(
insert into Realisations (drvPath, outputName, outputPath, signatures)
@@ -413,6 +420,13 @@ LocalStore::LocalStore(const Params & params)
}
+LocalStore::LocalStore(std::string scheme, std::string path, const Params & params)
+ : LocalStore(params)
+{
+ throw UnimplementedError("LocalStore");
+}
+
+
AutoCloseFD LocalStore::openGCLock()
{
Path fnGCLock = stateDir + "/gc.lock";
@@ -697,64 +711,9 @@ void canonicalisePathMetaData(const Path & path,
}
-void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv)
-{
- assert(drvPath.isDerivation());
- std::string drvName(drvPath.name());
- drvName = drvName.substr(0, drvName.size() - drvExtension.size());
-
- auto envHasRightPath = [&](const StorePath & actual, const std::string & varName)
- {
- auto j = drv.env.find(varName);
- if (j == drv.env.end() || parseStorePath(j->second) != actual)
- throw Error("derivation '%s' has incorrect environment variable '%s', should be '%s'",
- printStorePath(drvPath), varName, printStorePath(actual));
- };
-
-
- // Don't need the answer, but do this anyways to assert is proper
- // combination. The code below is more general and naturally allows
- // combinations that are currently prohibited.
- drv.type();
-
- std::optional<DrvHash> hashesModulo;
- for (auto & i : drv.outputs) {
- std::visit(overloaded {
- [&](const DerivationOutput::InputAddressed & doia) {
- if (!hashesModulo) {
- // somewhat expensive so we do lazily
- hashesModulo = hashDerivationModulo(*this, drv, true);
- }
- auto currentOutputHash = get(hashesModulo->hashes, i.first);
- if (!currentOutputHash)
- throw Error("derivation '%s' has unexpected output '%s' (local-store / hashesModulo) named '%s'",
- printStorePath(drvPath), printStorePath(doia.path), i.first);
- StorePath recomputed = makeOutputPath(i.first, *currentOutputHash, drvName);
- if (doia.path != recomputed)
- throw Error("derivation '%s' has incorrect output '%s', should be '%s'",
- printStorePath(drvPath), printStorePath(doia.path), printStorePath(recomputed));
- envHasRightPath(doia.path, i.first);
- },
- [&](const DerivationOutput::CAFixed & dof) {
- StorePath path = makeFixedOutputPath(dof.hash.method, dof.hash.hash, drvName);
- envHasRightPath(path, i.first);
- },
- [&](const DerivationOutput::CAFloating &) {
- /* Nothing to check */
- },
- [&](const DerivationOutput::Deferred &) {
- /* Nothing to check */
- },
- [&](const DerivationOutput::Impure &) {
- /* Nothing to check */
- },
- }, i.second.raw());
- }
-}
-
void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs)
{
- settings.requireExperimentalFeature(Xp::CaDerivations);
+ experimentalFeatureSettings.require(Xp::CaDerivations);
if (checkSigs == NoCheckSigs || !realisationIsUntrusted(info))
registerDrvOutput(info);
else
@@ -763,7 +722,7 @@ void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag check
void LocalStore::registerDrvOutput(const Realisation & info)
{
- settings.requireExperimentalFeature(Xp::CaDerivations);
+ experimentalFeatureSettings.require(Xp::CaDerivations);
retrySQLite<void>([&]() {
auto state(_state.lock());
if (auto oldR = queryRealisation_(*state, info.id)) {
@@ -862,7 +821,7 @@ uint64_t LocalStore::addValidPath(State & state,
derivations). Note that if this throws an error, then the
DB transaction is rolled back, so the path validity
registration above is undone. */
- if (checkOutputs) checkDerivationOutputs(info.path, drv);
+ if (checkOutputs) drv.checkInvariants(*this, info.path);
for (auto & i : drv.outputsAndOptPaths(*this)) {
/* Floating CA derivations have indeterminate output paths until
@@ -930,7 +889,7 @@ std::shared_ptr<const ValidPathInfo> LocalStore::queryPathInfoInternal(State & s
if (s) info->sigs = tokenizeString<StringSet>(s, " ");
s = (const char *) sqlite3_column_text(state.stmts->QueryPathInfo, 7);
- if (s) info->ca = parseContentAddressOpt(s);
+ if (s) info->ca = ContentAddress::parseOpt(s);
/* Get the references. */
auto useQueryReferences(state.stmts->QueryReferences.use()(info->id));
@@ -1052,7 +1011,7 @@ LocalStore::queryPartialDerivationOutputMap(const StorePath & path_)
return outputs;
});
- if (!settings.isExperimentalFeatureEnabled(Xp::CaDerivations))
+ if (!experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
return outputs;
auto drv = readInvalidDerivation(path);
@@ -1120,54 +1079,6 @@ StorePathSet LocalStore::querySubstitutablePaths(const StorePathSet & paths)
}
-// FIXME: move this, it's not specific to LocalStore.
-void LocalStore::querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos)
-{
- if (!settings.useSubstitutes) return;
- for (auto & sub : getDefaultSubstituters()) {
- for (auto & path : paths) {
- if (infos.count(path.first))
- // Choose first succeeding substituter.
- continue;
-
- auto subPath(path.first);
-
- // Recompute store path so that we can use a different store root.
- if (path.second) {
- subPath = makeFixedOutputPathFromCA(path.first.name(), *path.second);
- if (sub->storeDir == storeDir)
- assert(subPath == path.first);
- if (subPath != path.first)
- debug("replaced path '%s' with '%s' for substituter '%s'", printStorePath(path.first), sub->printStorePath(subPath), sub->getUri());
- } else if (sub->storeDir != storeDir) continue;
-
- debug("checking substituter '%s' for path '%s'", sub->getUri(), sub->printStorePath(subPath));
- try {
- auto info = sub->queryPathInfo(subPath);
-
- if (sub->storeDir != storeDir && !(info->isContentAddressed(*sub) && info->references.empty()))
- continue;
-
- auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
- std::shared_ptr<const ValidPathInfo>(info));
- infos.insert_or_assign(path.first, SubstitutablePathInfo{
- info->deriver,
- info->references,
- narInfo ? narInfo->fileSize : 0,
- info->narSize});
- } catch (InvalidPath &) {
- } catch (SubstituterDisabled &) {
- } catch (Error & e) {
- if (settings.tryFallback)
- logError(e.info());
- else
- throw;
- }
- }
- }
-}
-
-
void LocalStore::registerValidPath(const ValidPathInfo & info)
{
registerValidPaths({{info.path, info}});
@@ -1209,8 +1120,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos)
for (auto & [_, i] : infos)
if (i.path.isDerivation()) {
// FIXME: inefficient; we already loaded the derivation in addValidPath().
- checkDerivationOutputs(i.path,
- readInvalidDerivation(i.path));
+ readInvalidDerivation(i.path).checkInvariants(*this, i.path);
}
/* Do a topological sort of the paths. This will throw an
@@ -1312,7 +1222,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
printStorePath(info.path), info.narSize, hashResult.second);
if (info.ca) {
- if (auto foHash = std::get_if<FixedOutputHash>(&*info.ca)) {
+ if (auto foHash = std::get_if<FixedOutputHash>(&info.ca->raw)) {
auto actualFoHash = hashCAPath(
foHash->method,
foHash->hash.type,
@@ -1325,7 +1235,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
actualFoHash.hash.to_string(Base32, true));
}
}
- if (auto textHash = std::get_if<TextHash>(&*info.ca)) {
+ if (auto textHash = std::get_if<TextHash>(&info.ca->raw)) {
auto actualTextHash = hashString(htSHA256, readFile(realPath));
if (textHash->hash != actualTextHash) {
throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s",
@@ -1411,7 +1321,19 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
auto [hash, size] = hashSink->finish();
- auto dstPath = makeFixedOutputPath(method, hash, name, references);
+ ContentAddressWithReferences desc = FixedOutputInfo {
+ .hash = {
+ .method = method,
+ .hash = hash,
+ },
+ .references = {
+ .others = references,
+ // caller is not capable of creating a self-reference, because this is content-addressed without modulus
+ .self = false,
+ },
+ };
+
+ auto dstPath = makeFixedOutputPathFromCA(name, desc);
addTempRoot(dstPath);
@@ -1431,7 +1353,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
autoGC();
if (inMemory) {
- StringSource dumpSource { dump };
+ StringSource dumpSource { dump };
/* Restore from the NAR in memory. */
if (method == FileIngestionMethod::Recursive)
restorePath(realPath, dumpSource);
@@ -1455,10 +1377,13 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
optimisePath(realPath, repair);
- ValidPathInfo info { dstPath, narHash.first };
+ ValidPathInfo info {
+ *this,
+ name,
+ std::move(desc),
+ narHash.first
+ };
info.narSize = narHash.second;
- info.references = references;
- info.ca = FixedOutputHash { .method = method, .hash = hash };
registerValidPath(info);
}
@@ -1475,7 +1400,10 @@ StorePath LocalStore::addTextToStore(
const StorePathSet & references, RepairFlag repair)
{
auto hash = hashString(htSHA256, s);
- auto dstPath = makeTextPath(name, hash, references);
+ auto dstPath = makeTextPath(name, TextInfo {
+ { .hash = hash },
+ references,
+ });
addTempRoot(dstPath);
@@ -1560,7 +1488,7 @@ void LocalStore::invalidatePathChecked(const StorePath & path)
bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
{
- printInfo(format("reading the Nix store..."));
+ printInfo("reading the Nix store...");
bool errors = false;
@@ -1719,6 +1647,11 @@ unsigned int LocalStore::getProtocol()
return PROTOCOL_VERSION;
}
+std::optional<TrustedFlag> LocalStore::isTrustedClient()
+{
+ return Trusted;
+}
+
#if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL)
@@ -1950,5 +1883,6 @@ std::optional<std::string> LocalStore::getVersion()
return nixVersion;
}
+static RegisterStoreImplementation<LocalStore, LocalStoreConfig> regLocalStore;
} // namespace nix
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index a84eb7c26..70debad38 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "sqlite.hh"
@@ -18,10 +19,14 @@
namespace nix {
-/* Nix store and database schema version. Version 1 (or 0) was Nix <=
- 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
- Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
- Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0. */
+/**
+ * Nix store and database schema version.
+ *
+ * Version 1 (or 0) was Nix <= 0.7. Version 2 was Nix 0.8 and 0.9.
+ * Version 3 is Nix 0.10. Version 4 is Nix 0.11. Version 5 is
+ * Nix 0.12-0.16. Version 6 is Nix 1.0. Version 7 is Nix 1.3.
+ * Version 10 is 2.0.
+ */
const int nixSchemaVersion = 10;
@@ -38,40 +43,52 @@ struct LocalStoreConfig : virtual LocalFSStoreConfig
Setting<bool> requireSigs{(StoreConfig*) this,
settings.requireSigs,
- "require-sigs", "whether store paths should have a trusted signature on import"};
+ "require-sigs",
+ "Whether store paths copied into this store should have a trusted signature."};
const std::string name() override { return "Local Store"; }
-};
+ std::string doc() override;
+};
class LocalStore : public virtual LocalStoreConfig, public virtual LocalFSStore, public virtual GcStore
{
private:
- /* Lock file used for upgrading. */
+ /**
+ * Lock file used for upgrading.
+ */
AutoCloseFD globalLock;
struct State
{
- /* The SQLite database object. */
+ /**
+ * The SQLite database object.
+ */
SQLite db;
struct Stmts;
std::unique_ptr<Stmts> stmts;
- /* The last time we checked whether to do an auto-GC, or an
- auto-GC finished. */
+ /**
+ * The last time we checked whether to do an auto-GC, or an
+ * auto-GC finished.
+ */
std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;
- /* Whether auto-GC is running. If so, get gcFuture to wait for
- the GC to finish. */
+ /**
+ * Whether auto-GC is running. If so, get gcFuture to wait for
+ * the GC to finish.
+ */
bool gcRunning = false;
std::shared_future<void> gcFuture;
- /* How much disk space was available after the previous
- auto-GC. If the current available disk space is below
- minFree but not much below availAfterGC, then there is no
- point in starting a new GC. */
+ /**
+ * How much disk space was available after the previous
+ * auto-GC. If the current available disk space is below
+ * minFree but not much below availAfterGC, then there is no
+ * point in starting a new GC.
+ */
uint64_t availAfterGC = std::numeric_limits<uint64_t>::max();
std::unique_ptr<PublicKeys> publicKeys;
@@ -94,16 +111,26 @@ private:
public:
- // Hack for build-remote.cc.
+ /**
+ * Hack for build-remote.cc.
+ */
PathSet locksHeld;
- /* Initialise the local store, upgrading the schema if
- necessary. */
+ /**
+ * Initialise the local store, upgrading the schema if
+ * necessary.
+ */
LocalStore(const Params & params);
+ LocalStore(std::string scheme, std::string path, const Params & params);
~LocalStore();
- /* Implementations of abstract store API methods. */
+ static std::set<std::string> uriSchemes()
+ { return {}; }
+
+ /**
+ * Implementations of abstract store API methods.
+ */
std::string getUri() override;
@@ -127,9 +154,6 @@ public:
StorePathSet querySubstitutablePaths(const StorePathSet & paths) override;
- void querySubstitutablePathInfos(const StorePathCAMap & paths,
- SubstitutablePathInfos & infos) override;
-
bool pathInfoIsUntrusted(const ValidPathInfo &) override;
bool realisationIsUntrusted(const Realisation & ) override;
@@ -151,13 +175,19 @@ private:
void createTempRootsFile();
- /* The file to which we write our temporary roots. */
+ /**
+ * The file to which we write our temporary roots.
+ */
Sync<AutoCloseFD> _fdTempRoots;
- /* The global GC lock. */
+ /**
+ * The global GC lock.
+ */
Sync<AutoCloseFD> _fdGCLock;
- /* Connection to the garbage collector. */
+ /**
+ * Connection to the garbage collector.
+ */
Sync<AutoCloseFD> _fdRootsSocket;
public:
@@ -176,42 +206,52 @@ public:
void collectGarbage(const GCOptions & options, GCResults & results) override;
- /* Optimise the disk space usage of the Nix store by hard-linking
- files with the same contents. */
+ /**
+ * Optimise the disk space usage of the Nix store by hard-linking
+ * files with the same contents.
+ */
void optimiseStore(OptimiseStats & stats);
void optimiseStore() override;
- /* Optimise a single store path. Optionally, test the encountered
- symlinks for corruption. */
+ /**
+ * Optimise a single store path. Optionally, test the encountered
+ * symlinks for corruption.
+ */
void optimisePath(const Path & path, RepairFlag repair);
bool verifyStore(bool checkContents, RepairFlag repair) override;
- /* Register the validity of a path, i.e., that `path' exists, that
- the paths referenced by it exists, and in the case of an output
- path of a derivation, that it has been produced by a successful
- execution of the derivation (or something equivalent). Also
- register the hash of the file system contents of the path. The
- hash must be a SHA-256 hash. */
+ /**
+ * Register the validity of a path, i.e., that `path` exists, that
+     * the paths referenced by it exist, and in the case of an output
+ * path of a derivation, that it has been produced by a successful
+ * execution of the derivation (or something equivalent). Also
+ * register the hash of the file system contents of the path. The
+ * hash must be a SHA-256 hash.
+ */
void registerValidPath(const ValidPathInfo & info);
void registerValidPaths(const ValidPathInfos & infos);
unsigned int getProtocol() override;
- void vacuumDB();
+ std::optional<TrustedFlag> isTrustedClient() override;
- void repairPath(const StorePath & path) override;
+ void vacuumDB();
void addSignatures(const StorePath & storePath, const StringSet & sigs) override;
- /* If free disk space in /nix/store if below minFree, delete
- garbage until it exceeds maxFree. */
+ /**
+     * If free disk space in /nix/store is below minFree, delete
+ * garbage until it exceeds maxFree.
+ */
void autoGC(bool sync = true);
- /* Register the store path 'output' as the output named 'outputName' of
- derivation 'deriver'. */
+ /**
+ * Register the store path 'output' as the output named 'outputName' of
+ * derivation 'deriver'.
+ */
void registerDrvOutput(const Realisation & info) override;
void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
void cacheDrvOutputMapping(
@@ -241,7 +281,9 @@ private:
void invalidatePath(State & state, const StorePath & path);
- /* Delete a path from the Nix store. */
+ /**
+ * Delete a path from the Nix store.
+ */
void invalidatePathChecked(const StorePath & path);
void verifyPath(const Path & path, const StringSet & store,
@@ -264,8 +306,6 @@ private:
std::pair<Path, AutoCloseFD> createTempDirInStore();
- void checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv);
-
typedef std::unordered_set<ino_t> InodeHash;
InodeHash loadInodeHash();
@@ -276,8 +316,10 @@ private:
bool isValidPath_(State & state, const StorePath & path);
void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers);
- /* Add signatures to a ValidPathInfo or Realisation using the secret keys
- specified by the ‘secret-key-files’ option. */
+ /**
+ * Add signatures to a ValidPathInfo or Realisation using the secret keys
+ * specified by the ‘secret-key-files’ option.
+ */
void signPathInfo(ValidPathInfo & info);
void signRealisation(Realisation &);
@@ -307,18 +349,23 @@ typedef std::pair<dev_t, ino_t> Inode;
typedef std::set<Inode> InodesSeen;
-/* "Fix", or canonicalise, the meta-data of the files in a store path
- after it has been built. In particular:
- - the last modification date on each file is set to 1 (i.e.,
- 00:00:01 1/1/1970 UTC)
- - the permissions are set of 444 or 555 (i.e., read-only with or
- without execute permission; setuid bits etc. are cleared)
- - the owner and group are set to the Nix user and group, if we're
- running as root.
- If uidRange is not empty, this function will throw an error if it
- encounters files owned by a user outside of the closed interval
- [uidRange->first, uidRange->second].
-*/
+/**
+ * "Fix", or canonicalise, the meta-data of the files in a store path
+ * after it has been built. In particular:
+ *
+ * - the last modification date on each file is set to 1 (i.e.,
+ * 00:00:01 1/1/1970 UTC)
+ *
+ *   - the permissions are set to 444 or 555 (i.e., read-only with or
+ * without execute permission; setuid bits etc. are cleared)
+ *
+ * - the owner and group are set to the Nix user and group, if we're
+ * running as root.
+ *
+ * If uidRange is not empty, this function will throw an error if it
+ * encounters files owned by a user outside of the closed interval
+ * [uidRange->first, uidRange->second].
+ */
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
diff --git a/src/libstore/local-store.md b/src/libstore/local-store.md
new file mode 100644
index 000000000..8174df839
--- /dev/null
+++ b/src/libstore/local-store.md
@@ -0,0 +1,39 @@
+R"(
+
+**Store URL format**: `local`, *root*
+
+This store type accesses a Nix store in the local filesystem directly
+(i.e. not via the Nix daemon). *root* is an absolute path that is
+prefixed to other directories such as the Nix store directory. The
+store pseudo-URL `local` denotes a store that uses `/` as its root
+directory.
+
+A store that uses a *root* other than `/` is called a *chroot
+store*. With such stores, the store directory is "logically" still
+`/nix/store`, so programs stored in them can only be built and
+executed by `chroot`-ing into *root*. Chroot stores only support
+building and running on Linux when [`mount namespaces`](https://man7.org/linux/man-pages/man7/mount_namespaces.7.html) and [`user namespaces`](https://man7.org/linux/man-pages/man7/user_namespaces.7.html) are
+enabled.
+
+For example, the following uses `/tmp/root` as the chroot environment
+to build or download `nixpkgs#hello` and then execute it:
+
+```console
+# nix run --store /tmp/root nixpkgs#hello
+Hello, world!
+```
+
+Here, the "physical" store location is `/tmp/root/nix/store`, and
+Nix's store metadata is in `/tmp/root/nix/var/nix/db`.
+
+It is also possible, but not recommended, to change the "logical"
+location of the Nix store from its default of `/nix/store`. This makes
+it impossible to use default substituters such as
+`https://cache.nixos.org/`, and thus you may have to build everything
+locally. Here is an example:
+
+```console
+# nix build --store 'local?store=/tmp/my-nix/store&state=/tmp/my-nix/state&log=/tmp/my-nix/log' nixpkgs#hello
+```
+
+)"
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
index e5e24501e..0be0bf310 100644
--- a/src/libstore/local.mk
+++ b/src/libstore/local.mk
@@ -57,12 +57,6 @@ $(d)/local-store.cc: $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh
$(d)/build.cc:
-%.gen.hh: %
- @echo 'R"foo(' >> $@.tmp
- $(trace-gen) cat $< >> $@.tmp
- @echo ')foo"' >> $@.tmp
- @mv $@.tmp $@
-
clean-files += $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh
$(eval $(call install-file-in, $(d)/nix-store.pc, $(libdir)/pkgconfig, 0644))
diff --git a/src/libstore/lock.cc b/src/libstore/lock.cc
index 4fe1fcf56..7202a64b3 100644
--- a/src/libstore/lock.cc
+++ b/src/libstore/lock.cc
@@ -129,7 +129,7 @@ struct AutoUserLock : UserLock
useUserNamespace = false;
#endif
- settings.requireExperimentalFeature(Xp::AutoAllocateUids);
+ experimentalFeatureSettings.require(Xp::AutoAllocateUids);
assert(settings.startId > 0);
assert(settings.uidCount % maxIdsPerBuild == 0);
assert((uint64_t) settings.startId + (uint64_t) settings.uidCount <= std::numeric_limits<uid_t>::max());
diff --git a/src/libstore/lock.hh b/src/libstore/lock.hh
index 7f1934510..1c268e1fb 100644
--- a/src/libstore/lock.hh
+++ b/src/libstore/lock.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
@@ -12,14 +13,18 @@ struct UserLock
{
virtual ~UserLock() { }
- /* Get the first and last UID. */
+ /**
+ * Get the first and last UID.
+ */
std::pair<uid_t, uid_t> getUIDRange()
{
auto first = getUID();
return {first, first + getUIDCount() - 1};
}
- /* Get the first UID. */
+ /**
+ * Get the first UID.
+ */
virtual uid_t getUID() = 0;
virtual uid_t getUIDCount() = 0;
@@ -29,8 +34,10 @@ struct UserLock
virtual std::vector<gid_t> getSupplementaryGIDs() = 0;
};
-/* Acquire a user lock for a UID range of size `nrIds`. Note that this
- may return nullptr if no user is available. */
+/**
+ * Acquire a user lock for a UID range of size `nrIds`. Note that this
+ * may return nullptr if no user is available.
+ */
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useUserNamespace);
bool useBuildUsers();
diff --git a/src/libstore/log-store.hh b/src/libstore/log-store.hh
index e4d95bab6..a84f7dbeb 100644
--- a/src/libstore/log-store.hh
+++ b/src/libstore/log-store.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "store-api.hh"
@@ -9,8 +10,10 @@ struct LogStore : public virtual Store
{
inline static std::string operationName = "Build log storage and retrieval";
- /* Return the build log of the specified store path, if available,
- or null otherwise. */
+ /**
+ * Return the build log of the specified store path, if available,
+ * or null otherwise.
+ */
std::optional<std::string> getBuildLog(const StorePath & path);
virtual std::optional<std::string> getBuildLogExact(const StorePath & path) = 0;
diff --git a/src/libstore/machines.hh b/src/libstore/machines.hh
index 834626de9..1adeaf1f0 100644
--- a/src/libstore/machines.hh
+++ b/src/libstore/machines.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
diff --git a/src/libstore/make-content-addressed.cc b/src/libstore/make-content-addressed.cc
index 64d172918..53fe04704 100644
--- a/src/libstore/make-content-addressed.cc
+++ b/src/libstore/make-content-addressed.cc
@@ -27,18 +27,17 @@ std::map<StorePath, StorePath> makeContentAddressed(
StringMap rewrites;
- StorePathSet references;
- bool hasSelfReference = false;
+ StoreReferences refs;
for (auto & ref : oldInfo->references) {
if (ref == path)
- hasSelfReference = true;
+ refs.self = true;
else {
auto i = remappings.find(ref);
auto replacement = i != remappings.end() ? i->second : ref;
// FIXME: warn about unremapped paths?
if (replacement != ref)
rewrites.insert_or_assign(srcStore.printStorePath(ref), srcStore.printStorePath(replacement));
- references.insert(std::move(replacement));
+ refs.others.insert(std::move(replacement));
}
}
@@ -49,24 +48,28 @@ std::map<StorePath, StorePath> makeContentAddressed(
auto narModuloHash = hashModuloSink.finish().first;
- auto dstPath = dstStore.makeFixedOutputPath(
- FileIngestionMethod::Recursive, narModuloHash, path.name(), references, hasSelfReference);
+ ValidPathInfo info {
+ dstStore,
+ path.name(),
+ FixedOutputInfo {
+ .hash = {
+ .method = FileIngestionMethod::Recursive,
+ .hash = narModuloHash,
+ },
+ .references = std::move(refs),
+ },
+ Hash::dummy,
+ };
- printInfo("rewriting '%s' to '%s'", pathS, srcStore.printStorePath(dstPath));
+ printInfo("rewriting '%s' to '%s'", pathS, dstStore.printStorePath(info.path));
StringSink sink2;
- RewritingSink rsink2(oldHashPart, std::string(dstPath.hashPart()), sink2);
+ RewritingSink rsink2(oldHashPart, std::string(info.path.hashPart()), sink2);
rsink2(sink.s);
rsink2.flush();
- ValidPathInfo info { dstPath, hashString(htSHA256, sink2.s) };
- info.references = std::move(references);
- if (hasSelfReference) info.references.insert(info.path);
+ info.narHash = hashString(htSHA256, sink2.s);
info.narSize = sink.s.size();
- info.ca = FixedOutputHash {
- .method = FileIngestionMethod::Recursive,
- .hash = narModuloHash,
- };
StringSource source(sink2.s);
dstStore.addToStore(info, source);
diff --git a/src/libstore/make-content-addressed.hh b/src/libstore/make-content-addressed.hh
index c4a66ed41..2ce6ec7bc 100644
--- a/src/libstore/make-content-addressed.hh
+++ b/src/libstore/make-content-addressed.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "store-api.hh"
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
index b28768459..50336c779 100644
--- a/src/libstore/misc.cc
+++ b/src/libstore/misc.cc
@@ -83,14 +83,15 @@ void Store::computeFSClosure(const StorePath & startPath,
}
-std::optional<ContentAddress> getDerivationCA(const BasicDerivation & drv)
+const ContentAddress * getDerivationCA(const BasicDerivation & drv)
{
auto out = drv.outputs.find("out");
- if (out != drv.outputs.end()) {
- if (const auto * v = std::get_if<DerivationOutput::CAFixed>(&out->second.raw()))
- return v->hash;
+ if (out == drv.outputs.end())
+ return nullptr;
+ if (auto dof = std::get_if<DerivationOutput::CAFixed>(&out->second)) {
+ return &dof->ca;
}
- return std::nullopt;
+ return nullptr;
}
void Store::queryMissing(const std::vector<DerivedPath> & targets,
@@ -140,7 +141,13 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
if (drvState_->lock()->done) return;
SubstitutablePathInfos infos;
- querySubstitutablePathInfos({{outPath, getDerivationCA(*drv)}}, infos);
+ auto * cap = getDerivationCA(*drv);
+ querySubstitutablePathInfos({
+ {
+ outPath,
+ cap ? std::optional { *cap } : std::nullopt,
+ },
+ }, infos);
if (infos.empty()) {
drvState_->lock()->done = true;
@@ -326,7 +333,7 @@ OutputPathMap resolveDerivedPath(Store & store, const DerivedPath::Built & bfd,
throw Error(
"the derivation '%s' doesn't have an output named '%s'",
store.printStorePath(bfd.drvPath), output);
- if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
DrvOutput outputId { *outputHash, output };
auto realisation = store.queryRealisation(outputId);
if (!realisation)
diff --git a/src/libstore/names.hh b/src/libstore/names.hh
index 3977fc6cc..d82b99bb4 100644
--- a/src/libstore/names.hh
+++ b/src/libstore/names.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <memory>
diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc
index 9a0003588..f0dfcb19b 100644
--- a/src/libstore/nar-accessor.cc
+++ b/src/libstore/nar-accessor.cc
@@ -275,6 +275,7 @@ json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse)
obj["type"] = "symlink";
obj["target"] = accessor->readLink(path);
break;
+ case FSAccessor::Type::tMissing:
default:
throw Error("path '%s' does not exist in NAR", path);
}
diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh
index 7d998ae0b..5e19bd3c7 100644
--- a/src/libstore/nar-accessor.hh
+++ b/src/libstore/nar-accessor.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <functional>
@@ -9,24 +10,30 @@ namespace nix {
struct Source;
-/* Return an object that provides access to the contents of a NAR
- file. */
+/**
+ * Return an object that provides access to the contents of a NAR
+ * file.
+ */
ref<FSAccessor> makeNarAccessor(std::string && nar);
ref<FSAccessor> makeNarAccessor(Source & source);
-/* Create a NAR accessor from a NAR listing (in the format produced by
- listNar()). The callback getNarBytes(offset, length) is used by the
- readFile() method of the accessor to get the contents of files
- inside the NAR. */
+/**
+ * Create a NAR accessor from a NAR listing (in the format produced by
+ * listNar()). The callback getNarBytes(offset, length) is used by the
+ * readFile() method of the accessor to get the contents of files
+ * inside the NAR.
+ */
typedef std::function<std::string(uint64_t, uint64_t)> GetNarBytes;
ref<FSAccessor> makeLazyNarAccessor(
const std::string & listing,
GetNarBytes getNarBytes);
-/* Write a JSON representation of the contents of a NAR (except file
- contents). */
+/**
+ * Write a JSON representation of the contents of a NAR (except file
+ * contents).
+ */
nlohmann::json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse);
}
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
index 2645f468b..c7176d30f 100644
--- a/src/libstore/nar-info-disk-cache.cc
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -273,7 +273,7 @@ public:
narInfo->deriver = StorePath(queryNAR.getStr(9));
for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(10), " "))
narInfo->sigs.insert(sig);
- narInfo->ca = parseContentAddressOpt(queryNAR.getStr(11));
+ narInfo->ca = ContentAddress::parseOpt(queryNAR.getStr(11));
return {oValid, narInfo};
});
diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh
index 4877f56d8..bbd1d05d5 100644
--- a/src/libstore/nar-info-disk-cache.hh
+++ b/src/libstore/nar-info-disk-cache.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "ref.hh"
#include "nar-info.hh"
@@ -42,8 +43,10 @@ public:
const std::string & uri, const DrvOutput & id) = 0;
};
-/* Return a singleton cache object that can be used concurrently by
- multiple threads. */
+/**
+ * Return a singleton cache object that can be used concurrently by
+ * multiple threads.
+ */
ref<NarInfoDiskCache> getNarInfoDiskCache();
ref<NarInfoDiskCache> getTestNarInfoDiskCache(Path dbPath);
diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc
index 071d8355e..d17253741 100644
--- a/src/libstore/nar-info.cc
+++ b/src/libstore/nar-info.cc
@@ -7,15 +7,18 @@ namespace nix {
NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence)
: ValidPathInfo(StorePath(StorePath::dummy), Hash(Hash::dummy)) // FIXME: hack
{
- auto corrupt = [&]() {
- return Error("NAR info file '%1%' is corrupt", whence);
+ unsigned line = 1;
+
+ auto corrupt = [&](const char * reason) {
+ return Error("NAR info file '%1%' is corrupt: %2%", whence,
+ std::string(reason) + (line > 0 ? " at line " + std::to_string(line) : ""));
};
auto parseHashField = [&](const std::string & s) {
try {
return Hash::parseAnyPrefixed(s);
} catch (BadHash &) {
- throw corrupt();
+ throw corrupt("bad hash");
}
};
@@ -26,12 +29,12 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
while (pos < s.size()) {
size_t colon = s.find(':', pos);
- if (colon == std::string::npos) throw corrupt();
+ if (colon == std::string::npos) throw corrupt("expecting ':'");
std::string name(s, pos, colon - pos);
size_t eol = s.find('\n', colon + 2);
- if (eol == std::string::npos) throw corrupt();
+ if (eol == std::string::npos) throw corrupt("expecting '\\n'");
std::string value(s, colon + 2, eol - colon - 2);
@@ -47,7 +50,7 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
fileHash = parseHashField(value);
else if (name == "FileSize") {
auto n = string2Int<decltype(fileSize)>(value);
- if (!n) throw corrupt();
+ if (!n) throw corrupt("invalid FileSize");
fileSize = *n;
}
else if (name == "NarHash") {
@@ -56,12 +59,12 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
}
else if (name == "NarSize") {
auto n = string2Int<decltype(narSize)>(value);
- if (!n) throw corrupt();
+ if (!n) throw corrupt("invalid NarSize");
narSize = *n;
}
else if (name == "References") {
auto refs = tokenizeString<Strings>(value, " ");
- if (!references.empty()) throw corrupt();
+ if (!references.empty()) throw corrupt("extra References");
for (auto & r : refs)
references.insert(StorePath(r));
}
@@ -72,17 +75,26 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
else if (name == "Sig")
sigs.insert(value);
else if (name == "CA") {
- if (ca) throw corrupt();
+ if (ca) throw corrupt("extra CA");
// FIXME: allow blank ca or require skipping field?
- ca = parseContentAddressOpt(value);
+ ca = ContentAddress::parseOpt(value);
}
pos = eol + 1;
+ line += 1;
}
if (compression == "") compression = "bzip2";
- if (!havePath || !haveNarHash || url.empty() || narSize == 0) throw corrupt();
+ if (!havePath || !haveNarHash || url.empty() || narSize == 0) {
+ line = 0; // don't include line information in the error
+ throw corrupt(
+ !havePath ? "StorePath missing" :
+ !haveNarHash ? "NarHash missing" :
+ url.empty() ? "URL missing" :
+ narSize == 0 ? "NarSize missing or zero"
+ : "?");
+ }
}
std::string NarInfo::to_string(const Store & store) const
diff --git a/src/libstore/nar-info.hh b/src/libstore/nar-info.hh
index 01683ec73..5dbdafac3 100644
--- a/src/libstore/nar-info.hh
+++ b/src/libstore/nar-info.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "hash.hh"
@@ -16,6 +17,9 @@ struct NarInfo : ValidPathInfo
uint64_t fileSize = 0;
NarInfo() = delete;
+ NarInfo(const Store & store, std::string && name, ContentAddressWithReferences && ca, Hash narHash)
+ : ValidPathInfo(store, std::move(name), std::move(ca), narHash)
+ { }
NarInfo(StorePath && path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { }
NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { }
NarInfo(const Store & store, const std::string & s, const std::string & whence);
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index 4d2781180..4a79cf4a1 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -55,7 +55,7 @@ LocalStore::InodeHash LocalStore::loadInodeHash()
}
if (errno) throw SysError("reading directory '%1%'", linksDir);
- printMsg(lvlTalkative, format("loaded %1% hash inodes") % inodeHash.size());
+ printMsg(lvlTalkative, "loaded %1% hash inodes", inodeHash.size());
return inodeHash;
}
@@ -73,7 +73,7 @@ Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHa
checkInterrupt();
if (inodeHash.count(dirent->d_ino)) {
- debug(format("'%1%' is already linked") % dirent->d_name);
+ debug("'%1%' is already linked", dirent->d_name);
continue;
}
@@ -102,7 +102,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
if (std::regex_search(path, std::regex("\\.app/Contents/.+$")))
{
- debug(format("'%1%' is not allowed to be linked in macOS") % path);
+ debug("'%1%' is not allowed to be linked in macOS", path);
return;
}
#endif
@@ -146,7 +146,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
contents of the symlink (i.e. the result of readlink()), not
the contents of the target (which may not even exist). */
Hash hash = hashPath(htSHA256, path).first;
- debug(format("'%1%' has hash '%2%'") % path % hash.to_string(Base32, true));
+ debug("'%1%' has hash '%2%'", path, hash.to_string(Base32, true));
/* Check if this is a known hash. */
Path linkPath = linksDir + "/" + hash.to_string(Base32, false);
@@ -196,11 +196,11 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
auto stLink = lstat(linkPath);
if (st.st_ino == stLink.st_ino) {
- debug(format("'%1%' is already linked to '%2%'") % path % linkPath);
+ debug("'%1%' is already linked to '%2%'", path, linkPath);
return;
}
- printMsg(lvlTalkative, format("linking '%1%' to '%2%'") % path % linkPath);
+ printMsg(lvlTalkative, "linking '%1%' to '%2%'", path, linkPath);
/* Make the containing directory writable, but only if it's not
the store itself (we don't want or need to mess with its
@@ -213,8 +213,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
its timestamp back to 0. */
MakeReadOnly makeReadOnly(mustToggle ? dirOfPath : "");
- Path tempLink = (format("%1%/.tmp-link-%2%-%3%")
- % realStoreDir % getpid() % random()).str();
+ Path tempLink = fmt("%1%/.tmp-link-%2%-%3%", realStoreDir, getpid(), random());
if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
if (errno == EMLINK) {
@@ -222,7 +221,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
systems). This is likely to happen with empty files.
Just shrug and ignore. */
if (st.st_size)
- printInfo(format("'%1%' has maximum number of links") % linkPath);
+ printInfo("'%1%' has maximum number of links", linkPath);
return;
}
throw SysError("cannot link '%1%' to '%2%'", tempLink, linkPath);
diff --git a/src/libstore/outputs-spec.hh b/src/libstore/outputs-spec.hh
index 46bc35ebc..5a726fe90 100644
--- a/src/libstore/outputs-spec.hh
+++ b/src/libstore/outputs-spec.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <cassert>
#include <optional>
@@ -9,6 +10,9 @@
namespace nix {
+/**
+ * A non-empty set of outputs, specified by name
+ */
struct OutputNames : std::set<std::string> {
using std::set<std::string>::set;
@@ -18,6 +22,9 @@ struct OutputNames : std::set<std::string> {
: std::set<std::string>(s)
{ assert(!empty()); }
+ /**
+ * Needs to be "inherited manually"
+ */
OutputNames(std::set<std::string> && s)
: std::set<std::string>(s)
{ assert(!empty()); }
@@ -28,6 +35,9 @@ struct OutputNames : std::set<std::string> {
OutputNames() = delete;
};
+/**
+ * The set of all outputs, without needing to name them explicitly
+ */
struct AllOutputs : std::monostate { };
typedef std::variant<AllOutputs, OutputNames> _OutputsSpecRaw;
@@ -36,7 +46,9 @@ struct OutputsSpec : _OutputsSpecRaw {
using Raw = _OutputsSpecRaw;
using Raw::Raw;
- /* Force choosing a variant */
+ /**
+ * Force choosing a variant
+ */
OutputsSpec() = delete;
using Names = OutputNames;
@@ -52,14 +64,20 @@ struct OutputsSpec : _OutputsSpecRaw {
bool contains(const std::string & output) const;
- /* Create a new OutputsSpec which is the union of this and that. */
+ /**
+ * Create a new OutputsSpec which is the union of this and that.
+ */
OutputsSpec union_(const OutputsSpec & that) const;
- /* Whether this OutputsSpec is a subset of that. */
+ /**
+ * Whether this OutputsSpec is a subset of that.
+ */
bool isSubsetOf(const OutputsSpec & outputs) const;
- /* Parse a string of the form 'output1,...outputN' or
- '*', returning the outputs spec. */
+ /**
+ * Parse a string of the form 'output1,...outputN' or '*', returning
+ * the outputs spec.
+ */
static OutputsSpec parse(std::string_view s);
static std::optional<OutputsSpec> parseOpt(std::string_view s);
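As a quick illustration of the accepted syntax (a hedged sketch; the inputs are made up and not taken from any test):

    auto all  = OutputsSpec::parse("*");         // holds OutputsSpec::All
    auto some = OutputsSpec::parse("out,dev");   // holds OutputsSpec::Names{"out", "dev"}
    auto none = OutputsSpec::parseOpt("");       // presumably std::nullopt, since output name sets must be non-empty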
@@ -81,8 +99,10 @@ struct ExtendedOutputsSpec : _ExtendedOutputsSpecRaw {
return static_cast<const Raw &>(*this);
}
- /* Parse a string of the form 'prefix^output1,...outputN' or
- 'prefix^*', returning the prefix and the extended outputs spec. */
+ /**
+ * Parse a string of the form 'prefix^output1,...outputN' or
+ * 'prefix^*', returning the prefix and the extended outputs spec.
+ */
static std::pair<std::string_view, ExtendedOutputsSpec> parse(std::string_view s);
static std::optional<std::pair<std::string_view, ExtendedOutputsSpec>> parseOpt(std::string_view s);
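The extended form carries a prefix in front of the optional `^...` part. A sketch of how a caller might use it, with a made-up installable string:

    auto [prefix, extOutputs] = ExtendedOutputsSpec::parse("nixpkgs#hello^out,dev");
    // prefix     == "nixpkgs#hello"
    // extOutputs wraps OutputsSpec::Names{"out", "dev"}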
diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh
index bfb3857c0..71085a604 100644
--- a/src/libstore/parsed-derivations.hh
+++ b/src/libstore/parsed-derivations.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "derivations.hh"
#include "store-api.hh"
diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc
index bd55a9d06..97b72faa3 100644
--- a/src/libstore/path-info.cc
+++ b/src/libstore/path-info.cc
@@ -1,5 +1,6 @@
#include "path-info.hh"
#include "worker-protocol.hh"
+#include "store-api.hh"
namespace nix {
@@ -21,25 +22,45 @@ void ValidPathInfo::sign(const Store & store, const SecretKey & secretKey)
sigs.insert(secretKey.signDetached(fingerprint(store)));
}
-
-bool ValidPathInfo::isContentAddressed(const Store & store) const
+std::optional<ContentAddressWithReferences> ValidPathInfo::contentAddressWithReferences() const
{
- if (! ca) return false;
-
- auto caPath = std::visit(overloaded {
- [&](const TextHash & th) {
- return store.makeTextPath(path.name(), th.hash, references);
+ if (! ca)
+ return std::nullopt;
+
+ return std::visit(overloaded {
+ [&](const TextHash & th) -> ContentAddressWithReferences {
+ assert(references.count(path) == 0);
+ return TextInfo {
+ .hash = th,
+ .references = references,
+ };
},
- [&](const FixedOutputHash & fsh) {
+ [&](const FixedOutputHash & foh) -> ContentAddressWithReferences {
auto refs = references;
bool hasSelfReference = false;
if (refs.count(path)) {
hasSelfReference = true;
refs.erase(path);
}
- return store.makeFixedOutputPath(fsh.method, fsh.hash, path.name(), refs, hasSelfReference);
- }
- }, *ca);
+ return FixedOutputInfo {
+ .hash = foh,
+ .references = {
+ .others = std::move(refs),
+ .self = hasSelfReference,
+ },
+ };
+ },
+ }, ca->raw);
+}
+
+bool ValidPathInfo::isContentAddressed(const Store & store) const
+{
+ auto fullCaOpt = contentAddressWithReferences();
+
+ if (! fullCaOpt)
+ return false;
+
+ auto caPath = store.makeFixedOutputPathFromCA(path.name(), *fullCaOpt);
bool res = caPath == path;
@@ -77,6 +98,29 @@ Strings ValidPathInfo::shortRefs() const
}
+ValidPathInfo::ValidPathInfo(
+ const Store & store,
+ std::string_view name,
+ ContentAddressWithReferences && ca,
+ Hash narHash)
+ : path(store.makeFixedOutputPathFromCA(name, ca))
+ , narHash(narHash)
+{
+ std::visit(overloaded {
+ [this](TextInfo && ti) {
+ this->references = std::move(ti.references);
+ this->ca = std::move((TextHash &&) ti);
+ },
+ [this](FixedOutputInfo && foi) {
+ this->references = std::move(foi.references.others);
+ if (foi.references.self)
+ this->references.insert(path);
+ this->ca = std::move((FixedOutputHash &&) foi);
+ },
+ }, std::move(ca).raw);
+}
+
+
ValidPathInfo ValidPathInfo::read(Source & source, const Store & store, unsigned int format)
{
return read(source, store, format, store.parseStorePath(readString(source)));
@@ -88,12 +132,12 @@ ValidPathInfo ValidPathInfo::read(Source & source, const Store & store, unsigned
auto narHash = Hash::parseAny(readString(source), htSHA256);
ValidPathInfo info(path, narHash);
if (deriver != "") info.deriver = store.parseStorePath(deriver);
- info.references = worker_proto::read(store, source, Phantom<StorePathSet> {});
+ info.references = WorkerProto<StorePathSet>::read(store, source);
source >> info.registrationTime >> info.narSize;
if (format >= 16) {
source >> info.ultimate;
info.sigs = readStrings<StringSet>(source);
- info.ca = parseContentAddressOpt(readString(source));
+ info.ca = ContentAddress::parseOpt(readString(source));
}
return info;
}
@@ -109,7 +153,7 @@ void ValidPathInfo::write(
sink << store.printStorePath(path);
sink << (deriver ? store.printStorePath(*deriver) : "")
<< narHash.to_string(Base16, false);
- worker_proto::write(store, sink, references);
+ workerProtoWrite(store, sink, references);
sink << registrationTime << narSize;
if (format >= 16) {
sink << ultimate
diff --git a/src/libstore/path-info.hh b/src/libstore/path-info.hh
index a7fcbd232..221523622 100644
--- a/src/libstore/path-info.hh
+++ b/src/libstore/path-info.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "crypto.hh"
#include "path.hh"
@@ -18,8 +19,14 @@ struct SubstitutablePathInfo
{
std::optional<StorePath> deriver;
StorePathSet references;
- uint64_t downloadSize; /* 0 = unknown or inapplicable */
- uint64_t narSize; /* 0 = unknown */
+ /**
+ * 0 = unknown or inapplicable
+ */
+ uint64_t downloadSize;
+ /**
+ * 0 = unknown
+ */
+ uint64_t narSize;
};
typedef std::map<StorePath, SubstitutablePathInfo> SubstitutablePathInfos;
@@ -29,35 +36,40 @@ struct ValidPathInfo
{
StorePath path;
std::optional<StorePath> deriver;
- // TODO document this
+ /**
+ * \todo document this
+ */
Hash narHash;
StorePathSet references;
time_t registrationTime = 0;
uint64_t narSize = 0; // 0 = unknown
uint64_t id; // internal use only
- /* Whether the path is ultimately trusted, that is, it's a
- derivation output that was built locally. */
+ /**
+ * Whether the path is ultimately trusted, that is, it's a
+ * derivation output that was built locally.
+ */
bool ultimate = false;
StringSet sigs; // note: not necessarily verified
- /* If non-empty, an assertion that the path is content-addressed,
- i.e., that the store path is computed from a cryptographic hash
- of the contents of the path, plus some other bits of data like
- the "name" part of the path. Such a path doesn't need
- signatures, since we don't have to trust anybody's claim that
- the path is the output of a particular derivation. (In the
- extensional store model, we have to trust that the *contents*
- of an output path of a derivation were actually produced by
- that derivation. In the intensional model, we have to trust
- that a particular output path was produced by a derivation; the
- path then implies the contents.)
-
- Ideally, the content-addressability assertion would just be a Boolean,
- and the store path would be computed from the name component, ‘narHash’
- and ‘references’. However, we support many types of content addresses.
- */
+ /**
+ * If non-empty, an assertion that the path is content-addressed,
+ * i.e., that the store path is computed from a cryptographic hash
+ * of the contents of the path, plus some other bits of data like
+ * the "name" part of the path. Such a path doesn't need
+ * signatures, since we don't have to trust anybody's claim that
+ * the path is the output of a particular derivation. (In the
+ * extensional store model, we have to trust that the *contents*
+ * of an output path of a derivation were actually produced by
+ * that derivation. In the intensional model, we have to trust
+ * that a particular output path was produced by a derivation; the
+ * path then implies the contents.)
+ *
+ * Ideally, the content-addressability assertion would just be a Boolean,
+ * and the store path would be computed from the name component, ‘narHash’
+ * and ‘references’. However, we support many types of content addresses.
+ */
std::optional<ContentAddress> ca;
bool operator == (const ValidPathInfo & i) const
@@ -68,27 +80,42 @@ struct ValidPathInfo
&& references == i.references;
}
- /* Return a fingerprint of the store path to be used in binary
- cache signatures. It contains the store path, the base-32
- SHA-256 hash of the NAR serialisation of the path, the size of
- the NAR, and the sorted references. The size field is strictly
- speaking superfluous, but might prevent endless/excessive data
- attacks. */
+ /**
+ * Return a fingerprint of the store path to be used in binary
+ * cache signatures. It contains the store path, the base-32
+ * SHA-256 hash of the NAR serialisation of the path, the size of
+ * the NAR, and the sorted references. The size field is strictly
+ * speaking superfluous, but might prevent endless/excessive data
+ * attacks.
+ */
std::string fingerprint(const Store & store) const;
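For orientation, the resulting string is a single line whose fields are separated by semicolons and whose references are comma-separated, roughly of the shape below; the hash, size and paths are placeholders and the exact layout is an assumption of this sketch:

    1;/nix/store/<hash>-example-1.0;sha256:<base-32 NAR hash>;4096;/nix/store/<hash>-dep-2.0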
void sign(const Store & store, const SecretKey & secretKey);
- /* Return true iff the path is verifiably content-addressed. */
+ /**
+ * @return The `ContentAddressWithReferences` that determines the
+ * store path for a content-addressed store object, `std::nullopt`
+ * for an input-addressed store object.
+ */
+ std::optional<ContentAddressWithReferences> contentAddressWithReferences() const;
+
+ /**
+ * @return true iff the path is verifiably content-addressed.
+ */
bool isContentAddressed(const Store & store) const;
static const size_t maxSigs = std::numeric_limits<size_t>::max();
- /* Return the number of signatures on this .narinfo that were
- produced by one of the specified keys, or maxSigs if the path
- is content-addressed. */
+ /**
+ * Return the number of signatures on this .narinfo that were
+ * produced by one of the specified keys, or maxSigs if the path
+ * is content-addressed.
+ */
size_t checkSignatures(const Store & store, const PublicKeys & publicKeys) const;
- /* Verify a single signature. */
+ /**
+ * Verify a single signature.
+ */
bool checkSignature(const Store & store, const PublicKeys & publicKeys, const std::string & sig) const;
Strings shortRefs() const;
@@ -98,6 +125,9 @@ struct ValidPathInfo
ValidPathInfo(StorePath && path, Hash narHash) : path(std::move(path)), narHash(narHash) { };
ValidPathInfo(const StorePath & path, Hash narHash) : path(path), narHash(narHash) { };
+ ValidPathInfo(const Store & store,
+ std::string_view name, ContentAddressWithReferences && ca, Hash narHash);
+
virtual ~ValidPathInfo() { }
static ValidPathInfo read(Source & source, const Store & store, unsigned int format);
diff --git a/src/libstore/path-regex.hh b/src/libstore/path-regex.hh
index 6893c3876..4f8dc4c1f 100644
--- a/src/libstore/path-regex.hh
+++ b/src/libstore/path-regex.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
namespace nix {
diff --git a/src/libstore/path-with-outputs.hh b/src/libstore/path-with-outputs.hh
index 5d25656a5..d75850868 100644
--- a/src/libstore/path-with-outputs.hh
+++ b/src/libstore/path-with-outputs.hh
@@ -1,17 +1,19 @@
#pragma once
+///@file
#include "path.hh"
#include "derived-path.hh"
namespace nix {
-/* This is a deprecated old type just for use by the old CLI, and older
- versions of the RPC protocols. In new code don't use it; you want
- `DerivedPath` instead.
-
- `DerivedPath` is better because it handles more cases, and does so more
- explicitly without devious punning tricks.
-*/
+/**
+ * This is a deprecated old type just for use by the old CLI, and older
+ * versions of the RPC protocols. In new code don't use it; you want
+ * `DerivedPath` instead.
+ *
+ * `DerivedPath` is better because it handles more cases, and does so more
+ * explicitly without devious punning tricks.
+ */
struct StorePathWithOutputs
{
StorePath path;
@@ -30,9 +32,11 @@ std::pair<std::string_view, StringSet> parsePathWithOutputs(std::string_view s);
class Store;
-/* Split a string specifying a derivation and a set of outputs
- (/nix/store/hash-foo!out1,out2,...) into the derivation path
- and the outputs. */
+/**
+ * Split a string specifying a derivation and a set of outputs
+ * (/nix/store/hash-foo!out1,out2,...) into the derivation path
+ * and the outputs.
+ */
StorePathWithOutputs parsePathWithOutputs(const Store & store, std::string_view pathWithOutputs);
StorePathWithOutputs followLinksToStorePathWithOutputs(const Store & store, std::string_view pathWithOutputs);
diff --git a/src/libstore/path.cc b/src/libstore/path.cc
index 46be54281..552e83114 100644
--- a/src/libstore/path.cc
+++ b/src/libstore/path.cc
@@ -9,8 +9,8 @@ static void checkName(std::string_view path, std::string_view name)
if (name.empty())
throw BadStorePath("store path '%s' has an empty name", path);
if (name.size() > StorePath::MaxPathLen)
- throw BadStorePath("store path '%s' has a name longer than '%d characters",
- StorePath::MaxPathLen, path);
+ throw BadStorePath("store path '%s' has a name longer than %d characters",
+ path, StorePath::MaxPathLen);
// See nameRegexStr for the definition
for (auto c : name)
if (!((c >= '0' && c <= '9')
diff --git a/src/libstore/path.hh b/src/libstore/path.hh
index 1e5579b90..4ca6747b3 100644
--- a/src/libstore/path.hh
+++ b/src/libstore/path.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <string_view>
@@ -8,13 +9,22 @@ namespace nix {
struct Hash;
+/**
+ * \ref StorePath "Store path" is the fundamental reference type of Nix.
+ * A store path refers to a store object.
+ *
+ * See glossary.html#gloss-store-path for more information on a
+ * conceptual level.
+ */
class StorePath
{
std::string baseName;
public:
- /* Size of the hash part of store paths, in base-32 characters. */
+ /**
+ * Size of the hash part of store paths, in base-32 characters.
+ */
constexpr static size_t HashLen = 32; // i.e. 160 bits
constexpr static size_t MaxPathLen = 211;
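Concretely, the base name of a store path has the shape `<hash part>-<name>`, where the hash part is the 32 base-32 characters mentioned above; a hypothetical example is `/nix/store/<32 base-32 chars>-hello-2.12`.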
@@ -45,8 +55,9 @@ public:
return baseName != other.baseName;
}
- /* Check whether a file name ends with the extension for
- derivations. */
+ /**
+ * Check whether a file name ends with the extension for derivations.
+ */
bool isDerivation() const;
std::string_view name() const
@@ -67,7 +78,10 @@ public:
typedef std::set<StorePath> StorePathSet;
typedef std::vector<StorePath> StorePaths;
-/* Extension of derivations in the Nix store. */
+/**
+ * The file extension of \ref Derivation derivations when serialized
+ * into store objects.
+ */
const std::string drvExtension = ".drv";
}
diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc
index 42023cd0a..adc763e6a 100644
--- a/src/libstore/pathlocks.cc
+++ b/src/libstore/pathlocks.cc
@@ -96,7 +96,7 @@ bool PathLocks::lockPaths(const PathSet & paths,
checkInterrupt();
Path lockPath = path + ".lock";
- debug(format("locking path '%1%'") % path);
+ debug("locking path '%1%'", path);
AutoCloseFD fd;
@@ -118,7 +118,7 @@ bool PathLocks::lockPaths(const PathSet & paths,
}
}
- debug(format("lock acquired on '%1%'") % lockPath);
+ debug("lock acquired on '%1%'", lockPath);
/* Check that the lock file hasn't become stale (i.e.,
hasn't been unlinked). */
@@ -130,7 +130,7 @@ bool PathLocks::lockPaths(const PathSet & paths,
a lock on a deleted file. This means that other
processes may create and acquire a lock on
`lockPath', and proceed. So we must retry. */
- debug(format("open lock file '%1%' has become stale") % lockPath);
+ debug("open lock file '%1%' has become stale", lockPath);
else
break;
}
@@ -163,7 +163,7 @@ void PathLocks::unlock()
"error (ignored): cannot close lock file on '%1%'",
i.second);
- debug(format("lock released on '%1%'") % i.second);
+ debug("lock released on '%1%'", i.second);
}
fds.clear();
diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh
index 5e3a734b4..4921df352 100644
--- a/src/libstore/pathlocks.hh
+++ b/src/libstore/pathlocks.hh
@@ -1,15 +1,20 @@
#pragma once
+///@file
#include "util.hh"
namespace nix {
-/* Open (possibly create) a lock file and return the file descriptor.
- -1 is returned if create is false and the lock could not be opened
- because it doesn't exist. Any other error throws an exception. */
+/**
+ * Open (possibly create) a lock file and return the file descriptor.
+ * -1 is returned if create is false and the lock could not be opened
+ * because it doesn't exist. Any other error throws an exception.
+ */
AutoCloseFD openLockFile(const Path & path, bool create);
-/* Delete an open lock file. */
+/**
+ * Delete an open lock file.
+ */
void deleteLockFile(const Path & path, int fd);
enum LockType { ltRead, ltWrite, ltNone };
diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc
index c551c5f3e..ba5c8583f 100644
--- a/src/libstore/profiles.cc
+++ b/src/libstore/profiles.cc
@@ -64,7 +64,7 @@ std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path pro
static void makeName(const Path & profile, GenerationNumber num,
Path & outLink)
{
- Path prefix = (format("%1%-%2%") % profile % num).str();
+ Path prefix = fmt("%1%-%2%", profile, num);
outLink = prefix + "-link";
}
@@ -269,7 +269,7 @@ void switchGeneration(
void lockProfile(PathLocks & lock, const Path & profile)
{
- lock.lockPaths({profile}, (format("waiting for lock on profile '%1%'") % profile).str());
+ lock.lockPaths({profile}, fmt("waiting for lock on profile '%1%'", profile));
lock.setDeletion(true);
}
@@ -282,28 +282,48 @@ std::string optimisticLockProfile(const Path & profile)
Path profilesDir()
{
- auto profileRoot = createNixStateDir() + "/profiles";
+ auto profileRoot =
+ (getuid() == 0)
+ ? rootProfilesDir()
+ : createNixStateDir() + "/profiles";
createDirs(profileRoot);
return profileRoot;
}
+Path rootProfilesDir()
+{
+ return settings.nixStateDir + "/profiles/per-user/root";
+}
+
Path getDefaultProfile()
{
Path profileLink = settings.useXDGBaseDirectories ? createNixStateDir() + "/profile" : getHome() + "/.nix-profile";
try {
- auto profile =
- getuid() == 0
- ? settings.nixStateDir + "/profiles/default"
- : profilesDir() + "/profile";
+ auto profile = profilesDir() + "/profile";
if (!pathExists(profileLink)) {
replaceSymlink(profile, profileLink);
}
+ // Backwards compatibility measure: Make root's profile available as
+ // `.../default` as it's what NixOS and most of the init scripts expect
+ Path globalProfileLink = settings.nixStateDir + "/profiles/default";
+ if (getuid() == 0 && !pathExists(globalProfileLink)) {
+ replaceSymlink(profile, globalProfileLink);
+ }
return absPath(readLink(profileLink), dirOf(profileLink));
} catch (Error &) {
return profileLink;
}
}
+Path defaultChannelsDir()
+{
+ return profilesDir() + "/channels";
+}
+
+Path rootChannelsDir()
+{
+ return rootProfilesDir() + "/channels";
+}
}
diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh
index fbf95b850..4e1f42e83 100644
--- a/src/libstore/profiles.hh
+++ b/src/libstore/profiles.hh
@@ -1,6 +1,7 @@
#pragma once
+///@file
-#include "types.hh"
+ #include "types.hh"
#include "pathlocks.hh"
#include <time.h>
@@ -23,9 +24,11 @@ struct Generation
typedef std::list<Generation> Generations;
-/* Returns the list of currently present generations for the specified
- profile, sorted by generation number. Also returns the number of
- the current generation. */
+/**
+ * Returns the list of currently present generations for the specified
+ * profile, sorted by generation number. Also returns the number of
+ * the current generation.
+ */
std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path profile);
class LocalFSStore;
@@ -46,35 +49,60 @@ void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec,
void switchLink(Path link, Path target);
-/* Roll back a profile to the specified generation, or to the most
- recent one older than the current. */
+/**
+ * Roll back a profile to the specified generation, or to the most
+ * recent one older than the current.
+ */
void switchGeneration(
const Path & profile,
std::optional<GenerationNumber> dstGen,
bool dryRun);
-/* Ensure exclusive access to a profile. Any command that modifies
- the profile first acquires this lock. */
+/**
+ * Ensure exclusive access to a profile. Any command that modifies
+ * the profile first acquires this lock.
+ */
void lockProfile(PathLocks & lock, const Path & profile);
-/* Optimistic locking is used by long-running operations like `nix-env
- -i'. Instead of acquiring the exclusive lock for the entire
- duration of the operation, we just perform the operation
- optimistically (without an exclusive lock), and check at the end
- whether the profile changed while we were busy (i.e., the symlink
- target changed). If so, the operation is restarted. Restarting is
- generally cheap, since the build results are still in the Nix
- store. Most of the time, only the user environment has to be
- rebuilt. */
+/**
+ * Optimistic locking is used by long-running operations like `nix-env
+ * -i'. Instead of acquiring the exclusive lock for the entire
+ * duration of the operation, we just perform the operation
+ * optimistically (without an exclusive lock), and check at the end
+ * whether the profile changed while we were busy (i.e., the symlink
+ * target changed). If so, the operation is restarted. Restarting is
+ * generally cheap, since the build results are still in the Nix
+ * store. Most of the time, only the user environment has to be
+ * rebuilt.
+ */
std::string optimisticLockProfile(const Path & profile);
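A compact sketch of the retry loop this comment describes; every name except optimisticLockProfile and switchLink is hypothetical:

    while (true) {
        auto token = optimisticLockProfile(profile);

        Path newGeneration = buildNewGeneration();   // the long-running, unlocked work

        if (optimisticLockProfile(profile) != token)
            continue;                                // the profile changed underneath us: retry

        switchLink(profile, newGeneration);          // publish the result
        break;
    }

In the real callers the final comparison is typically performed while holding the exclusive lock from lockProfile(), so the check and the switch happen atomically with respect to other writers.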
-/* Creates and returns the path to a directory suitable for storing the user’s
- profiles. */
+/**
+ * Create and return the path to a directory suitable for storing the user’s
+ * profiles.
+ */
Path profilesDir();
-/* Resolve the default profile (~/.nix-profile by default, $XDG_STATE_HOME/
- nix/profile if XDG Base Directory Support is enabled), and create if doesn't
- exist */
+/**
+ * Return the path to the profile directory for root (but don't try creating it)
+ */
+Path rootProfilesDir();
+
+/**
+ * Create and return the path to the directory used for storing the user's channels
+ */
+Path defaultChannelsDir();
+
+/**
+ * Return the path to the channel directory for root (but don't try creating it)
+ */
+Path rootChannelsDir();
+
+/**
+ * Resolve the default profile (~/.nix-profile by default,
+ * $XDG_STATE_HOME/nix/profile if XDG Base Directory Support is enabled),
+ * and create it if it doesn't exist
+ */
Path getDefaultProfile();
}
diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc
index d63ec5ea2..93ddb5b20 100644
--- a/src/libstore/realisation.cc
+++ b/src/libstore/realisation.cc
@@ -136,6 +136,19 @@ size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const
return good;
}
+
+SingleDrvOutputs filterDrvOutputs(const OutputsSpec& wanted, SingleDrvOutputs&& outputs)
+{
+ SingleDrvOutputs ret = std::move(outputs);
+ for (auto it = ret.begin(); it != ret.end(); ) {
+ if (!wanted.contains(it->first))
+ it = ret.erase(it);
+ else
+ ++it;
+ }
+ return ret;
+}
+
StorePath RealisedPath::path() const {
return std::visit([](auto && arg) { return arg.getPath(); }, raw);
}
diff --git a/src/libstore/realisation.hh b/src/libstore/realisation.hh
index 48d0283de..2a093c128 100644
--- a/src/libstore/realisation.hh
+++ b/src/libstore/realisation.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <variant>
@@ -11,10 +12,27 @@
namespace nix {
class Store;
+struct OutputsSpec;
+/**
+ * A general `Realisation` key.
+ *
+ * This is similar to a `DerivedPath::Opaque`, but the derivation is
+ * identified by its "hash modulo" instead of by its store path.
+ */
struct DrvOutput {
- // The hash modulo of the derivation
+ /**
+ * The hash modulo of the derivation.
+ *
+ * Computed from the derivation itself for most types of
+ * derivations, but computed from the (fixed) content address of the
+ * output for fixed-output derivations.
+ */
Hash drvHash;
+
+ /**
+ * The name of the output.
+ */
std::string outputName;
std::string to_string() const;
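For orientation, the textual form pairs the hash with the output name using a `!` separator, along the lines of `sha256:<hex drv hash>!out`; the exact rendering is an assumption of this note rather than something spelled out here.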
@@ -59,8 +77,31 @@ struct Realisation {
GENERATE_CMP(Realisation, me->id, me->outPath);
};
+/**
+ * Collection type for a single derivation's outputs' `Realisation`s.
+ *
+ * Since these are the outputs of a single derivation, we know the
+ * output names are unique so we can use them as the map key.
+ */
+typedef std::map<std::string, Realisation> SingleDrvOutputs;
+
+/**
+ * Collection type for multiple derivations' outputs' `Realisation`s.
+ *
+ * `DrvOutput` is used because in general the derivations are not all
+ * the same, so we need to identify firstly which derivation, and
+ * secondly which output of that derivation.
+ */
typedef std::map<DrvOutput, Realisation> DrvOutputs;
+/**
+ * Filter a SingleDrvOutputs to include only specific output names
+ *
+ * Moves the `outputs` input.
+ */
+SingleDrvOutputs filterDrvOutputs(const OutputsSpec&, SingleDrvOutputs&&);
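A small usage sketch for this helper; the map contents are hypothetical:

    SingleDrvOutputs built = /* e.g. the builtOutputs of a finished build */ {};
    OutputsSpec wanted = OutputsSpec::Names { "out" };
    // Keeps only the entries named in `wanted`; `built` is consumed by the move.
    SingleDrvOutputs filtered = filterDrvOutputs(wanted, std::move(built));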
+
+
struct OpaquePath {
StorePath path;
diff --git a/src/libstore/references.cc b/src/libstore/references.cc
index 3bb297fc8..345f4528b 100644
--- a/src/libstore/references.cc
+++ b/src/libstore/references.cc
@@ -39,8 +39,7 @@ static void search(
if (!match) continue;
std::string ref(s.substr(i, refLength));
if (hashes.erase(ref)) {
- debug(format("found reference to '%1%' at offset '%2%'")
- % ref % i);
+ debug("found reference to '%1%' at offset '%2%'", ref, i);
seen.insert(ref);
}
++i;
diff --git a/src/libstore/references.hh b/src/libstore/references.hh
index 6f381f96c..52d71b333 100644
--- a/src/libstore/references.hh
+++ b/src/libstore/references.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "hash.hh"
#include "path.hh"
diff --git a/src/libstore/remote-fs-accessor.hh b/src/libstore/remote-fs-accessor.hh
index 99f5544ef..e2673b6f6 100644
--- a/src/libstore/remote-fs-accessor.hh
+++ b/src/libstore/remote-fs-accessor.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "fs-accessor.hh"
#include "ref.hh"
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index d1296627a..c3dfb5979 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -18,133 +18,6 @@
namespace nix {
-namespace worker_proto {
-
-std::string read(const Store & store, Source & from, Phantom<std::string> _)
-{
- return readString(from);
-}
-
-void write(const Store & store, Sink & out, const std::string & str)
-{
- out << str;
-}
-
-
-StorePath read(const Store & store, Source & from, Phantom<StorePath> _)
-{
- return store.parseStorePath(readString(from));
-}
-
-void write(const Store & store, Sink & out, const StorePath & storePath)
-{
- out << store.printStorePath(storePath);
-}
-
-
-ContentAddress read(const Store & store, Source & from, Phantom<ContentAddress> _)
-{
- return parseContentAddress(readString(from));
-}
-
-void write(const Store & store, Sink & out, const ContentAddress & ca)
-{
- out << renderContentAddress(ca);
-}
-
-
-DerivedPath read(const Store & store, Source & from, Phantom<DerivedPath> _)
-{
- auto s = readString(from);
- return DerivedPath::parse(store, s);
-}
-
-void write(const Store & store, Sink & out, const DerivedPath & req)
-{
- out << req.to_string(store);
-}
-
-
-Realisation read(const Store & store, Source & from, Phantom<Realisation> _)
-{
- std::string rawInput = readString(from);
- return Realisation::fromJSON(
- nlohmann::json::parse(rawInput),
- "remote-protocol"
- );
-}
-
-void write(const Store & store, Sink & out, const Realisation & realisation)
-{
- out << realisation.toJSON().dump();
-}
-
-
-DrvOutput read(const Store & store, Source & from, Phantom<DrvOutput> _)
-{
- return DrvOutput::parse(readString(from));
-}
-
-void write(const Store & store, Sink & out, const DrvOutput & drvOutput)
-{
- out << drvOutput.to_string();
-}
-
-
-BuildResult read(const Store & store, Source & from, Phantom<BuildResult> _)
-{
- auto path = worker_proto::read(store, from, Phantom<DerivedPath> {});
- BuildResult res { .path = path };
- res.status = (BuildResult::Status) readInt(from);
- from
- >> res.errorMsg
- >> res.timesBuilt
- >> res.isNonDeterministic
- >> res.startTime
- >> res.stopTime;
- res.builtOutputs = worker_proto::read(store, from, Phantom<DrvOutputs> {});
- return res;
-}
-
-void write(const Store & store, Sink & to, const BuildResult & res)
-{
- worker_proto::write(store, to, res.path);
- to
- << res.status
- << res.errorMsg
- << res.timesBuilt
- << res.isNonDeterministic
- << res.startTime
- << res.stopTime;
- worker_proto::write(store, to, res.builtOutputs);
-}
-
-
-std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _)
-{
- auto s = readString(from);
- return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
-}
-
-void write(const Store & store, Sink & out, const std::optional<StorePath> & storePathOpt)
-{
- out << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
-}
-
-
-std::optional<ContentAddress> read(const Store & store, Source & from, Phantom<std::optional<ContentAddress>> _)
-{
- return parseContentAddressOpt(readString(from));
-}
-
-void write(const Store & store, Sink & out, const std::optional<ContentAddress> & caOpt)
-{
- out << (caOpt ? renderContentAddress(*caOpt) : "");
-}
-
-}
-
-
/* TODO: Separate these store impls into different files, give them better names */
RemoteStore::RemoteStore(const Params & params)
: RemoteStoreConfig(params)
@@ -226,6 +99,13 @@ void RemoteStore::initConnection(Connection & conn)
conn.daemonNixVersion = readString(conn.from);
}
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 35) {
+ conn.remoteTrustsUs = WorkerProto<std::optional<TrustedFlag>>::read(*this, conn.from);
+ } else {
+ // We don't know the answer; protocol too old.
+ conn.remoteTrustsUs = std::nullopt;
+ }
+
auto ex = conn.processStderr();
if (ex) std::rethrow_exception(ex);
}
@@ -265,7 +145,7 @@ void RemoteStore::setOptions(Connection & conn)
overrides.erase(settings.buildCores.name);
overrides.erase(settings.useSubstitutes.name);
overrides.erase(loggerSettings.showTrace.name);
- overrides.erase(settings.experimentalFeatures.name);
+ overrides.erase(experimentalFeatureSettings.experimentalFeatures.name);
overrides.erase(settings.pluginFiles.name);
conn.to << overrides.size();
for (auto & i : overrides)
@@ -347,12 +227,12 @@ StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, Substitute
return res;
} else {
conn->to << wopQueryValidPaths;
- worker_proto::write(*this, conn->to, paths);
+ workerProtoWrite(*this, conn->to, paths);
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 27) {
conn->to << (settings.buildersUseSubstitutes ? 1 : 0);
}
conn.processStderr();
- return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ return WorkerProto<StorePathSet>::read(*this, conn->from);
}
}
@@ -362,7 +242,7 @@ StorePathSet RemoteStore::queryAllValidPaths()
auto conn(getConnection());
conn->to << wopQueryAllValidPaths;
conn.processStderr();
- return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ return WorkerProto<StorePathSet>::read(*this, conn->from);
}
@@ -379,9 +259,9 @@ StorePathSet RemoteStore::querySubstitutablePaths(const StorePathSet & paths)
return res;
} else {
conn->to << wopQuerySubstitutablePaths;
- worker_proto::write(*this, conn->to, paths);
+ workerProtoWrite(*this, conn->to, paths);
conn.processStderr();
- return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ return WorkerProto<StorePathSet>::read(*this, conn->from);
}
}
@@ -403,7 +283,7 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
- info.references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ info.references = WorkerProto<StorePathSet>::read(*this, conn->from);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
infos.insert_or_assign(i.first, std::move(info));
@@ -416,9 +296,9 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
StorePathSet paths;
for (auto & path : pathsMap)
paths.insert(path.first);
- worker_proto::write(*this, conn->to, paths);
+ workerProtoWrite(*this, conn->to, paths);
} else
- worker_proto::write(*this, conn->to, pathsMap);
+ workerProtoWrite(*this, conn->to, pathsMap);
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
for (size_t n = 0; n < count; n++) {
@@ -426,7 +306,7 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
- info.references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ info.references = WorkerProto<StorePathSet>::read(*this, conn->from);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
}
@@ -469,7 +349,7 @@ void RemoteStore::queryReferrers(const StorePath & path,
auto conn(getConnection());
conn->to << wopQueryReferrers << printStorePath(path);
conn.processStderr();
- for (auto & i : worker_proto::read(*this, conn->from, Phantom<StorePathSet> {}))
+ for (auto & i : WorkerProto<StorePathSet>::read(*this, conn->from))
referrers.insert(i);
}
@@ -479,7 +359,7 @@ StorePathSet RemoteStore::queryValidDerivers(const StorePath & path)
auto conn(getConnection());
conn->to << wopQueryValidDerivers << printStorePath(path);
conn.processStderr();
- return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ return WorkerProto<StorePathSet>::read(*this, conn->from);
}
@@ -491,7 +371,7 @@ StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path)
auto conn(getConnection());
conn->to << wopQueryDerivationOutputs << printStorePath(path);
conn.processStderr();
- return worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ return WorkerProto<StorePathSet>::read(*this, conn->from);
}
@@ -501,7 +381,7 @@ std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivat
auto conn(getConnection());
conn->to << wopQueryDerivationOutputMap << printStorePath(path);
conn.processStderr();
- return worker_proto::read(*this, conn->from, Phantom<std::map<std::string, std::optional<StorePath>>> {});
+ return WorkerProto<std::map<std::string, std::optional<StorePath>>>::read(*this, conn->from);
} else {
// Fallback for old daemon versions.
// For floating-CA derivations (and their co-dependencies) this is an
@@ -534,6 +414,7 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
Source & dump,
std::string_view name,
ContentAddressMethod caMethod,
+ HashType hashType,
const StorePathSet & references,
RepairFlag repair)
{
@@ -545,8 +426,8 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
conn->to
<< wopAddToStore
<< name
- << renderContentAddressMethod(caMethod);
- worker_proto::write(*this, conn->to, references);
+ << caMethod.render(hashType);
+ workerProtoWrite(*this, conn->to, references);
conn->to << repair;
// The dump source may invoke the store, so we need to make some room.
@@ -565,26 +446,29 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
if (repair) throw Error("repairing is not supported when building through the Nix daemon protocol < 1.25");
std::visit(overloaded {
- [&](const TextHashMethod & thm) -> void {
+ [&](const TextIngestionMethod & thm) -> void {
+ if (hashType != htSHA256)
+ throw UnimplementedError("When adding text-hashed data called '%s', only SHA-256 is supported but '%s' was given",
+ name, printHashType(hashType));
std::string s = dump.drain();
conn->to << wopAddTextToStore << name << s;
- worker_proto::write(*this, conn->to, references);
+ workerProtoWrite(*this, conn->to, references);
conn.processStderr();
},
- [&](const FixedOutputHashMethod & fohm) -> void {
+ [&](const FileIngestionMethod & fim) -> void {
conn->to
<< wopAddToStore
<< name
- << ((fohm.hashType == htSHA256 && fohm.fileIngestionMethod == FileIngestionMethod::Recursive) ? 0 : 1) /* backwards compatibility hack */
- << (fohm.fileIngestionMethod == FileIngestionMethod::Recursive ? 1 : 0)
- << printHashType(fohm.hashType);
+ << ((hashType == htSHA256 && fim == FileIngestionMethod::Recursive) ? 0 : 1) /* backwards compatibility hack */
+ << (fim == FileIngestionMethod::Recursive ? 1 : 0)
+ << printHashType(hashType);
try {
conn->to.written = 0;
connections->incCapacity();
{
Finally cleanup([&]() { connections->decCapacity(); });
- if (fohm.fileIngestionMethod == FileIngestionMethod::Recursive) {
+ if (fim == FileIngestionMethod::Recursive) {
dump.drainInto(conn->to);
} else {
std::string contents = dump.drain();
@@ -603,7 +487,7 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
}
}
- }, caMethod);
+ }, caMethod.raw);
auto path = parseStorePath(readString(conn->from));
// Release our connection to prevent a deadlock in queryPathInfo().
conn_.reset();
@@ -615,7 +499,7 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
StorePath RemoteStore::addToStoreFromDump(Source & dump, std::string_view name,
FileIngestionMethod method, HashType hashType, RepairFlag repair, const StorePathSet & references)
{
- return addCAToStore(dump, name, FixedOutputHashMethod{ .fileIngestionMethod = method, .hashType = hashType }, references, repair)->path;
+ return addCAToStore(dump, name, method, hashType, references, repair)->path;
}
@@ -634,7 +518,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
sink
<< exportMagic
<< printStorePath(info.path);
- worker_proto::write(*this, sink, info.references);
+ workerProtoWrite(*this, sink, info.references);
sink
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0 // == no legacy signature
@@ -644,7 +528,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
conn.processStderr(0, source2.get());
- auto importedPaths = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ auto importedPaths = WorkerProto<StorePathSet>::read(*this, conn->from);
assert(importedPaths.size() <= 1);
}
@@ -653,7 +537,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(Base16, false);
- worker_proto::write(*this, conn->to, info.references);
+ workerProtoWrite(*this, conn->to, info.references);
conn->to << info.registrationTime << info.narSize
<< info.ultimate << info.sigs << renderContentAddress(info.ca)
<< repair << !checkSigs;
@@ -715,7 +599,7 @@ StorePath RemoteStore::addTextToStore(
RepairFlag repair)
{
StringSource source(s);
- return addCAToStore(source, name, TextHashMethod{}, references, repair)->path;
+ return addCAToStore(source, name, TextIngestionMethod {}, htSHA256, references, repair)->path;
}
void RemoteStore::registerDrvOutput(const Realisation & info)
@@ -726,7 +610,7 @@ void RemoteStore::registerDrvOutput(const Realisation & info)
conn->to << info.id.to_string();
conn->to << std::string(info.outPath.to_string());
} else {
- worker_proto::write(*this, conn->to, info);
+ workerProtoWrite(*this, conn->to, info);
}
conn.processStderr();
}
@@ -748,14 +632,14 @@ void RemoteStore::queryRealisationUncached(const DrvOutput & id,
auto real = [&]() -> std::shared_ptr<const Realisation> {
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
- auto outPaths = worker_proto::read(
- *this, conn->from, Phantom<std::set<StorePath>> {});
+ auto outPaths = WorkerProto<std::set<StorePath>>::read(
+ *this, conn->from);
if (outPaths.empty())
return nullptr;
return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
} else {
- auto realisations = worker_proto::read(
- *this, conn->from, Phantom<std::set<Realisation>> {});
+ auto realisations = WorkerProto<std::set<Realisation>>::read(
+ *this, conn->from);
if (realisations.empty())
return nullptr;
return std::make_shared<const Realisation>(*realisations.begin());
@@ -769,7 +653,7 @@ void RemoteStore::queryRealisationUncached(const DrvOutput & id,
static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, const std::vector<DerivedPath> & reqs)
{
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 30) {
- worker_proto::write(store, conn->to, reqs);
+ workerProtoWrite(store, conn->to, reqs);
} else {
Strings ss;
for (auto & p : reqs) {
@@ -824,7 +708,7 @@ void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMod
readInt(conn->from);
}
-std::vector<BuildResult> RemoteStore::buildPathsWithResults(
+std::vector<KeyedBuildResult> RemoteStore::buildPathsWithResults(
const std::vector<DerivedPath> & paths,
BuildMode buildMode,
std::shared_ptr<Store> evalStore)
@@ -839,7 +723,7 @@ std::vector<BuildResult> RemoteStore::buildPathsWithResults(
writeDerivedPaths(*this, conn, paths);
conn->to << buildMode;
conn.processStderr();
- return worker_proto::read(*this, conn->from, Phantom<std::vector<BuildResult>> {});
+ return WorkerProto<std::vector<KeyedBuildResult>>::read(*this, conn->from);
} else {
// Avoid deadlock.
conn_.reset();
@@ -848,21 +732,25 @@ std::vector<BuildResult> RemoteStore::buildPathsWithResults(
// fails, but meh.
buildPaths(paths, buildMode, evalStore);
- std::vector<BuildResult> results;
+ std::vector<KeyedBuildResult> results;
for (auto & path : paths) {
std::visit(
overloaded {
[&](const DerivedPath::Opaque & bo) {
- results.push_back(BuildResult {
- .status = BuildResult::Substituted,
- .path = bo,
+ results.push_back(KeyedBuildResult {
+ {
+ .status = BuildResult::Substituted,
+ },
+ /* .path = */ bo,
});
},
[&](const DerivedPath::Built & bfd) {
- BuildResult res {
- .status = BuildResult::Built,
- .path = bfd,
+ KeyedBuildResult res {
+ {
+ .status = BuildResult::Built
+ },
+ /* .path = */ bfd,
};
OutputPathMap outputs;
@@ -876,15 +764,15 @@ std::vector<BuildResult> RemoteStore::buildPathsWithResults(
"the derivation '%s' doesn't have an output named '%s'",
printStorePath(bfd.drvPath), output);
auto outputId = DrvOutput{ *outputHash, output };
- if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
auto realisation =
queryRealisation(outputId);
if (!realisation)
throw MissingRealisation(outputId);
- res.builtOutputs.emplace(realisation->id, *realisation);
+ res.builtOutputs.emplace(output, *realisation);
} else {
res.builtOutputs.emplace(
- outputId,
+ output,
Realisation {
.id = outputId,
.outPath = outputPath,
@@ -911,20 +799,18 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
writeDerivation(conn->to, *this, drv);
conn->to << buildMode;
conn.processStderr();
- BuildResult res {
- .path = DerivedPath::Built {
- .drvPath = drvPath,
- .outputs = OutputsSpec::All { },
- },
- };
+ BuildResult res;
res.status = (BuildResult::Status) readInt(conn->from);
conn->from >> res.errorMsg;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 29) {
conn->from >> res.timesBuilt >> res.isNonDeterministic >> res.startTime >> res.stopTime;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 28) {
- auto builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
- res.builtOutputs = builtOutputs;
+ auto builtOutputs = WorkerProto<DrvOutputs>::read(*this, conn->from);
+ for (auto && [output, realisation] : builtOutputs)
+ res.builtOutputs.insert_or_assign(
+ std::move(output.outputName),
+ std::move(realisation));
}
return res;
}
@@ -979,7 +865,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
conn->to
<< wopCollectGarbage << options.action;
- worker_proto::write(*this, conn->to, options.pathsToDelete);
+ workerProtoWrite(*this, conn->to, options.pathsToDelete);
conn->to << options.ignoreLiveness
<< options.maxFreed
/* removed options */
@@ -1038,9 +924,9 @@ void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
conn->to << wopQueryMissing;
writeDerivedPaths(*this, conn, targets);
conn.processStderr();
- willBuild = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
- willSubstitute = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
- unknown = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
+ willBuild = WorkerProto<StorePathSet>::read(*this, conn->from);
+ willSubstitute = WorkerProto<StorePathSet>::read(*this, conn->from);
+ unknown = WorkerProto<StorePathSet>::read(*this, conn->from);
conn->from >> downloadSize >> narSize;
return;
}
@@ -1082,6 +968,11 @@ unsigned int RemoteStore::getProtocol()
return conn->daemonVersion;
}
+std::optional<TrustedFlag> RemoteStore::isTrustedClient()
+{
+ auto conn(getConnection());
+ return conn->remoteTrustsUs;
+}
void RemoteStore::flushBadConnections()
{
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 8cd7cc822..4f3971bfd 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <limits>
#include <string>
@@ -22,15 +23,19 @@ struct RemoteStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
- const Setting<int> maxConnections{(StoreConfig*) this, 1,
- "max-connections", "maximum number of concurrent connections to the Nix daemon"};
+ const Setting<int> maxConnections{(StoreConfig*) this, 1, "max-connections",
+ "Maximum number of concurrent connections to the Nix daemon."};
- const Setting<unsigned int> maxConnectionAge{(StoreConfig*) this, std::numeric_limits<unsigned int>::max(),
- "max-connection-age", "number of seconds to reuse a connection"};
+ const Setting<unsigned int> maxConnectionAge{(StoreConfig*) this,
+ std::numeric_limits<unsigned int>::max(),
+ "max-connection-age",
+ "Maximum age of a connection before it is closed."};
};
-/* FIXME: RemoteStore is a misnomer - should be something like
- DaemonStore. */
+/**
+ * \todo RemoteStore is a misnomer - should be something like
+ * DaemonStore.
+ */
class RemoteStore : public virtual RemoteStoreConfig,
public virtual Store,
public virtual GcStore,
@@ -66,15 +71,20 @@ public:
void querySubstitutablePathInfos(const StorePathCAMap & paths,
SubstitutablePathInfos & infos) override;
- /* Add a content-addressable store path. `dump` will be drained. */
+ /**
+ * Add a content-addressable store path. `dump` will be drained.
+ */
ref<const ValidPathInfo> addCAToStore(
Source & dump,
std::string_view name,
ContentAddressMethod caMethod,
+ HashType hashType,
const StorePathSet & references,
RepairFlag repair);
- /* Add a content-addressable store path. Does not support references. `dump` will be drained. */
+ /**
+ * Add a content-addressable store path. Does not support references. `dump` will be drained.
+ */
StorePath addToStoreFromDump(Source & dump, std::string_view name,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair, const StorePathSet & references = StorePathSet()) override;
@@ -105,7 +115,7 @@ public:
void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;
- std::vector<BuildResult> buildPathsWithResults(
+ std::vector<KeyedBuildResult> buildPathsWithResults(
const std::vector<DerivedPath> & paths,
BuildMode buildMode,
std::shared_ptr<Store> evalStore) override;
@@ -127,6 +137,17 @@ public:
bool verifyStore(bool checkContents, RepairFlag repair) override;
+ /**
+ * The default instance would schedule the work on the client side, but
+ * for consistency with `buildPaths` and `buildDerivation` it should happen
+ * on the remote side.
+ *
+ * We make this fail for now so we can implement this properly later
+ * without it being a breaking change.
+ */
+ void repairPath(const StorePath & path) override
+ { unsupported("repairPath"); }
+
void addSignatures(const StorePath & storePath, const StringSet & sigs) override;
void queryMissing(const std::vector<DerivedPath> & targets,
@@ -141,6 +162,8 @@ public:
unsigned int getProtocol() override;
+ std::optional<TrustedFlag> isTrustedClient() override;
+
void flushBadConnections();
struct Connection
@@ -148,6 +171,7 @@ public:
FdSink to;
FdSource from;
unsigned int daemonVersion;
+ std::optional<TrustedFlag> remoteTrustsUs;
std::optional<std::string> daemonNixVersion;
std::chrono::time_point<std::chrono::steady_clock> startTime;
diff --git a/src/libstore/repair-flag.hh b/src/libstore/repair-flag.hh
index a13cda312..f412d6a20 100644
--- a/src/libstore/repair-flag.hh
+++ b/src/libstore/repair-flag.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
namespace nix {
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 8d76eee99..d2fc6abaf 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -40,12 +40,12 @@ struct S3Error : public Error
/* Helper: given an Outcome<R, E>, return R in case of success, or
throw an exception in case of an error. */
template<typename R, typename E>
-R && checkAws(const FormatOrString & fs, Aws::Utils::Outcome<R, E> && outcome)
+R && checkAws(std::string_view s, Aws::Utils::Outcome<R, E> && outcome)
{
if (!outcome.IsSuccess())
throw S3Error(
outcome.GetError().GetErrorType(),
- fs.s + ": " + outcome.GetError().GetMessage());
+ s + ": " + outcome.GetError().GetMessage());
return outcome.GetResultWithOwnership();
}
@@ -192,19 +192,72 @@ S3BinaryCacheStore::S3BinaryCacheStore(const Params & params)
struct S3BinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
{
using BinaryCacheStoreConfig::BinaryCacheStoreConfig;
- const Setting<std::string> profile{(StoreConfig*) this, "", "profile", "The name of the AWS configuration profile to use."};
- const Setting<std::string> region{(StoreConfig*) this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
- const Setting<std::string> scheme{(StoreConfig*) this, "", "scheme", "The scheme to use for S3 requests, https by default."};
- const Setting<std::string> endpoint{(StoreConfig*) this, "", "endpoint", "An optional override of the endpoint to use when talking to S3."};
- const Setting<std::string> narinfoCompression{(StoreConfig*) this, "", "narinfo-compression", "compression method for .narinfo files"};
- const Setting<std::string> lsCompression{(StoreConfig*) this, "", "ls-compression", "compression method for .ls files"};
- const Setting<std::string> logCompression{(StoreConfig*) this, "", "log-compression", "compression method for log/* files"};
+
+ const Setting<std::string> profile{(StoreConfig*) this, "", "profile",
+ R"(
+ The name of the AWS configuration profile to use. By default
+ Nix will use the `default` profile.
+ )"};
+
+ const Setting<std::string> region{(StoreConfig*) this, Aws::Region::US_EAST_1, "region",
+ R"(
+ The region of the S3 bucket. If your bucket is not in
+ `us-east-1`, you should always explicitly specify the region
+ parameter.
+ )"};
+
+ const Setting<std::string> scheme{(StoreConfig*) this, "", "scheme",
+ R"(
+ The scheme used for S3 requests, `https` (default) or `http`. This
+ option allows you to disable HTTPS for binary caches which don't
+ support it.
+
+ > **Note**
+ >
+ > HTTPS should be used if the cache might contain sensitive
+ > information.
+ )"};
+
+ const Setting<std::string> endpoint{(StoreConfig*) this, "", "endpoint",
+ R"(
+ The URL of the endpoint of an S3-compatible service such as MinIO.
+ Do not specify this setting if you're using Amazon S3.
+
+ > **Note**
+ >
+ > This endpoint must support HTTPS and will use path-based
+ > addressing instead of virtual host based addressing.
+ )"};
+
+ const Setting<std::string> narinfoCompression{(StoreConfig*) this, "", "narinfo-compression",
+ "Compression method for `.narinfo` files."};
+
+ const Setting<std::string> lsCompression{(StoreConfig*) this, "", "ls-compression",
+ "Compression method for `.ls` files."};
+
+ const Setting<std::string> logCompression{(StoreConfig*) this, "", "log-compression",
+ R"(
+ Compression method for `log/*` files. It is recommended to
+ use a compression method supported by most web browsers
+ (e.g. `brotli`).
+ )"};
+
const Setting<bool> multipartUpload{
- (StoreConfig*) this, false, "multipart-upload", "whether to use multi-part uploads"};
+ (StoreConfig*) this, false, "multipart-upload",
+ "Whether to use multi-part uploads."};
+
const Setting<uint64_t> bufferSize{
- (StoreConfig*) this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"};
+ (StoreConfig*) this, 5 * 1024 * 1024, "buffer-size",
+ "Size (in bytes) of each part in multi-part uploads."};
const std::string name() override { return "S3 Binary Cache Store"; }
+
+ std::string doc() override
+ {
+ return
+ #include "s3-binary-cache-store.md"
+ ;
+ }
};
struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual S3BinaryCacheStore
@@ -430,9 +483,9 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
std::string marker;
do {
- debug(format("listing bucket 's3://%s' from key '%s'...") % bucketName % marker);
+ debug("listing bucket 's3://%s' from key '%s'...", bucketName, marker);
- auto res = checkAws(format("AWS error listing bucket '%s'") % bucketName,
+ auto res = checkAws(fmt("AWS error listing bucket '%s'", bucketName),
s3Helper.client->ListObjects(
Aws::S3::Model::ListObjectsRequest()
.WithBucket(bucketName)
@@ -441,8 +494,8 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
auto & contents = res.GetContents();
- debug(format("got %d keys, next marker '%s'")
- % contents.size() % res.GetNextMarker());
+ debug("got %d keys, next marker '%s'",
+ contents.size(), res.GetNextMarker());
for (auto object : contents) {
auto & key = object.GetKey();
@@ -456,6 +509,16 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
return paths;
}
+ /**
+ * For now, we conservatively say we don't know.
+ *
+ * \todo try to expose our S3 authentication status.
+ */
+ std::optional<TrustedFlag> isTrustedClient() override
+ {
+ return std::nullopt;
+ }
+
static std::set<std::string> uriSchemes() { return {"s3"}; }
};
diff --git a/src/libstore/s3-binary-cache-store.hh b/src/libstore/s3-binary-cache-store.hh
index bce828b11..c62ea5147 100644
--- a/src/libstore/s3-binary-cache-store.hh
+++ b/src/libstore/s3-binary-cache-store.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "binary-cache-store.hh"
diff --git a/src/libstore/s3-binary-cache-store.md b/src/libstore/s3-binary-cache-store.md
new file mode 100644
index 000000000..70fe0eb09
--- /dev/null
+++ b/src/libstore/s3-binary-cache-store.md
@@ -0,0 +1,8 @@
+R"(
+
+**Store URL format**: `s3://`*bucket-name*
+
+This store allows reading and writing a binary cache stored in an AWS
+S3 bucket.
+
+)"
diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh
index cdb3e5908..f0aeb3bed 100644
--- a/src/libstore/s3.hh
+++ b/src/libstore/s3.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#if ENABLE_S3
diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh
index 3f76baa82..553fd3a09 100644
--- a/src/libstore/serve-protocol.hh
+++ b/src/libstore/serve-protocol.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
namespace nix {
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 871f2f3be..df334c23c 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -239,14 +239,11 @@ SQLiteTxn::~SQLiteTxn()
}
}
-void handleSQLiteBusy(const SQLiteBusy & e)
+void handleSQLiteBusy(const SQLiteBusy & e, time_t & nextWarning)
{
- static std::atomic<time_t> lastWarned{0};
-
time_t now = time(0);
-
- if (now > lastWarned + 10) {
- lastWarned = now;
+ if (now > nextWarning) {
+ nextWarning = now + 10;
logWarning({
.msg = hintfmt(e.what())
});
diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh
index 1853731a2..6e14852cb 100644
--- a/src/libstore/sqlite.hh
+++ b/src/libstore/sqlite.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <functional>
#include <string>
@@ -10,7 +11,9 @@ struct sqlite3_stmt;
namespace nix {
-/* RAII wrapper to close a SQLite database automatically. */
+/**
+ * RAII wrapper to close a SQLite database automatically.
+ */
struct SQLite
{
sqlite3 * db = 0;
@@ -22,7 +25,9 @@ struct SQLite
~SQLite();
operator sqlite3 * () { return db; }
- /* Disable synchronous mode, set truncate journal mode. */
+ /**
+ * Disable synchronous mode, set truncate journal mode.
+ */
void isCache();
void exec(const std::string & stmt);
@@ -30,7 +35,9 @@ struct SQLite
uint64_t getLastInsertedRowId();
};
-/* RAII wrapper to create and destroy SQLite prepared statements. */
+/**
+ * RAII wrapper to create and destroy SQLite prepared statements.
+ */
struct SQLiteStmt
{
sqlite3 * db = 0;
@@ -42,7 +49,9 @@ struct SQLiteStmt
~SQLiteStmt();
operator sqlite3_stmt * () { return stmt; }
- /* Helper for binding / executing statements. */
+ /**
+ * Helper for binding / executing statements.
+ */
class Use
{
friend struct SQLiteStmt;
@@ -55,7 +64,9 @@ struct SQLiteStmt
~Use();
- /* Bind the next parameter. */
+ /**
+ * Bind the next parameter.
+ */
Use & operator () (std::string_view value, bool notNull = true);
Use & operator () (const unsigned char * data, size_t len, bool notNull = true);
Use & operator () (int64_t value, bool notNull = true);
@@ -63,11 +74,15 @@ struct SQLiteStmt
int step();
- /* Execute a statement that does not return rows. */
+ /**
+ * Execute a statement that does not return rows.
+ */
void exec();
- /* For statements that return 0 or more rows. Returns true iff
- a row is available. */
+ /**
+ * For statements that return 0 or more rows. Returns true iff
+ * a row is available.
+ */
bool next();
std::string getStr(int col);
@@ -81,8 +96,10 @@ struct SQLiteStmt
}
};
-/* RAII helper that ensures transactions are aborted unless explicitly
- committed. */
+/**
+ * RAII helper that ensures transactions are aborted unless explicitly
+ * committed.
+ */
struct SQLiteTxn
{
bool active = false;
@@ -122,18 +139,22 @@ protected:
MakeError(SQLiteBusy, SQLiteError);
-void handleSQLiteBusy(const SQLiteBusy & e);
+void handleSQLiteBusy(const SQLiteBusy & e, time_t & nextWarning);
-/* Convenience function for retrying a SQLite transaction when the
- database is busy. */
+/**
+ * Convenience function for retrying a SQLite transaction when the
+ * database is busy.
+ */
template<typename T, typename F>
T retrySQLite(F && fun)
{
+ time_t nextWarning = time(0) + 1;
+
while (true) {
try {
return fun();
} catch (SQLiteBusy & e) {
- handleSQLiteBusy(e);
+ handleSQLiteBusy(e, nextWarning);
}
}
}
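A minimal usage sketch of the retry helper above; the database handle, table name, and wrapper function are assumptions for illustration, and SQLiteTxn::commit() is assumed from the rest of this header:

    // Sketch only: retry a write that may throw SQLiteBusy.
    void updateTable(nix::SQLite & db)
    {
        nix::retrySQLite<void>([&]() {
            nix::SQLiteTxn txn(db);           // aborted unless committed
            db.exec("delete from TempTable"); // SQLite::exec() declared above
            txn.commit();                     // assumed SQLiteTxn::commit()
        });
    }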
diff --git a/src/libstore/ssh-store-config.hh b/src/libstore/ssh-store-config.hh
new file mode 100644
index 000000000..c27a5d00f
--- /dev/null
+++ b/src/libstore/ssh-store-config.hh
@@ -0,0 +1,29 @@
+#pragma once
+///@file
+
+#include "store-api.hh"
+
+namespace nix {
+
+struct CommonSSHStoreConfig : virtual StoreConfig
+{
+ using StoreConfig::StoreConfig;
+
+ const Setting<Path> sshKey{(StoreConfig*) this, "", "ssh-key",
+ "Path to the SSH private key used to authenticate to the remote machine."};
+
+ const Setting<std::string> sshPublicHostKey{(StoreConfig*) this, "", "base64-ssh-public-host-key",
+ "The public host key of the remote machine."};
+
+ const Setting<bool> compress{(StoreConfig*) this, false, "compress",
+ "Whether to enable SSH compression."};
+
+ const Setting<std::string> remoteStore{(StoreConfig*) this, "", "remote-store",
+ R"(
+ [Store URL](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
+ to be used on the remote machine. The default is `auto`
+ (i.e. use the Nix daemon or `/nix/store` directly).
+ )"};
+};
+
+}
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
index cfa063803..962221ad2 100644
--- a/src/libstore/ssh-store.cc
+++ b/src/libstore/ssh-store.cc
@@ -1,3 +1,4 @@
+#include "ssh-store-config.hh"
#include "store-api.hh"
#include "remote-store.hh"
#include "remote-fs-accessor.hh"
@@ -8,17 +9,22 @@
namespace nix {
-struct SSHStoreConfig : virtual RemoteStoreConfig
+struct SSHStoreConfig : virtual RemoteStoreConfig, virtual CommonSSHStoreConfig
{
using RemoteStoreConfig::RemoteStoreConfig;
+ using CommonSSHStoreConfig::CommonSSHStoreConfig;
- const Setting<Path> sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"};
- const Setting<std::string> sshPublicHostKey{(StoreConfig*) this, "", "base64-ssh-public-host-key", "The public half of the host's SSH key"};
- const Setting<bool> compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"};
- const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-daemon", "remote-program", "path to the nix-daemon executable on the remote system"};
- const Setting<std::string> remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"};
+ const Setting<Path> remoteProgram{(StoreConfig*) this, "nix-daemon", "remote-program",
+ "Path to the `nix-daemon` executable on the remote machine."};
- const std::string name() override { return "SSH Store"; }
+ const std::string name() override { return "Experimental SSH Store"; }
+
+ std::string doc() override
+ {
+ return
+ #include "ssh-store.md"
+ ;
+ }
};
class SSHStore : public virtual SSHStoreConfig, public virtual RemoteStore
@@ -28,6 +34,7 @@ public:
SSHStore(const std::string & scheme, const std::string & host, const Params & params)
: StoreConfig(params)
, RemoteStoreConfig(params)
+ , CommonSSHStoreConfig(params)
, SSHStoreConfig(params)
, Store(params)
, RemoteStore(params)
diff --git a/src/libstore/ssh-store.md b/src/libstore/ssh-store.md
new file mode 100644
index 000000000..881537e71
--- /dev/null
+++ b/src/libstore/ssh-store.md
@@ -0,0 +1,8 @@
+R"(
+
+**Store URL format**: `ssh-ng://[username@]hostname`
+
+Experimental store type that allows full access to a Nix store on a
+remote machine.
+
+)"
diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc
index 69bfe3418..fae99d75b 100644
--- a/src/libstore/ssh.cc
+++ b/src/libstore/ssh.cc
@@ -1,4 +1,5 @@
#include "ssh.hh"
+#include "finally.hh"
namespace nix {
@@ -35,6 +36,14 @@ void SSHMaster::addCommonSSHOpts(Strings & args)
}
if (compress)
args.push_back("-C");
+
+ args.push_back("-oPermitLocalCommand=yes");
+ args.push_back("-oLocalCommand=echo started");
+}
+
+bool SSHMaster::isMasterRunning() {
+ auto res = runProgram(RunOptions {.program = "ssh", .args = {"-O", "check", host}, .mergeStderrToStdout = true});
+ return res.first == 0;
}
std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string & command)
@@ -49,6 +58,11 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
ProcessOptions options;
options.dieWithParent = false;
+ if (!fakeSSH && !useMaster) {
+ logger->pause();
+ }
+ Finally cleanup = [&]() { logger->resume(); };
+
conn->sshPid = startProcess([&]() {
restoreProcessContext();
@@ -86,6 +100,18 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
in.readSide = -1;
out.writeSide = -1;
+ // Wait for the SSH connection to be established,
+ // so that we don't overwrite the password prompt with our progress bar.
+ if (!fakeSSH && !useMaster && !isMasterRunning()) {
+ std::string reply;
+ try {
+ reply = readLine(out.readSide.get());
+ } catch (EndOfFile & e) { }
+
+ if (reply != "started")
+ throw Error("failed to start SSH connection to '%s'", host);
+ }
+
conn->out = std::move(out.readSide);
conn->in = std::move(in.writeSide);
@@ -109,6 +135,11 @@ Path SSHMaster::startMaster()
ProcessOptions options;
options.dieWithParent = false;
+ logger->pause();
+ Finally cleanup = [&]() { logger->resume(); };
+
+ bool wasMasterRunning = isMasterRunning();
+
state->sshMaster = startProcess([&]() {
restoreProcessContext();
@@ -117,11 +148,7 @@ Path SSHMaster::startMaster()
if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
throw SysError("duping over stdout");
- Strings args =
- { "ssh", host.c_str(), "-M", "-N", "-S", state->socketPath
- , "-o", "LocalCommand=echo started"
- , "-o", "PermitLocalCommand=yes"
- };
+ Strings args = { "ssh", host.c_str(), "-M", "-N", "-S", state->socketPath };
if (verbosity >= lvlChatty)
args.push_back("-v");
addCommonSSHOpts(args);
@@ -132,13 +159,15 @@ Path SSHMaster::startMaster()
out.writeSide = -1;
- std::string reply;
- try {
- reply = readLine(out.readSide.get());
- } catch (EndOfFile & e) { }
+ if (!wasMasterRunning) {
+ std::string reply;
+ try {
+ reply = readLine(out.readSide.get());
+ } catch (EndOfFile & e) { }
- if (reply != "started")
- throw Error("failed to start SSH master connection to '%s'", host);
+ if (reply != "started")
+ throw Error("failed to start SSH master connection to '%s'", host);
+ }
return state->socketPath;
}
diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh
index dabbcedda..94b952af9 100644
--- a/src/libstore/ssh.hh
+++ b/src/libstore/ssh.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "util.hh"
#include "sync.hh"
@@ -27,6 +28,7 @@ private:
Sync<State> state_;
void addCommonSSHOpts(Strings & args);
+ bool isMasterRunning();
public:
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index f32c2d30c..5bee1af9f 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -7,6 +7,7 @@
#include "nar-info-disk-cache.hh"
#include "thread-pool.hh"
#include "url.hh"
+#include "references.hh"
#include "archive.hh"
#include "callback.hh"
#include "remote-store.hh"
@@ -98,10 +99,12 @@ StorePath Store::followLinksToStorePath(std::string_view path) const
silly, but it's done that way for compatibility). <id> is the
name of the output (usually, "out").
- <h2> = base-16 representation of a SHA-256 hash of:
+ <h2> = base-16 representation of a SHA-256 hash of <s2>
+
+ <s2> =
if <type> = "text:...":
the string written to the resulting store path
- if <type> = "source":
+ if <type> = "source:...":
the serialisation of the path from which this store path is
copied, as returned by hashPath()
if <type> = "output:<id>":
@@ -162,63 +165,63 @@ StorePath Store::makeOutputPath(std::string_view id,
}
+/* Stuff the references (if any) into the type. This is a bit
+ hacky, but we can't put them in, say, <s2> (per the grammar above)
+ since that would be ambiguous. */
static std::string makeType(
const Store & store,
std::string && type,
- const StorePathSet & references,
- bool hasSelfReference = false)
+ const StoreReferences & references)
{
- for (auto & i : references) {
+ for (auto & i : references.others) {
type += ":";
type += store.printStorePath(i);
}
- if (hasSelfReference) type += ":self";
+ if (references.self) type += ":self";
return std::move(type);
}
-StorePath Store::makeFixedOutputPath(
- FileIngestionMethod method,
- const Hash & hash,
- std::string_view name,
- const StorePathSet & references,
- bool hasSelfReference) const
+StorePath Store::makeFixedOutputPath(std::string_view name, const FixedOutputInfo & info) const
{
- if (hash.type == htSHA256 && method == FileIngestionMethod::Recursive) {
- return makeStorePath(makeType(*this, "source", references, hasSelfReference), hash, name);
+ if (info.hash.hash.type == htSHA256 && info.hash.method == FileIngestionMethod::Recursive) {
+ return makeStorePath(makeType(*this, "source", info.references), info.hash.hash, name);
} else {
- assert(references.empty());
+ assert(info.references.size() == 0);
return makeStorePath("output:out",
hashString(htSHA256,
"fixed:out:"
- + makeFileIngestionPrefix(method)
- + hash.to_string(Base16, true) + ":"),
+ + makeFileIngestionPrefix(info.hash.method)
+ + info.hash.hash.to_string(Base16, true) + ":"),
name);
}
}
-StorePath Store::makeFixedOutputPathFromCA(std::string_view name, ContentAddress ca,
- const StorePathSet & references, bool hasSelfReference) const
+
+StorePath Store::makeTextPath(std::string_view name, const TextInfo & info) const
+{
+ assert(info.hash.hash.type == htSHA256);
+ return makeStorePath(
+ makeType(*this, "text", StoreReferences {
+ .others = info.references,
+ .self = false,
+ }),
+ info.hash.hash,
+ name);
+}
+
+
+StorePath Store::makeFixedOutputPathFromCA(std::string_view name, const ContentAddressWithReferences & ca) const
{
// New template
return std::visit(overloaded {
- [&](const TextHash & th) {
- return makeTextPath(name, th.hash, references);
+ [&](const TextInfo & ti) {
+ return makeTextPath(name, ti);
},
- [&](const FixedOutputHash & fsh) {
- return makeFixedOutputPath(fsh.method, fsh.hash, name, references, hasSelfReference);
+ [&](const FixedOutputInfo & foi) {
+ return makeFixedOutputPath(name, foi);
}
- }, ca);
-}
-
-StorePath Store::makeTextPath(std::string_view name, const Hash & hash,
- const StorePathSet & references) const
-{
- assert(hash.type == htSHA256);
- /* Stuff the references (if any) into the type. This is a bit
- hacky, but we can't put them in `s' since that would be
- ambiguous. */
- return makeStorePath(makeType(*this, "text", references), hash, name);
+ }, ca.raw);
}
@@ -228,7 +231,14 @@ std::pair<StorePath, Hash> Store::computeStorePathForPath(std::string_view name,
Hash h = method == FileIngestionMethod::Recursive
? hashPath(hashAlgo, srcPath, filter).first
: hashFile(hashAlgo, srcPath);
- return std::make_pair(makeFixedOutputPath(method, h, name), h);
+ FixedOutputInfo caInfo {
+ .hash = {
+ .method = method,
+ .hash = h,
+ },
+ .references = {},
+ };
+ return std::make_pair(makeFixedOutputPath(name, caInfo), h);
}
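A small usage sketch of the preparatory API above (the file name, source path, and wrapper function are illustrative):

    // Sketch only: compute where a local file would land in the store
    // without actually adding it.
    void previewStorePath(const nix::Store & store)
    {
        auto [dstPath, contentHash] =
            store.computeStorePathForPath("example-file", "/tmp/example-file");
        // dstPath is the prospective store path; contentHash is the content hash.
    }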
@@ -237,7 +247,10 @@ StorePath Store::computeStorePathForText(
std::string_view s,
const StorePathSet & references) const
{
- return makeTextPath(name, hashString(htSHA256, s), references);
+ return makeTextPath(name, TextInfo {
+ { .hash = hashString(htSHA256, s) },
+ references,
+ });
}
@@ -425,11 +438,18 @@ ValidPathInfo Store::addToStoreSlow(std::string_view name, const Path & srcPath,
throw Error("hash mismatch for '%s'", srcPath);
ValidPathInfo info {
- makeFixedOutputPath(method, hash, name),
+ *this,
+ name,
+ FixedOutputInfo {
+ .hash = {
+ .method = method,
+ .hash = hash,
+ },
+ .references = {},
+ },
narHash,
};
info.narSize = narSize;
- info.ca = FixedOutputHash { .method = method, .hash = hash };
if (!isValidPath(info.path)) {
auto source = sinkToSource([&](Sink & scratchpadSink) {
@@ -445,10 +465,10 @@ StringSet StoreConfig::getDefaultSystemFeatures()
{
auto res = settings.systemFeatures.get();
- if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations))
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations))
res.insert("ca-derivations");
- if (settings.isExperimentalFeatureEnabled(Xp::RecursiveNix))
+ if (experimentalFeatureSettings.isEnabled(Xp::RecursiveNix))
res.insert("recursive-nix");
return res;
@@ -507,6 +527,57 @@ StorePathSet Store::queryDerivationOutputs(const StorePath & path)
return outputPaths;
}
+
+void Store::querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos)
+{
+ if (!settings.useSubstitutes) return;
+ for (auto & sub : getDefaultSubstituters()) {
+ for (auto & path : paths) {
+ if (infos.count(path.first))
+ // Choose first succeeding substituter.
+ continue;
+
+ auto subPath(path.first);
+
+ // Recompute store path so that we can use a different store root.
+ if (path.second) {
+ subPath = makeFixedOutputPathFromCA(
+ path.first.name(),
+ ContentAddressWithReferences::withoutRefs(*path.second));
+ if (sub->storeDir == storeDir)
+ assert(subPath == path.first);
+ if (subPath != path.first)
+ debug("replaced path '%s' with '%s' for substituter '%s'", printStorePath(path.first), sub->printStorePath(subPath), sub->getUri());
+ } else if (sub->storeDir != storeDir) continue;
+
+ debug("checking substituter '%s' for path '%s'", sub->getUri(), sub->printStorePath(subPath));
+ try {
+ auto info = sub->queryPathInfo(subPath);
+
+ if (sub->storeDir != storeDir && !(info->isContentAddressed(*sub) && info->references.empty()))
+ continue;
+
+ auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
+ std::shared_ptr<const ValidPathInfo>(info));
+ infos.insert_or_assign(path.first, SubstitutablePathInfo{
+ .deriver = info->deriver,
+ .references = info->references,
+ .downloadSize = narInfo ? narInfo->fileSize : 0,
+ .narSize = info->narSize,
+ });
+ } catch (InvalidPath &) {
+ } catch (SubstituterDisabled &) {
+ } catch (Error & e) {
+ if (settings.tryFallback)
+ logError(e.info());
+ else
+ throw;
+ }
+ }
+ }
+}
+
+
bool Store::isValidPath(const StorePath & storePath)
{
{
@@ -790,13 +861,13 @@ std::string Store::makeValidityRegistration(const StorePathSet & paths,
if (showHash) {
s += info->narHash.to_string(Base16, false) + "\n";
- s += (format("%1%\n") % info->narSize).str();
+ s += fmt("%1%\n", info->narSize);
}
auto deriver = showDerivers && info->deriver ? printStorePath(*info->deriver) : "";
s += deriver + "\n";
- s += (format("%1%\n") % info->references.size()).str();
+ s += fmt("%1%\n", info->references.size());
for (auto & j : info->references)
s += printStorePath(j) + "\n";
@@ -977,7 +1048,9 @@ void copyStorePath(
// recompute store path on the chance dstStore does it differently
if (info->ca && info->references.empty()) {
auto info2 = make_ref<ValidPathInfo>(*info);
- info2->path = dstStore.makeFixedOutputPathFromCA(info->path.name(), *info->ca);
+ info2->path = dstStore.makeFixedOutputPathFromCA(
+ info->path.name(),
+ info->contentAddressWithReferences().value());
if (dstStore.storeDir == srcStore.storeDir)
assert(info->path == info2->path);
info = info2;
@@ -1017,7 +1090,7 @@ std::map<StorePath, StorePath> copyPaths(
for (auto & path : paths) {
storePaths.insert(path.path());
if (auto realisation = std::get_if<Realisation>(&path.raw)) {
- settings.requireExperimentalFeature(Xp::CaDerivations);
+ experimentalFeatureSettings.require(Xp::CaDerivations);
toplevelRealisations.insert(*realisation);
}
}
@@ -1089,7 +1162,9 @@ std::map<StorePath, StorePath> copyPaths(
auto storePathForSrc = currentPathInfo.path;
auto storePathForDst = storePathForSrc;
if (currentPathInfo.ca && currentPathInfo.references.empty()) {
- storePathForDst = dstStore.makeFixedOutputPathFromCA(storePathForSrc.name(), *currentPathInfo.ca);
+ storePathForDst = dstStore.makeFixedOutputPathFromCA(
+ currentPathInfo.path.name(),
+ currentPathInfo.contentAddressWithReferences().value());
if (dstStore.storeDir == srcStore.storeDir)
assert(storePathForDst == storePathForSrc);
if (storePathForDst != storePathForSrc)
@@ -1101,6 +1176,9 @@ std::map<StorePath, StorePath> copyPaths(
return storePathForDst;
};
+ // total is accessed by each copy, each of which is handled in a separate thread
+ std::atomic<uint64_t> total = 0;
+
for (auto & missingPath : sortedMissing) {
auto info = srcStore.queryPathInfo(missingPath);
@@ -1121,7 +1199,13 @@ std::map<StorePath, StorePath> copyPaths(
{storePathS, srcUri, dstUri});
PushActivity pact(act.id);
- srcStore.narFromPath(missingPath, sink);
+ LambdaSink progressSink([&](std::string_view data) {
+ total += data.size();
+ act.progress(total, info->narSize);
+ });
+ TeeSink tee { sink, progressSink };
+
+ srcStore.narFromPath(missingPath, tee);
});
pathsToCopy.push_back(std::pair{infoForDst, std::move(source)});
}
@@ -1242,7 +1326,7 @@ std::optional<StorePath> Store::getBuildDerivationPath(const StorePath & path)
}
}
- if (!settings.isExperimentalFeatureEnabled(Xp::CaDerivations) || !isValidPath(path))
+ if (!experimentalFeatureSettings.isEnabled(Xp::CaDerivations) || !isValidPath(path))
return path;
auto drv = readDerivation(path);
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 4d8db3596..2ecbe2708 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "nar-info.hh"
#include "realisation.hh"
@@ -55,7 +56,10 @@ namespace nix {
*/
MakeError(SubstError, Error);
-MakeError(BuildError, Error); // denotes a permanent build failure
+/**
+ * denotes a permanent build failure
+ */
+MakeError(BuildError, Error);
MakeError(InvalidPath, Error);
MakeError(Unsupported, Error);
MakeError(SubstituteGone, Error);
@@ -78,13 +82,17 @@ enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true };
enum SubstituteFlag : bool { NoSubstitute = false, Substitute = true };
enum AllowInvalidFlag : bool { DisallowInvalid = false, AllowInvalid = true };
-/* Magic header of exportPath() output (obsolete). */
+/**
+ * Magic header of exportPath() output (obsolete).
+ */
const uint32_t exportMagic = 0x4558494e;
enum BuildMode { bmNormal, bmRepair, bmCheck };
+enum TrustedFlag : bool { NotTrusted = false, Trusted = true };
struct BuildResult;
+struct KeyedBuildResult;
typedef std::map<StorePath, std::optional<ContentAddress>> StorePathCAMap;
@@ -101,17 +109,41 @@ struct StoreConfig : public Config
virtual const std::string name() = 0;
+ virtual std::string doc()
+ {
+ return "";
+ }
+
const PathSetting storeDir_{this, false, settings.nixStore,
- "store", "path to the Nix store"};
+ "store",
+ R"(
+ Logical location of the Nix store, usually
+ `/nix/store`. Note that you can only copy store paths
+ between stores if they have the same `store` setting.
+ )"};
const Path storeDir = storeDir_;
- const Setting<int> pathInfoCacheSize{this, 65536, "path-info-cache-size", "size of the in-memory store path information cache"};
+ const Setting<int> pathInfoCacheSize{this, 65536, "path-info-cache-size",
+ "Size of the in-memory store path metadata cache."};
- const Setting<bool> isTrusted{this, false, "trusted", "whether paths from this store can be used as substitutes even when they lack trusted signatures"};
+ const Setting<bool> isTrusted{this, false, "trusted",
+ R"(
+ Whether paths from this store can be used as substitutes
+ even if they are not signed by a key listed in the
+ [`trusted-public-keys`](@docroot@/command-ref/conf-file.md#conf-trusted-public-keys)
+ setting.
+ )"};
- Setting<int> priority{this, 0, "priority", "priority of this substituter (lower value means higher priority)"};
+ Setting<int> priority{this, 0, "priority",
+ R"(
+ Priority of this store when used as a substituter. A lower value means a higher priority.
+ )"};
- Setting<bool> wantMassQuery{this, false, "want-mass-query", "whether this substituter can be queried efficiently for path validity"};
+ Setting<bool> wantMassQuery{this, false, "want-mass-query",
+ R"(
+ Whether this store (when used as a substituter) can be
+ queried efficiently for path validity.
+ )"};
Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(),
"system-features",
@@ -125,23 +157,30 @@ public:
typedef std::map<std::string, std::string> Params;
-
-
protected:
struct PathInfoCacheValue {
- // Time of cache entry creation or update
+ /**
+ * Time of cache entry creation or update
+ */
std::chrono::time_point<std::chrono::steady_clock> time_point = std::chrono::steady_clock::now();
- // Null if missing
+ /**
+ * Null if missing
+ */
std::shared_ptr<const ValidPathInfo> value;
- // Whether the value is valid as a cache entry. The path may not exist.
+ /**
+ * Whether the value is valid as a cache entry. The path may not
+ * exist.
+ */
bool isKnownNow();
- // Past tense, because a path can only be assumed to exists when
- // isKnownNow() && didExist()
+ /**
+ * Past tense, because a path can only be assumed to exist when
+ * isKnownNow() && didExist()
+ */
inline bool didExist() {
return value != nullptr;
}
@@ -175,35 +214,53 @@ public:
std::string printStorePath(const StorePath & path) const;
- // FIXME: remove
+ /**
+ * Deprecated
+ *
+ * \todo remove
+ */
StorePathSet parseStorePathSet(const PathSet & paths) const;
PathSet printStorePathSet(const StorePathSet & path) const;
- /* Display a set of paths in human-readable form (i.e., between quotes
- and separated by commas). */
+ /**
+ * Display a set of paths in human-readable form (i.e., between quotes
+ * and separated by commas).
+ */
std::string showPaths(const StorePathSet & paths);
- /* Return true if ‘path’ is in the Nix store (but not the Nix
- store itself). */
+ /**
+ * @return true if ‘path’ is in the Nix store (but not the Nix
+ * store itself).
+ */
bool isInStore(PathView path) const;
- /* Return true if ‘path’ is a store path, i.e. a direct child of
- the Nix store. */
+ /**
+ * @return true if ‘path’ is a store path, i.e. a direct child of the
+ * Nix store.
+ */
bool isStorePath(std::string_view path) const;
- /* Split a path like /nix/store/<hash>-<name>/<bla> into
- /nix/store/<hash>-<name> and /<bla>. */
+ /**
+ * Split a path like /nix/store/<hash>-<name>/<bla> into
+ * /nix/store/<hash>-<name> and /<bla>.
+ */
std::pair<StorePath, Path> toStorePath(PathView path) const;
- /* Follow symlinks until we end up with a path in the Nix store. */
+ /**
+ * Follow symlinks until we end up with a path in the Nix store.
+ */
Path followLinksToStore(std::string_view path) const;
- /* Same as followLinksToStore(), but apply toStorePath() to the
- result. */
+ /**
+ * Same as followLinksToStore(), but apply toStorePath() to the
+ * result.
+ */
StorePath followLinksToStorePath(std::string_view path) const;
- /* Constructs a unique store path name. */
+ /**
+ * Constructs a unique store path name.
+ */
StorePath makeStorePath(std::string_view type,
std::string_view hash, std::string_view name) const;
StorePath makeStorePath(std::string_view type,
@@ -212,45 +269,46 @@ public:
StorePath makeOutputPath(std::string_view id,
const Hash & hash, std::string_view name) const;
- StorePath makeFixedOutputPath(FileIngestionMethod method,
- const Hash & hash, std::string_view name,
- const StorePathSet & references = {},
- bool hasSelfReference = false) const;
+ StorePath makeFixedOutputPath(std::string_view name, const FixedOutputInfo & info) const;
- StorePath makeTextPath(std::string_view name, const Hash & hash,
- const StorePathSet & references = {}) const;
+ StorePath makeTextPath(std::string_view name, const TextInfo & info) const;
- StorePath makeFixedOutputPathFromCA(std::string_view name, ContentAddress ca,
- const StorePathSet & references = {},
- bool hasSelfReference = false) const;
+ StorePath makeFixedOutputPathFromCA(std::string_view name, const ContentAddressWithReferences & ca) const;
- /* This is the preparatory part of addToStore(); it computes the
- store path to which srcPath is to be copied. Returns the store
- path and the cryptographic hash of the contents of srcPath. */
+ /**
+ * Preparatory part of addToStore().
+ *
+ * @return the store path to which srcPath is to be copied
+ * and the cryptographic hash of the contents of srcPath.
+ */
std::pair<StorePath, Hash> computeStorePathForPath(std::string_view name,
const Path & srcPath, FileIngestionMethod method = FileIngestionMethod::Recursive,
HashType hashAlgo = htSHA256, PathFilter & filter = defaultPathFilter) const;
- /* Preparatory part of addTextToStore().
-
- !!! Computation of the path should take the references given to
- addTextToStore() into account, otherwise we have a (relatively
- minor) security hole: a caller can register a source file with
- bogus references. If there are too many references, the path may
- not be garbage collected when it has to be (not really a problem,
- the caller could create a root anyway), or it may be garbage
- collected when it shouldn't be (more serious).
-
- Hashing the references would solve this (bogus references would
- simply yield a different store path, so other users wouldn't be
- affected), but it has some backwards compatibility issues (the
- hashing scheme changes), so I'm not doing that for now. */
+ /**
+ * Preparatory part of addTextToStore().
+ *
+ * !!! Computation of the path should take the references given to
+ * addTextToStore() into account, otherwise we have a (relatively
+ * minor) security hole: a caller can register a source file with
+ * bogus references. If there are too many references, the path may
+ * not be garbage collected when it has to be (not really a problem,
+ * the caller could create a root anyway), or it may be garbage
+ * collected when it shouldn't be (more serious).
+ *
+ * Hashing the references would solve this (bogus references would
+ * simply yield a different store path, so other users wouldn't be
+ * affected), but it has some backwards compatibility issues (the
+ * hashing scheme changes), so I'm not doing that for now.
+ */
StorePath computeStorePathForText(
std::string_view name,
std::string_view s,
const StorePathSet & references) const;
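A hedged illustration of the declaration above (the name, contents, and wrapper function are made up for the example):

    // Sketch only: where would this text land in the store?
    nix::StorePath previewTextPath(const nix::Store & store)
    {
        return store.computeStorePathForText("example.nix", "{ }", nix::StorePathSet {});
    }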
- /* Check whether a path is valid. */
+ /**
+ * Check whether a path is valid.
+ */
bool isValidPath(const StorePath & path);
protected:
@@ -259,53 +317,68 @@ protected:
public:
- /* If requested, substitute missing paths. This
- implements nix-copy-closure's --use-substitutes
- flag. */
+ /**
+ * If requested, substitute missing paths. This
+ * implements nix-copy-closure's --use-substitutes
+ * flag.
+ */
void substitutePaths(const StorePathSet & paths);
- /* Query which of the given paths is valid. Optionally, try to
- substitute missing paths. */
+ /**
+ * Query which of the given paths is valid. Optionally, try to
+ * substitute missing paths.
+ */
virtual StorePathSet queryValidPaths(const StorePathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute);
- /* Query the set of all valid paths. Note that for some store
- backends, the name part of store paths may be replaced by 'x'
- (i.e. you'll get /nix/store/<hash>-x rather than
- /nix/store/<hash>-<name>). Use queryPathInfo() to obtain the
- full store path. FIXME: should return a set of
- std::variant<StorePath, HashPart> to get rid of this hack. */
+ /**
+ * Query the set of all valid paths. Note that for some store
+ * backends, the name part of store paths may be replaced by 'x'
+ * (i.e. you'll get /nix/store/<hash>-x rather than
+ * /nix/store/<hash>-<name>). Use queryPathInfo() to obtain the
+ * full store path. FIXME: should return a set of
+ * std::variant<StorePath, HashPart> to get rid of this hack.
+ */
virtual StorePathSet queryAllValidPaths()
{ unsupported("queryAllValidPaths"); }
constexpr static const char * MissingName = "x";
- /* Query information about a valid path. It is permitted to omit
- the name part of the store path. */
+ /**
+ * Query information about a valid path. It is permitted to omit
+ * the name part of the store path.
+ */
ref<const ValidPathInfo> queryPathInfo(const StorePath & path);
- /* Asynchronous version of queryPathInfo(). */
+ /**
+ * Asynchronous version of queryPathInfo().
+ */
void queryPathInfo(const StorePath & path,
Callback<ref<const ValidPathInfo>> callback) noexcept;
- /* Query the information about a realisation. */
+ /**
+ * Query the information about a realisation.
+ */
std::shared_ptr<const Realisation> queryRealisation(const DrvOutput &);
- /* Asynchronous version of queryRealisation(). */
+ /**
+ * Asynchronous version of queryRealisation().
+ */
void queryRealisation(const DrvOutput &,
Callback<std::shared_ptr<const Realisation>> callback) noexcept;
- /* Check whether the given valid path info is sufficiently attested, by
- either being signed by a trusted public key or content-addressed, in
- order to be included in the given store.
-
- These same checks would be performed in addToStore, but this allows an
- earlier failure in the case where dependencies need to be added too, but
- the addToStore wouldn't fail until those dependencies are added. Also,
- we don't really want to add the dependencies listed in a nar info we
- don't trust anyyways.
- */
+ /**
+ * Check whether the given valid path info is sufficiently attested, by
+ * either being signed by a trusted public key or content-addressed, in
+ * order to be included in the given store.
+ *
+ * These same checks would be performed in addToStore, but this allows an
+ * earlier failure in the case where dependencies need to be added too, but
+ * the addToStore wouldn't fail until those dependencies are added. Also,
+ * we don't really want to add the dependencies listed in a nar info we
+ * don't trust anyway.
+ */
virtual bool pathInfoIsUntrusted(const ValidPathInfo &)
{
return true;
@@ -325,53 +398,77 @@ protected:
public:
- /* Queries the set of incoming FS references for a store path.
- The result is not cleared. */
+ /**
+ * Queries the set of incoming FS references for a store path.
+ * The result is not cleared.
+ */
virtual void queryReferrers(const StorePath & path, StorePathSet & referrers)
{ unsupported("queryReferrers"); }
- /* Return all currently valid derivations that have `path' as an
- output. (Note that the result of `queryDeriver()' is the
- derivation that was actually used to produce `path', which may
- not exist anymore.) */
+ /**
+ * @return all currently valid derivations that have `path` as an
+ * output.
+ *
+ * (Note that the result of `queryDeriver()` is the derivation that
+ * was actually used to produce `path`, which may not exist
+ * anymore.)
+ */
virtual StorePathSet queryValidDerivers(const StorePath & path) { return {}; };
- /* Query the outputs of the derivation denoted by `path'. */
+ /**
+ * Query the outputs of the derivation denoted by `path`.
+ */
virtual StorePathSet queryDerivationOutputs(const StorePath & path);
- /* Query the mapping outputName => outputPath for the given derivation. All
- outputs are mentioned so ones mising the mapping are mapped to
- `std::nullopt`. */
+ /**
+ * Query the mapping outputName => outputPath for the given
+ * derivation. All outputs are mentioned so ones missing the mapping
+ * are mapped to `std::nullopt`.
+ */
virtual std::map<std::string, std::optional<StorePath>> queryPartialDerivationOutputMap(const StorePath & path);
- /* Query the mapping outputName=>outputPath for the given derivation.
- Assume every output has a mapping and throw an exception otherwise. */
+ /**
+ * Query the mapping outputName=>outputPath for the given derivation.
+ * Assume every output has a mapping and throw an exception otherwise.
+ */
OutputPathMap queryDerivationOutputMap(const StorePath & path);
- /* Query the full store path given the hash part of a valid store
- path, or empty if the path doesn't exist. */
+ /**
+ * Query the full store path given the hash part of a valid store
+ * path, or empty if the path doesn't exist.
+ */
virtual std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) = 0;
- /* Query which of the given paths have substitutes. */
+ /**
+ * Query which of the given paths have substitutes.
+ */
virtual StorePathSet querySubstitutablePaths(const StorePathSet & paths) { return {}; };
- /* Query substitute info (i.e. references, derivers and download
- sizes) of a map of paths to their optional ca values. The info
- of the first succeeding substituter for each path will be
- returned. If a path does not have substitute info, it's omitted
- from the resulting ‘infos’ map. */
+ /**
+ * Query substitute info (i.e. references, derivers and download
+ * sizes) of a map of paths to their optional ca values. The info of
+ * the first succeeding substituter for each path will be returned.
+ * If a path does not have substitute info, it's omitted from the
+ * resulting ‘infos’ map.
+ */
virtual void querySubstitutablePathInfos(const StorePathCAMap & paths,
- SubstitutablePathInfos & infos) { return; };
+ SubstitutablePathInfos & infos);
- /* Import a path into the store. */
+ /**
+ * Import a path into the store.
+ */
virtual void addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs) = 0;
- // A list of paths infos along with a source providing the content of the
- // associated store path
+ /**
+ * A list of path infos along with a source providing the content
+ * of the associated store path
+ */
using PathsSource = std::vector<std::pair<ValidPathInfo, std::unique_ptr<Source>>>;
- /* Import multiple paths into the store. */
+ /**
+ * Import multiple paths into the store.
+ */
virtual void addMultipleToStore(
Source & source,
RepairFlag repair = NoRepair,
@@ -383,10 +480,14 @@ public:
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs);
- /* Copy the contents of a path to the store and register the
- validity the resulting path. The resulting path is returned.
- The function object `filter' can be used to exclude files (see
- libutil/archive.hh). */
+ /**
+ * Copy the contents of a path to the store and register the
+ * validity of the resulting path.
+ *
+ * @return The resulting path.
+ * @param filter This function can be used to exclude files (see
+ * libutil/archive.hh).
+ */
virtual StorePath addToStore(
std::string_view name,
const Path & srcPath,
@@ -396,26 +497,33 @@ public:
RepairFlag repair = NoRepair,
const StorePathSet & references = StorePathSet());
- /* Copy the contents of a path to the store and register the
- validity the resulting path, using a constant amount of
- memory. */
+ /**
+ * Copy the contents of a path to the store and register the
+ * validity of the resulting path, using a constant amount of
+ * memory.
+ */
ValidPathInfo addToStoreSlow(std::string_view name, const Path & srcPath,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
std::optional<Hash> expectedCAHash = {});
- /* Like addToStore(), but the contents of the path are contained
- in `dump', which is either a NAR serialisation (if recursive ==
- true) or simply the contents of a regular file (if recursive ==
- false).
- `dump` may be drained */
- // FIXME: remove?
+ /**
+ * Like addToStore(), but the contents of the path are contained
+ * in `dump`, which is either a NAR serialisation (if recursive ==
+ * true) or simply the contents of a regular file (if recursive ==
+ * false).
+ * `dump` may be drained
+ *
+ * \todo remove?
+ */
virtual StorePath addToStoreFromDump(Source & dump, std::string_view name,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair,
const StorePathSet & references = StorePathSet())
{ unsupported("addToStoreFromDump"); }
- /* Like addToStore, but the contents written to the output path is
- a regular file containing the given string. */
+ /**
+ * Like addToStore, but the contents written to the output path is a
+ * regular file containing the given string.
+ */
virtual StorePath addTextToStore(
std::string_view name,
std::string_view s,
@@ -436,140 +544,178 @@ public:
virtual void registerDrvOutput(const Realisation & output, CheckSigsFlag checkSigs)
{ return registerDrvOutput(output); }
- /* Write a NAR dump of a store path. */
+ /**
+ * Write a NAR dump of a store path.
+ */
virtual void narFromPath(const StorePath & path, Sink & sink) = 0;
- /* For each path, if it's a derivation, build it. Building a
- derivation means ensuring that the output paths are valid. If
- they are already valid, this is a no-op. Otherwise, validity
- can be reached in two ways. First, if the output paths is
- substitutable, then build the path that way. Second, the
- output paths can be created by running the builder, after
- recursively building any sub-derivations. For inputs that are
- not derivations, substitute them. */
+ /**
+ * For each path, if it's a derivation, build it. Building a
+ * derivation means ensuring that the output paths are valid. If
+ * they are already valid, this is a no-op. Otherwise, validity
+ * can be reached in two ways. First, if the output paths are
+ * substitutable, then build the paths that way. Second, the
+ * output paths can be created by running the builder, after
+ * recursively building any sub-derivations. For inputs that are
+ * not derivations, substitute them.
+ */
virtual void buildPaths(
const std::vector<DerivedPath> & paths,
BuildMode buildMode = bmNormal,
std::shared_ptr<Store> evalStore = nullptr);
- /* Like `buildPaths()`, but return a vector of `BuildResult`s
- corresponding to each element in `paths`. Note that in case of
- a build/substitution error, this function won't throw an
- exception, but return a `BuildResult` containing an error
- message. */
- virtual std::vector<BuildResult> buildPathsWithResults(
+ /**
+ * Like buildPaths(), but return a vector of \ref BuildResult
+ * BuildResults corresponding to each element in paths. Note that in
+ * case of a build/substitution error, this function won't throw an
+ * exception, but return a BuildResult containing an error message.
+ */
+ virtual std::vector<KeyedBuildResult> buildPathsWithResults(
const std::vector<DerivedPath> & paths,
BuildMode buildMode = bmNormal,
std::shared_ptr<Store> evalStore = nullptr);
- /* Build a single non-materialized derivation (i.e. not from an
- on-disk .drv file).
-
- ‘drvPath’ is used to deduplicate worker goals so it is imperative that
- is correct. That said, it doesn't literally need to be store path that
- would be calculated from writing this derivation to the store: it is OK
- if it instead is that of a Derivation which would resolve to this (by
- taking the outputs of it's input derivations and adding them as input
- sources) such that the build time referenceable-paths are the same.
-
- In the input-addressed case, we usually *do* use an "original"
- unresolved derivations's path, as that is what will be used in the
- `buildPaths` case. Also, the input-addressed output paths are verified
- only by that contents of that specific unresolved derivation, so it is
- nice to keep that information around so if the original derivation is
- ever obtained later, it can be verified whether the trusted user in fact
- used the proper output path.
-
- In the content-addressed case, we want to always use the
- resolved drv path calculated from the provided derivation. This serves
- two purposes:
-
- - It keeps the operation trustless, by ruling out a maliciously
- invalid drv path corresponding to a non-resolution-equivalent
- derivation.
-
- - For the floating case in particular, it ensures that the derivation
- to output mapping respects the resolution equivalence relation, so
- one cannot choose different resolution-equivalent derivations to
- subvert dependency coherence (i.e. the property that one doesn't end
- up with multiple different versions of dependencies without
- explicitly choosing to allow it).
- */
+ /**
+ * Build a single non-materialized derivation (i.e. not from an
+ * on-disk .drv file).
+ *
+ * @param drvPath This is used to deduplicate worker goals so it is
+ * imperative that it is correct. That said, it doesn't literally need
+ * to be the store path that would be calculated from writing this
+ * derivation to the store: it is OK if it instead is that of a
+ * Derivation which would resolve to this (by taking the outputs of
+ * its input derivations and adding them as input sources) such
+ * that the build time referenceable-paths are the same.
+ *
+ * In the input-addressed case, we usually *do* use an "original"
+ * unresolved derivation's path, as that is what will be used in the
+ * buildPaths case. Also, the input-addressed output paths are verified
+ * only by the contents of that specific unresolved derivation, so it is
+ * nice to keep that information around so that if the original derivation is
+ * ever obtained later, it can be verified whether the trusted user in fact
+ * used the proper output path.
+ *
+ * In the content-addressed case, we want to always use the resolved
+ * drv path calculated from the provided derivation. This serves two
+ * purposes:
+ *
+ * - It keeps the operation trustless, by ruling out a maliciously
+ * invalid drv path corresponding to a non-resolution-equivalent
+ * derivation.
+ *
+ * - For the floating case in particular, it ensures that the derivation
+ * to output mapping respects the resolution equivalence relation, so
+ * one cannot choose different resolution-equivalent derivations to
+ * subvert dependency coherence (i.e. the property that one doesn't end
+ * up with multiple different versions of dependencies without
+ * explicitly choosing to allow it).
+ */
virtual BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode = bmNormal);
- /* Ensure that a path is valid. If it is not currently valid, it
- may be made valid by running a substitute (if defined for the
- path). */
+ /**
+ * Ensure that a path is valid. If it is not currently valid, it
+ * may be made valid by running a substitute (if defined for the
+ * path).
+ */
virtual void ensurePath(const StorePath & path);
- /* Add a store path as a temporary root of the garbage collector.
- The root disappears as soon as we exit. */
+ /**
+ * Add a store path as a temporary root of the garbage collector.
+ * The root disappears as soon as we exit.
+ */
virtual void addTempRoot(const StorePath & path)
{ debug("not creating temporary root, store doesn't support GC"); }
- /* Return a string representing information about the path that
- can be loaded into the database using `nix-store --load-db' or
- `nix-store --register-validity'. */
+ /**
+ * @return a string representing information about the path that
+ * can be loaded into the database using `nix-store --load-db` or
+ * `nix-store --register-validity`.
+ */
std::string makeValidityRegistration(const StorePathSet & paths,
bool showDerivers, bool showHash);
- /* Write a JSON representation of store path metadata, such as the
- hash and the references. If ‘includeImpureInfo’ is true,
- variable elements such as the registration time are
- included. If ‘showClosureSize’ is true, the closure size of
- each path is included. */
+ /**
+ * Write a JSON representation of store path metadata, such as the
+ * hash and the references.
+ *
+ * @param includeImpureInfo If true, variable elements such as the
+ * registration time are included.
+ *
+ * @param showClosureSize If true, the closure size of each path is
+ * included.
+ */
nlohmann::json pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase = Base32,
AllowInvalidFlag allowInvalid = DisallowInvalid);
- /* Return the size of the closure of the specified path, that is,
- the sum of the size of the NAR serialisation of each path in
- the closure. */
+ /**
+ * @return the size of the closure of the specified path, that is,
+ * the sum of the size of the NAR serialisation of each path in the
+ * closure.
+ */
std::pair<uint64_t, uint64_t> getClosureSize(const StorePath & storePath);
- /* Optimise the disk space usage of the Nix store by hard-linking files
- with the same contents. */
+ /**
+ * Optimise the disk space usage of the Nix store by hard-linking files
+ * with the same contents.
+ */
virtual void optimiseStore() { };
- /* Check the integrity of the Nix store. Returns true if errors
- remain. */
+ /**
+ * Check the integrity of the Nix store.
+ *
+ * @return true if errors remain.
+ */
virtual bool verifyStore(bool checkContents, RepairFlag repair = NoRepair) { return false; };
- /* Return an object to access files in the Nix store. */
- virtual ref<FSAccessor> getFSAccessor()
- { unsupported("getFSAccessor"); }
+ /**
+ * @return An object to access files in the Nix store.
+ */
+ virtual ref<FSAccessor> getFSAccessor() = 0;
- /* Repair the contents of the given path by redownloading it using
- a substituter (if available). */
- virtual void repairPath(const StorePath & path)
- { unsupported("repairPath"); }
+ /**
+ * Repair the contents of the given path by redownloading it using
+ * a substituter (if available).
+ */
+ virtual void repairPath(const StorePath & path);
- /* Add signatures to the specified store path. The signatures are
- not verified. */
+ /**
+ * Add signatures to the specified store path. The signatures are
+ * not verified.
+ */
virtual void addSignatures(const StorePath & storePath, const StringSet & sigs)
{ unsupported("addSignatures"); }
/* Utility functions. */
- /* Read a derivation, after ensuring its existence through
- ensurePath(). */
+ /**
+ * Read a derivation, after ensuring its existence through
+ * ensurePath().
+ */
Derivation derivationFromPath(const StorePath & drvPath);
- /* Read a derivation (which must already be valid). */
+ /**
+ * Read a derivation (which must already be valid).
+ */
Derivation readDerivation(const StorePath & drvPath);
- /* Read a derivation from a potentially invalid path. */
+ /**
+ * Read a derivation from a potentially invalid path.
+ */
Derivation readInvalidDerivation(const StorePath & drvPath);
- /* Place in `out' the set of all store paths in the file system
- closure of `storePath'; that is, all paths than can be directly
- or indirectly reached from it. `out' is not cleared. If
- `flipDirection' is true, the set of paths that can reach
- `storePath' is returned; that is, the closures under the
- `referrers' relation instead of the `references' relation is
- returned. */
+ /**
+ * @param [out] out Place in here the set of all store paths in the
+ * file system closure of `storePath`; that is, all paths that can
+ * be directly or indirectly reached from it. `out` is not cleared.
+ *
+ * @param flipDirection If true, the set of paths that can reach
+ * `storePath` is returned; that is, the closures under the
+ * `referrers` relation instead of the `references` relation is
+ * returned.
+ */
virtual void computeFSClosure(const StorePathSet & paths,
StorePathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false);
@@ -578,27 +724,34 @@ public:
StorePathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false);
- /* Given a set of paths that are to be built, return the set of
- derivations that will be built, and the set of output paths
- that will be substituted. */
+ /**
+ * Given a set of paths that are to be built, return the set of
+ * derivations that will be built, and the set of output paths that
+ * will be substituted.
+ */
virtual void queryMissing(const std::vector<DerivedPath> & targets,
StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
uint64_t & downloadSize, uint64_t & narSize);
- /* Sort a set of paths topologically under the references
- relation. If p refers to q, then p precedes q in this list. */
+ /**
+ * Sort a set of paths topologically under the references
+ * relation. If p refers to q, then p precedes q in this list.
+ */
StorePaths topoSortPaths(const StorePathSet & paths);
- /* Export multiple paths in the format expected by ‘nix-store
- --import’. */
+ /**
+ * Export multiple paths in the format expected by ‘nix-store
+ * --import’.
+ */
void exportPaths(const StorePathSet & paths, Sink & sink);
void exportPath(const StorePath & path, Sink & sink);
- /* Import a sequence of NAR dumps created by exportPaths() into
- the Nix store. Optionally, the contents of the NARs are
- preloaded into the specified FS accessor to speed up subsequent
- access. */
+ /**
+ * Import a sequence of NAR dumps created by exportPaths() into the
+ * Nix store. Optionally, the contents of the NARs are preloaded
+ * into the specified FS accessor to speed up subsequent access.
+ */
StorePaths importPaths(Source & source, CheckSigsFlag checkSigs = CheckSigs);
struct Stats
@@ -620,8 +773,9 @@ public:
const Stats & getStats();
- /* Computes the full closure of of a set of store-paths for e.g.
- derivations that need this information for `exportReferencesGraph`.
+ /**
+ * Computes the full closure of a set of store-paths for e.g.
+ * derivations that need this information for `exportReferencesGraph`.
*/
StorePathSet exportReferences(const StorePathSet & storePaths, const StorePathSet & inputPaths);
@@ -632,23 +786,40 @@ public:
*/
std::optional<StorePath> getBuildDerivationPath(const StorePath &);
- /* Hack to allow long-running processes like hydra-queue-runner to
- occasionally flush their path info cache. */
+ /**
+ * Hack to allow long-running processes like hydra-queue-runner to
+ * occasionally flush their path info cache.
+ */
void clearPathInfoCache()
{
state.lock()->pathInfoCache.clear();
}
- /* Establish a connection to the store, for store types that have
- a notion of connection. Otherwise this is a no-op. */
+ /**
+ * Establish a connection to the store, for store types that have
+ * a notion of connection. Otherwise this is a no-op.
+ */
virtual void connect() { };
- /* Get the protocol version of this store or it's connection. */
+ /**
+ * Get the protocol version of this store or its connection.
+ */
virtual unsigned int getProtocol()
{
return 0;
};
+ /**
+ * @return Whether the store trusts *us*.
+ *
+ * `std::nullopt` means we do not know.
+ *
+ * @note This is the opposite of the StoreConfig::isTrusted
+ * store setting. That is about whether *we* trust the store.
+ */
+ virtual std::optional<TrustedFlag> isTrustedClient() = 0;
+
+
virtual Path toRealPath(const Path & storePath)
{
return storePath;
@@ -659,7 +830,7 @@ public:
return toRealPath(printStorePath(storePath));
}
- /*
+ /**
* Synchronises the options of the client with those of the daemon
* (a no-op when there’s no daemon)
*/
@@ -671,7 +842,13 @@ protected:
Stats stats;
- /* Unsupported methods. */
+ /**
+ * Helper for methods that are unsupported: this is used for
+ * default definitions for virtual methods that are meant to be overridden.
+ *
+ * \todo Using this should be a last resort. It is better to make
+ * the method pure virtual and/or move it to a subclass.
+ */
[[noreturn]] void unsupported(const std::string & op)
{
throw Unsupported("operation '%s' is not supported by store '%s'", op, getUri());
@@ -680,7 +857,9 @@ protected:
};
-/* Copy a path from one store to another. */
+/**
+ * Copy a path from one store to another.
+ */
void copyStorePath(
Store & srcStore,
Store & dstStore,
@@ -689,12 +868,14 @@ void copyStorePath(
CheckSigsFlag checkSigs = CheckSigs);
-/* Copy store paths from one store to another. The paths may be copied
- in parallel. They are copied in a topologically sorted order (i.e.
- if A is a reference of B, then A is copied before B), but the set
- of store paths is not automatically closed; use copyClosure() for
- that. Returns a map of what each path was copied to the dstStore
- as. */
+/**
+ * Copy store paths from one store to another. The paths may be copied
+ * in parallel. They are copied in a topologically sorted order (i.e. if
+ * A is a reference of B, then A is copied before B), but the set of
+ * store paths is not automatically closed; use copyClosure() for that.
+ *
+ * @return a map from each source path to the path it was copied to in the dstStore.
+ */
std::map<StorePath, StorePath> copyPaths(
Store & srcStore, Store & dstStore,
const RealisedPath::Set &,
@@ -709,7 +890,9 @@ std::map<StorePath, StorePath> copyPaths(
CheckSigsFlag checkSigs = CheckSigs,
SubstituteFlag substitute = NoSubstitute);
-/* Copy the closure of `paths` from `srcStore` to `dstStore`. */
+/**
+ * Copy the closure of `paths` from `srcStore` to `dstStore`.
+ */
void copyClosure(
Store & srcStore, Store & dstStore,
const RealisedPath::Set & paths,
@@ -724,52 +907,61 @@ void copyClosure(
CheckSigsFlag checkSigs = CheckSigs,
SubstituteFlag substitute = NoSubstitute);
-/* Remove the temporary roots file for this process. Any temporary
- root becomes garbage after this point unless it has been registered
- as a (permanent) root. */
+/**
+ * Remove the temporary roots file for this process. Any temporary
+ * root becomes garbage after this point unless it has been registered
+ * as a (permanent) root.
+ */
void removeTempRoots();
-/* Resolve the derived path completely, failing if any derivation output
- is unknown. */
+/**
+ * Resolve the derived path completely, failing if any derivation output
+ * is unknown.
+ */
OutputPathMap resolveDerivedPath(Store &, const DerivedPath::Built &, Store * evalStore = nullptr);
-/* Return a Store object to access the Nix store denoted by
- ‘uri’ (slight misnomer...). Supported values are:
-
- * ‘local’: The Nix store in /nix/store and database in
- /nix/var/nix/db, accessed directly.
-
- * ‘daemon’: The Nix store accessed via a Unix domain socket
- connection to nix-daemon.
-
- * ‘unix://<path>’: The Nix store accessed via a Unix domain socket
- connection to nix-daemon, with the socket located at <path>.
-
- * ‘auto’ or ‘’: Equivalent to ‘local’ or ‘daemon’ depending on
- whether the user has write access to the local Nix
- store/database.
-
- * ‘file://<path>’: A binary cache stored in <path>.
-
- * ‘https://<path>’: A binary cache accessed via HTTP.
-
- * ‘s3://<path>’: A writable binary cache stored on Amazon's Simple
- Storage Service.
-
- * ‘ssh://[user@]<host>’: A remote Nix store accessed by running
- ‘nix-store --serve’ via SSH.
-
- You can pass parameters to the store implementation by appending
- ‘?key=value&key=value&...’ to the URI.
-*/
+/**
+ * @return a Store object to access the Nix store denoted by
+ * ‘uri’ (slight misnomer...).
+ *
+ * @param uri Supported values are:
+ *
+ * - ‘local’: The Nix store in /nix/store and database in
+ * /nix/var/nix/db, accessed directly.
+ *
+ * - ‘daemon’: The Nix store accessed via a Unix domain socket
+ * connection to nix-daemon.
+ *
+ * - ‘unix://<path>’: The Nix store accessed via a Unix domain socket
+ * connection to nix-daemon, with the socket located at <path>.
+ *
+ * - ‘auto’ or ‘’: Equivalent to ‘local’ or ‘daemon’ depending on
+ * whether the user has write access to the local Nix
+ * store/database.
+ *
+ * - ‘file://<path>’: A binary cache stored in <path>.
+ *
+ * - ‘https://<path>’: A binary cache accessed via HTTP.
+ *
+ * - ‘s3://<path>’: A writable binary cache stored on Amazon's Simple
+ * Storage Service.
+ *
+ * - ‘ssh://[user@]<host>’: A remote Nix store accessed by running
+ * ‘nix-store --serve’ via SSH.
+ *
+ * You can pass parameters to the store implementation by appending
+ * ‘?key=value&key=value&...’ to the URI.
+ */
ref<Store> openStore(const std::string & uri = settings.storeUri.get(),
const Store::Params & extraParams = Store::Params());
-/* Return the default substituter stores, defined by the
- ‘substituters’ option and various legacy options. */
+/**
+ * @return the default substituter stores, defined by the
+ * ‘substituters’ option and various legacy options.
+ */
std::list<ref<Store>> getDefaultSubstituters();
struct StoreFactory
@@ -812,8 +1004,10 @@ struct RegisterStoreImplementation
};
-/* Display a set of paths in human-readable form (i.e., between quotes
- and separated by commas). */
+/**
+ * Display a set of paths in human-readable form (i.e., between quotes
+ * and separated by commas).
+ */
std::string showPaths(const PathSet & paths);
@@ -822,10 +1016,12 @@ std::optional<ValidPathInfo> decodeValidPathInfo(
std::istream & str,
std::optional<HashResult> hashGiven = std::nullopt);
-/* Split URI into protocol+hierarchy part and its parameter set. */
+/**
+ * Split URI into protocol+hierarchy part and its parameter set.
+ */
std::pair<std::string, Store::Params> splitUriAndParams(const std::string & uri);
-std::optional<ContentAddress> getDerivationCA(const BasicDerivation & drv);
+const ContentAddress * getDerivationCA(const BasicDerivation & drv);
std::map<DrvOutput, StorePath> drvOutputReferences(
Store & store,
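To make the `openStore()` documentation above concrete, here is a minimal usage sketch; the cache URL and the extra parameter are illustrative placeholders, not values taken from this change:

```cpp
#include "store-api.hh"

using namespace nix;

void openStoreExamples()
{
    // "auto" resolves to the local store or the daemon, depending on
    // whether we can write to the local store/database directly.
    ref<Store> store = openStore("auto");

    // Parameters can be passed either as "?key=value" in the URI or
    // programmatically; "priority" is just an illustrative key here.
    Store::Params params;
    params["priority"] = "10";
    ref<Store> cache = openStore("https://cache.example.org", params);
}
```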
diff --git a/src/libstore/store-cast.hh b/src/libstore/store-cast.hh
index ff62fc359..2473e72c5 100644
--- a/src/libstore/store-cast.hh
+++ b/src/libstore/store-cast.hh
@@ -1,9 +1,17 @@
#pragma once
+///@file
#include "store-api.hh"
namespace nix {
+/**
+ * Helper to try downcasting a Store with a nice method if it fails.
+ *
+ * This is basically an alternative to the user-facing part of
+ * Store::unsupported that allows us to still have a nice message but
+ * better interface design.
+ */
template<typename T>
T & require(Store & store)
{
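A typical use of `require` looks like the following sketch; `GcStore` stands in for any `Store` subclass that exposes extra operations (the header and member names are assumptions, not part of this diff):

```cpp
#include "store-cast.hh"
#include "gc-store.hh" // assumed header for GcStore

using namespace nix;

void collectGarbageFrom(Store & store, const GCOptions & options, GCResults & results)
{
    // Fails with a readable error if the store does not support
    // garbage collection, instead of a bare std::bad_cast.
    auto & gcStore = require<GcStore>(store);
    gcStore.collectGarbage(options, results);
}
```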
diff --git a/src/libstore/tests/derivation.cc b/src/libstore/tests/derivation.cc
index 12be8504d..6328ad370 100644
--- a/src/libstore/tests/derivation.cc
+++ b/src/libstore/tests/derivation.cc
@@ -1,6 +1,7 @@
#include <nlohmann/json.hpp>
#include <gtest/gtest.h>
+#include "experimental-features.hh"
#include "derivations.hh"
#include "tests/libstore.hh"
@@ -9,17 +10,62 @@ namespace nix {
class DerivationTest : public LibStoreTest
{
+public:
+ /**
+ * We set these in tests rather than the regular globals so we don't have
+ * to worry about race conditions if the tests run concurrently.
+ */
+ ExperimentalFeatureSettings mockXpSettings;
};
-#define TEST_JSON(TYPE, NAME, STR, VAL, ...) \
- TEST_F(DerivationTest, TYPE ## _ ## NAME ## _to_json) { \
- using nlohmann::literals::operator "" _json; \
- ASSERT_EQ( \
- STR ## _json, \
- (TYPE { VAL }).toJSON(*store __VA_OPT__(,) __VA_ARGS__)); \
+class CaDerivationTest : public DerivationTest
+{
+ void SetUp() override
+ {
+ mockXpSettings.set("experimental-features", "ca-derivations");
+ }
+};
+
+class DynDerivationTest : public DerivationTest
+{
+ void SetUp() override
+ {
+ mockXpSettings.set("experimental-features", "dynamic-derivations ca-derivations");
+ }
+};
+
+class ImpureDerivationTest : public DerivationTest
+{
+ void SetUp() override
+ {
+ mockXpSettings.set("experimental-features", "impure-derivations");
+ }
+};
+
+#define TEST_JSON(FIXTURE, NAME, STR, VAL, DRV_NAME, OUTPUT_NAME) \
+ TEST_F(FIXTURE, DerivationOutput_ ## NAME ## _to_json) { \
+ using nlohmann::literals::operator "" _json; \
+ ASSERT_EQ( \
+ STR ## _json, \
+ (DerivationOutput { VAL }).toJSON( \
+ *store, \
+ DRV_NAME, \
+ OUTPUT_NAME)); \
+ } \
+ \
+ TEST_F(FIXTURE, DerivationOutput_ ## NAME ## _from_json) { \
+ using nlohmann::literals::operator "" _json; \
+ ASSERT_EQ( \
+ DerivationOutput { VAL }, \
+ DerivationOutput::fromJSON( \
+ *store, \
+ DRV_NAME, \
+ OUTPUT_NAME, \
+ STR ## _json, \
+ mockXpSettings)); \
}
-TEST_JSON(DerivationOutput, inputAddressed,
+TEST_JSON(DerivationTest, inputAddressed,
R"({
"path": "/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-drv-name-output-name"
})",
@@ -28,21 +74,48 @@ TEST_JSON(DerivationOutput, inputAddressed,
}),
"drv-name", "output-name")
-TEST_JSON(DerivationOutput, caFixed,
+TEST_JSON(DerivationTest, caFixedFlat,
+ R"({
+ "hashAlgo": "sha256",
+ "hash": "894517c9163c896ec31a2adbd33c0681fd5f45b2c0ef08a64c92a03fb97f390f",
+ "path": "/nix/store/rhcg9h16sqvlbpsa6dqm57sbr2al6nzg-drv-name-output-name"
+ })",
+ (DerivationOutput::CAFixed {
+ .ca = FixedOutputHash {
+ .method = FileIngestionMethod::Flat,
+ .hash = Hash::parseAnyPrefixed("sha256-iUUXyRY8iW7DGirb0zwGgf1fRbLA7wimTJKgP7l/OQ8="),
+ },
+ }),
+ "drv-name", "output-name")
+
+TEST_JSON(DerivationTest, caFixedNAR,
R"({
"hashAlgo": "r:sha256",
"hash": "894517c9163c896ec31a2adbd33c0681fd5f45b2c0ef08a64c92a03fb97f390f",
"path": "/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-drv-name-output-name"
})",
(DerivationOutput::CAFixed {
- .hash = {
+ .ca = FixedOutputHash {
.method = FileIngestionMethod::Recursive,
.hash = Hash::parseAnyPrefixed("sha256-iUUXyRY8iW7DGirb0zwGgf1fRbLA7wimTJKgP7l/OQ8="),
},
}),
"drv-name", "output-name")
-TEST_JSON(DerivationOutput, caFloating,
+TEST_JSON(DynDerivationTest, caFixedText,
+ R"({
+ "hashAlgo": "text:sha256",
+ "hash": "894517c9163c896ec31a2adbd33c0681fd5f45b2c0ef08a64c92a03fb97f390f",
+ "path": "/nix/store/6s1zwabh956jvhv4w9xcdb5jiyanyxg1-drv-name-output-name"
+ })",
+ (DerivationOutput::CAFixed {
+ .ca = TextHash {
+ .hash = Hash::parseAnyPrefixed("sha256-iUUXyRY8iW7DGirb0zwGgf1fRbLA7wimTJKgP7l/OQ8="),
+ },
+ }),
+ "drv-name", "output-name")
+
+TEST_JSON(CaDerivationTest, caFloating,
R"({
"hashAlgo": "r:sha256"
})",
@@ -52,12 +125,12 @@ TEST_JSON(DerivationOutput, caFloating,
}),
"drv-name", "output-name")
-TEST_JSON(DerivationOutput, deferred,
+TEST_JSON(DerivationTest, deferred,
R"({ })",
DerivationOutput::Deferred { },
"drv-name", "output-name")
-TEST_JSON(DerivationOutput, impure,
+TEST_JSON(ImpureDerivationTest, impure,
R"({
"hashAlgo": "r:sha256",
"impure": true
@@ -68,8 +141,28 @@ TEST_JSON(DerivationOutput, impure,
}),
"drv-name", "output-name")
-TEST_JSON(Derivation, impure,
+#undef TEST_JSON
+
+#define TEST_JSON(NAME, STR, VAL, DRV_NAME) \
+ TEST_F(DerivationTest, Derivation_ ## NAME ## _to_json) { \
+ using nlohmann::literals::operator "" _json; \
+ ASSERT_EQ( \
+ STR ## _json, \
+ (Derivation { VAL }).toJSON(*store)); \
+ } \
+ \
+ TEST_F(DerivationTest, Derivation_ ## NAME ## _from_json) { \
+ using nlohmann::literals::operator "" _json; \
+ ASSERT_EQ( \
+ Derivation { VAL }, \
+ Derivation::fromJSON( \
+ *store, \
+ STR ## _json)); \
+ }
+
+TEST_JSON(simple,
R"({
+ "name": "my-derivation",
"inputSrcs": [
"/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep1"
],
@@ -92,6 +185,7 @@ TEST_JSON(Derivation, impure,
})",
({
Derivation drv;
+ drv.name = "my-derivation";
drv.inputSrcs = {
store->parseStorePath("/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep1"),
};
@@ -117,7 +211,8 @@ TEST_JSON(Derivation, impure,
},
};
drv;
- }))
+ }),
+ "drv-name")
#undef TEST_JSON
diff --git a/src/libstore/tests/derived-path.cc b/src/libstore/tests/derived-path.cc
index d1ac2c5e7..160443ec1 100644
--- a/src/libstore/tests/derived-path.cc
+++ b/src/libstore/tests/derived-path.cc
@@ -27,11 +27,13 @@ Gen<DerivedPath::Built> Arbitrary<DerivedPath::Built>::arbitrary()
Gen<DerivedPath> Arbitrary<DerivedPath>::arbitrary()
{
- switch (*gen::inRange<uint8_t>(0, 1)) {
+ switch (*gen::inRange<uint8_t>(0, std::variant_size_v<DerivedPath::Raw>)) {
case 0:
return gen::just<DerivedPath>(*gen::arbitrary<DerivedPath::Opaque>());
- default:
+ case 1:
return gen::just<DerivedPath>(*gen::arbitrary<DerivedPath::Built>());
+ default:
+ assert(false);
}
}
@@ -53,6 +55,14 @@ TEST_F(DerivedPathTest, force_init)
RC_GTEST_FIXTURE_PROP(
DerivedPathTest,
+    prop_legacy_round_rip, // name mirrors the existing prop_round_rip below
+ (const DerivedPath & o))
+{
+ RC_ASSERT(o == DerivedPath::parseLegacy(*store, o.to_string_legacy(*store)));
+}
+
+RC_GTEST_FIXTURE_PROP(
+ DerivedPathTest,
prop_round_rip,
(const DerivedPath & o))
{
diff --git a/src/libstore/tests/derived-path.hh b/src/libstore/tests/derived-path.hh
index 3bc812440..506f3ccb1 100644
--- a/src/libstore/tests/derived-path.hh
+++ b/src/libstore/tests/derived-path.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <rapidcheck/gen/Arbitrary.h>
diff --git a/src/libstore/tests/downstream-placeholder.cc b/src/libstore/tests/downstream-placeholder.cc
new file mode 100644
index 000000000..ec3e1000f
--- /dev/null
+++ b/src/libstore/tests/downstream-placeholder.cc
@@ -0,0 +1,33 @@
+#include <gtest/gtest.h>
+
+#include "downstream-placeholder.hh"
+
+namespace nix {
+
+TEST(DownstreamPlaceholder, unknownCaOutput) {
+ ASSERT_EQ(
+ DownstreamPlaceholder::unknownCaOutput(
+ StorePath { "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv" },
+ "out").render(),
+ "/0c6rn30q4frawknapgwq386zq358m8r6msvywcvc89n6m5p2dgbz");
+}
+
+TEST(DownstreamPlaceholder, unknownDerivation) {
+ /**
+ * We set these in tests rather than the regular globals so we don't have
+ * to worry about race conditions if the tests run concurrently.
+ */
+ ExperimentalFeatureSettings mockXpSettings;
+ mockXpSettings.set("experimental-features", "dynamic-derivations ca-derivations");
+
+ ASSERT_EQ(
+ DownstreamPlaceholder::unknownDerivation(
+ DownstreamPlaceholder::unknownCaOutput(
+ StorePath { "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-foo.drv.drv" },
+ "out"),
+ "out",
+ mockXpSettings).render(),
+ "/0gn6agqxjyyalf0dpihgyf49xq5hqxgw100f0wydnj6yqrhqsb3w");
+}
+
+}
diff --git a/src/libstore/tests/libstore.hh b/src/libstore/tests/libstore.hh
index 05397659b..ef93457b5 100644
--- a/src/libstore/tests/libstore.hh
+++ b/src/libstore/tests/libstore.hh
@@ -1,3 +1,6 @@
+#pragma once
+///@file
+
#include <gtest/gtest.h>
#include <gmock/gmock.h>
diff --git a/src/libstore/tests/outputs-spec.cc b/src/libstore/tests/outputs-spec.cc
index 984d1d963..bf8deaa9d 100644
--- a/src/libstore/tests/outputs-spec.cc
+++ b/src/libstore/tests/outputs-spec.cc
@@ -206,15 +206,17 @@ using namespace nix;
Gen<OutputsSpec> Arbitrary<OutputsSpec>::arbitrary()
{
- switch (*gen::inRange<uint8_t>(0, 1)) {
+ switch (*gen::inRange<uint8_t>(0, std::variant_size_v<OutputsSpec::Raw>)) {
case 0:
return gen::just((OutputsSpec) OutputsSpec::All { });
- default:
+ case 1:
return gen::just((OutputsSpec) OutputsSpec::Names {
*gen::nonEmpty(gen::container<StringSet>(gen::map(
gen::arbitrary<StorePathName>(),
[](StorePathName n) { return n.name; }))),
});
+ default:
+ assert(false);
}
}
diff --git a/src/libstore/tests/outputs-spec.hh b/src/libstore/tests/outputs-spec.hh
index 2d455c817..ded331b33 100644
--- a/src/libstore/tests/outputs-spec.hh
+++ b/src/libstore/tests/outputs-spec.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <rapidcheck/gen/Arbitrary.h>
diff --git a/src/libstore/tests/path.hh b/src/libstore/tests/path.hh
index d7f1a8988..21cb62310 100644
--- a/src/libstore/tests/path.hh
+++ b/src/libstore/tests/path.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <rapidcheck/gen/Arbitrary.h>
diff --git a/src/libstore/uds-remote-store.cc b/src/libstore/uds-remote-store.cc
index 5c38323cd..0fb7c38e9 100644
--- a/src/libstore/uds-remote-store.cc
+++ b/src/libstore/uds-remote-store.cc
@@ -26,9 +26,9 @@ UDSRemoteStore::UDSRemoteStore(const Params & params)
UDSRemoteStore::UDSRemoteStore(
- const std::string scheme,
- std::string socket_path,
- const Params & params)
+ const std::string scheme,
+ std::string socket_path,
+ const Params & params)
: UDSRemoteStore(params)
{
path.emplace(socket_path);
diff --git a/src/libstore/uds-remote-store.hh b/src/libstore/uds-remote-store.hh
index d31a4d592..bd1dcb67c 100644
--- a/src/libstore/uds-remote-store.hh
+++ b/src/libstore/uds-remote-store.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "remote-store.hh"
#include "local-fs-store.hh"
@@ -15,6 +16,13 @@ struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual RemoteStoreCon
}
const std::string name() override { return "Local Daemon Store"; }
+
+ std::string doc() override
+ {
+ return
+ #include "uds-remote-store.md"
+ ;
+ }
};
class UDSRemoteStore : public virtual UDSRemoteStoreConfig, public virtual LocalFSStore, public virtual RemoteStore
diff --git a/src/libstore/uds-remote-store.md b/src/libstore/uds-remote-store.md
new file mode 100644
index 000000000..8df0bd6ff
--- /dev/null
+++ b/src/libstore/uds-remote-store.md
@@ -0,0 +1,9 @@
+R"(
+
+**Store URL format**: `daemon`, `unix://`*path*
+
+This store type accesses a Nix store by talking to a Nix daemon
+listening on the Unix domain socket *path*. The store pseudo-URL
+`daemon` is equivalent to `unix:///nix/var/nix/daemon-socket/socket`.
+
+)"
diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc
new file mode 100644
index 000000000..51bb12026
--- /dev/null
+++ b/src/libstore/worker-protocol.cc
@@ -0,0 +1,192 @@
+#include "serialise.hh"
+#include "util.hh"
+#include "path-with-outputs.hh"
+#include "store-api.hh"
+#include "build-result.hh"
+#include "worker-protocol.hh"
+#include "archive.hh"
+#include "derivations.hh"
+
+#include <nlohmann/json.hpp>
+
+namespace nix {
+
+std::string WorkerProto<std::string>::read(const Store & store, Source & from)
+{
+ return readString(from);
+}
+
+void WorkerProto<std::string>::write(const Store & store, Sink & out, const std::string & str)
+{
+ out << str;
+}
+
+
+StorePath WorkerProto<StorePath>::read(const Store & store, Source & from)
+{
+ return store.parseStorePath(readString(from));
+}
+
+void WorkerProto<StorePath>::write(const Store & store, Sink & out, const StorePath & storePath)
+{
+ out << store.printStorePath(storePath);
+}
+
+
+std::optional<TrustedFlag> WorkerProto<std::optional<TrustedFlag>>::read(const Store & store, Source & from)
+{
+ auto temp = readNum<uint8_t>(from);
+ switch (temp) {
+ case 0:
+ return std::nullopt;
+ case 1:
+ return { Trusted };
+ case 2:
+ return { NotTrusted };
+ default:
+ throw Error("Invalid trusted status from remote");
+ }
+}
+
+void WorkerProto<std::optional<TrustedFlag>>::write(const Store & store, Sink & out, const std::optional<TrustedFlag> & optTrusted)
+{
+ if (!optTrusted)
+ out << (uint8_t)0;
+ else {
+ switch (*optTrusted) {
+ case Trusted:
+ out << (uint8_t)1;
+ break;
+ case NotTrusted:
+ out << (uint8_t)2;
+ break;
+ default:
+ assert(false);
+ };
+ }
+}
+
+
+ContentAddress WorkerProto<ContentAddress>::read(const Store & store, Source & from)
+{
+ return ContentAddress::parse(readString(from));
+}
+
+void WorkerProto<ContentAddress>::write(const Store & store, Sink & out, const ContentAddress & ca)
+{
+ out << renderContentAddress(ca);
+}
+
+
+DerivedPath WorkerProto<DerivedPath>::read(const Store & store, Source & from)
+{
+ auto s = readString(from);
+ return DerivedPath::parseLegacy(store, s);
+}
+
+void WorkerProto<DerivedPath>::write(const Store & store, Sink & out, const DerivedPath & req)
+{
+ out << req.to_string_legacy(store);
+}
+
+
+Realisation WorkerProto<Realisation>::read(const Store & store, Source & from)
+{
+ std::string rawInput = readString(from);
+ return Realisation::fromJSON(
+ nlohmann::json::parse(rawInput),
+ "remote-protocol"
+ );
+}
+
+void WorkerProto<Realisation>::write(const Store & store, Sink & out, const Realisation & realisation)
+{
+ out << realisation.toJSON().dump();
+}
+
+
+DrvOutput WorkerProto<DrvOutput>::read(const Store & store, Source & from)
+{
+ return DrvOutput::parse(readString(from));
+}
+
+void WorkerProto<DrvOutput>::write(const Store & store, Sink & out, const DrvOutput & drvOutput)
+{
+ out << drvOutput.to_string();
+}
+
+
+KeyedBuildResult WorkerProto<KeyedBuildResult>::read(const Store & store, Source & from)
+{
+ auto path = WorkerProto<DerivedPath>::read(store, from);
+ auto br = WorkerProto<BuildResult>::read(store, from);
+ return KeyedBuildResult {
+ std::move(br),
+ /* .path = */ std::move(path),
+ };
+}
+
+void WorkerProto<KeyedBuildResult>::write(const Store & store, Sink & to, const KeyedBuildResult & res)
+{
+ workerProtoWrite(store, to, res.path);
+ workerProtoWrite(store, to, static_cast<const BuildResult &>(res));
+}
+
+
+BuildResult WorkerProto<BuildResult>::read(const Store & store, Source & from)
+{
+ BuildResult res;
+ res.status = (BuildResult::Status) readInt(from);
+ from
+ >> res.errorMsg
+ >> res.timesBuilt
+ >> res.isNonDeterministic
+ >> res.startTime
+ >> res.stopTime;
+ auto builtOutputs = WorkerProto<DrvOutputs>::read(store, from);
+ for (auto && [output, realisation] : builtOutputs)
+ res.builtOutputs.insert_or_assign(
+ std::move(output.outputName),
+ std::move(realisation));
+ return res;
+}
+
+void WorkerProto<BuildResult>::write(const Store & store, Sink & to, const BuildResult & res)
+{
+ to
+ << res.status
+ << res.errorMsg
+ << res.timesBuilt
+ << res.isNonDeterministic
+ << res.startTime
+ << res.stopTime;
+ DrvOutputs builtOutputs;
+ for (auto & [output, realisation] : res.builtOutputs)
+ builtOutputs.insert_or_assign(realisation.id, realisation);
+ workerProtoWrite(store, to, builtOutputs);
+}
+
+
+std::optional<StorePath> WorkerProto<std::optional<StorePath>>::read(const Store & store, Source & from)
+{
+ auto s = readString(from);
+ return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
+}
+
+void WorkerProto<std::optional<StorePath>>::write(const Store & store, Sink & out, const std::optional<StorePath> & storePathOpt)
+{
+ out << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
+}
+
+
+std::optional<ContentAddress> WorkerProto<std::optional<ContentAddress>>::read(const Store & store, Source & from)
+{
+ return ContentAddress::parseOpt(readString(from));
+}
+
+void WorkerProto<std::optional<ContentAddress>>::write(const Store & store, Sink & out, const std::optional<ContentAddress> & caOpt)
+{
+ out << (caOpt ? renderContentAddress(*caOpt) : "");
+}
+
+}
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index 87088a3ac..f06332d17 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -1,6 +1,6 @@
#pragma once
+///@file
-#include "store-api.hh"
#include "serialise.hh"
namespace nix {
@@ -9,11 +9,15 @@ namespace nix {
#define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f
-#define PROTOCOL_VERSION (1 << 8 | 34)
+#define PROTOCOL_VERSION (1 << 8 | 35)
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
+/**
+ * Enumeration of all the request types for the "worker protocol", used
+ * by unix:// and ssh-ng:// stores.
+ */
typedef enum {
wopIsValidPath = 1,
wopHasSubstitutes = 3,
@@ -74,114 +78,162 @@ typedef enum {
class Store;
struct Source;
-/* To guide overloading */
-template<typename T>
-struct Phantom {};
-
+// items being serialized
+struct DerivedPath;
+struct DrvOutput;
+struct Realisation;
+struct BuildResult;
+struct KeyedBuildResult;
+enum TrustedFlag : bool;
-namespace worker_proto {
-/* FIXME maybe move more stuff inside here */
-#define MAKE_WORKER_PROTO(TEMPLATE, T) \
- TEMPLATE T read(const Store & store, Source & from, Phantom< T > _); \
- TEMPLATE void write(const Store & store, Sink & out, const T & str)
+/**
+ * Data type for canonical pairs of serializers for the worker protocol.
+ *
+ * See https://en.cppreference.com/w/cpp/language/adl for the broader
+ * concept of what is going on here.
+ */
+template<typename T>
+struct WorkerProto {
+ static T read(const Store & store, Source & from);
+ static void write(const Store & store, Sink & out, const T & t);
+};
+
+/**
+ * Wrapper function around `WorkerProto<T>::write` that allows us to
+ * infer the type instead of having to write it down explicitly.
+ */
+template<typename T>
+void workerProtoWrite(const Store & store, Sink & out, const T & t)
+{
+ WorkerProto<T>::write(store, out, t);
+}
-MAKE_WORKER_PROTO(, std::string);
-MAKE_WORKER_PROTO(, StorePath);
-MAKE_WORKER_PROTO(, ContentAddress);
-MAKE_WORKER_PROTO(, DerivedPath);
-MAKE_WORKER_PROTO(, Realisation);
-MAKE_WORKER_PROTO(, DrvOutput);
-MAKE_WORKER_PROTO(, BuildResult);
+/**
+ * Declare a canonical serializer pair for the worker protocol.
+ *
+ * We specialize the struct merely to indicate that we are implementing
+ * the function for the given type.
+ *
+ * Some sort of `template<...>` must be used with the caller for this to
+ * be legal specialization syntax. See below for what that looks like in
+ * practice.
+ */
+#define MAKE_WORKER_PROTO(T) \
+ struct WorkerProto< T > { \
+ static T read(const Store & store, Source & from); \
+ static void write(const Store & store, Sink & out, const T & t); \
+ };
+
+template<>
+MAKE_WORKER_PROTO(std::string);
+template<>
+MAKE_WORKER_PROTO(StorePath);
+template<>
+MAKE_WORKER_PROTO(ContentAddress);
+template<>
+MAKE_WORKER_PROTO(DerivedPath);
+template<>
+MAKE_WORKER_PROTO(Realisation);
+template<>
+MAKE_WORKER_PROTO(DrvOutput);
+template<>
+MAKE_WORKER_PROTO(BuildResult);
+template<>
+MAKE_WORKER_PROTO(KeyedBuildResult);
+template<>
+MAKE_WORKER_PROTO(std::optional<TrustedFlag>);
-MAKE_WORKER_PROTO(template<typename T>, std::vector<T>);
-MAKE_WORKER_PROTO(template<typename T>, std::set<T>);
+template<typename T>
+MAKE_WORKER_PROTO(std::vector<T>);
+template<typename T>
+MAKE_WORKER_PROTO(std::set<T>);
-#define X_ template<typename K, typename V>
-#define Y_ std::map<K, V>
-MAKE_WORKER_PROTO(X_, Y_);
+template<typename K, typename V>
+#define X_ std::map<K, V>
+MAKE_WORKER_PROTO(X_);
#undef X_
-#undef Y_
-/* These use the empty string for the null case, relying on the fact
- that the underlying types never serialize to the empty string.
-
- We do this instead of a generic std::optional<T> instance because
- ordinal tags (0 or 1, here) are a bit of a compatability hazard. For
- the same reason, we don't have a std::variant<T..> instances (ordinal
- tags 0...n).
-
- We could the generic instances and then these as specializations for
- compatability, but that's proven a bit finnicky, and also makes the
- worker protocol harder to implement in other languages where such
- specializations may not be allowed.
+/**
+ * These use the empty string for the null case, relying on the fact
+ * that the underlying types never serialize to the empty string.
+ *
+ * We do this instead of a generic std::optional<T> instance because
+ * ordinal tags (0 or 1, here) are a bit of a compatibility hazard. For
+ * the same reason, we don't have std::variant<T..> instances (ordinal
+ * tags 0...n).
+ *
+ * We could have the generic instances and then these as specializations for
+ * compatibility, but that's proven a bit finicky, and also makes the
+ * worker protocol harder to implement in other languages where such
+ * specializations may not be allowed.
*/
-MAKE_WORKER_PROTO(, std::optional<StorePath>);
-MAKE_WORKER_PROTO(, std::optional<ContentAddress>);
+template<>
+MAKE_WORKER_PROTO(std::optional<StorePath>);
+template<>
+MAKE_WORKER_PROTO(std::optional<ContentAddress>);
template<typename T>
-std::vector<T> read(const Store & store, Source & from, Phantom<std::vector<T>> _)
+std::vector<T> WorkerProto<std::vector<T>>::read(const Store & store, Source & from)
{
std::vector<T> resSet;
auto size = readNum<size_t>(from);
while (size--) {
- resSet.push_back(read(store, from, Phantom<T> {}));
+ resSet.push_back(WorkerProto<T>::read(store, from));
}
return resSet;
}
template<typename T>
-void write(const Store & store, Sink & out, const std::vector<T> & resSet)
+void WorkerProto<std::vector<T>>::write(const Store & store, Sink & out, const std::vector<T> & resSet)
{
out << resSet.size();
for (auto & key : resSet) {
- write(store, out, key);
+ WorkerProto<T>::write(store, out, key);
}
}
template<typename T>
-std::set<T> read(const Store & store, Source & from, Phantom<std::set<T>> _)
+std::set<T> WorkerProto<std::set<T>>::read(const Store & store, Source & from)
{
std::set<T> resSet;
auto size = readNum<size_t>(from);
while (size--) {
- resSet.insert(read(store, from, Phantom<T> {}));
+ resSet.insert(WorkerProto<T>::read(store, from));
}
return resSet;
}
template<typename T>
-void write(const Store & store, Sink & out, const std::set<T> & resSet)
+void WorkerProto<std::set<T>>::write(const Store & store, Sink & out, const std::set<T> & resSet)
{
out << resSet.size();
for (auto & key : resSet) {
- write(store, out, key);
+ WorkerProto<T>::write(store, out, key);
}
}
template<typename K, typename V>
-std::map<K, V> read(const Store & store, Source & from, Phantom<std::map<K, V>> _)
+std::map<K, V> WorkerProto<std::map<K, V>>::read(const Store & store, Source & from)
{
std::map<K, V> resMap;
auto size = readNum<size_t>(from);
while (size--) {
- auto k = read(store, from, Phantom<K> {});
- auto v = read(store, from, Phantom<V> {});
+ auto k = WorkerProto<K>::read(store, from);
+ auto v = WorkerProto<V>::read(store, from);
resMap.insert_or_assign(std::move(k), std::move(v));
}
return resMap;
}
template<typename K, typename V>
-void write(const Store & store, Sink & out, const std::map<K, V> & resMap)
+void WorkerProto<std::map<K, V>>::write(const Store & store, Sink & out, const std::map<K, V> & resMap)
{
out << resMap.size();
for (auto & i : resMap) {
- write(store, out, i.first);
- write(store, out, i.second);
+ WorkerProto<K>::write(store, out, i.first);
+ WorkerProto<V>::write(store, out, i.second);
}
}
}
-
-}
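As a usage sketch of the new serializer pairs above (the stream objects are whatever a remote store already holds; nothing below is added by this change):

```cpp
#include <set>
#include "worker-protocol.hh"
#include "store-api.hh"

using namespace nix;

void sendAndReceivePaths(const Store & store, Sink & to, Source & from)
{
    std::set<StorePath> paths;

    // The wrapper infers the serializer from the argument type...
    workerProtoWrite(store, to, paths);

    // ...while reads name the type explicitly.
    auto received = WorkerProto<std::set<StorePath>>::read(store, from);
}
```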
diff --git a/src/libutil/abstract-setting-to-json.hh b/src/libutil/abstract-setting-to-json.hh
index 2d82b54e7..7b6c3fcb5 100644
--- a/src/libutil/abstract-setting-to-json.hh
+++ b/src/libutil/abstract-setting-to-json.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <nlohmann/json.hpp>
#include "config.hh"
diff --git a/src/libutil/ansicolor.hh b/src/libutil/ansicolor.hh
index 38305e71c..86becafa6 100644
--- a/src/libutil/ansicolor.hh
+++ b/src/libutil/ansicolor.hh
@@ -1,8 +1,12 @@
#pragma once
+/**
+ * @file
+ *
+ * @brief Some ANSI escape sequences.
+ */
namespace nix {
-/* Some ANSI escape sequences. */
#define ANSI_NORMAL "\e[0m"
#define ANSI_BOLD "\e[1m"
#define ANSI_FAINT "\e[2m"
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index 0e2b9d12c..268a798d9 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -87,7 +87,7 @@ static time_t dump(const Path & path, Sink & sink, PathFilter & filter)
std::string name(i.name);
size_t pos = i.name.find(caseHackSuffix);
if (pos != std::string::npos) {
- debug(format("removing case hack suffix from '%1%'") % (path + "/" + i.name));
+ debug("removing case hack suffix from '%1%'", path + "/" + i.name);
name.erase(pos);
}
if (!unhacked.emplace(name, i.name).second)
@@ -262,7 +262,7 @@ static void parse(ParseSink & sink, Source & source, const Path & path)
if (archiveSettings.useCaseHack) {
auto i = names.find(name);
if (i != names.end()) {
- debug(format("case collision between '%1%' and '%2%'") % i->first % name);
+ debug("case collision between '%1%' and '%2%'", i->first, name);
name += caseHackSuffix;
name += std::to_string(++i->second);
} else
diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh
index e42dea540..2cf164a41 100644
--- a/src/libutil/archive.hh
+++ b/src/libutil/archive.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "serialise.hh"
@@ -7,54 +8,73 @@
namespace nix {
-/* dumpPath creates a Nix archive of the specified path. The format
- is as follows:
-
- IF path points to a REGULAR FILE:
- dump(path) = attrs(
- [ ("type", "regular")
- , ("contents", contents(path))
- ])
-
- IF path points to a DIRECTORY:
- dump(path) = attrs(
- [ ("type", "directory")
- , ("entries", concat(map(f, sort(entries(path)))))
- ])
- where f(fn) = attrs(
- [ ("name", fn)
- , ("file", dump(path + "/" + fn))
- ])
-
- where:
-
- attrs(as) = concat(map(attr, as)) + encN(0)
- attrs((a, b)) = encS(a) + encS(b)
-
- encS(s) = encN(len(s)) + s + (padding until next 64-bit boundary)
-
- encN(n) = 64-bit little-endian encoding of n.
-
- contents(path) = the contents of a regular file.
-
- sort(strings) = lexicographic sort by 8-bit value (strcmp).
-
- entries(path) = the entries of a directory, without `.' and
- `..'.
-
- `+' denotes string concatenation. */
-
-
+/**
+ * dumpPath creates a Nix archive of the specified path.
+ *
+ * @param path the file system data to dump. Dumping is recursive so if
+ * this is a directory we dump it and all its children.
+ *
+ * @param [out] sink The serialised archive is fed into this sink.
+ *
+ * @param filter Can be used to skip certain files.
+ *
+ * The format is as follows:
+ *
+ * ```
+ * IF path points to a REGULAR FILE:
+ * dump(path) = attrs(
+ * [ ("type", "regular")
+ * , ("contents", contents(path))
+ * ])
+ *
+ * IF path points to a DIRECTORY:
+ * dump(path) = attrs(
+ * [ ("type", "directory")
+ * , ("entries", concat(map(f, sort(entries(path)))))
+ * ])
+ * where f(fn) = attrs(
+ * [ ("name", fn)
+ * , ("file", dump(path + "/" + fn))
+ * ])
+ *
+ * where:
+ *
+ * attrs(as) = concat(map(attr, as)) + encN(0)
+ * attrs((a, b)) = encS(a) + encS(b)
+ *
+ * encS(s) = encN(len(s)) + s + (padding until next 64-bit boundary)
+ *
+ * encN(n) = 64-bit little-endian encoding of n.
+ *
+ * contents(path) = the contents of a regular file.
+ *
+ * sort(strings) = lexicographic sort by 8-bit value (strcmp).
+ *
+ * entries(path) = the entries of a directory, without `.` and
+ * `..`.
+ *
+ * `+` denotes string concatenation.
+ * ```
+ */
void dumpPath(const Path & path, Sink & sink,
PathFilter & filter = defaultPathFilter);
-/* Same as `void dumpPath()`, but returns the last modified date of the path */
+/**
+ * Same as dumpPath(), but returns the last modified date of the path.
+ */
time_t dumpPathAndGetMtime(const Path & path, Sink & sink,
PathFilter & filter = defaultPathFilter);
+/**
+ * Dump an archive with a single file with these contents.
+ *
+ * @param s Contents of the file.
+ */
void dumpString(std::string_view s, Sink & sink);
-/* FIXME: fix this API, it sucks. */
+/**
+ * \todo Fix this API, it sucks.
+ */
struct ParseSink
{
virtual void createDirectory(const Path & path) { };
@@ -68,8 +88,10 @@ struct ParseSink
virtual void createSymlink(const Path & path, const std::string & target) { };
};
-/* If the NAR archive contains a single file at top-level, then save
- the contents of the file to `s'. Otherwise barf. */
+/**
+ * If the NAR archive contains a single file at top-level, then save
+ * the contents of the file to `s`. Otherwise barf.
+ */
struct RetrieveRegularNARSink : ParseSink
{
bool regular = true;
@@ -97,7 +119,9 @@ void parseDump(ParseSink & sink, Source & source);
void restorePath(const Path & path, Source & source);
-/* Read a NAR from 'source' and write it to 'sink'. */
+/**
+ * Read a NAR from 'source' and write it to 'sink'.
+ */
void copyNAR(Source & source, Sink & sink);
void copyPath(const Path & from, const Path & to);
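For orientation, a minimal in-memory round trip through the NAR functions documented above might look like this; `StringSink`/`StringSource` are the existing in-memory streams from libutil, and the paths are placeholders:

```cpp
#include "archive.hh"
#include "serialise.hh"

using namespace nix;

void narRoundTrip(const Path & src, const Path & dst)
{
    // Serialise 'src' (a file or directory) into an in-memory NAR...
    StringSink sink;
    dumpPath(src, sink);

    // ...and recreate it at 'dst'.
    StringSource source(sink.s);
    restorePath(dst, source);
}
```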
diff --git a/src/libutil/args.cc b/src/libutil/args.cc
index 35686a8aa..081dbeb28 100644
--- a/src/libutil/args.cc
+++ b/src/libutil/args.cc
@@ -52,7 +52,7 @@ std::shared_ptr<Completions> completions;
std::string completionMarker = "___COMPLETE___";
-std::optional<std::string> needsCompletion(std::string_view s)
+static std::optional<std::string> needsCompletion(std::string_view s)
{
if (!completions) return {};
auto i = s.find(completionMarker);
@@ -120,6 +120,12 @@ void Args::parseCmdline(const Strings & _cmdline)
if (!argsSeen)
initialFlagsProcessed();
+
+ /* Now that we are done parsing, make sure that any experimental
+ * feature required by the flags is enabled */
+ for (auto & f : flagExperimentalFeatures)
+ experimentalFeatureSettings.require(f);
+
}
bool Args::processFlag(Strings::iterator & pos, Strings::iterator end)
@@ -128,12 +134,18 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end)
auto process = [&](const std::string & name, const Flag & flag) -> bool {
++pos;
+
+ if (auto & f = flag.experimentalFeature)
+ flagExperimentalFeatures.insert(*f);
+
std::vector<std::string> args;
bool anyCompleted = false;
for (size_t n = 0 ; n < flag.handler.arity; ++n) {
if (pos == end) {
if (flag.handler.arity == ArityAny || anyCompleted) break;
- throw UsageError("flag '%s' requires %d argument(s)", name, flag.handler.arity);
+ throw UsageError(
+ "flag '%s' requires %d argument(s), but only %d were given",
+ name, flag.handler.arity, n);
}
if (auto prefix = needsCompletion(*pos)) {
anyCompleted = true;
@@ -152,7 +164,11 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end)
for (auto & [name, flag] : longFlags) {
if (!hiddenCategories.count(flag->category)
&& hasPrefix(name, std::string(*prefix, 2)))
+ {
+ if (auto & f = flag->experimentalFeature)
+ flagExperimentalFeatures.insert(*f);
completions->add("--" + name, flag->description);
+ }
}
return false;
}
@@ -172,7 +188,8 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end)
if (prefix == "-") {
completions->add("--");
for (auto & [flagName, flag] : shortFlags)
- completions->add(std::string("-") + flagName, flag->description);
+ if (experimentalFeatureSettings.isEnabled(flag->experimentalFeature))
+ completions->add(std::string("-") + flagName, flag->description);
}
}
@@ -230,6 +247,11 @@ nlohmann::json Args::toJSON()
j["arity"] = flag->handler.arity;
if (!flag->labels.empty())
j["labels"] = flag->labels;
+            // TODO With C++23 use `std::optional::transform`
+ if (auto & xp = flag->experimentalFeature)
+ j["experimental-feature"] = showExperimentalFeature(*xp);
+ else
+ j["experimental-feature"] = nullptr;
flags[name] = std::move(j);
}
@@ -326,6 +348,11 @@ Strings argvToStrings(int argc, char * * argv)
return args;
}
+std::optional<ExperimentalFeature> Command::experimentalFeature ()
+{
+ return { Xp::NixCommand };
+}
+
MultiCommand::MultiCommand(const Commands & commands_)
: commands(commands_)
{
@@ -389,6 +416,11 @@ nlohmann::json MultiCommand::toJSON()
cat["id"] = command->category();
cat["description"] = trim(categories[command->category()]);
j["category"] = std::move(cat);
+            // TODO With C++23 use `std::optional::transform`
+ if (auto xp = command->experimentalFeature())
+ cat["experimental-feature"] = showExperimentalFeature(*xp);
+ else
+ cat["experimental-feature"] = nullptr;
cmds[name] = std::move(j);
}
diff --git a/src/libutil/args.hh b/src/libutil/args.hh
index 84866f12b..d90129796 100644
--- a/src/libutil/args.hh
+++ b/src/libutil/args.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <iostream>
#include <map>
@@ -18,16 +19,22 @@ class Args
{
public:
- /* Parse the command line, throwing a UsageError if something goes
- wrong. */
+ /**
+ * Parse the command line, throwing a UsageError if something goes
+ * wrong.
+ */
void parseCmdline(const Strings & cmdline);
- /* Return a short one-line description of the command. */
+ /**
+ * Return a short one-line description of the command.
+ */
virtual std::string description() { return ""; }
virtual bool forceImpureByDefault() { return false; }
- /* Return documentation about this command, in Markdown format. */
+ /**
+ * Return documentation about this command, in Markdown format.
+ */
virtual std::string doc() { return ""; }
protected:
@@ -117,6 +124,8 @@ protected:
Handler handler;
std::function<void(size_t, std::string_view)> completer;
+ std::optional<ExperimentalFeature> experimentalFeature;
+
static Flag mkHashTypeFlag(std::string && longName, HashType * ht);
static Flag mkHashTypeOptFlag(std::string && longName, std::optional<HashType> * oht);
};
@@ -144,13 +153,17 @@ protected:
std::set<std::string> hiddenCategories;
- /* Called after all command line flags before the first non-flag
- argument (if any) have been processed. */
+ /**
+ * Called after all command line flags before the first non-flag
+ * argument (if any) have been processed.
+ */
virtual void initialFlagsProcessed() {}
- /* Called after the command line has been processed if we need to generate
- completions. Useful for commands that need to know the whole command line
- in order to know what completions to generate. */
+ /**
+ * Called after the command line has been processed if we need to generate
+ * completions. Useful for commands that need to know the whole command line
+ * in order to know what completions to generate.
+ */
virtual void completionHook() { }
public:
@@ -164,7 +177,9 @@ public:
expectedArgs.emplace_back(std::move(arg));
}
- /* Expect a string argument. */
+ /**
+ * Expect a string argument.
+ */
void expectArg(const std::string & label, std::string * dest, bool optional = false)
{
expectArgs({
@@ -174,7 +189,9 @@ public:
});
}
- /* Expect 0 or more arguments. */
+ /**
+ * Expect 0 or more arguments.
+ */
void expectArgs(const std::string & label, std::vector<std::string> * dest)
{
expectArgs({
@@ -188,30 +205,48 @@ public:
friend class MultiCommand;
MultiCommand * parent = nullptr;
+
+private:
+
+ /**
+ * Experimental features needed when parsing args. These are checked
+ * after flag parsing is completed in order to support enabling
+ * experimental features coming after the flag that needs the
+ * experimental feature.
+ */
+ std::set<ExperimentalFeature> flagExperimentalFeatures;
};
-/* A command is an argument parser that can be executed by calling its
- run() method. */
+/**
+ * A command is an argument parser that can be executed by calling its
+ * run() method.
+ */
struct Command : virtual public Args
{
friend class MultiCommand;
virtual ~Command() { }
- virtual void prepare() { };
+ /**
+ * Entry point to the command
+ */
virtual void run() = 0;
typedef int Category;
static constexpr Category catDefault = 0;
+ virtual std::optional<ExperimentalFeature> experimentalFeature ();
+
virtual Category category() { return catDefault; }
};
typedef std::map<std::string, std::function<ref<Command>()>> Commands;
-/* An argument parser that supports multiple subcommands,
- i.e. ‘<command> <subcommand>’. */
+/**
+ * An argument parser that supports multiple subcommands,
+ * i.e. ‘<command> <subcommand>’.
+ */
class MultiCommand : virtual public Args
{
public:
@@ -219,7 +254,9 @@ public:
std::map<Command::Category, std::string> categories;
- // Selected command, if any.
+ /**
+ * Selected command, if any.
+ */
std::optional<std::pair<std::string, ref<Command>>> command;
MultiCommand(const Commands & commands);
@@ -254,8 +291,6 @@ enum CompletionType {
};
extern CompletionType completionType;
-std::optional<std::string> needsCompletion(std::string_view s);
-
void completePath(size_t, std::string_view prefix);
void completeDir(size_t, std::string_view prefix);
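A sketch of what the new `experimentalFeature()` hook looks like on a concrete command; the command itself is made up for illustration, only the overridden members come from this interface:

```cpp
#include "args.hh"
#include "experimental-features.hh"

using namespace nix;

struct CmdFrobnicate : Command
{
    std::string description() override
    {
        return "frobnicate the store (illustrative only)";
    }

    // Marks the whole command as gated behind an experimental feature.
    std::optional<ExperimentalFeature> experimentalFeature() override
    {
        return { Xp::NixCommand };
    }

    void run() override
    {
        // ... actual work would go here ...
    }
};
```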
diff --git a/src/libutil/callback.hh b/src/libutil/callback.hh
index ef31794be..3710d1239 100644
--- a/src/libutil/callback.hh
+++ b/src/libutil/callback.hh
@@ -1,13 +1,16 @@
#pragma once
+///@file
#include <future>
#include <functional>
namespace nix {
-/* A callback is a wrapper around a lambda that accepts a valid of
- type T or an exception. (We abuse std::future<T> to pass the value or
- exception.) */
+/**
+ * A callback is a wrapper around a lambda that accepts a value of
+ * type T or an exception. (We abuse std::future<T> to pass the value or
+ * exception.)
+ */
template<typename T>
class Callback
{
diff --git a/src/libutil/canon-path.cc b/src/libutil/canon-path.cc
index b132b4262..040464532 100644
--- a/src/libutil/canon-path.cc
+++ b/src/libutil/canon-path.cc
@@ -13,6 +13,11 @@ CanonPath::CanonPath(std::string_view raw, const CanonPath & root)
: path(absPath((Path) raw, root.abs()))
{ }
+CanonPath CanonPath::fromCwd(std::string_view path)
+{
+ return CanonPath(unchecked_t(), absPath((Path) path));
+}
+
std::optional<CanonPath> CanonPath::parent() const
{
if (isRoot()) return std::nullopt;
@@ -100,4 +105,30 @@ std::ostream & operator << (std::ostream & stream, const CanonPath & path)
return stream;
}
+std::string CanonPath::makeRelative(const CanonPath & path) const
+{
+ auto p1 = begin();
+ auto p2 = path.begin();
+
+ for (; p1 != end() && p2 != path.end() && *p1 == *p2; ++p1, ++p2) ;
+
+ if (p1 == end() && p2 == path.end())
+ return ".";
+ else if (p1 == end())
+ return std::string(p2.remaining);
+ else {
+ std::string res;
+ while (p1 != end()) {
+ ++p1;
+ if (!res.empty()) res += '/';
+ res += "..";
+ }
+ if (p2 != path.end()) {
+ if (!res.empty()) res += '/';
+ res += p2.remaining;
+ }
+ return res;
+ }
+}
+
}
diff --git a/src/libutil/canon-path.hh b/src/libutil/canon-path.hh
index 9d5984584..eefe05ed5 100644
--- a/src/libutil/canon-path.hh
+++ b/src/libutil/canon-path.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <string>
#include <optional>
@@ -8,28 +9,31 @@
namespace nix {
-/* A canonical representation of a path. It ensures the following:
-
- - It always starts with a slash.
-
- - It never ends with a slash, except if the path is "/".
-
- - A slash is never followed by a slash (i.e. no empty components).
-
- - There are no components equal to '.' or '..'.
-
- Note that the path does not need to correspond to an actually
- existing path, and there is no guarantee that symlinks are
- resolved.
-*/
+/**
+ * A canonical representation of a path. It ensures the following:
+ *
+ * - It always starts with a slash.
+ *
+ * - It never ends with a slash, except if the path is "/".
+ *
+ * - A slash is never followed by a slash (i.e. no empty components).
+ *
+ * - There are no components equal to '.' or '..'.
+ *
+ * Note that the path does not need to correspond to an actually
+ * existing path, and there is no guarantee that symlinks are
+ * resolved.
+ */
class CanonPath
{
std::string path;
public:
- /* Construct a canon path from a non-canonical path. Any '.', '..'
- or empty components are removed. */
+ /**
+ * Construct a canon path from a non-canonical path. Any '.', '..'
+ * or empty components are removed.
+ */
CanonPath(std::string_view raw);
explicit CanonPath(const char * raw)
@@ -42,11 +46,15 @@ public:
: path(std::move(path))
{ }
+ static CanonPath fromCwd(std::string_view path = ".");
+
static CanonPath root;
- /* If `raw` starts with a slash, return
- `CanonPath(raw)`. Otherwise return a `CanonPath` representing
- `root + "/" + raw`. */
+ /**
+ * If `raw` starts with a slash, return
+ * `CanonPath(raw)`. Otherwise return a `CanonPath` representing
+ * `root + "/" + raw`.
+ */
CanonPath(std::string_view raw, const CanonPath & root);
bool isRoot() const
@@ -58,8 +66,10 @@ public:
const std::string & abs() const
{ return path; }
- /* Like abs(), but return an empty string if this path is
- '/'. Thus the returned string never ends in a slash. */
+ /**
+ * Like abs(), but return an empty string if this path is
+ * '/'. Thus the returned string never ends in a slash.
+ */
const std::string & absOrEmpty() const
{
const static std::string epsilon;
@@ -85,6 +95,9 @@ public:
bool operator != (const Iterator & x) const
{ return remaining.data() != x.remaining.data(); }
+ bool operator == (const Iterator & x) const
+ { return !(*this != x); }
+
const std::string_view operator * () const
{ return remaining.substr(0, slash); }
@@ -104,7 +117,9 @@ public:
std::optional<CanonPath> parent() const;
- /* Remove the last component. Panics if this path is the root. */
+ /**
+ * Remove the last component. Panics if this path is the root.
+ */
void pop();
std::optional<std::string_view> dirOf() const
@@ -125,10 +140,12 @@ public:
bool operator != (const CanonPath & x) const
{ return path != x.path; }
- /* Compare paths lexicographically except that path separators
- are sorted before any other character. That is, in the sorted order
- a directory is always followed directly by its children. For
- instance, 'foo' < 'foo/bar' < 'foo!'. */
+ /**
+ * Compare paths lexicographically except that path separators
+ * are sorted before any other character. That is, in the sorted order
+ * a directory is always followed directly by its children. For
+ * instance, 'foo' < 'foo/bar' < 'foo!'.
+ */
bool operator < (const CanonPath & x) const
{
auto i = path.begin();
@@ -144,28 +161,44 @@ public:
return i == path.end() && j != x.path.end();
}
- /* Return true if `this` is equal to `parent` or a child of
- `parent`. */
+ /**
+ * Return true if `this` is equal to `parent` or a child of
+ * `parent`.
+ */
bool isWithin(const CanonPath & parent) const;
CanonPath removePrefix(const CanonPath & prefix) const;
- /* Append another path to this one. */
+ /**
+ * Append another path to this one.
+ */
void extend(const CanonPath & x);
- /* Concatenate two paths. */
+ /**
+ * Concatenate two paths.
+ */
CanonPath operator + (const CanonPath & x) const;
- /* Add a path component to this one. It must not contain any slashes. */
+ /**
+ * Add a path component to this one. It must not contain any slashes.
+ */
void push(std::string_view c);
CanonPath operator + (std::string_view c) const;
- /* Check whether access to this path is allowed, which is the case
- if 1) `this` is within any of the `allowed` paths; or 2) any of
- the `allowed` paths are within `this`. (The latter condition
- ensures access to the parents of allowed paths.) */
+ /**
+ * Check whether access to this path is allowed, which is the case
+ * if 1) `this` is within any of the `allowed` paths; or 2) any of
+ * the `allowed` paths are within `this`. (The latter condition
+ * ensures access to the parents of allowed paths.)
+ */
bool isAllowed(const std::set<CanonPath> & allowed) const;
+
+ /**
+ * Return a representation `x` of `path` relative to `this`, i.e.
+ * `CanonPath(this.makeRelative(x), this) == path`.
+ */
+ std::string makeRelative(const CanonPath & path) const;
};
std::ostream & operator << (std::ostream & stream, const CanonPath & path);
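The behaviour promised by `makeRelative()` above can be illustrated with a few hand-checked cases; this is a sketch derived from the algorithm in canon-path.cc, not a test added by this change:

```cpp
#include <cassert>
#include "canon-path.hh"

using namespace nix;

void makeRelativeExamples()
{
    CanonPath base("/foo/bar");

    assert(base.makeRelative(CanonPath("/foo/bar"))     == ".");
    assert(base.makeRelative(CanonPath("/foo/bar/baz")) == "baz");
    assert(base.makeRelative(CanonPath("/foo/quux"))    == "../quux");
    assert(base.makeRelative(CanonPath("/"))            == "../..");

    // The round-trip property from the header comment:
    assert(CanonPath(base.makeRelative(CanonPath("/foo/quux")), base).abs()
           == "/foo/quux");
}
```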
diff --git a/src/libutil/cgroup.hh b/src/libutil/cgroup.hh
index d08c8ad29..574ae8e5b 100644
--- a/src/libutil/cgroup.hh
+++ b/src/libutil/cgroup.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#if __linux__
@@ -18,10 +19,12 @@ struct CgroupStats
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
};
-/* Destroy the cgroup denoted by 'path'. The postcondition is that
- 'path' does not exist, and thus any processes in the cgroup have
- been killed. Also return statistics from the cgroup just before
- destruction. */
+/**
+ * Destroy the cgroup denoted by 'path'. The postcondition is that
+ * 'path' does not exist, and thus any processes in the cgroup have
+ * been killed. Also return statistics from the cgroup just before
+ * destruction.
+ */
CgroupStats destroyCgroup(const Path & cgroup);
}
diff --git a/src/libutil/chunked-vector.hh b/src/libutil/chunked-vector.hh
index 0a4f0b400..d914e2542 100644
--- a/src/libutil/chunked-vector.hh
+++ b/src/libutil/chunked-vector.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <cstdint>
#include <cstdlib>
@@ -7,20 +8,24 @@
namespace nix {
-/* Provides an indexable container like vector<> with memory overhead
- guarantees like list<> by allocating storage in chunks of ChunkSize
- elements instead of using a contiguous memory allocation like vector<>
- does. Not using a single vector that is resized reduces memory overhead
- on large data sets by on average (growth factor)/2, mostly
- eliminates copies within the vector during resizing, and provides stable
- references to its elements. */
+/**
+ * Provides an indexable container like vector<> with memory overhead
+ * guarantees like list<> by allocating storage in chunks of ChunkSize
+ * elements instead of using a contiguous memory allocation like vector<>
+ * does. Not using a single vector that is resized reduces memory overhead
+ * on large data sets by on average (growth factor)/2, mostly
+ * eliminates copies within the vector during resizing, and provides stable
+ * references to its elements.
+ */
template<typename T, size_t ChunkSize>
class ChunkedVector {
private:
uint32_t size_ = 0;
std::vector<std::vector<T>> chunks;
- /* keep this out of the ::add hot path */
+ /**
+ * Keep this out of the ::add hot path
+ */
[[gnu::noinline]]
auto & addChunk()
{
diff --git a/src/libutil/closure.hh b/src/libutil/closure.hh
index 779b9b2d5..16e3b93e4 100644
--- a/src/libutil/closure.hh
+++ b/src/libutil/closure.hh
@@ -1,3 +1,6 @@
+#pragma once
+///@file
+
#include <set>
#include <future>
#include "sync.hh"
diff --git a/src/libutil/comparator.hh b/src/libutil/comparator.hh
index eecd5b819..9f661c5c3 100644
--- a/src/libutil/comparator.hh
+++ b/src/libutil/comparator.hh
@@ -1,6 +1,8 @@
#pragma once
+///@file
-/* Awfull hacky generation of the comparison operators by doing a lexicographic
+/**
+ * Awfully hacky generation of the comparison operators by doing a lexicographic
* comparison between the choosen fields.
*
* ```
@@ -15,12 +17,12 @@
* }
* ```
*/
-#define GENERATE_ONE_CMP(COMPARATOR, MY_TYPE, FIELDS...) \
+#define GENERATE_ONE_CMP(COMPARATOR, MY_TYPE, ...) \
bool operator COMPARATOR(const MY_TYPE& other) const { \
- const MY_TYPE* me = this; \
- auto fields1 = std::make_tuple( FIELDS ); \
- me = &other; \
- auto fields2 = std::make_tuple( FIELDS ); \
+ __VA_OPT__(const MY_TYPE* me = this;) \
+ auto fields1 = std::make_tuple( __VA_ARGS__ ); \
+ __VA_OPT__(me = &other;) \
+ auto fields2 = std::make_tuple( __VA_ARGS__ ); \
return fields1 COMPARATOR fields2; \
}
#define GENERATE_EQUAL(args...) GENERATE_ONE_CMP(==, args)
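To show what the `__VA_OPT__` change buys, a sketch of the macro with and without fields (`Point` and `Unit` are made-up types): with an empty field list the `me` pointer is no longer declared, and the generated operator simply compares two empty tuples.

```cpp
#include <tuple>
#include "comparator.hh"

// Made-up example types, not part of this change.
struct Point
{
    int x, y;
    GENERATE_EQUAL(Point, me->x, me->y)
};

struct Unit
{
    // No fields: __VA_OPT__ drops the 'me' declarations entirely.
    GENERATE_EQUAL(Unit)
};
```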
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
index 89180e7a7..ba0847cde 100644
--- a/src/libutil/compression.cc
+++ b/src/libutil/compression.cc
@@ -23,7 +23,7 @@ struct ChunkedCompressionSink : CompressionSink
{
uint8_t outbuf[32 * 1024];
- void write(std::string_view data) override
+ void writeUnbuffered(std::string_view data) override
{
const size_t CHUNK_SIZE = sizeof(outbuf) << 2;
while (!data.empty()) {
@@ -103,7 +103,7 @@ struct ArchiveCompressionSink : CompressionSink
throw Error(reason, archive_error_string(this->archive));
}
- void write(std::string_view data) override
+ void writeUnbuffered(std::string_view data) override
{
ssize_t result = archive_write_data(archive, data.data(), data.length());
if (result <= 0) check(result);
@@ -136,7 +136,7 @@ struct NoneSink : CompressionSink
warn("requested compression level '%d' not supported by compression method 'none'", level);
}
void finish() override { flush(); }
- void write(std::string_view data) override { nextSink(data); }
+ void writeUnbuffered(std::string_view data) override { nextSink(data); }
};
struct BrotliDecompressionSink : ChunkedCompressionSink
diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh
index c470b82a5..4e53a7b3c 100644
--- a/src/libutil/compression.hh
+++ b/src/libutil/compression.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "ref.hh"
#include "types.hh"
@@ -11,7 +12,7 @@ namespace nix {
struct CompressionSink : BufferedSink, FinishSink
{
using BufferedSink::operator ();
- using BufferedSink::write;
+ using BufferedSink::writeUnbuffered;
using FinishSink::finish;
};
diff --git a/src/libutil/compute-levels.hh b/src/libutil/compute-levels.hh
index 8ded295f9..093e7a915 100644
--- a/src/libutil/compute-levels.hh
+++ b/src/libutil/compute-levels.hh
@@ -1,3 +1,6 @@
+#pragma once
+///@file
+
#include "types.hh"
namespace nix {
diff --git a/src/libutil/config-impl.hh b/src/libutil/config-impl.hh
new file mode 100644
index 000000000..b6cae5ec3
--- /dev/null
+++ b/src/libutil/config-impl.hh
@@ -0,0 +1,71 @@
+#pragma once
+/**
+ * @file
+ *
+ * Template implementations (as opposed to mere declarations).
+ *
+ * One only needs to include this when one is declaring a
+ * `BaseClass<CustomType>` setting, or as derived class of such an
+ * instantiation.
+ */
+
+#include "config.hh"
+
+namespace nix {
+
+template<> struct BaseSetting<Strings>::trait
+{
+ static constexpr bool appendable = true;
+};
+template<> struct BaseSetting<StringSet>::trait
+{
+ static constexpr bool appendable = true;
+};
+template<> struct BaseSetting<StringMap>::trait
+{
+ static constexpr bool appendable = true;
+};
+template<> struct BaseSetting<std::set<ExperimentalFeature>>::trait
+{
+ static constexpr bool appendable = true;
+};
+
+template<typename T>
+struct BaseSetting<T>::trait
+{
+ static constexpr bool appendable = false;
+};
+
+template<typename T>
+bool BaseSetting<T>::isAppendable()
+{
+ return trait::appendable;
+}
+
+template<> void BaseSetting<Strings>::appendOrSet(Strings && newValue, bool append);
+template<> void BaseSetting<StringSet>::appendOrSet(StringSet && newValue, bool append);
+template<> void BaseSetting<StringMap>::appendOrSet(StringMap && newValue, bool append);
+template<> void BaseSetting<std::set<ExperimentalFeature>>::appendOrSet(std::set<ExperimentalFeature> && newValue, bool append);
+
+template<typename T>
+void BaseSetting<T>::appendOrSet(T && newValue, bool append)
+{
+ static_assert(!trait::appendable, "using default `appendOrSet` implementation with an appendable type");
+ assert(!append);
+ value = std::move(newValue);
+}
+
+template<typename T>
+void BaseSetting<T>::set(const std::string & str, bool append)
+{
+ if (experimentalFeatureSettings.isEnabled(experimentalFeature))
+ appendOrSet(parse(str), append);
+ else {
+ assert(experimentalFeature);
+ warn("Ignoring setting '%s' because experimental feature '%s' is not enabled",
+ name,
+ showExperimentalFeature(*experimentalFeature));
+ }
+}
+
+}
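A sketch of what declaring a new appendable setting type would involve under this scheme; the `Paths` instantiation is hypothetical, while the specializations above are the real ones:

```cpp
#include "config-impl.hh"

namespace nix {

// Hypothetical: mark BaseSetting<Paths> as appendable...
template<> struct BaseSetting<Paths>::trait
{
    static constexpr bool appendable = true;
};

// ...and define what "append" means for it. A real instantiation would
// also need parse() and to_string() specializations.
template<> void BaseSetting<Paths>::appendOrSet(Paths && newValue, bool append)
{
    if (!append) value.clear();
    for (auto && path : newValue)
        value.push_back(std::move(path));
}

}
```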
diff --git a/src/libutil/config.cc b/src/libutil/config.cc
index b349f2d80..085a884dc 100644
--- a/src/libutil/config.cc
+++ b/src/libutil/config.cc
@@ -3,6 +3,8 @@
#include "abstract-setting-to-json.hh"
#include "experimental-features.hh"
+#include "config-impl.hh"
+
#include <nlohmann/json.hpp>
namespace nix {
@@ -80,6 +82,8 @@ void Config::getSettings(std::map<std::string, SettingInfo> & res, bool overridd
void AbstractConfig::applyConfig(const std::string & contents, const std::string & path) {
unsigned int pos = 0;
+ std::vector<std::pair<std::string, std::string>> parsedContents;
+
while (pos < contents.size()) {
std::string line;
while (pos < contents.size() && contents[pos] != '\n')
@@ -125,8 +129,21 @@ void AbstractConfig::applyConfig(const std::string & contents, const std::string
auto i = tokens.begin();
advance(i, 2);
- set(name, concatStringsSep(" ", Strings(i, tokens.end()))); // FIXME: slow
+ parsedContents.push_back({
+ name,
+ concatStringsSep(" ", Strings(i, tokens.end())),
+ });
};
+
+ // First apply experimental-feature related settings
+ for (auto & [name, value] : parsedContents)
+ if (name == "experimental-features" || name == "extra-experimental-features")
+ set(name, value);
+
+ // Then apply other settings
+ for (auto & [name, value] : parsedContents)
+ if (name != "experimental-features" && name != "extra-experimental-features")
+ set(name, value);
}
void AbstractConfig::applyConfigFile(const Path & path)
@@ -147,9 +164,8 @@ nlohmann::json Config::toJSON()
{
auto res = nlohmann::json::object();
for (auto & s : _settings)
- if (!s.second.isAlias) {
+ if (!s.second.isAlias)
res.emplace(s.first, s.second.setting->toJSON());
- }
return res;
}
@@ -157,24 +173,28 @@ std::string Config::toKeyValue()
{
auto res = std::string();
for (auto & s : _settings)
- if (!s.second.isAlias) {
+        if (!s.second.isAlias)
res += fmt("%s = %s\n", s.first, s.second.setting->to_string());
- }
return res;
}
void Config::convertToArgs(Args & args, const std::string & category)
{
- for (auto & s : _settings)
+ for (auto & s : _settings) {
if (!s.second.isAlias)
s.second.setting->convertToArg(args, category);
+ }
}
AbstractSetting::AbstractSetting(
const std::string & name,
const std::string & description,
- const std::set<std::string> & aliases)
- : name(name), description(stripIndentation(description)), aliases(aliases)
+ const std::set<std::string> & aliases,
+ std::optional<ExperimentalFeature> experimentalFeature)
+ : name(name)
+ , description(stripIndentation(description))
+ , aliases(aliases)
+ , experimentalFeature(experimentalFeature)
{
}
@@ -188,6 +208,10 @@ std::map<std::string, nlohmann::json> AbstractSetting::toJSONObject()
std::map<std::string, nlohmann::json> obj;
obj.emplace("description", description);
obj.emplace("aliases", aliases);
+ if (experimentalFeature)
+ obj.emplace("experimentalFeature", *experimentalFeature);
+ else
+ obj.emplace("experimentalFeature", nullptr);
return obj;
}
@@ -196,12 +220,6 @@ void AbstractSetting::convertToArg(Args & args, const std::string & category)
}
template<typename T>
-bool BaseSetting<T>::isAppendable()
-{
- return false;
-}
-
-template<typename T>
void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
{
args.addFlag({
@@ -210,6 +228,7 @@ void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
.category = category,
.labels = {"value"},
.handler = {[this](std::string s) { overridden = true; set(s); }},
+ .experimentalFeature = experimentalFeature,
});
if (isAppendable())
@@ -219,12 +238,13 @@ void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
.category = category,
.labels = {"value"},
.handler = {[this](std::string s) { overridden = true; set(s, true); }},
+ .experimentalFeature = experimentalFeature,
});
}
-template<> void BaseSetting<std::string>::set(const std::string & str, bool append)
+template<> std::string BaseSetting<std::string>::parse(const std::string & str) const
{
- value = str;
+ return str;
}
template<> std::string BaseSetting<std::string>::to_string() const
@@ -233,11 +253,11 @@ template<> std::string BaseSetting<std::string>::to_string() const
}
template<typename T>
-void BaseSetting<T>::set(const std::string & str, bool append)
+T BaseSetting<T>::parse(const std::string & str) const
{
static_assert(std::is_integral<T>::value, "Integer required.");
if (auto n = string2Int<T>(str))
- value = *n;
+ return *n;
else
throw UsageError("setting '%s' has invalid value '%s'", name, str);
}
@@ -249,12 +269,12 @@ std::string BaseSetting<T>::to_string() const
return std::to_string(value);
}
-template<> void BaseSetting<bool>::set(const std::string & str, bool append)
+template<> bool BaseSetting<bool>::parse(const std::string & str) const
{
if (str == "true" || str == "yes" || str == "1")
- value = true;
+ return true;
else if (str == "false" || str == "no" || str == "0")
- value = false;
+ return false;
else
throw UsageError("Boolean setting '%s' has invalid value '%s'", name, str);
}
@@ -270,26 +290,27 @@ template<> void BaseSetting<bool>::convertToArg(Args & args, const std::string &
.longName = name,
.description = fmt("Enable the `%s` setting.", name),
.category = category,
- .handler = {[this]() { override(true); }}
+ .handler = {[this]() { override(true); }},
+ .experimentalFeature = experimentalFeature,
});
args.addFlag({
.longName = "no-" + name,
.description = fmt("Disable the `%s` setting.", name),
.category = category,
- .handler = {[this]() { override(false); }}
+ .handler = {[this]() { override(false); }},
+ .experimentalFeature = experimentalFeature,
});
}
-template<> void BaseSetting<Strings>::set(const std::string & str, bool append)
+template<> Strings BaseSetting<Strings>::parse(const std::string & str) const
{
- auto ss = tokenizeString<Strings>(str);
- if (!append) value.clear();
- for (auto & s : ss) value.push_back(std::move(s));
+ return tokenizeString<Strings>(str);
}
-template<> bool BaseSetting<Strings>::isAppendable()
+template<> void BaseSetting<Strings>::appendOrSet(Strings && newValue, bool append)
{
- return true;
+ if (!append) value.clear();
+ for (auto && s : std::move(newValue)) value.push_back(std::move(s));
}
template<> std::string BaseSetting<Strings>::to_string() const
@@ -297,16 +318,16 @@ template<> std::string BaseSetting<Strings>::to_string() const
return concatStringsSep(" ", value);
}
-template<> void BaseSetting<StringSet>::set(const std::string & str, bool append)
+template<> StringSet BaseSetting<StringSet>::parse(const std::string & str) const
{
- if (!append) value.clear();
- for (auto & s : tokenizeString<StringSet>(str))
- value.insert(s);
+ return tokenizeString<StringSet>(str);
}
-template<> bool BaseSetting<StringSet>::isAppendable()
+template<> void BaseSetting<StringSet>::appendOrSet(StringSet && newValue, bool append)
{
- return true;
+ if (!append) value.clear();
+ for (auto && s : std::move(newValue))
+ value.insert(s);
}
template<> std::string BaseSetting<StringSet>::to_string() const
@@ -314,21 +335,24 @@ template<> std::string BaseSetting<StringSet>::to_string() const
return concatStringsSep(" ", value);
}
-template<> void BaseSetting<std::set<ExperimentalFeature>>::set(const std::string & str, bool append)
+template<> std::set<ExperimentalFeature> BaseSetting<std::set<ExperimentalFeature>>::parse(const std::string & str) const
{
- if (!append) value.clear();
+ std::set<ExperimentalFeature> res;
for (auto & s : tokenizeString<StringSet>(str)) {
auto thisXpFeature = parseExperimentalFeature(s);
if (thisXpFeature)
- value.insert(thisXpFeature.value());
+ res.insert(thisXpFeature.value());
else
warn("unknown experimental feature '%s'", s);
}
+ return res;
}
-template<> bool BaseSetting<std::set<ExperimentalFeature>>::isAppendable()
+template<> void BaseSetting<std::set<ExperimentalFeature>>::appendOrSet(std::set<ExperimentalFeature> && newValue, bool append)
{
- return true;
+ if (!append) value.clear();
+ for (auto && s : std::move(newValue))
+ value.insert(s);
}
template<> std::string BaseSetting<std::set<ExperimentalFeature>>::to_string() const
@@ -339,20 +363,23 @@ template<> std::string BaseSetting<std::set<ExperimentalFeature>>::to_string() c
return concatStringsSep(" ", stringifiedXpFeatures);
}
-template<> void BaseSetting<StringMap>::set(const std::string & str, bool append)
+template<> StringMap BaseSetting<StringMap>::parse(const std::string & str) const
{
- if (!append) value.clear();
+ StringMap res;
for (auto & s : tokenizeString<Strings>(str)) {
auto eq = s.find_first_of('=');
if (std::string::npos != eq)
- value.emplace(std::string(s, 0, eq), std::string(s, eq + 1));
+ res.emplace(std::string(s, 0, eq), std::string(s, eq + 1));
// else ignored
}
+ return res;
}
-template<> bool BaseSetting<StringMap>::isAppendable()
+template<> void BaseSetting<StringMap>::appendOrSet(StringMap && newValue, bool append)
{
- return true;
+ if (!append) value.clear();
+ for (auto && [k, v] : std::move(newValue))
+ value.emplace(std::move(k), std::move(v));
}
template<> std::string BaseSetting<StringMap>::to_string() const
@@ -376,15 +403,15 @@ template class BaseSetting<StringSet>;
template class BaseSetting<StringMap>;
template class BaseSetting<std::set<ExperimentalFeature>>;
-void PathSetting::set(const std::string & str, bool append)
+Path PathSetting::parse(const std::string & str) const
{
if (str == "") {
if (allowEmpty)
- value = "";
+ return "";
else
throw UsageError("setting '%s' cannot be empty", name);
} else
- value = canonPath(str);
+ return canonPath(str);
}
bool GlobalConfig::set(const std::string & name, const std::string & value)
@@ -444,4 +471,30 @@ GlobalConfig::Register::Register(Config * config)
configRegistrations->emplace_back(config);
}
+ExperimentalFeatureSettings experimentalFeatureSettings;
+
+static GlobalConfig::Register rSettings(&experimentalFeatureSettings);
+
+bool ExperimentalFeatureSettings::isEnabled(const ExperimentalFeature & feature) const
+{
+ auto & f = experimentalFeatures.get();
+ return std::find(f.begin(), f.end(), feature) != f.end();
+}
+
+void ExperimentalFeatureSettings::require(const ExperimentalFeature & feature) const
+{
+ if (!isEnabled(feature))
+ throw MissingExperimentalFeature(feature);
+}
+
+bool ExperimentalFeatureSettings::isEnabled(const std::optional<ExperimentalFeature> & feature) const
+{
+ return !feature || isEnabled(*feature);
+}
+
+void ExperimentalFeatureSettings::require(const std::optional<ExperimentalFeature> & feature) const
+{
+ if (feature) require(*feature);
+}
+
}
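
A brief usage sketch of the new `experimentalFeatureSettings` global added above; the surrounding code is illustrative only:

    // Guard an optional code path:
    if (experimentalFeatureSettings.isEnabled(Xp::Flakes)) {
        // ... flake-specific behaviour ...
    }

    // Or fail loudly when a feature is mandatory for the requested operation;
    // this throws MissingExperimentalFeature if the feature is not enabled.
    experimentalFeatureSettings.require(Xp::CaDerivations);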
diff --git a/src/libutil/config.hh b/src/libutil/config.hh
index 7ac43c854..2675baed7 100644
--- a/src/libutil/config.hh
+++ b/src/libutil/config.hh
@@ -1,12 +1,14 @@
+#pragma once
+///@file
+
#include <cassert>
#include <map>
#include <set>
-#include "types.hh"
-
#include <nlohmann/json_fwd.hpp>
-#pragma once
+#include "types.hh"
+#include "experimental-features.hh"
namespace nix {
@@ -123,21 +125,21 @@ public:
void reapplyUnknownSettings();
};
-/* A class to simplify providing configuration settings. The typical
- use is to inherit Config and add Setting<T> members:
-
- class MyClass : private Config
- {
- Setting<int> foo{this, 123, "foo", "the number of foos to use"};
- Setting<std::string> bar{this, "blabla", "bar", "the name of the bar"};
-
- MyClass() : Config(readConfigFile("/etc/my-app.conf"))
- {
- std::cout << foo << "\n"; // will print 123 unless overridden
- }
- };
-*/
-
+/**
+ * A class to simplify providing configuration settings. The typical
+ * use is to inherit Config and add Setting<T> members:
+ *
+ * class MyClass : private Config
+ * {
+ * Setting<int> foo{this, 123, "foo", "the number of foos to use"};
+ * Setting<std::string> bar{this, "blabla", "bar", "the name of the bar"};
+ *
+ * MyClass() : Config(readConfigFile("/etc/my-app.conf"))
+ * {
+ * std::cout << foo << "\n"; // will print 123 unless overridden
+ * }
+ * };
+ */
class Config : public AbstractConfig
{
friend class AbstractSetting;
@@ -194,12 +196,15 @@ public:
bool overridden = false;
+ std::optional<ExperimentalFeature> experimentalFeature;
+
protected:
AbstractSetting(
const std::string & name,
const std::string & description,
- const std::set<std::string> & aliases);
+ const std::set<std::string> & aliases,
+ std::optional<ExperimentalFeature> experimentalFeature = std::nullopt);
virtual ~AbstractSetting()
{
@@ -210,8 +215,11 @@ protected:
virtual void set(const std::string & value, bool append = false) = 0;
- virtual bool isAppendable()
- { return false; }
+ /**
+ * Whether the type is appendable; i.e. whether the `append`
+ * parameter to `set()` is allowed to be `true`.
+ */
+ virtual bool isAppendable() = 0;
virtual std::string to_string() const = 0;
@@ -224,7 +232,9 @@ protected:
bool isOverridden() const { return overridden; }
};
-/* A setting of type T. */
+/**
+ * A setting of type T.
+ */
template<typename T>
class BaseSetting : public AbstractSetting
{
@@ -234,14 +244,32 @@ protected:
const T defaultValue;
const bool documentDefault;
+ /**
+ * Parse the string into a `T`.
+ *
+ * Used by `set()`.
+ */
+ virtual T parse(const std::string & str) const;
+
+ /**
+ * Append or overwrite `value` with `newValue`.
+ *
+     * Some types do not support appending, in which case `append`
+     * should never be `true`. The default implementation handles this case.
+ *
+ * @param append Whether to append or overwrite.
+ */
+ virtual void appendOrSet(T && newValue, bool append);
+
public:
BaseSetting(const T & def,
const bool documentDefault,
const std::string & name,
const std::string & description,
- const std::set<std::string> & aliases = {})
- : AbstractSetting(name, description, aliases)
+ const std::set<std::string> & aliases = {},
+ std::optional<ExperimentalFeature> experimentalFeature = std::nullopt)
+ : AbstractSetting(name, description, aliases, experimentalFeature)
, value(def)
, defaultValue(def)
, documentDefault(documentDefault)
@@ -260,9 +288,25 @@ public:
template<typename U>
void setDefault(const U & v) { if (!overridden) value = v; }
- void set(const std::string & str, bool append = false) override;
+ /**
+ * Require any experimental feature the setting depends on
+ *
+ * Uses `parse()` to get the value from `str`, and `appendOrSet()`
+ * to set it.
+ */
+ void set(const std::string & str, bool append = false) override final;
- bool isAppendable() override;
+ /**
+     * C++ trick; this is template-specialized to indicate at compile
+     * time whether the type is appendable.
+ */
+ struct trait;
+
+ /**
+     * Always defined; the result is determined at compile time by
+     * the `trait` specialization above.
+ */
+ bool isAppendable() override final;
virtual void override(const T & v)
{
@@ -296,8 +340,9 @@ public:
const std::string & name,
const std::string & description,
const std::set<std::string> & aliases = {},
- const bool documentDefault = true)
- : BaseSetting<T>(def, documentDefault, name, description, aliases)
+ const bool documentDefault = true,
+ std::optional<ExperimentalFeature> experimentalFeature = std::nullopt)
+ : BaseSetting<T>(def, documentDefault, name, description, aliases, experimentalFeature)
{
options->addSetting(this);
}
@@ -305,8 +350,10 @@ public:
void operator =(const T & v) { this->assign(v); }
};
-/* A special setting for Paths. These are automatically canonicalised
- (e.g. "/foo//bar/" becomes "/foo/bar"). */
+/**
+ * A special setting for Paths. These are automatically canonicalised
+ * (e.g. "/foo//bar/" becomes "/foo/bar").
+ */
class PathSetting : public BaseSetting<Path>
{
bool allowEmpty;
@@ -325,7 +372,7 @@ public:
options->addSetting(this);
}
- void set(const std::string & str, bool append = false) override;
+ Path parse(const std::string & str) const override;
Path operator +(const char * p) const { return value + p; }
@@ -357,4 +404,52 @@ struct GlobalConfig : public AbstractConfig
extern GlobalConfig globalConfig;
+
+struct ExperimentalFeatureSettings : Config {
+
+ Setting<std::set<ExperimentalFeature>> experimentalFeatures{
+ this, {}, "experimental-features",
+ R"(
+ Experimental features that are enabled.
+
+ Example:
+
+ ```
+ experimental-features = nix-command flakes
+ ```
+
+ The following experimental features are available:
+
+ {{#include experimental-features-shortlist.md}}
+
+ Experimental features are [further documented in the manual](@docroot@/contributing/experimental-features.md).
+ )"};
+
+ /**
+ * Check whether the given experimental feature is enabled.
+ */
+ bool isEnabled(const ExperimentalFeature &) const;
+
+ /**
+ * Require an experimental feature be enabled, throwing an error if it is
+ * not.
+ */
+ void require(const ExperimentalFeature &) const;
+
+ /**
+ * `std::nullopt` pointer means no feature, which means there is nothing that could be
+ * disabled, and so the function returns true in that case.
+ */
+ bool isEnabled(const std::optional<ExperimentalFeature> &) const;
+
+ /**
+     * `std::nullopt` means no feature, so there is nothing that could be
+     * disabled, and the function does nothing in that case.
+ */
+ void require(const std::optional<ExperimentalFeature> &) const;
+};
+
+// FIXME: don't use a global variable.
+extern ExperimentalFeatureSettings experimentalFeatureSettings;
+
}
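
With the extra constructor parameter threaded through `Setting`, a setting can now be tied to an experimental feature at its declaration site. A hedged sketch; the struct and setting names here are invented for illustration:

    struct MyConfig : Config
    {
        Setting<bool> useShinyThing{
            this, false, "use-shiny-thing",
            "Whether to use the shiny thing.",
            {},          // aliases
            true,        // documentDefault
            Xp::Flakes}; // ignored (with a warning) unless this feature is enabled
    };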
diff --git a/src/libutil/error.cc b/src/libutil/error.cc
index e4f0d4677..c9d61942a 100644
--- a/src/libutil/error.cc
+++ b/src/libutil/error.cc
@@ -302,14 +302,14 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
if (!einfo.traces.empty()) {
size_t count = 0;
for (const auto & trace : einfo.traces) {
+ if (trace.hint.str().empty()) continue;
+ if (frameOnly && !trace.frame) continue;
+
if (!showTrace && count > 3) {
oss << "\n" << ANSI_WARNING "(stack trace truncated; use '--show-trace' to show the full trace)" ANSI_NORMAL << "\n";
break;
}
- if (trace.hint.str().empty()) continue;
- if (frameOnly && !trace.frame) continue;
-
count++;
frameOnly = trace.frame;
diff --git a/src/libutil/error.hh b/src/libutil/error.hh
index 0ebeaba61..6a0923081 100644
--- a/src/libutil/error.hh
+++ b/src/libutil/error.hh
@@ -1,4 +1,19 @@
#pragma once
+/**
+ * @file
+ *
+ * @brief This file defines two main structs/classes used in nix error handling.
+ *
+ * ErrorInfo provides a standard payload of error information, with conversion to string
+ * happening in the logger rather than at the call site.
+ *
+ * BaseError is the ancestor of nix specific exceptions (and Interrupted), and contains
+ * an ErrorInfo.
+ *
+ * ErrorInfo structs are sent to the logger as part of an exception, or directly with the
+ * logError or logWarning macros.
+ * See libutil/tests/logging.cc for usage examples.
+ */
#include "suggestions.hh"
#include "ref.hh"
@@ -26,22 +41,6 @@
namespace nix {
-/*
-
- This file defines two main structs/classes used in nix error handling.
-
- ErrorInfo provides a standard payload of error information, with conversion to string
- happening in the logger rather than at the call site.
-
- BaseError is the ancestor of nix specific exceptions (and Interrupted), and contains
- an ErrorInfo.
-
- ErrorInfo structs are sent to the logger as part of an exception, or directly with the
- logError or logWarning macros.
-
- See libutil/tests/logging.cc for usage examples.
-
- */
typedef enum {
lvlError = 0,
@@ -54,20 +53,26 @@ typedef enum {
lvlVomit
} Verbosity;
-// the lines of code surrounding an error.
+/**
+ * The lines of code surrounding an error.
+ */
struct LinesOfCode {
std::optional<std::string> prevLineOfCode;
std::optional<std::string> errLineOfCode;
std::optional<std::string> nextLineOfCode;
};
-/* An abstract type that represents a location in a source file. */
+/**
+ * An abstract type that represents a location in a source file.
+ */
struct AbstractPos
{
uint32_t line = 0;
uint32_t column = 0;
- /* Return the contents of the source file. */
+ /**
+ * Return the contents of the source file.
+ */
virtual std::optional<std::string> getSource() const
{ return std::nullopt; };
@@ -104,8 +109,10 @@ struct ErrorInfo {
std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool showTrace);
-/* BaseError should generally not be caught, as it has Interrupted as
- a subclass. Catch Error instead. */
+/**
+ * BaseError should generally not be caught, as it has Interrupted as
+ * a subclass. Catch Error instead.
+ */
class BaseError : public std::exception
{
protected:
diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc
index 58d762ebb..5aae0347b 100644
--- a/src/libutil/experimental-features.cc
+++ b/src/libutil/experimental-features.cc
@@ -5,29 +5,232 @@
namespace nix {
-std::map<ExperimentalFeature, std::string> stringifiedXpFeatures = {
- { Xp::CaDerivations, "ca-derivations" },
- { Xp::ImpureDerivations, "impure-derivations" },
- { Xp::Flakes, "flakes" },
- { Xp::NixCommand, "nix-command" },
- { Xp::RecursiveNix, "recursive-nix" },
- { Xp::NoUrlLiterals, "no-url-literals" },
- { Xp::FetchClosure, "fetch-closure" },
- { Xp::ReplFlake, "repl-flake" },
- { Xp::AutoAllocateUids, "auto-allocate-uids" },
- { Xp::Cgroups, "cgroups" },
- { Xp::DiscardReferences, "discard-references" },
+struct ExperimentalFeatureDetails
+{
+ ExperimentalFeature tag;
+ std::string_view name;
+ std::string_view description;
};
+constexpr std::array<ExperimentalFeatureDetails, 13> xpFeatureDetails = {{
+ {
+ .tag = Xp::CaDerivations,
+ .name = "ca-derivations",
+ .description = R"(
+ Allow derivations to be content-addressed in order to prevent
+ rebuilds when changes to the derivation do not result in changes to
+ the derivation's output. See
+ [__contentAddressed](@docroot@/language/advanced-attributes.md#adv-attr-__contentAddressed)
+ for details.
+ )",
+ },
+ {
+ .tag = Xp::ImpureDerivations,
+ .name = "impure-derivations",
+ .description = R"(
+ Allow derivations to produce non-fixed outputs by setting the
+ `__impure` derivation attribute to `true`. An impure derivation can
+ have differing outputs each time it is built.
+
+ Example:
+
+ ```
+ derivation {
+ name = "impure";
+ builder = /bin/sh;
+ __impure = true; # mark this derivation as impure
+ args = [ "-c" "read -n 10 random < /dev/random; echo $random > $out" ];
+ system = builtins.currentSystem;
+ }
+ ```
+
+ Each time this derivation is built, it can produce a different
+ output (as the builder outputs random bytes to `$out`). Impure
+ derivations also have access to the network, and only fixed-output
+ or other impure derivations can rely on impure derivations. Finally,
+ an impure derivation cannot also be
+ [content-addressed](#xp-feature-ca-derivations).
+ )",
+ },
+ {
+ .tag = Xp::Flakes,
+ .name = "flakes",
+ .description = R"(
+ Enable flakes. See the manual entry for [`nix
+ flake`](@docroot@/command-ref/new-cli/nix3-flake.md) for details.
+ )",
+ },
+ {
+ .tag = Xp::NixCommand,
+ .name = "nix-command",
+ .description = R"(
+ Enable the new `nix` subcommands. See the manual on
+ [`nix`](@docroot@/command-ref/new-cli/nix.md) for details.
+ )",
+ },
+ {
+ .tag = Xp::RecursiveNix,
+ .name = "recursive-nix",
+ .description = R"(
+ Allow derivation builders to call Nix, and thus build derivations
+ recursively.
+
+ Example:
+
+ ```
+ with import <nixpkgs> {};
+
+ runCommand "foo"
+ {
+ buildInputs = [ nix jq ];
+ NIX_PATH = "nixpkgs=${<nixpkgs>}";
+ }
+ ''
+ hello=$(nix-build -E '(import <nixpkgs> {}).hello.overrideDerivation (args: { name = "recursive-hello"; })')
+
+ mkdir -p $out/bin
+ ln -s $hello/bin/hello $out/bin/hello
+ ''
+ ```
+
+ An important restriction on recursive builders is disallowing
+ arbitrary substitutions. For example, running
+
+ ```
+ nix-store -r /nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10
+ ```
+
+ in the above `runCommand` script would be disallowed, as this could
+ lead to derivations with hidden dependencies or breaking
+ reproducibility by relying on the current state of the Nix store. An
+ exception would be if
+ `/nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10` were
+ already in the build inputs or built by a previous recursive Nix
+ call.
+ )",
+ },
+ {
+ .tag = Xp::NoUrlLiterals,
+ .name = "no-url-literals",
+ .description = R"(
+ Disallow unquoted URLs as part of the Nix language syntax. The Nix
+ language allows for URL literals, like so:
+
+ ```
+ $ nix repl
+ Welcome to Nix 2.15.0. Type :? for help.
+
+ nix-repl> http://foo
+ "http://foo"
+ ```
+
+ But enabling this experimental feature will cause the Nix parser to
+ throw an error when encountering a URL literal:
+
+ ```
+ $ nix repl --extra-experimental-features 'no-url-literals'
+ Welcome to Nix 2.15.0. Type :? for help.
+
+ nix-repl> http://foo
+ error: URL literals are disabled
+
+ at «string»:1:1:
+
+ 1| http://foo
+ | ^
+
+ ```
+
+ While this is currently an experimental feature, unquoted URLs are
+ being deprecated and their usage is discouraged.
+
+ The reason is that, as opposed to path literals, URLs have no
+ special properties that distinguish them from regular strings, URLs
+ containing parameters have to be quoted anyway, and unquoted URLs
+ may confuse external tooling.
+ )",
+ },
+ {
+ .tag = Xp::FetchClosure,
+ .name = "fetch-closure",
+ .description = R"(
+ Enable the use of the [`fetchClosure`](@docroot@/language/builtins.md#builtins-fetchClosure) built-in function in the Nix language.
+ )",
+ },
+ {
+ .tag = Xp::ReplFlake,
+ .name = "repl-flake",
+ .description = R"(
+ Allow passing [installables](@docroot@/command-ref/new-cli/nix.md#installables) to `nix repl`, making its interface consistent with the other experimental commands.
+ )",
+ },
+ {
+ .tag = Xp::AutoAllocateUids,
+ .name = "auto-allocate-uids",
+ .description = R"(
+ Allows Nix to automatically pick UIDs for builds, rather than creating
+ `nixbld*` user accounts. See the [`auto-allocate-uids`](#conf-auto-allocate-uids) setting for details.
+ )",
+ },
+ {
+ .tag = Xp::Cgroups,
+ .name = "cgroups",
+ .description = R"(
+ Allows Nix to execute builds inside cgroups. See
+ the [`use-cgroups`](#conf-use-cgroups) setting for details.
+ )",
+ },
+ {
+ .tag = Xp::DiscardReferences,
+ .name = "discard-references",
+ .description = R"(
+ Allow the use of the [`unsafeDiscardReferences`](@docroot@/language/advanced-attributes.html#adv-attr-unsafeDiscardReferences) attribute in derivations
+ that use [structured attributes](@docroot@/language/advanced-attributes.html#adv-attr-structuredAttrs). This disables scanning of outputs for
+ runtime dependencies.
+ )",
+ },
+ {
+ .tag = Xp::DaemonTrustOverride,
+ .name = "daemon-trust-override",
+ .description = R"(
+ Allow forcing trusting or not trusting clients with
+ `nix-daemon`. This is useful for testing, but possibly also
+ useful for various experiments with `nix-daemon --stdio`
+ networking.
+ )",
+ },
+ {
+ .tag = Xp::DynamicDerivations,
+ .name = "dynamic-derivations",
+ .description = R"(
+ Allow the use of a few things related to dynamic derivations:
+
+ - "text hashing" derivation outputs, so we can build .drv
+ files.
+
+ - dependencies in derivations on the outputs of
+        derivations that are themselves derivation outputs.
+ )",
+ },
+}};
+
+static_assert(
+ []() constexpr {
+ for (auto [index, feature] : enumerate(xpFeatureDetails))
+ if (index != (size_t)feature.tag)
+ return false;
+ return true;
+ }(),
+ "array order does not match enum tag order");
+
const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::string_view & name)
{
using ReverseXpMap = std::map<std::string_view, ExperimentalFeature>;
- static auto reverseXpMap = []()
- {
+ static std::unique_ptr<ReverseXpMap> reverseXpMap = []() {
auto reverseXpMap = std::make_unique<ReverseXpMap>();
- for (auto & [feature, name] : stringifiedXpFeatures)
- (*reverseXpMap)[name] = feature;
+ for (auto & xpFeature : xpFeatureDetails)
+ (*reverseXpMap)[xpFeature.name] = xpFeature.tag;
return reverseXpMap;
}();
@@ -37,20 +240,27 @@ const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::str
return std::nullopt;
}
-std::string_view showExperimentalFeature(const ExperimentalFeature feature)
+std::string_view showExperimentalFeature(const ExperimentalFeature tag)
+{
+ assert((size_t)tag < xpFeatureDetails.size());
+ return xpFeatureDetails[(size_t)tag].name;
+}
+
+nlohmann::json documentExperimentalFeatures()
{
- const auto ret = get(stringifiedXpFeatures, feature);
- assert(ret);
- return *ret;
+ StringMap res;
+ for (auto & xpFeature : xpFeatureDetails)
+ res[std::string { xpFeature.name }] =
+ trim(stripIndentation(xpFeature.description));
+ return (nlohmann::json) res;
}
std::set<ExperimentalFeature> parseFeatures(const std::set<std::string> & rawFeatures)
{
std::set<ExperimentalFeature> res;
- for (auto & rawFeature : rawFeatures) {
+ for (auto & rawFeature : rawFeatures)
if (auto feature = parseExperimentalFeature(rawFeature))
res.insert(*feature);
- }
return res;
}
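
Two properties of the new table are worth spelling out: the `static_assert` keeps `xpFeatureDetails[i].tag == i`, so a new feature must be appended to both the enum in `experimental-features.hh` and this array at the same position, and the name lookups round-trip. A small illustrative check:

    assert(parseExperimentalFeature("ca-derivations") == Xp::CaDerivations);
    assert(showExperimentalFeature(Xp::CaDerivations) == "ca-derivations");
    assert(!parseExperimentalFeature("not-a-feature").has_value());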
diff --git a/src/libutil/experimental-features.hh b/src/libutil/experimental-features.hh
index ac372e03e..409100592 100644
--- a/src/libutil/experimental-features.hh
+++ b/src/libutil/experimental-features.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "comparator.hh"
#include "error.hh"
@@ -10,9 +11,10 @@ namespace nix {
/**
* The list of available experimental features.
*
- * If you update this, don’t forget to also change the map defining their
- * string representation in the corresponding `.cc` file.
- **/
+ * If you update this, don’t forget to also change the map defining
+ * their string representation and documentation in the corresponding
+ * `.cc` file as well.
+ */
enum struct ExperimentalFeature
{
CaDerivations,
@@ -26,6 +28,8 @@ enum struct ExperimentalFeature
AutoAllocateUids,
Cgroups,
DiscardReferences,
+ DaemonTrustOverride,
+ DynamicDerivations,
};
/**
@@ -33,26 +37,52 @@ enum struct ExperimentalFeature
*/
using Xp = ExperimentalFeature;
+/**
+ * Parse an experimental feature (enum value) from its name. Experimental
+ * feature flag names are hyphenated and do not contain spaces.
+ */
const std::optional<ExperimentalFeature> parseExperimentalFeature(
const std::string_view & name);
+
+/**
+ * Show the name of an experimental feature. This is the opposite of
+ * parseExperimentalFeature().
+ */
std::string_view showExperimentalFeature(const ExperimentalFeature);
+/**
+ * Compute the documentation of all experimental features.
+ *
+ * See `doc/manual` for how this information is used.
+ */
+nlohmann::json documentExperimentalFeatures();
+
+/**
+ * Shorthand for `str << showExperimentalFeature(feature)`.
+ */
std::ostream & operator<<(
std::ostream & str,
const ExperimentalFeature & feature);
/**
- * Parse a set of strings to the corresponding set of experimental features,
- * ignoring (but warning for) any unkwown feature.
+ * Parse a set of strings to the corresponding set of experimental
+ * features, ignoring (but warning for) any unknown feature.
*/
std::set<ExperimentalFeature> parseFeatures(const std::set<std::string> &);
+/**
+ * An experimental feature was required for some (experimental)
+ * operation, but was not enabled.
+ */
class MissingExperimentalFeature : public Error
{
public:
+ /**
+ * The experimental feature that was required but not enabled.
+ */
ExperimentalFeature missingFeature;
- MissingExperimentalFeature(ExperimentalFeature);
+ MissingExperimentalFeature(ExperimentalFeature missingFeature);
};
/**
diff --git a/src/libutil/filesystem.cc b/src/libutil/filesystem.cc
index 3a732cff8..56be76ecc 100644
--- a/src/libutil/filesystem.cc
+++ b/src/libutil/filesystem.cc
@@ -15,9 +15,9 @@ static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
{
tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true);
if (includePid)
- return (format("%1%/%2%-%3%-%4%") % tmpRoot % prefix % getpid() % counter++).str();
+ return fmt("%1%/%2%-%3%-%4%", tmpRoot, prefix, getpid(), counter++);
else
- return (format("%1%/%2%-%3%") % tmpRoot % prefix % counter++).str();
+ return fmt("%1%/%2%-%3%", tmpRoot, prefix, counter++);
}
Path createTempDir(const Path & tmpRoot, const Path & prefix,
diff --git a/src/libutil/finally.hh b/src/libutil/finally.hh
index dee2e8d2f..db654301f 100644
--- a/src/libutil/finally.hh
+++ b/src/libutil/finally.hh
@@ -1,6 +1,9 @@
#pragma once
+///@file
-/* A trivial class to run a function at the end of a scope. */
+/**
+ * A trivial class to run a function at the end of a scope.
+ */
template<typename Fn>
class Finally
{
diff --git a/src/libutil/fmt.hh b/src/libutil/fmt.hh
index e879fd3b8..727255b45 100644
--- a/src/libutil/fmt.hh
+++ b/src/libutil/fmt.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <boost/format.hpp>
#include <string>
@@ -8,30 +9,25 @@
namespace nix {
-/* Inherit some names from other namespaces for convenience. */
+/**
+ * Inherit some names from other namespaces for convenience.
+ */
using boost::format;
-/* A variadic template that does nothing. Useful to call a function
- for all variadic arguments but ignoring the result. */
+/**
+ * A variadic template that does nothing. Useful to call a function
+ * for all variadic arguments but ignoring the result.
+ */
struct nop { template<typename... T> nop(T...) {} };
-struct FormatOrString
-{
- std::string s;
- FormatOrString(std::string s) : s(std::move(s)) { };
- template<class F>
- FormatOrString(const F & f) : s(f.str()) { };
- FormatOrString(const char * s) : s(s) { };
-};
-
-
-/* A helper for formatting strings. ‘fmt(format, a_0, ..., a_n)’ is
- equivalent to ‘boost::format(format) % a_0 % ... %
- ... a_n’. However, ‘fmt(s)’ is equivalent to ‘s’ (so no %-expansion
- takes place). */
-
+/**
+ * A helper for formatting strings. ‘fmt(format, a_0, ..., a_n)’ is
+ * equivalent to ‘boost::format(format) % a_0 % ... % a_n’.
+ * However, ‘fmt(s)’ is equivalent to ‘s’ (so no %-expansion
+ * takes place).
+ */
template<class F>
inline void formatHelper(F & f)
{
@@ -53,11 +49,6 @@ inline std::string fmt(const char * s)
return s;
}
-inline std::string fmt(const FormatOrString & fs)
-{
- return fs.s;
-}
-
template<typename... Args>
inline std::string fmt(const std::string & fs, const Args & ... args)
{
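
For reference, a couple of illustrative (not normative) uses of `fmt`, matching the `%N%` placeholders used elsewhere in this patch:

    auto a = fmt("%1%/%2%-%3%", "/tmp", "nix", 42);  // "/tmp/nix-42"
    auto b = fmt("50% done");                        // single argument: returned as-is, no %-expansion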
diff --git a/src/libutil/git.hh b/src/libutil/git.hh
index cb13ef0e5..bf2b9a286 100644
--- a/src/libutil/git.hh
+++ b/src/libutil/git.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <string>
#include <string_view>
@@ -8,21 +9,23 @@ namespace nix {
namespace git {
-// A line from the output of `git ls-remote --symref`.
-//
-// These can be of two kinds:
-//
-// - Symbolic references of the form
-//
-// ref: {target} {reference}
-//
-// where {target} is itself a reference and {reference} is optional
-//
-// - Object references of the form
-//
-// {target} {reference}
-//
-// where {target} is a commit id and {reference} is mandatory
+/**
+ * A line from the output of `git ls-remote --symref`.
+ *
+ * These can be of two kinds:
+ *
+ * - Symbolic references of the form
+ *
+ * ref: {target} {reference}
+ *
+ * where {target} is itself a reference and {reference} is optional
+ *
+ * - Object references of the form
+ *
+ * {target} {reference}
+ *
+ * where {target} is a commit id and {reference} is mandatory
+ */
struct LsRemoteRefLine {
enum struct Kind {
Symbolic,
diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc
index d2fd0c15a..2c36d9d94 100644
--- a/src/libutil/hash.cc
+++ b/src/libutil/hash.cc
@@ -1,6 +1,7 @@
#include <iostream>
#include <cstring>
+#include <openssl/crypto.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
@@ -16,7 +17,6 @@
namespace nix {
-
static size_t regularHashSize(HashType type) {
switch (type) {
case htMD5: return md5HashSize;
@@ -71,12 +71,13 @@ const std::string base16Chars = "0123456789abcdef";
static std::string printHash16(const Hash & hash)
{
- char buf[hash.hashSize * 2];
+ std::string buf;
+ buf.reserve(hash.hashSize * 2);
for (unsigned int i = 0; i < hash.hashSize; i++) {
- buf[i * 2] = base16Chars[hash.hash[i] >> 4];
- buf[i * 2 + 1] = base16Chars[hash.hash[i] & 0x0f];
+ buf.push_back(base16Chars[hash.hash[i] >> 4]);
+ buf.push_back(base16Chars[hash.hash[i] & 0x0f]);
}
- return std::string(buf, hash.hashSize * 2);
+ return buf;
}
@@ -130,7 +131,7 @@ std::string Hash::to_string(Base base, bool includeType) const
break;
case Base64:
case SRI:
- s += base64Encode(std::string((const char *) hash, hashSize));
+ s += base64Encode(std::string_view((const char *) hash, hashSize));
break;
}
return s;
@@ -342,7 +343,7 @@ HashSink::~HashSink()
delete ctx;
}
-void HashSink::write(std::string_view data)
+void HashSink::writeUnbuffered(std::string_view data)
{
bytes += data.size();
update(ht, *ctx, data);
@@ -403,7 +404,7 @@ HashType parseHashType(std::string_view s)
throw UsageError("unknown hash algorithm '%1%'", s);
}
-std::string printHashType(HashType ht)
+std::string_view printHashType(HashType ht)
{
switch (ht) {
case htMD5: return "md5";
diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh
index 00f70a572..ae3ee40f4 100644
--- a/src/libutil/hash.hh
+++ b/src/libutil/hash.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "serialise.hh"
@@ -33,62 +34,86 @@ struct Hash
HashType type;
- /* Create a zero-filled hash object. */
+ /**
+ * Create a zero-filled hash object.
+ */
Hash(HashType type);
- /* Parse the hash from a string representation in the format
- "[<type>:]<base16|base32|base64>" or "<type>-<base64>" (a
- Subresource Integrity hash expression). If the 'type' argument
- is not present, then the hash type must be specified in the
- string. */
+ /**
+ * Parse the hash from a string representation in the format
+ * "[<type>:]<base16|base32|base64>" or "<type>-<base64>" (a
+ * Subresource Integrity hash expression). If the 'type' argument
+ * is not present, then the hash type must be specified in the
+ * string.
+ */
static Hash parseAny(std::string_view s, std::optional<HashType> type);
- /* Parse a hash from a string representation like the above, except the
- type prefix is mandatory is there is no separate arguement. */
+ /**
+ * Parse a hash from a string representation like the above, except the
+     * type prefix is mandatory if there is no separate argument.
+ */
static Hash parseAnyPrefixed(std::string_view s);
- /* Parse a plain hash that musst not have any prefix indicating the type.
- The type is passed in to disambiguate. */
+ /**
+     * Parse a plain hash that must not have any prefix indicating the type.
+ * The type is passed in to disambiguate.
+ */
static Hash parseNonSRIUnprefixed(std::string_view s, HashType type);
static Hash parseSRI(std::string_view original);
private:
- /* The type must be provided, the string view must not include <type>
- prefix. `isSRI` helps disambigate the various base-* encodings. */
+ /**
+ * The type must be provided, the string view must not include <type>
+ * prefix. `isSRI` helps disambigate the various base-* encodings.
+ */
Hash(std::string_view s, HashType type, bool isSRI);
public:
- /* Check whether two hash are equal. */
+ /**
+     * Check whether two hashes are equal.
+ */
bool operator == (const Hash & h2) const;
- /* Check whether two hash are not equal. */
+ /**
+     * Check whether two hashes are not equal.
+ */
bool operator != (const Hash & h2) const;
- /* For sorting. */
+ /**
+ * For sorting.
+ */
bool operator < (const Hash & h) const;
- /* Returns the length of a base-16 representation of this hash. */
+ /**
+ * Returns the length of a base-16 representation of this hash.
+ */
size_t base16Len() const
{
return hashSize * 2;
}
- /* Returns the length of a base-32 representation of this hash. */
+ /**
+ * Returns the length of a base-32 representation of this hash.
+ */
size_t base32Len() const
{
return (hashSize * 8 - 1) / 5 + 1;
}
- /* Returns the length of a base-64 representation of this hash. */
+ /**
+ * Returns the length of a base-64 representation of this hash.
+ */
size_t base64Len() const
{
return ((4 * hashSize / 3) + 3) & ~3;
}
- /* Return a string representation of the hash, in base-16, base-32
- or base-64. By default, this is prefixed by the hash type
- (e.g. "sha256:"). */
+ /**
+ * Return a string representation of the hash, in base-16, base-32
+ * or base-64. By default, this is prefixed by the hash type
+ * (e.g. "sha256:").
+ */
std::string to_string(Base base, bool includeType) const;
std::string gitRev() const
@@ -104,36 +129,54 @@ public:
static Hash dummy;
};
-/* Helper that defaults empty hashes to the 0 hash. */
+/**
+ * Helper that defaults empty hashes to the 0 hash.
+ */
Hash newHashAllowEmpty(std::string_view hashStr, std::optional<HashType> ht);
-/* Print a hash in base-16 if it's MD5, or base-32 otherwise. */
+/**
+ * Print a hash in base-16 if it's MD5, or base-32 otherwise.
+ */
std::string printHash16or32(const Hash & hash);
-/* Compute the hash of the given string. */
+/**
+ * Compute the hash of the given string.
+ */
Hash hashString(HashType ht, std::string_view s);
-/* Compute the hash of the given file. */
+/**
+ * Compute the hash of the given file.
+ */
Hash hashFile(HashType ht, const Path & path);
-/* Compute the hash of the given path. The hash is defined as
- (essentially) hashString(ht, dumpPath(path)). */
+/**
+ * Compute the hash of the given path. The hash is defined as
+ * (essentially) hashString(ht, dumpPath(path)).
+ */
typedef std::pair<Hash, uint64_t> HashResult;
HashResult hashPath(HashType ht, const Path & path,
PathFilter & filter = defaultPathFilter);
-/* Compress a hash to the specified number of bytes by cyclically
- XORing bytes together. */
+/**
+ * Compress a hash to the specified number of bytes by cyclically
+ * XORing bytes together.
+ */
Hash compressHash(const Hash & hash, unsigned int newSize);
-/* Parse a string representing a hash type. */
+/**
+ * Parse a string representing a hash type.
+ */
HashType parseHashType(std::string_view s);
-/* Will return nothing on parse error */
+/**
+ * Will return nothing on parse error
+ */
std::optional<HashType> parseHashTypeOpt(std::string_view s);
-/* And the reverse. */
-std::string printHashType(HashType ht);
+/**
+ * And the reverse.
+ */
+std::string_view printHashType(HashType ht);
union Ctx;
@@ -154,7 +197,7 @@ public:
HashSink(HashType ht);
HashSink(const HashSink & h);
~HashSink();
- void write(std::string_view data) override;
+ void writeUnbuffered(std::string_view data) override;
HashResult finish() override;
HashResult currentHash();
};
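
As a concrete check of the three length formulas documented above, assuming a SHA-256 hash with a `hashSize` of 32 bytes (`htSHA256` is assumed here; only `htMD5` appears in this hunk):

    Hash h(htSHA256);
    assert(h.base16Len() == 64); // 32 * 2
    assert(h.base32Len() == 52); // (32*8 - 1) / 5 + 1
    assert(h.base64Len() == 44); // ((4*32/3) + 3) & ~3, rounded up to a multiple of 4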
diff --git a/src/libutil/hilite.hh b/src/libutil/hilite.hh
index f8bdbfc55..2d5cf7c6f 100644
--- a/src/libutil/hilite.hh
+++ b/src/libutil/hilite.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <regex>
#include <vector>
@@ -6,11 +7,13 @@
namespace nix {
-/* Highlight all the given matches in the given string `s` by wrapping
- them between `prefix` and `postfix`.
-
- If some matches overlap, then their union will be wrapped rather
- than the individual matches. */
+/**
+ * Highlight all the given matches in the given string `s` by wrapping
+ * them between `prefix` and `postfix`.
+ *
+ * If some matches overlap, then their union will be wrapped rather
+ * than the individual matches.
+ */
std::string hiliteMatches(
std::string_view s,
std::vector<std::smatch> matches,
diff --git a/src/libutil/json-impls.hh b/src/libutil/json-impls.hh
index bd75748ad..b26163a04 100644
--- a/src/libutil/json-impls.hh
+++ b/src/libutil/json-impls.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "nlohmann/json_fwd.hpp"
diff --git a/src/libutil/json-utils.hh b/src/libutil/json-utils.hh
index b8a031227..eb00e954f 100644
--- a/src/libutil/json-utils.hh
+++ b/src/libutil/json-utils.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <nlohmann/json.hpp>
diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc
index 56bdaf87a..5a2dd99af 100644
--- a/src/libutil/logging.cc
+++ b/src/libutil/logging.cc
@@ -54,7 +54,7 @@ public:
return printBuildLogs;
}
- void log(Verbosity lvl, const FormatOrString & fs) override
+ void log(Verbosity lvl, std::string_view s) override
{
if (lvl > verbosity) return;
@@ -65,14 +65,15 @@ public:
switch (lvl) {
case lvlError: c = '3'; break;
case lvlWarn: c = '4'; break;
- case lvlInfo: c = '5'; break;
+ case lvlNotice: case lvlInfo: c = '5'; break;
case lvlTalkative: case lvlChatty: c = '6'; break;
- default: c = '7';
+            case lvlDebug: case lvlVomit: c = '7'; break;
+            default: c = '7'; break; // should not happen; a missing enum case is reported by -Werror=switch-enum
}
prefix = std::string("<") + c + ">";
}
- writeToStderr(prefix + filterANSIEscapes(fs.s, !tty) + "\n");
+ writeToStderr(prefix + filterANSIEscapes(s, !tty) + "\n");
}
void logEI(const ErrorInfo & ei) override
@@ -174,12 +175,12 @@ struct JSONLogger : Logger {
prevLogger.log(lvlError, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace));
}
- void log(Verbosity lvl, const FormatOrString & fs) override
+ void log(Verbosity lvl, std::string_view s) override
{
nlohmann::json json;
json["action"] = "msg";
json["level"] = lvl;
- json["msg"] = fs.s;
+ json["msg"] = s;
write(json);
}
diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh
index a68edd15a..5aa6bee95 100644
--- a/src/libutil/logging.hh
+++ b/src/libutil/logging.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "error.hh"
@@ -72,14 +73,17 @@ public:
virtual void stop() { };
+ virtual void pause() { };
+ virtual void resume() { };
+
// Whether the logger prints the whole build log
virtual bool isVerbose() { return false; }
- virtual void log(Verbosity lvl, const FormatOrString & fs) = 0;
+ virtual void log(Verbosity lvl, std::string_view s) = 0;
- void log(const FormatOrString & fs)
+ void log(std::string_view s)
{
- log(lvlInfo, fs);
+ log(lvlInfo, s);
}
virtual void logEI(const ErrorInfo & ei) = 0;
@@ -179,12 +183,17 @@ bool handleJSONLogMessage(const std::string & msg,
const Activity & act, std::map<ActivityId, Activity> & activities,
bool trusted);
-extern Verbosity verbosity; /* suppress msgs > this */
-
-/* Print a message with the standard ErrorInfo format.
- In general, use these 'log' macros for reporting problems that may require user
- intervention or that need more explanation. Use the 'print' macros for more
- lightweight status messages. */
+/**
+ * Suppress messages with a verbosity above this level.
+ */
+extern Verbosity verbosity;
+
+/**
+ * Print a message with the standard ErrorInfo format.
+ * In general, use these 'log' macros for reporting problems that may require user
+ * intervention or that need more explanation. Use the 'print' macros for more
+ * lightweight status messages.
+ */
#define logErrorInfo(level, errorInfo...) \
do { \
if ((level) <= nix::verbosity) { \
@@ -195,9 +204,11 @@ extern Verbosity verbosity; /* suppress msgs > this */
#define logError(errorInfo...) logErrorInfo(lvlError, errorInfo)
#define logWarning(errorInfo...) logErrorInfo(lvlWarn, errorInfo)
-/* Print a string message if the current log level is at least the specified
- level. Note that this has to be implemented as a macro to ensure that the
- arguments are evaluated lazily. */
+/**
+ * Print a string message if the current log level is at least the specified
+ * level. Note that this has to be implemented as a macro to ensure that the
+ * arguments are evaluated lazily.
+ */
#define printMsgUsing(loggerParam, level, args...) \
do { \
auto __lvl = level; \
@@ -214,7 +225,9 @@ extern Verbosity verbosity; /* suppress msgs > this */
#define debug(args...) printMsg(lvlDebug, args)
#define vomit(args...) printMsg(lvlVomit, args)
-/* if verbosity >= lvlWarn, print a message with a yellow 'warning:' prefix. */
+/**
+ * If verbosity >= lvlWarn, print a message with a yellow 'warning:' prefix.
+ */
template<typename... Args>
inline void warn(const std::string & fs, const Args & ... args)
{
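
Since `printMsg` and its wrappers are macros, their arguments are only evaluated once the verbosity check has passed. A hypothetical example (`expensiveDump()` is invented):

    // At the default verbosity this line never calls expensiveDump(),
    // because the whole expression sits inside the macro's if-block.
    debug("dependency graph: %s", expensiveDump());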
diff --git a/src/libutil/lru-cache.hh b/src/libutil/lru-cache.hh
index 6ef4a3e06..0e19517ed 100644
--- a/src/libutil/lru-cache.hh
+++ b/src/libutil/lru-cache.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <cassert>
#include <map>
@@ -7,7 +8,9 @@
namespace nix {
-/* A simple least-recently used cache. Not thread-safe. */
+/**
+ * A simple least-recently used cache. Not thread-safe.
+ */
template<typename Key, typename Value>
class LRUCache
{
@@ -31,7 +34,9 @@ public:
LRUCache(size_t capacity) : capacity(capacity) { }
- /* Insert or upsert an item in the cache. */
+ /**
+ * Insert or upsert an item in the cache.
+ */
void upsert(const Key & key, const Value & value)
{
if (capacity == 0) return;
@@ -39,7 +44,9 @@ public:
erase(key);
if (data.size() >= capacity) {
- /* Retire the oldest item. */
+ /**
+ * Retire the oldest item.
+ */
auto oldest = lru.begin();
data.erase(*oldest);
lru.erase(oldest);
@@ -63,14 +70,18 @@ public:
return true;
}
- /* Look up an item in the cache. If it exists, it becomes the most
- recently used item. */
+ /**
+ * Look up an item in the cache. If it exists, it becomes the most
+ * recently used item.
+     */
std::optional<Value> get(const Key & key)
{
auto i = data.find(key);
if (i == data.end()) return {};
- /* Move this item to the back of the LRU list. */
+ /**
+ * Move this item to the back of the LRU list.
+ */
lru.erase(i->second.first.it);
auto j = lru.insert(lru.end(), i);
i->second.first.it = j;
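
A short usage sketch of the cache, based only on the API shown above:

    LRUCache<std::string, int> cache(2);  // capacity of two entries
    cache.upsert("a", 1);
    cache.upsert("b", 2);
    assert(cache.get("a").value() == 1);  // "a" becomes the most recently used
    cache.upsert("c", 3);                 // retires the oldest entry, "b"
    assert(!cache.get("b").has_value());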
diff --git a/src/libutil/monitor-fd.hh b/src/libutil/monitor-fd.hh
index 9518cf8aa..86d0115fc 100644
--- a/src/libutil/monitor-fd.hh
+++ b/src/libutil/monitor-fd.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <thread>
#include <atomic>
diff --git a/src/libutil/namespaces.hh b/src/libutil/namespaces.hh
index e82379b9c..0b7eeb66c 100644
--- a/src/libutil/namespaces.hh
+++ b/src/libutil/namespaces.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
namespace nix {
diff --git a/src/libutil/pool.hh b/src/libutil/pool.hh
index d49067bb9..6247b6125 100644
--- a/src/libutil/pool.hh
+++ b/src/libutil/pool.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <functional>
#include <limits>
@@ -11,33 +12,37 @@
namespace nix {
-/* This template class implements a simple pool manager of resources
- of some type R, such as database connections. It is used as
- follows:
-
- class Connection { ... };
-
- Pool<Connection> pool;
-
- {
- auto conn(pool.get());
- conn->exec("select ...");
- }
-
- Here, the Connection object referenced by ‘conn’ is automatically
- returned to the pool when ‘conn’ goes out of scope.
-*/
-
+/**
+ * This template class implements a simple pool manager of resources
+ * of some type R, such as database connections. It is used as
+ * follows:
+ *
+ * class Connection { ... };
+ *
+ * Pool<Connection> pool;
+ *
+ * {
+ * auto conn(pool.get());
+ * conn->exec("select ...");
+ * }
+ *
+ * Here, the Connection object referenced by ‘conn’ is automatically
+ * returned to the pool when ‘conn’ goes out of scope.
+ */
template <class R>
class Pool
{
public:
- /* A function that produces new instances of R on demand. */
+ /**
+ * A function that produces new instances of R on demand.
+ */
typedef std::function<ref<R>()> Factory;
- /* A function that checks whether an instance of R is still
- usable. Unusable instances are removed from the pool. */
+ /**
+ * A function that checks whether an instance of R is still
+ * usable. Unusable instances are removed from the pool.
+ */
typedef std::function<bool(const ref<R> &)> Validator;
private:
diff --git a/src/libutil/ref.hh b/src/libutil/ref.hh
index 7d38b059c..af5f8304c 100644
--- a/src/libutil/ref.hh
+++ b/src/libutil/ref.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <memory>
#include <exception>
@@ -6,8 +7,10 @@
namespace nix {
-/* A simple non-nullable reference-counted pointer. Actually a wrapper
- around std::shared_ptr that prevents null constructions. */
+/**
+ * A simple non-nullable reference-counted pointer. Actually a wrapper
+ * around std::shared_ptr that prevents null constructions.
+ */
template<typename T>
class ref
{
diff --git a/src/libutil/regex-combinators.hh b/src/libutil/regex-combinators.hh
index 0b997b25a..87d6aa678 100644
--- a/src/libutil/regex-combinators.hh
+++ b/src/libutil/regex-combinators.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <string_view>
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index c653db9d0..3d5121a19 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -20,7 +20,7 @@ void BufferedSink::operator () (std::string_view data)
buffer size. */
if (bufPos + data.size() >= bufSize) {
flush();
- write(data);
+ writeUnbuffered(data);
break;
}
/* Otherwise, copy the bytes to the buffer. Flush the buffer
@@ -38,7 +38,7 @@ void BufferedSink::flush()
if (bufPos == 0) return;
size_t n = bufPos;
bufPos = 0; // don't trigger the assert() in ~BufferedSink()
- write({buffer.get(), n});
+ writeUnbuffered({buffer.get(), n});
}
@@ -48,7 +48,7 @@ FdSink::~FdSink()
}
-void FdSink::write(std::string_view data)
+void FdSink::writeUnbuffered(std::string_view data)
{
written += data.size();
try {
@@ -186,6 +186,22 @@ static DefaultStackAllocator defaultAllocatorSingleton;
StackAllocator *StackAllocator::defaultAllocator = &defaultAllocatorSingleton;
+std::shared_ptr<void> (*create_coro_gc_hook)() = []() -> std::shared_ptr<void> {
+ return {};
+};
+
+/* This class is used for entry and exit hooks on coroutines */
+class CoroutineContext {
+ /* Disable GC when entering the coroutine without the boehm patch,
+ * since it doesn't find the main thread stack in this case.
+ * std::shared_ptr<void> performs type-erasure, so it will call the right
+ * deleter. */
+ const std::shared_ptr<void> coro_gc_hook = create_coro_gc_hook();
+public:
+ CoroutineContext() {};
+ ~CoroutineContext() {};
+};
+
std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
{
struct SourceToSink : FinishSink
@@ -206,7 +222,8 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
if (in.empty()) return;
cur = in;
- if (!coro)
+ if (!coro) {
+ CoroutineContext ctx;
coro = coro_t::push_type(VirtualStackAllocator{}, [&](coro_t::pull_type & yield) {
LambdaSource source([&](char *out, size_t out_len) {
if (cur.empty()) {
@@ -223,17 +240,24 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
});
fun(source);
});
+ }
if (!*coro) { abort(); }
- if (!cur.empty()) (*coro)(false);
+ if (!cur.empty()) {
+ CoroutineContext ctx;
+ (*coro)(false);
+ }
}
void finish() override
{
if (!coro) return;
if (!*coro) abort();
- (*coro)(true);
+ {
+ CoroutineContext ctx;
+ (*coro)(true);
+ }
if (*coro) abort();
}
};
@@ -264,18 +288,23 @@ std::unique_ptr<Source> sinkToSource(
size_t read(char * data, size_t len) override
{
- if (!coro)
+ if (!coro) {
+ CoroutineContext ctx;
coro = coro_t::pull_type(VirtualStackAllocator{}, [&](coro_t::push_type & yield) {
LambdaSink sink([&](std::string_view data) {
if (!data.empty()) yield(std::string(data));
});
fun(sink);
});
+ }
if (!*coro) { eof(); abort(); }
if (pos == cur.size()) {
- if (!cur.empty()) (*coro)();
+ if (!cur.empty()) {
+ CoroutineContext ctx;
+ (*coro)();
+ }
cur = coro->get();
pos = 0;
}
@@ -415,7 +444,7 @@ Error readError(Source & source)
auto msg = readString(source);
ErrorInfo info {
.level = level,
- .msg = hintformat(std::move(format("%s") % msg)),
+ .msg = hintformat(fmt("%s", msg)),
};
auto havePos = readNum<size_t>(source);
assert(havePos == 0);
@@ -424,7 +453,7 @@ Error readError(Source & source)
havePos = readNum<size_t>(source);
assert(havePos == 0);
info.traces.push_back(Trace {
- .hint = hintformat(std::move(format("%s") % readString(source)))
+ .hint = hintformat(fmt("%s", readString(source)))
});
}
return Error(std::move(info));
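
A hedged sketch of the `sinkToSource` adapter whose coroutine entry and exit points are now wrapped in `CoroutineContext`; the draining code is illustrative:

    auto source = sinkToSource([](Sink & sink) {
        sink("hello ");
        sink("world");
    });

    char buf[11];
    (*source)(buf, sizeof(buf)); // Source::operator() reads exactly this many bytes
    assert(std::string(buf, sizeof(buf)) == "hello world");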
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index 7da5b07fd..333c254ea 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <memory>
@@ -10,7 +11,9 @@ namespace boost::context { struct stack_context; }
namespace nix {
-/* Abstract destination of binary data. */
+/**
+ * Abstract destination of binary data.
+ */
struct Sink
{
virtual ~Sink() { }
@@ -18,7 +21,9 @@ struct Sink
virtual bool good() { return true; }
};
-/* Just throws away data. */
+/**
+ * Just throws away data.
+ */
struct NullSink : Sink
{
void operator () (std::string_view data) override
@@ -32,8 +37,10 @@ struct FinishSink : virtual Sink
};
-/* A buffered abstract sink. Warning: a BufferedSink should not be
- used from multiple threads concurrently. */
+/**
+ * A buffered abstract sink. Warning: a BufferedSink should not be
+ * used from multiple threads concurrently.
+ */
struct BufferedSink : virtual Sink
{
size_t bufSize, bufPos;
@@ -46,23 +53,31 @@ struct BufferedSink : virtual Sink
void flush();
- virtual void write(std::string_view data) = 0;
+protected:
+
+ virtual void writeUnbuffered(std::string_view data) = 0;
};
-/* Abstract source of binary data. */
+/**
+ * Abstract source of binary data.
+ */
struct Source
{
virtual ~Source() { }
- /* Store exactly ‘len’ bytes in the buffer pointed to by ‘data’.
- It blocks until all the requested data is available, or throws
- an error if it is not going to be available. */
+ /**
+ * Store exactly ‘len’ bytes in the buffer pointed to by ‘data’.
+ * It blocks until all the requested data is available, or throws
+ * an error if it is not going to be available.
+ */
void operator () (char * data, size_t len);
- /* Store up to ‘len’ in the buffer pointed to by ‘data’, and
- return the number of bytes stored. It blocks until at least
- one byte is available. */
+ /**
+ * Store up to ‘len’ in the buffer pointed to by ‘data’, and
+ * return the number of bytes stored. It blocks until at least
+ * one byte is available.
+ */
virtual size_t read(char * data, size_t len) = 0;
virtual bool good() { return true; }
@@ -73,8 +88,10 @@ struct Source
};
-/* A buffered abstract source. Warning: a BufferedSource should not be
- used from multiple threads concurrently. */
+/**
+ * A buffered abstract source. Warning: a BufferedSource should not be
+ * used from multiple threads concurrently.
+ */
struct BufferedSource : Source
{
size_t bufSize, bufPosIn, bufPosOut;
@@ -88,12 +105,16 @@ struct BufferedSource : Source
bool hasData();
protected:
- /* Underlying read call, to be overridden. */
+ /**
+ * Underlying read call, to be overridden.
+ */
virtual size_t readUnbuffered(char * data, size_t len) = 0;
};
-/* A sink that writes data to a file descriptor. */
+/**
+ * A sink that writes data to a file descriptor.
+ */
struct FdSink : BufferedSink
{
int fd;
@@ -114,7 +135,7 @@ struct FdSink : BufferedSink
~FdSink();
- void write(std::string_view data) override;
+ void writeUnbuffered(std::string_view data) override;
bool good() override;
@@ -123,7 +144,9 @@ private:
};
-/* A source that reads data from a file descriptor. */
+/**
+ * A source that reads data from a file descriptor.
+ */
struct FdSource : BufferedSource
{
int fd;
@@ -149,7 +172,9 @@ private:
};
-/* A sink that writes data to a string. */
+/**
+ * A sink that writes data to a string.
+ */
struct StringSink : Sink
{
std::string s;
@@ -163,7 +188,9 @@ struct StringSink : Sink
};
-/* A source that reads data from a string. */
+/**
+ * A source that reads data from a string.
+ */
struct StringSource : Source
{
std::string_view s;
@@ -173,7 +200,9 @@ struct StringSource : Source
};
-/* A sink that writes all incoming data to two other sinks. */
+/**
+ * A sink that writes all incoming data to two other sinks.
+ */
struct TeeSink : Sink
{
Sink & sink1, & sink2;
@@ -186,7 +215,9 @@ struct TeeSink : Sink
};
-/* Adapter class of a Source that saves all data read to a sink. */
+/**
+ * Adapter class of a Source that saves all data read to a sink.
+ */
struct TeeSource : Source
{
Source & orig;
@@ -201,7 +232,9 @@ struct TeeSource : Source
}
};
-/* A reader that consumes the original Source until 'size'. */
+/**
+ * A reader that consumes the original Source until 'size'.
+ */
struct SizedSource : Source
{
Source & orig;
@@ -219,7 +252,9 @@ struct SizedSource : Source
return n;
}
- /* Consume the original source until no remain data is left to consume. */
+ /**
+ * Consume the original source until no data remains to be consumed.
+ */
size_t drainAll()
{
std::vector<char> buf(8192);
@@ -232,7 +267,9 @@ struct SizedSource : Source
}
};
-/* A sink that that just counts the number of bytes given to it */
+/**
+ * A sink that just counts the number of bytes given to it.
+ */
struct LengthSink : Sink
{
uint64_t length = 0;
@@ -243,7 +280,9 @@ struct LengthSink : Sink
}
};
-/* Convert a function into a sink. */
+/**
+ * Convert a function into a sink.
+ */
struct LambdaSink : Sink
{
typedef std::function<void(std::string_view data)> lambda_t;
@@ -259,7 +298,9 @@ struct LambdaSink : Sink
};
-/* Convert a function into a source. */
+/**
+ * Convert a function into a source.
+ */
struct LambdaSource : Source
{
typedef std::function<size_t(char *, size_t)> lambda_t;
@@ -274,8 +315,10 @@ struct LambdaSource : Source
}
};
-/* Chain two sources together so after the first is exhausted, the second is
- used */
+/**
+ * Chain two sources together so that after the first is exhausted, the second is
+ * used.
+ */
struct ChainSource : Source
{
Source & source1, & source2;
@@ -289,8 +332,10 @@ struct ChainSource : Source
std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun);
-/* Convert a function that feeds data into a Sink into a Source. The
- Source executes the function as a coroutine. */
+/**
+ * Convert a function that feeds data into a Sink into a Source. The
+ * Source executes the function as a coroutine.
+ */
std::unique_ptr<Source> sinkToSource(
std::function<void(Sink &)> fun,
std::function<void()> eof = []() {
@@ -376,7 +421,9 @@ Source & operator >> (Source & in, bool & b)
Error readError(Source & source);
-/* An adapter that converts a std::basic_istream into a source. */
+/**
+ * An adapter that converts a std::basic_istream into a source.
+ */
struct StreamToSourceAdapter : Source
{
std::shared_ptr<std::basic_istream<char>> istream;
@@ -399,13 +446,14 @@ struct StreamToSourceAdapter : Source
};
-/* A source that reads a distinct format of concatenated chunks back into its
- logical form, in order to guarantee a known state to the original stream,
- even in the event of errors.
-
- Use with FramedSink, which also allows the logical stream to be terminated
- in the event of an exception.
-*/
+/**
+ * A source that reads a distinct format of concatenated chunks back into its
+ * logical form, in order to guarantee a known state to the original stream,
+ * even in the event of errors.
+ *
+ * Use with FramedSink, which also allows the logical stream to be terminated
+ * in the event of an exception.
+ */
struct FramedSource : Source
{
Source & from;
@@ -450,11 +498,12 @@ struct FramedSource : Source
}
};
-/* Write as chunks in the format expected by FramedSource.
-
- The exception_ptr reference can be used to terminate the stream when you
- detect that an error has occurred on the remote end.
-*/
+/**
+ * Write as chunks in the format expected by FramedSource.
+ *
+ * The exception_ptr reference can be used to terminate the stream when you
+ * detect that an error has occurred on the remote end.
+ */
struct FramedSink : nix::BufferedSink
{
BufferedSink & to;
@@ -473,7 +522,7 @@ struct FramedSink : nix::BufferedSink
}
}
- void write(std::string_view data) override
+ void writeUnbuffered(std::string_view data) override
{
/* Don't send more data if the remote has
encountered an error. */
@@ -487,18 +536,27 @@ struct FramedSink : nix::BufferedSink
};
};
-/* Stack allocation strategy for sinkToSource.
- Mutable to avoid a boehm gc dependency in libutil.
-
- boost::context doesn't provide a virtual class, so we define our own.
+/**
+ * Stack allocation strategy for sinkToSource.
+ * Mutable to avoid a boehm gc dependency in libutil.
+ *
+ * boost::context doesn't provide a virtual class, so we define our own.
*/
struct StackAllocator {
virtual boost::context::stack_context allocate() = 0;
virtual void deallocate(boost::context::stack_context sctx) = 0;
- /* The stack allocator to use in sinkToSource and potentially elsewhere.
- It is reassigned by the initGC() method in libexpr. */
+ /**
+ * The stack allocator to use in sinkToSource and potentially elsewhere.
+ * It is reassigned by the initGC() method in libexpr.
+ */
static StackAllocator *defaultAllocator;
};
+/* Disabling GC when entering a coroutine (without the boehm patch).
+ mutable to avoid boehm gc dependency in libutil.
+ */
+extern std::shared_ptr<void> (*create_coro_gc_hook)();
+
+
}
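
A minimal usage sketch of the Sink/Source interfaces documented above (not part of the patch; assumes the libutil headers are on the include path and that StringSink, StringSource, TeeSource and LengthSink have the constructors shown in this hunk):

    // Sketch: feed data through a Sink, then read it back through a Source
    // while counting the bytes seen, using the classes declared above.
    #include "serialise.hh"
    #include <cassert>
    #include <iostream>

    int main()
    {
        using namespace nix;

        StringSink sink;
        sink("hello, ");                 // Sink::operator()(std::string_view)
        sink("world");

        StringSource source(sink.s);     // read the bytes back
        LengthSink counter;
        TeeSource tee(source, counter);  // everything read is also written to 'counter'

        std::string buf(sink.s.size(), '\0');
        tee(buf.data(), buf.size());     // Source::operator()(char *, size_t): read exactly this many bytes

        assert(buf == "hello, world");
        std::cout << counter.length << " bytes read\n";
    }
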
diff --git a/src/libutil/split.hh b/src/libutil/split.hh
index 87a23b13e..3b9b2b83b 100644
--- a/src/libutil/split.hh
+++ b/src/libutil/split.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <optional>
#include <string_view>
@@ -7,10 +8,12 @@
namespace nix {
-// If `separator` is found, we return the portion of the string before the
-// separator, and modify the string argument to contain only the part after the
-// separator. Otherwise, we return `std::nullopt`, and we leave the argument
-// string alone.
+/**
+ * If `separator` is found, we return the portion of the string before the
+ * separator, and modify the string argument to contain only the part after the
+ * separator. Otherwise, we return `std::nullopt`, and we leave the argument
+ * string alone.
+ */
static inline std::optional<std::string_view> splitPrefixTo(std::string_view & string, char separator) {
auto sepInstance = string.find(separator);
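
A small sketch of the splitPrefixTo() contract documented above (not part of the patch; assumes split.hh is on the include path):

    // splitPrefixTo() returns the text before the separator and advances the
    // argument past it; with no separator it returns nullopt and leaves the
    // argument untouched.
    #include "split.hh"
    #include <cassert>

    int main()
    {
        std::string_view rest = "user:pass@host";

        auto user = nix::splitPrefixTo(rest, ':');   // rest is now "pass@host"
        assert(user && *user == "user");

        auto pass = nix::splitPrefixTo(rest, '@');   // rest is now "host"
        assert(pass && *pass == "pass");

        assert(!nix::splitPrefixTo(rest, ':'));      // no separator: nullopt
        assert(rest == "host");                      // ...and rest is unchanged
    }
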
diff --git a/src/libutil/suggestions.hh b/src/libutil/suggestions.hh
index d54dd8e31..9abf5ee5f 100644
--- a/src/libutil/suggestions.hh
+++ b/src/libutil/suggestions.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "comparator.hh"
#include "types.hh"
@@ -13,7 +14,8 @@ int levenshteinDistance(std::string_view first, std::string_view second);
*/
class Suggestion {
public:
- int distance; // The smaller the better
+ /// The smaller the better
+ int distance;
std::string suggestion;
std::string to_string() const;
@@ -43,7 +45,9 @@ public:
std::ostream & operator<<(std::ostream & str, const Suggestion &);
std::ostream & operator<<(std::ostream & str, const Suggestions &);
-// Either a value of type `T`, or some suggestions
+/**
+ * Either a value of type `T`, or some suggestions
+ */
template<typename T>
class OrSuggestions {
public:
diff --git a/src/libutil/sync.hh b/src/libutil/sync.hh
index e1d591d77..47e4512b1 100644
--- a/src/libutil/sync.hh
+++ b/src/libutil/sync.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <cstdlib>
#include <mutex>
@@ -7,22 +8,22 @@
namespace nix {
-/* This template class ensures synchronized access to a value of type
- T. It is used as follows:
-
- struct Data { int x; ... };
-
- Sync<Data> data;
-
- {
- auto data_(data.lock());
- data_->x = 123;
- }
-
- Here, "data" is automatically unlocked when "data_" goes out of
- scope.
-*/
-
+/**
+ * This template class ensures synchronized access to a value of type
+ * T. It is used as follows:
+ *
+ * struct Data { int x; ... };
+ *
+ * Sync<Data> data;
+ *
+ * {
+ * auto data_(data.lock());
+ * data_->x = 123;
+ * }
+ *
+ * Here, "data" is automatically unlocked when "data_" goes out of
+ * scope.
+ */
template<class T, class M = std::mutex>
class Sync
{
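
The Sync pattern described in the comment above, fleshed out into a compilable sketch (not part of the patch; assumes sync.hh and a C++17 compiler):

    // Two threads mutate shared state only through Sync<T>::lock(), which
    // returns a guard whose operator-> exposes the data while the mutex is held.
    #include "sync.hh"
    #include <cassert>
    #include <thread>

    struct Counters { int hits = 0; };

    int main()
    {
        nix::Sync<Counters> counters;

        auto worker = [&]() {
            for (int i = 0; i < 1000; ++i) {
                auto c(counters.lock());   // mutex held until 'c' goes out of scope
                c->hits++;
            }
        };

        std::thread t1(worker), t2(worker);
        t1.join();
        t2.join();

        assert(counters.lock()->hits == 2000);
    }
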
diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc
index 238d0a7a6..5060a8f24 100644
--- a/src/libutil/tarfile.cc
+++ b/src/libutil/tarfile.cc
@@ -17,7 +17,7 @@ static ssize_t callback_read(struct archive * archive, void * _self, const void
*buffer = self->buffer.data();
try {
- return self->source->read((char *) self->buffer.data(), 4096);
+ return self->source->read((char *) self->buffer.data(), self->buffer.size());
} catch (EndOfFile &) {
return 0;
} catch (std::exception & err) {
@@ -39,7 +39,7 @@ void TarArchive::check(int err, const std::string & reason)
throw Error(reason, archive_error_string(this->archive));
}
-TarArchive::TarArchive(Source & source, bool raw) : buffer(4096)
+TarArchive::TarArchive(Source & source, bool raw) : buffer(65536)
{
this->archive = archive_read_new();
this->source = &source;
diff --git a/src/libutil/tarfile.hh b/src/libutil/tarfile.hh
index 4d9141fd4..237d18c31 100644
--- a/src/libutil/tarfile.hh
+++ b/src/libutil/tarfile.hh
@@ -1,3 +1,6 @@
+#pragma once
+///@file
+
#include "serialise.hh"
#include <archive.h>
@@ -14,13 +17,14 @@ struct TarArchive {
TarArchive(const Path & path);
- // disable copy constructor
+ /// disable copy constructor
TarArchive(const TarArchive &) = delete;
void close();
~TarArchive();
};
+
void unpackTarfile(Source & source, const Path & destDir);
void unpackTarfile(const Path & tarFile, const Path & destDir);
diff --git a/src/libutil/tests/canon-path.cc b/src/libutil/tests/canon-path.cc
index c1c5adadf..fc94ccc3d 100644
--- a/src/libutil/tests/canon-path.cc
+++ b/src/libutil/tests/canon-path.cc
@@ -107,15 +107,13 @@ namespace nix {
}
TEST(CanonPath, within) {
- {
- ASSERT_TRUE(CanonPath("foo").isWithin(CanonPath("foo")));
- ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("bar")));
- ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("fo")));
- ASSERT_TRUE(CanonPath("foo/bar").isWithin(CanonPath("foo")));
- ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("foo/bar")));
- ASSERT_TRUE(CanonPath("/foo/bar/default.nix").isWithin(CanonPath("/")));
- ASSERT_TRUE(CanonPath("/").isWithin(CanonPath("/")));
- }
+ ASSERT_TRUE(CanonPath("foo").isWithin(CanonPath("foo")));
+ ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("bar")));
+ ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("fo")));
+ ASSERT_TRUE(CanonPath("foo/bar").isWithin(CanonPath("foo")));
+ ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("foo/bar")));
+ ASSERT_TRUE(CanonPath("/foo/bar/default.nix").isWithin(CanonPath("/")));
+ ASSERT_TRUE(CanonPath("/").isWithin(CanonPath("/")));
}
TEST(CanonPath, sort) {
@@ -127,29 +125,38 @@ namespace nix {
}
TEST(CanonPath, allowed) {
- {
- std::set<CanonPath> allowed {
- CanonPath("foo/bar"),
- CanonPath("foo!"),
- CanonPath("xyzzy"),
- CanonPath("a/b/c"),
- };
-
- ASSERT_TRUE (CanonPath("foo/bar").isAllowed(allowed));
- ASSERT_TRUE (CanonPath("foo/bar/bla").isAllowed(allowed));
- ASSERT_TRUE (CanonPath("foo").isAllowed(allowed));
- ASSERT_FALSE(CanonPath("bar").isAllowed(allowed));
- ASSERT_FALSE(CanonPath("bar/a").isAllowed(allowed));
- ASSERT_TRUE (CanonPath("a").isAllowed(allowed));
- ASSERT_TRUE (CanonPath("a/b").isAllowed(allowed));
- ASSERT_TRUE (CanonPath("a/b/c").isAllowed(allowed));
- ASSERT_TRUE (CanonPath("a/b/c/d").isAllowed(allowed));
- ASSERT_TRUE (CanonPath("a/b/c/d/e").isAllowed(allowed));
- ASSERT_FALSE(CanonPath("a/b/a").isAllowed(allowed));
- ASSERT_FALSE(CanonPath("a/b/d").isAllowed(allowed));
- ASSERT_FALSE(CanonPath("aaa").isAllowed(allowed));
- ASSERT_FALSE(CanonPath("zzz").isAllowed(allowed));
- ASSERT_TRUE (CanonPath("/").isAllowed(allowed));
- }
+ std::set<CanonPath> allowed {
+ CanonPath("foo/bar"),
+ CanonPath("foo!"),
+ CanonPath("xyzzy"),
+ CanonPath("a/b/c"),
+ };
+
+ ASSERT_TRUE (CanonPath("foo/bar").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("foo/bar/bla").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("foo").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("bar").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("bar/a").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("a").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("a/b").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("a/b/c").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("a/b/c/d").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("a/b/c/d/e").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("a/b/a").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("a/b/d").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("aaa").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("zzz").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("/").isAllowed(allowed));
+ }
+
+ TEST(CanonPath, makeRelative) {
+ CanonPath d("/foo/bar");
+ ASSERT_EQ(d.makeRelative(CanonPath("/foo/bar")), ".");
+ ASSERT_EQ(d.makeRelative(CanonPath("/foo")), "..");
+ ASSERT_EQ(d.makeRelative(CanonPath("/")), "../..");
+ ASSERT_EQ(d.makeRelative(CanonPath("/foo/bar/xyzzy")), "xyzzy");
+ ASSERT_EQ(d.makeRelative(CanonPath("/foo/bar/xyzzy/bla")), "xyzzy/bla");
+ ASSERT_EQ(d.makeRelative(CanonPath("/foo/xyzzy/bla")), "../xyzzy/bla");
+ ASSERT_EQ(d.makeRelative(CanonPath("/xyzzy/bla")), "../../xyzzy/bla");
}
}
diff --git a/src/libutil/tests/config.cc b/src/libutil/tests/config.cc
index 8be6730dd..886e70da5 100644
--- a/src/libutil/tests/config.cc
+++ b/src/libutil/tests/config.cc
@@ -82,6 +82,7 @@ namespace nix {
TestSetting() : AbstractSetting("test", "test", {}) {}
void set(const std::string & value, bool append) override {}
std::string to_string() const override { return {}; }
+ bool isAppendable() override { return false; }
};
Config config;
@@ -90,6 +91,7 @@ namespace nix {
ASSERT_FALSE(config.set("test", "value"));
config.addSetting(&setting);
ASSERT_TRUE(config.set("test", "value"));
+ ASSERT_FALSE(config.set("extra-test", "value"));
}
TEST(Config, withInitialValue) {
@@ -156,12 +158,54 @@ namespace nix {
}
TEST(Config, toJSONOnNonEmptyConfig) {
+ using nlohmann::literals::operator "" _json;
Config config;
- std::map<std::string, Config::SettingInfo> settings;
- Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
+ Setting<std::string> setting{
+ &config,
+ "",
+ "name-of-the-setting",
+ "description",
+ };
+ setting.assign("value");
+
+ ASSERT_EQ(config.toJSON(),
+ R"#({
+ "name-of-the-setting": {
+ "aliases": [],
+ "defaultValue": "",
+ "description": "description\n",
+ "documentDefault": true,
+ "value": "value",
+ "experimentalFeature": null
+ }
+ })#"_json);
+ }
+
+ TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) {
+ using nlohmann::literals::operator "" _json;
+ Config config;
+ Setting<std::string> setting{
+ &config,
+ "",
+ "name-of-the-setting",
+ "description",
+ {},
+ true,
+ Xp::Flakes,
+ };
setting.assign("value");
- ASSERT_EQ(config.toJSON().dump(), R"#({"name-of-the-setting":{"aliases":[],"defaultValue":"","description":"description\n","documentDefault":true,"value":"value"}})#");
+ ASSERT_EQ(config.toJSON(),
+ R"#({
+ "name-of-the-setting": {
+ "aliases": [],
+ "defaultValue": "",
+ "description": "description\n",
+ "documentDefault": true,
+ "value": "value",
+ "experimentalFeature": "flakes"
+ }
+ })#"_json);
}
TEST(Config, setSettingAlias) {
diff --git a/src/libutil/tests/hash.hh b/src/libutil/tests/hash.hh
index 9e9650e6e..1f9fa59ae 100644
--- a/src/libutil/tests/hash.hh
+++ b/src/libutil/tests/hash.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <rapidcheck/gen/Arbitrary.h>
diff --git a/src/libutil/thread-pool.hh b/src/libutil/thread-pool.hh
index b22e0d162..0e09fae97 100644
--- a/src/libutil/thread-pool.hh
+++ b/src/libutil/thread-pool.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "sync.hh"
#include "util.hh"
@@ -13,8 +14,10 @@ namespace nix {
MakeError(ThreadPoolShutDown, Error);
-/* A simple thread pool that executes a queue of work items
- (lambdas). */
+/**
+ * A simple thread pool that executes a queue of work items
+ * (lambdas).
+ */
class ThreadPool
{
public:
@@ -23,19 +26,30 @@ public:
~ThreadPool();
- // FIXME: use std::packaged_task?
+ /**
+ * An individual work item.
+ *
+ * \todo use std::packaged_task?
+ */
typedef std::function<void()> work_t;
- /* Enqueue a function to be executed by the thread pool. */
+ /**
+ * Enqueue a function to be executed by the thread pool.
+ */
void enqueue(const work_t & t);
- /* Execute work items until the queue is empty. Note that work
- items are allowed to add new items to the queue; this is
- handled correctly. Queue processing stops prematurely if any
- work item throws an exception. This exception is propagated to
- the calling thread. If multiple work items throw an exception
- concurrently, only one item is propagated; the others are
- printed on stderr and otherwise ignored. */
+ /**
+ * Execute work items until the queue is empty.
+ *
+ * \note Note that work items are allowed to add new items to the
+ * queue; this is handled correctly.
+ *
+ * Queue processing stops prematurely if any work item throws an
+ * exception. This exception is propagated to the calling thread. If
+ * multiple work items throw an exception concurrently, only one
+ * item is propagated; the others are printed on stderr and
+ * otherwise ignored.
+ */
void process();
private:
@@ -62,9 +76,11 @@ private:
void shutdown();
};
-/* Process in parallel a set of items of type T that have a partial
- ordering between them. Thus, any item is only processed after all
- its dependencies have been processed. */
+/**
+ * Process in parallel a set of items of type T that have a partial
+ * ordering between them. Thus, any item is only processed after all
+ * its dependencies have been processed.
+ */
template<typename T>
void processGraph(
ThreadPool & pool,
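
A rough sketch of the enqueue()/process() cycle documented above (not part of the patch; assumes thread-pool.hh is on the include path):

    // Work items are queued first, then process() runs them on the pool's
    // worker threads until the queue is empty, rethrowing the first exception
    // any item throws.
    #include "thread-pool.hh"
    #include <atomic>
    #include <iostream>

    int main()
    {
        nix::ThreadPool pool(4);          // at most 4 worker threads
        std::atomic<int> sum{0};

        for (int i = 1; i <= 100; ++i)
            pool.enqueue([i, &sum]() { sum += i; });

        pool.process();                   // blocks until the queue is drained

        std::cout << sum << "\n";         // prints 5050
    }
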
diff --git a/src/libutil/topo-sort.hh b/src/libutil/topo-sort.hh
index 7418be5e0..a52811fbf 100644
--- a/src/libutil/topo-sort.hh
+++ b/src/libutil/topo-sort.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "error.hh"
diff --git a/src/libutil/types.hh b/src/libutil/types.hh
index 6bcbd7e1d..c86f52175 100644
--- a/src/libutil/types.hh
+++ b/src/libutil/types.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "ref.hh"
@@ -17,7 +18,9 @@ typedef std::set<std::string> StringSet;
typedef std::map<std::string, std::string> StringMap;
typedef std::map<std::string, std::string> StringPairs;
-/* Paths are just strings. */
+/**
+ * Paths are just strings.
+ */
typedef std::string Path;
typedef std::string_view PathView;
typedef std::list<Path> Paths;
@@ -25,15 +28,19 @@ typedef std::set<Path> PathSet;
typedef std::vector<std::pair<std::string, std::string>> Headers;
-/* Helper class to run code at startup. */
+/**
+ * Helper class to run code at startup.
+ */
template<typename T>
struct OnStartup
{
OnStartup(T && t) { t(); }
};
-/* Wrap bools to prevent string literals (i.e. 'char *') from being
- cast to a bool in Attr. */
+/**
+ * Wrap bools to prevent string literals (i.e. 'char *') from being
+ * cast to a bool in Attr.
+ */
template<typename T>
struct Explicit {
T t;
@@ -45,21 +52,25 @@ struct Explicit {
};
-/* This wants to be a little bit like rust's Cow type.
- Some parts of the evaluator benefit greatly from being able to reuse
- existing allocations for strings, but have to be able to also use
- newly allocated storage for values.
-
- We do not define implicit conversions, even with ref qualifiers,
- since those can easily become ambiguous to the reader and can degrade
- into copying behaviour we want to avoid. */
+/**
+ * This wants to be a little bit like rust's Cow type.
+ * Some parts of the evaluator benefit greatly from being able to reuse
+ * existing allocations for strings, but have to be able to also use
+ * newly allocated storage for values.
+ *
+ * We do not define implicit conversions, even with ref qualifiers,
+ * since those can easily become ambiguous to the reader and can degrade
+ * into copying behaviour we want to avoid.
+ */
class BackedStringView {
private:
std::variant<std::string, std::string_view> data;
- /* Needed to introduce a temporary since operator-> must return
- a pointer. Without this we'd need to store the view object
- even when we already own a string. */
+ /**
+ * Needed to introduce a temporary since operator-> must return
+ * a pointer. Without this we'd need to store the view object
+ * even when we already own a string.
+ */
class Ptr {
private:
std::string_view view;
@@ -77,8 +88,10 @@ public:
BackedStringView(const BackedStringView &) = delete;
BackedStringView & operator=(const BackedStringView &) = delete;
- /* We only want move operations defined since the sole purpose of
- this type is to avoid copies. */
+ /**
+ * We only want move operations defined since the sole purpose of
+ * this type is to avoid copies.
+ */
BackedStringView(BackedStringView && other) = default;
BackedStringView & operator=(BackedStringView && other) = default;
diff --git a/src/libutil/url-parts.hh b/src/libutil/url-parts.hh
index d5e6a2736..98162b0f7 100644
--- a/src/libutil/url-parts.hh
+++ b/src/libutil/url-parts.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <string>
#include <regex>
@@ -22,21 +23,22 @@ const static std::string segmentRegex = "(?:" + pcharRegex + "*)";
const static std::string absPathRegex = "(?:(?:/" + segmentRegex + ")*/?)";
const static std::string pathRegex = "(?:" + segmentRegex + "(?:/" + segmentRegex + ")*/?)";
-// A Git ref (i.e. branch or tag name).
-const static std::string refRegexS = "[a-zA-Z0-9][a-zA-Z0-9_.\\/-]*"; // FIXME: check
+/// A Git ref (i.e. branch or tag name).
+/// \todo check that this is correct.
+const static std::string refRegexS = "[a-zA-Z0-9@][a-zA-Z0-9_.\\/@-]*";
extern std::regex refRegex;
-// Instead of defining what a good Git Ref is, we define what a bad Git Ref is
-// This is because of the definition of a ref in refs.c in https://github.com/git/git
-// See tests/fetchGitRefs.sh for the full definition
+/// Instead of defining what a good Git Ref is, we define what a bad Git Ref is
+/// This is because of the definition of a ref in refs.c in https://github.com/git/git
+/// See tests/fetchGitRefs.sh for the full definition
const static std::string badGitRefRegexS = "//|^[./]|/\\.|\\.\\.|[[:cntrl:][:space:]:?^~\[]|\\\\|\\*|\\.lock$|\\.lock/|@\\{|[/.]$|^@$|^$";
extern std::regex badGitRefRegex;
-// A Git revision (a SHA-1 commit hash).
+/// A Git revision (a SHA-1 commit hash).
const static std::string revRegexS = "[0-9a-fA-F]{40}";
extern std::regex revRegex;
-// A ref or revision, or a ref followed by a revision.
+/// A ref or revision, or a ref followed by a revision.
const static std::string refAndOrRevRegex = "(?:(" + revRegexS + ")|(?:(" + refRegexS + ")(?:/(" + revRegexS + "))?))";
const static std::string flakeIdRegexS = "[a-zA-Z][a-zA-Z0-9_-]*";
diff --git a/src/libutil/url.hh b/src/libutil/url.hh
index ddd673d65..d2413ec0e 100644
--- a/src/libutil/url.hh
+++ b/src/libutil/url.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "error.hh"
@@ -7,7 +8,8 @@ namespace nix {
struct ParsedURL
{
std::string url;
- std::string base; // URL without query/fragment
+ /// URL without query/fragment
+ std::string base;
std::string scheme;
std::optional<std::string> authority;
std::string path;
@@ -28,7 +30,7 @@ std::map<std::string, std::string> decodeQuery(const std::string & query);
ParsedURL parseURL(const std::string & url);
-/*
+/**
* Although that’s not really standardized anywhere, a number of tools
* use a scheme of the form 'x+y' in URLs, where y is the “transport layer”
* scheme, and x is the “application layer” scheme.
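
As an illustration of the 'x+y' scheme convention described in that comment (not part of the patch; the URL is made up, and the splitting is done by hand with splitPrefixTo() purely for illustration):

    // parseURL() accepts a compound scheme such as "git+https"; the application
    // part ("git") and the transport part ("https") are separated below.
    #include "url.hh"
    #include "split.hh"
    #include <iostream>

    int main()
    {
        using namespace nix;

        auto u = parseURL("git+https://example.org/repo.git?ref=main");
        std::cout << u.scheme << "\n";   // "git+https"
        std::cout << u.base << "\n";     // URL without query/fragment

        std::string_view transport = u.scheme;
        auto application = splitPrefixTo(transport, '+');   // nullopt for a plain "https"
        std::cout << (application ? *application : std::string_view("(none)"))
                  << " over " << transport << "\n";
    }
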
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 885bae69c..3a8309149 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -47,6 +47,9 @@ extern char * * environ __attribute__((weak));
namespace nix {
+void initLibUtil() {
+}
+
std::optional<std::string> getEnv(const std::string & key)
{
char * value = getenv(key.c_str());
@@ -54,6 +57,11 @@ std::optional<std::string> getEnv(const std::string & key)
return std::string(value);
}
+std::optional<std::string> getEnvNonEmpty(const std::string & key) {
+ auto value = getEnv(key);
+ if (value == "") return {};
+ return value;
+}
std::map<std::string, std::string> getEnv()
{
@@ -523,7 +531,7 @@ void deletePath(const Path & path)
void deletePath(const Path & path, uint64_t & bytesFreed)
{
- //Activity act(*logger, lvlDebug, format("recursively deleting path '%1%'") % path);
+ //Activity act(*logger, lvlDebug, "recursively deleting path '%1%'", path);
bytesFreed = 0;
_deletePath(path, bytesFreed);
}
@@ -1065,12 +1073,14 @@ static pid_t doFork(bool allowVfork, std::function<void()> fun)
}
+#if __linux__
static int childEntry(void * arg)
{
auto main = (std::function<void()> *) arg;
(*main)();
return 1;
}
+#endif
pid_t startProcess(std::function<void()> fun, const ProcessOptions & options)
@@ -1131,9 +1141,9 @@ std::vector<char *> stringsToCharPtrs(const Strings & ss)
}
std::string runProgram(Path program, bool searchPath, const Strings & args,
- const std::optional<std::string> & input)
+ const std::optional<std::string> & input, bool isInteractive)
{
- auto res = runProgram(RunOptions {.program = program, .searchPath = searchPath, .args = args, .input = input});
+ auto res = runProgram(RunOptions {.program = program, .searchPath = searchPath, .args = args, .input = input, .isInteractive = isInteractive});
if (!statusOk(res.first))
throw ExecError(res.first, "program '%1%' %2%", program, statusToString(res.first));
@@ -1183,6 +1193,16 @@ void runProgram2(const RunOptions & options)
// case), so we can't use it if we alter the environment
processOptions.allowVfork = !options.environment;
+ std::optional<Finally<std::function<void()>>> resumeLoggerDefer;
+ if (options.isInteractive) {
+ logger->pause();
+ resumeLoggerDefer.emplace(
+ []() {
+ logger->resume();
+ }
+ );
+ }
+
/* Fork. */
Pid pid = startProcess([&]() {
if (options.environment)
@@ -1394,14 +1414,14 @@ std::string statusToString(int status)
{
if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
if (WIFEXITED(status))
- return (format("failed with exit code %1%") % WEXITSTATUS(status)).str();
+ return fmt("failed with exit code %1%", WEXITSTATUS(status));
else if (WIFSIGNALED(status)) {
int sig = WTERMSIG(status);
#if HAVE_STRSIGNAL
const char * description = strsignal(sig);
- return (format("failed due to signal %1% (%2%)") % sig % description).str();
+ return fmt("failed due to signal %1% (%2%)", sig, description);
#else
- return (format("failed due to signal %1%") % sig).str();
+ return fmt("failed due to signal %1%", sig);
#endif
}
else
@@ -1470,7 +1490,7 @@ bool shouldANSI()
&& !getEnv("NO_COLOR").has_value();
}
-std::string filterANSIEscapes(const std::string & s, bool filterAll, unsigned int width)
+std::string filterANSIEscapes(std::string_view s, bool filterAll, unsigned int width)
{
std::string t, e;
size_t w = 0;
@@ -1737,14 +1757,40 @@ void triggerInterrupt()
}
static sigset_t savedSignalMask;
+static bool savedSignalMaskIsSet = false;
-void startSignalHandlerThread()
+void setChildSignalMask(sigset_t * sigs)
{
- updateWindowSize();
+ assert(sigs); // C style function, but think of sigs as a reference
+
+#if _POSIX_C_SOURCE >= 1 || _XOPEN_SOURCE || _POSIX_SOURCE
+ sigemptyset(&savedSignalMask);
+ // There's no "assign" or "copy" function, so we rely on (math) idempotence
+ // of the or operator: a or a = a.
+ sigorset(&savedSignalMask, sigs, sigs);
+#else
+ // Without sigorset, our best bet is to assume that sigset_t is a type that
+ // can be assigned directly, such as is the case for a sigset_t defined as
+ // an integer type.
+ savedSignalMask = *sigs;
+#endif
+
+ savedSignalMaskIsSet = true;
+}
+void saveSignalMask() {
if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask))
throw SysError("querying signal mask");
+ savedSignalMaskIsSet = true;
+}
+
+void startSignalHandlerThread()
+{
+ updateWindowSize();
+
+ saveSignalMask();
+
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGINT);
@@ -1760,6 +1806,20 @@ void startSignalHandlerThread()
static void restoreSignals()
{
+ // If startSignalHandlerThread wasn't called, that means we're not running
+ // in a proper libmain process, but a process that presumably manages its
+ // own signal handlers. Such a process should call one of
+ // - initNix(), to be a proper libmain process
+ // - startSignalHandlerThread(), to resemble libmain regarding signal
+ // handling only
+ // - saveSignalMask(), for processes that define their own signal handling
+ // thread
+ // TODO: Warn about this? Have a default signal mask? The latter depends on
+ // whether we should generally inherit signal masks from the caller.
+ // I don't know what the larger unix ecosystem expects from us here.
+ if (!savedSignalMaskIsSet)
+ return;
+
if (sigprocmask(SIG_SETMASK, &savedSignalMask, nullptr))
throw SysError("restoring signals");
}
@@ -1961,7 +2021,7 @@ std::string showBytes(uint64_t bytes)
// FIXME: move to libstore/build
-void commonChildInit(Pipe & logPipe)
+void commonChildInit()
{
logger = makeSimpleLogger();
@@ -1975,10 +2035,6 @@ void commonChildInit(Pipe & logPipe)
if (setsid() == -1)
throw SysError("creating a new session");
- /* Dup the write side of the logger pipe into stderr. */
- if (dup2(logPipe.writeSide.get(), STDERR_FILENO) == -1)
- throw SysError("cannot pipe standard error into log file");
-
/* Dup stderr to stdout. */
if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1)
throw SysError("cannot dup stderr into stdout");
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index b5625ecef..a7907cd14 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "types.hh"
#include "error.hh"
@@ -31,74 +32,114 @@ namespace nix {
struct Sink;
struct Source;
+void initLibUtil();
-/* The system for which Nix is compiled. */
+/**
+ * The system for which Nix is compiled.
+ */
extern const std::string nativeSystem;
-/* Return an environment variable. */
+/**
+ * @return an environment variable.
+ */
std::optional<std::string> getEnv(const std::string & key);
-/* Get the entire environment. */
+/**
+ * @return a non empty environment variable. Returns nullopt if the env
+ * variable is set to ""
+ */
+std::optional<std::string> getEnvNonEmpty(const std::string & key);
+
+/**
+ * Get the entire environment.
+ */
std::map<std::string, std::string> getEnv();
-/* Clear the environment. */
+/**
+ * Clear the environment.
+ */
void clearEnv();
-/* Return an absolutized path, resolving paths relative to the
- specified directory, or the current directory otherwise. The path
- is also canonicalised. */
+/**
+ * @return An absolutized path, resolving paths relative to the
+ * specified directory, or the current directory otherwise. The path
+ * is also canonicalised.
+ */
Path absPath(Path path,
std::optional<PathView> dir = {},
bool resolveSymlinks = false);
-/* Canonicalise a path by removing all `.' or `..' components and
- double or trailing slashes. Optionally resolves all symlink
- components such that each component of the resulting path is *not*
- a symbolic link. */
+/**
+ * Canonicalise a path by removing all `.` or `..` components and
+ * double or trailing slashes. Optionally resolves all symlink
+ * components such that each component of the resulting path is *not*
+ * a symbolic link.
+ */
Path canonPath(PathView path, bool resolveSymlinks = false);
-/* Return the directory part of the given canonical path, i.e.,
- everything before the final `/'. If the path is the root or an
- immediate child thereof (e.g., `/foo'), this means `/'
- is returned.*/
+/**
+ * @return The directory part of the given canonical path, i.e.,
+ * everything before the final `/`. If the path is the root or an
+ * immediate child thereof (e.g., `/foo`), this means `/`
+ * is returned.
+ */
Path dirOf(const PathView path);
-/* Return the base name of the given canonical path, i.e., everything
- following the final `/' (trailing slashes are removed). */
+/**
+ * @return the base name of the given canonical path, i.e., everything
+ * following the final `/` (trailing slashes are removed).
+ */
std::string_view baseNameOf(std::string_view path);
-/* Perform tilde expansion on a path. */
+/**
+ * Perform tilde expansion on a path.
+ */
std::string expandTilde(std::string_view path);
-/* Check whether 'path' is a descendant of 'dir'. Both paths must be
- canonicalized. */
+/**
+ * Check whether 'path' is a descendant of 'dir'. Both paths must be
+ * canonicalized.
+ */
bool isInDir(std::string_view path, std::string_view dir);
-/* Check whether 'path' is equal to 'dir' or a descendant of
- 'dir'. Both paths must be canonicalized. */
+/**
+ * Check whether 'path' is equal to 'dir' or a descendant of
+ * 'dir'. Both paths must be canonicalized.
+ */
bool isDirOrInDir(std::string_view path, std::string_view dir);
-/* Get status of `path'. */
+/**
+ * Get status of `path`.
+ */
struct stat stat(const Path & path);
struct stat lstat(const Path & path);
-/* Return true iff the given path exists. */
+/**
+ * @return true iff the given path exists.
+ */
bool pathExists(const Path & path);
-/* Read the contents (target) of a symbolic link. The result is not
- in any way canonicalised. */
+/**
+ * Read the contents (target) of a symbolic link. The result is not
+ * in any way canonicalised.
+ */
Path readLink(const Path & path);
bool isLink(const Path & path);
-/* Read the contents of a directory. The entries `.' and `..' are
- removed. */
+/**
+ * Read the contents of a directory. The entries `.` and `..` are
+ * removed.
+ */
struct DirEntry
{
std::string name;
ino_t ino;
- unsigned char type; // one of DT_*
+ /**
+ * one of DT_*
+ */
+ unsigned char type;
DirEntry(std::string name, ino_t ino, unsigned char type)
: name(std::move(name)), ino(ino), type(type) { }
};
@@ -109,74 +150,110 @@ DirEntries readDirectory(const Path & path);
unsigned char getFileType(const Path & path);
-/* Read the contents of a file into a string. */
+/**
+ * Read the contents of a file into a string.
+ */
std::string readFile(int fd);
std::string readFile(const Path & path);
void readFile(const Path & path, Sink & sink);
-/* Write a string to a file. */
+/**
+ * Write a string to a file.
+ */
void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false);
void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false);
-/* Flush a file's parent directory to disk */
+/**
+ * Flush a file's parent directory to disk
+ */
void syncParent(const Path & path);
-/* Read a line from a file descriptor. */
+/**
+ * Read a line from a file descriptor.
+ */
std::string readLine(int fd);
-/* Write a line to a file descriptor. */
+/**
+ * Write a line to a file descriptor.
+ */
void writeLine(int fd, std::string s);
-/* Delete a path; i.e., in the case of a directory, it is deleted
- recursively. It's not an error if the path does not exist. The
- second variant returns the number of bytes and blocks freed. */
+/**
+ * Delete a path; i.e., in the case of a directory, it is deleted
+ * recursively. It's not an error if the path does not exist. The
+ * second variant returns the number of bytes and blocks freed.
+ */
void deletePath(const Path & path);
void deletePath(const Path & path, uint64_t & bytesFreed);
std::string getUserName();
-/* Return the given user's home directory from /etc/passwd. */
+/**
+ * @return the given user's home directory from /etc/passwd.
+ */
Path getHomeOf(uid_t userId);
-/* Return $HOME or the user's home directory from /etc/passwd. */
+/**
+ * @return $HOME or the user's home directory from /etc/passwd.
+ */
Path getHome();
-/* Return $XDG_CACHE_HOME or $HOME/.cache. */
+/**
+ * @return $XDG_CACHE_HOME or $HOME/.cache.
+ */
Path getCacheDir();
-/* Return $XDG_CONFIG_HOME or $HOME/.config. */
+/**
+ * @return $XDG_CONFIG_HOME or $HOME/.config.
+ */
Path getConfigDir();
-/* Return the directories to search for user configuration files */
+/**
+ * @return the directories to search for user configuration files
+ */
std::vector<Path> getConfigDirs();
-/* Return $XDG_DATA_HOME or $HOME/.local/share. */
+/**
+ * @return $XDG_DATA_HOME or $HOME/.local/share.
+ */
Path getDataDir();
-/* Return the path of the current executable. */
+/**
+ * @return the path of the current executable.
+ */
std::optional<Path> getSelfExe();
-/* Return $XDG_STATE_HOME or $HOME/.local/state. */
+/**
+ * @return $XDG_STATE_HOME or $HOME/.local/state.
+ */
Path getStateDir();
-/* Create the Nix state directory and return the path to it. */
+/**
+ * Create the Nix state directory and return the path to it.
+ */
Path createNixStateDir();
-/* Create a directory and all its parents, if necessary. Returns the
- list of created directories, in order of creation. */
+/**
+ * Create a directory and all its parents, if necessary. Returns the
+ * list of created directories, in order of creation.
+ */
Paths createDirs(const Path & path);
inline Paths createDirs(PathView path)
{
return createDirs(Path(path));
}
-/* Create a symlink. */
+/**
+ * Create a symlink.
+ */
void createSymlink(const Path & target, const Path & link,
std::optional<time_t> mtime = {});
-/* Atomically create or replace a symlink. */
+/**
+ * Atomically create or replace a symlink.
+ */
void replaceSymlink(const Path & target, const Path & link,
std::optional<time_t> mtime = {});
@@ -192,24 +269,32 @@ void renameFile(const Path & src, const Path & dst);
void moveFile(const Path & src, const Path & dst);
-/* Wrappers arount read()/write() that read/write exactly the
- requested number of bytes. */
+/**
+ * Wrappers around read()/write() that read/write exactly the
+ * requested number of bytes.
+ */
void readFull(int fd, char * buf, size_t count);
void writeFull(int fd, std::string_view s, bool allowInterrupts = true);
MakeError(EndOfFile, Error);
-/* Read a file descriptor until EOF occurs. */
+/**
+ * Read a file descriptor until EOF occurs.
+ */
std::string drainFD(int fd, bool block = true, const size_t reserveSize=0);
void drainFD(int fd, Sink & sink, bool block = true);
-/* If cgroups are active, attempt to calculate the number of CPUs available.
- If cgroups are unavailable or if cpu.max is set to "max", return 0. */
+/**
+ * If cgroups are active, attempt to calculate the number of CPUs available.
+ * If cgroups are unavailable or if cpu.max is set to "max", return 0.
+ */
unsigned int getMaxCPU();
-/* Automatic cleanup of resources. */
+/**
+ * Automatic cleanup of resources.
+ */
class AutoDelete
@@ -247,11 +332,15 @@ public:
};
-/* Create a temporary directory. */
+/**
+ * Create a temporary directory.
+ */
Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix",
bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755);
-/* Create a temporary file, returning a file handle and its path. */
+/**
+ * Create a temporary file, returning a file handle and its path.
+ */
std::pair<AutoCloseFD, Path> createTempFile(const Path & prefix = "nix");
@@ -294,30 +383,39 @@ public:
};
-/* Kill all processes running under the specified uid by sending them
- a SIGKILL. */
+/**
+ * Kill all processes running under the specified uid by sending them
+ * a SIGKILL.
+ */
void killUser(uid_t uid);
-/* Fork a process that runs the given function, and return the child
- pid to the caller. */
+/**
+ * Fork a process that runs the given function, and return the child
+ * pid to the caller.
+ */
struct ProcessOptions
{
std::string errorPrefix = "";
bool dieWithParent = true;
bool runExitHandlers = false;
bool allowVfork = false;
- int cloneFlags = 0; // use clone() with the specified flags (Linux only)
+ /**
+ * use clone() with the specified flags (Linux only)
+ */
+ int cloneFlags = 0;
};
pid_t startProcess(std::function<void()> fun, const ProcessOptions & options = ProcessOptions());
-/* Run a program and return its stdout in a string (i.e., like the
- shell backtick operator). */
+/**
+ * Run a program and return its stdout in a string (i.e., like the
+ * shell backtick operator).
+ */
std::string runProgram(Path program, bool searchPath = false,
const Strings & args = Strings(),
- const std::optional<std::string> & input = {});
+ const std::optional<std::string> & input = {}, bool isInteractive = false);
struct RunOptions
{
@@ -332,6 +430,7 @@ struct RunOptions
Source * standardIn = nullptr;
Sink * standardOut = nullptr;
bool mergeStderrToStdout = false;
+ bool isInteractive = false;
};
std::pair<int, std::string> runProgram(RunOptions && options);
@@ -339,25 +438,37 @@ std::pair<int, std::string> runProgram(RunOptions && options);
void runProgram2(const RunOptions & options);
-/* Change the stack size. */
+/**
+ * Change the stack size.
+ */
void setStackSize(size_t stackSize);
-/* Restore the original inherited Unix process context (such as signal
- masks, stack size). */
+/**
+ * Restore the original inherited Unix process context (such as signal
+ * masks, stack size).
+ *
+ * See startSignalHandlerThread(), saveSignalMask().
+ */
void restoreProcessContext(bool restoreMounts = true);
-/* Save the current mount namespace. Ignored if called more than
- once. */
+/**
+ * Save the current mount namespace. Ignored if called more than
+ * once.
+ */
void saveMountNamespace();
-/* Restore the mount namespace saved by saveMountNamespace(). Ignored
- if saveMountNamespace() was never called. */
+/**
+ * Restore the mount namespace saved by saveMountNamespace(). Ignored
+ * if saveMountNamespace() was never called.
+ */
void restoreMountNamespace();
-/* Cause this thread to not share any FS attributes with the main
- thread, because this causes setns() in restoreMountNamespace() to
- fail. */
+/**
+ * Cause this thread to not share any FS attributes with the main
+ * thread, because this causes setns() in restoreMountNamespace() to
+ * fail.
+ */
void unshareFilesystem();
@@ -372,16 +483,22 @@ public:
{ }
};
-/* Convert a list of strings to a null-terminated vector of char
- *'s. The result must not be accessed beyond the lifetime of the
- list of strings. */
+/**
+ * Convert a list of strings to a null-terminated vector of `char
+ * *`s. The result must not be accessed beyond the lifetime of the
+ * list of strings.
+ */
std::vector<char *> stringsToCharPtrs(const Strings & ss);
-/* Close all file descriptors except those listed in the given set.
- Good practice in child processes. */
+/**
+ * Close all file descriptors except those listed in the given set.
+ * Good practice in child processes.
+ */
void closeMostFDs(const std::set<int> & exceptions);
-/* Set the close-on-exec flag for the given file descriptor. */
+/**
+ * Set the close-on-exec flag for the given file descriptor.
+ */
void closeOnExec(int fd);
@@ -407,12 +524,16 @@ MakeError(Interrupted, BaseError);
MakeError(FormatError, Error);
-/* String tokenizer. */
+/**
+ * String tokenizer.
+ */
template<class C> C tokenizeString(std::string_view s, std::string_view separators = " \t\n\r");
-/* Concatenate the given strings with a separator between the
- elements. */
+/**
+ * Concatenate the given strings with a separator between the
+ * elements.
+ */
template<class C>
std::string concatStringsSep(const std::string_view sep, const C & ss)
{
@@ -437,7 +558,9 @@ auto concatStrings(Parts && ... parts)
}
-/* Add quotes around a collection of strings. */
+/**
+ * Add quotes around a collection of strings.
+ */
template<class C> Strings quoteStrings(const C & c)
{
Strings res;
@@ -446,17 +569,23 @@ template<class C> Strings quoteStrings(const C & c)
return res;
}
-
-/* Remove trailing whitespace from a string. FIXME: return
- std::string_view. */
+/**
+ * Remove trailing whitespace from a string.
+ *
+ * \todo return std::string_view.
+ */
std::string chomp(std::string_view s);
-/* Remove whitespace from the start and end of a string. */
+/**
+ * Remove whitespace from the start and end of a string.
+ */
std::string trim(std::string_view s, std::string_view whitespace = " \n\r\t");
-/* Replace all occurrences of a string inside another string. */
+/**
+ * Replace all occurrences of a string inside another string.
+ */
std::string replaceStrings(
std::string s,
std::string_view from,
@@ -466,14 +595,18 @@ std::string replaceStrings(
std::string rewriteStrings(std::string s, const StringMap & rewrites);
-/* Convert the exit status of a child as returned by wait() into an
- error string. */
+/**
+ * Convert the exit status of a child as returned by wait() into an
+ * error string.
+ */
std::string statusToString(int status);
bool statusOk(int status);
-/* Parse a string into an integer. */
+/**
+ * Parse a string into an integer.
+ */
template<class N>
std::optional<N> string2Int(const std::string_view s)
{
@@ -486,8 +619,10 @@ std::optional<N> string2Int(const std::string_view s)
}
}
-/* Like string2Int(), but support an optional suffix 'K', 'M', 'G' or
- 'T' denoting a binary unit prefix. */
+/**
+ * Like string2Int(), but support an optional suffix 'K', 'M', 'G' or
+ * 'T' denoting a binary unit prefix.
+ */
template<class N>
N string2IntWithUnitPrefix(std::string_view s)
{
@@ -508,7 +643,9 @@ N string2IntWithUnitPrefix(std::string_view s)
throw UsageError("'%s' is not an integer", s);
}
-/* Parse a string into a float. */
+/**
+ * Parse a string into a float.
+ */
template<class N>
std::optional<N> string2Float(const std::string_view s)
{
@@ -520,7 +657,9 @@ std::optional<N> string2Float(const std::string_view s)
}
-/* Convert a little-endian integer to host order. */
+/**
+ * Convert a little-endian integer to host order.
+ */
template<typename T>
T readLittleEndian(unsigned char * p)
{
@@ -532,66 +671,90 @@ T readLittleEndian(unsigned char * p)
}
-/* Return true iff `s' starts with `prefix'. */
+/**
+ * @return true iff `s` starts with `prefix`.
+ */
bool hasPrefix(std::string_view s, std::string_view prefix);
-/* Return true iff `s' ends in `suffix'. */
+/**
+ * @return true iff `s` ends in `suffix`.
+ */
bool hasSuffix(std::string_view s, std::string_view suffix);
-/* Convert a string to lower case. */
+/**
+ * Convert a string to lower case.
+ */
std::string toLower(const std::string & s);
-/* Escape a string as a shell word. */
+/**
+ * Escape a string as a shell word.
+ */
std::string shellEscape(const std::string_view s);
-/* Exception handling in destructors: print an error message, then
- ignore the exception. */
+/**
+ * Exception handling in destructors: print an error message, then
+ * ignore the exception.
+ */
void ignoreException(Verbosity lvl = lvlError);
-/* Tree formatting. */
+/**
+ * Tree formatting.
+ */
constexpr char treeConn[] = "├───";
constexpr char treeLast[] = "└───";
constexpr char treeLine[] = "│ ";
constexpr char treeNull[] = " ";
-/* Determine whether ANSI escape sequences are appropriate for the
- present output. */
+/**
+ * Determine whether ANSI escape sequences are appropriate for the
+ * present output.
+ */
bool shouldANSI();
-/* Truncate a string to 'width' printable characters. If 'filterAll'
- is true, all ANSI escape sequences are filtered out. Otherwise,
- some escape sequences (such as colour setting) are copied but not
- included in the character count. Also, tabs are expanded to
- spaces. */
-std::string filterANSIEscapes(const std::string & s,
+/**
+ * Truncate a string to 'width' printable characters. If 'filterAll'
+ * is true, all ANSI escape sequences are filtered out. Otherwise,
+ * some escape sequences (such as colour setting) are copied but not
+ * included in the character count. Also, tabs are expanded to
+ * spaces.
+ */
+std::string filterANSIEscapes(std::string_view s,
bool filterAll = false,
unsigned int width = std::numeric_limits<unsigned int>::max());
-/* Base64 encoding/decoding. */
+/**
+ * Base64 encoding/decoding.
+ */
std::string base64Encode(std::string_view s);
std::string base64Decode(std::string_view s);
-/* Remove common leading whitespace from the lines in the string
- 's'. For example, if every line is indented by at least 3 spaces,
- then we remove 3 spaces from the start of every line. */
+/**
+ * Remove common leading whitespace from the lines in the string
+ * 's'. For example, if every line is indented by at least 3 spaces,
+ * then we remove 3 spaces from the start of every line.
+ */
std::string stripIndentation(std::string_view s);
-/* Get the prefix of 's' up to and excluding the next line break (LF
- optionally preceded by CR), and the remainder following the line
- break. */
+/**
+ * Get the prefix of 's' up to and excluding the next line break (LF
+ * optionally preceded by CR), and the remainder following the line
+ * break.
+ */
std::pair<std::string_view, std::string_view> getLine(std::string_view s);
-/* Get a value for the specified key from an associate container. */
+/**
+ * Get a value for the specified key from an associative container.
+ */
template <class T>
const typename T::mapped_type * get(const T & map, const typename T::key_type & key)
{
@@ -608,7 +771,9 @@ typename T::mapped_type * get(T & map, const typename T::key_type & key)
return &i->second;
}
-/* Get a value for the specified key from an associate container, or a default value if the key isn't present. */
+/**
+ * Get a value for the specified key from an associative container, or a default value if the key isn't present.
+ */
template <class T>
const typename T::mapped_type & getOr(T & map,
const typename T::key_type & key,
@@ -619,7 +784,9 @@ const typename T::mapped_type & getOr(T & map,
return i->second;
}
-/* Remove and return the first item from a container. */
+/**
+ * Remove and return the first item from a container.
+ */
template <class T>
std::optional<typename T::value_type> remove_begin(T & c)
{
@@ -631,7 +798,9 @@ std::optional<typename T::value_type> remove_begin(T & c)
}
-/* Remove and return the first item from a container. */
+/**
+ * Remove and return the first item from a container.
+ */
template <class T>
std::optional<typename T::value_type> pop(T & c)
{
@@ -646,25 +815,48 @@ template<typename T>
class Callback;
-/* Start a thread that handles various signals. Also block those signals
- on the current thread (and thus any threads created by it). */
+/**
+ * Start a thread that handles various signals. Also block those signals
+ * on the current thread (and thus any threads created by it).
+ * Saves the signal mask before changing the mask to block those signals.
+ * See saveSignalMask().
+ */
void startSignalHandlerThread();
+/**
+ * Saves the signal mask, which is the signal mask that nix will restore
+ * before creating child processes.
+ * See setChildSignalMask() to set an arbitrary signal mask instead of the
+ * current mask.
+ */
+void saveSignalMask();
+
+/**
+ * Sets the signal mask. Like saveSignalMask() but for a signal set that doesn't
+ * necessarily match the current thread's mask.
+ * See saveSignalMask() to set the saved mask to the current mask.
+ */
+void setChildSignalMask(sigset_t *sigs);
+
struct InterruptCallback
{
virtual ~InterruptCallback() { };
};
-/* Register a function that gets called on SIGINT (in a non-signal
- context). */
+/**
+ * Register a function that gets called on SIGINT (in a non-signal
+ * context).
+ */
std::unique_ptr<InterruptCallback> createInterruptCallback(
std::function<void()> callback);
void triggerInterrupt();
-/* A RAII class that causes the current thread to receive SIGUSR1 when
- the signal handler thread receives SIGINT. That is, this allows
- SIGINT to be multiplexed to multiple threads. */
+/**
+ * A RAII class that causes the current thread to receive SIGUSR1 when
+ * the signal handler thread receives SIGINT. That is, this allows
+ * SIGINT to be multiplexed to multiple threads.
+ */
struct ReceiveInterrupts
{
pthread_t target;
@@ -678,8 +870,10 @@ struct ReceiveInterrupts
-/* A RAII helper that increments a counter on construction and
- decrements it on destruction. */
+/**
+ * A RAII helper that increments a counter on construction and
+ * decrements it on destruction.
+ */
template<typename T>
struct MaintainCount
{
@@ -690,33 +884,50 @@ struct MaintainCount
};
-/* Return the number of rows and columns of the terminal. */
+/**
+ * @return the number of rows and columns of the terminal.
+ */
std::pair<unsigned short, unsigned short> getWindowSize();
-/* Used in various places. */
+/**
+ * Used in various places.
+ */
typedef std::function<bool(const Path & path)> PathFilter;
extern PathFilter defaultPathFilter;
-/* Common initialisation performed in child processes. */
-void commonChildInit(Pipe & logPipe);
+/**
+ * Common initialisation performed in child processes.
+ */
+void commonChildInit();
-/* Create a Unix domain socket. */
+/**
+ * Create a Unix domain socket.
+ */
AutoCloseFD createUnixDomainSocket();
-/* Create a Unix domain socket in listen mode. */
+/**
+ * Create a Unix domain socket in listen mode.
+ */
AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode);
-/* Bind a Unix domain socket to a path. */
+/**
+ * Bind a Unix domain socket to a path.
+ */
void bind(int fd, const std::string & path);
-/* Connect to a Unix domain socket. */
+/**
+ * Connect to a Unix domain socket.
+ */
void connect(int fd, const std::string & path);
-// A Rust/Python-like enumerate() iterator adapter.
-// Borrowed from http://reedbeta.com/blog/python-like-enumerate-in-cpp17.
+/**
+ * A Rust/Python-like enumerate() iterator adapter.
+ *
+ * Borrowed from http://reedbeta.com/blog/python-like-enumerate-in-cpp17.
+ */
template <typename T,
typename TIter = decltype(std::begin(std::declval<T>())),
typename = decltype(std::end(std::declval<T>()))>
@@ -726,23 +937,25 @@ constexpr auto enumerate(T && iterable)
{
size_t i;
TIter iter;
- bool operator != (const iterator & other) const { return iter != other.iter; }
- void operator ++ () { ++i; ++iter; }
- auto operator * () const { return std::tie(i, *iter); }
+ constexpr bool operator != (const iterator & other) const { return iter != other.iter; }
+ constexpr void operator ++ () { ++i; ++iter; }
+ constexpr auto operator * () const { return std::tie(i, *iter); }
};
struct iterable_wrapper
{
T iterable;
- auto begin() { return iterator{ 0, std::begin(iterable) }; }
- auto end() { return iterator{ 0, std::end(iterable) }; }
+ constexpr auto begin() { return iterator{ 0, std::begin(iterable) }; }
+ constexpr auto end() { return iterator{ 0, std::end(iterable) }; }
};
return iterable_wrapper{ std::forward<T>(iterable) };
}
-// C++17 std::visit boilerplate
+/**
+ * C++17 std::visit boilerplate
+ */
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;
@@ -750,8 +963,10 @@ template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;
std::string showBytes(uint64_t bytes);
-/* Provide an addition operator between strings and string_views
- inexplicably omitted from the standard library. */
+/**
+ * Provide an addition operator between strings and string_views
+ * inexplicably omitted from the standard library.
+ */
inline std::string operator + (const std::string & s1, std::string_view s2)
{
auto s = s1;
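
A quick sketch exercising a couple of the helpers documented in this header, enumerate() and the string utilities (not part of the patch):

    // enumerate() yields (index, element) pairs like Python's enumerate();
    // tokenizeString()/concatStringsSep() split and rejoin strings.
    #include "util.hh"
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        std::vector<std::string> words{"foo", "bar", "baz"};

        for (auto [i, w] : nix::enumerate(words))
            std::cout << i << ": " << w << "\n";

        auto tokens = nix::tokenizeString<nix::Strings>("a b  c");   // default separators: whitespace
        std::cout << nix::concatStringsSep(",", tokens) << "\n";     // prints "a,b,c"
    }
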
diff --git a/src/libutil/xml-writer.hh b/src/libutil/xml-writer.hh
index 4c91adee6..74f53b7ca 100644
--- a/src/libutil/xml-writer.hh
+++ b/src/libutil/xml-writer.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include <iostream>
#include <string>
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
index da76c2ace..6510df8f0 100644
--- a/src/nix-build/nix-build.cc
+++ b/src/nix-build/nix-build.cc
@@ -84,7 +84,6 @@ static void main_nix_build(int argc, char * * argv)
auto interactive = isatty(STDIN_FILENO) && isatty(STDERR_FILENO);
Strings attrPaths;
Strings left;
- RepairFlag repair = NoRepair;
BuildMode buildMode = bmNormal;
bool readStdin = false;
@@ -169,11 +168,6 @@ static void main_nix_build(int argc, char * * argv)
else if (*arg == "--dry-run")
dryRun = true;
- else if (*arg == "--repair") {
- repair = Repair;
- buildMode = bmRepair;
- }
-
else if (*arg == "--run-env") // obsolete
runEnv = true;
@@ -219,9 +213,9 @@ static void main_nix_build(int argc, char * * argv)
// read the shebang to understand which packages to read from. Since
// this is handled via nix-shell -p, we wrap our ruby script execution
// in ruby -e 'load' which ignores the shebangs.
- envCommand = (format("exec %1% %2% -e 'load(ARGV.shift)' -- %3% %4%") % execArgs % interpreter % shellEscape(script) % joined.str()).str();
+ envCommand = fmt("exec %1% %2% -e 'load(ARGV.shift)' -- %3% %4%", execArgs, interpreter, shellEscape(script), joined.str());
} else {
- envCommand = (format("exec %1% %2% %3% %4%") % execArgs % interpreter % shellEscape(script) % joined.str()).str();
+ envCommand = fmt("exec %1% %2% %3% %4%", execArgs, interpreter, shellEscape(script), joined.str());
}
}
@@ -249,7 +243,8 @@ static void main_nix_build(int argc, char * * argv)
auto evalStore = myArgs.evalStoreUrl ? openStore(*myArgs.evalStoreUrl) : store;
auto state = std::make_unique<EvalState>(myArgs.searchPath, evalStore, store);
- state->repair = repair;
+ state->repair = myArgs.repair;
+ if (myArgs.repair) buildMode = bmRepair;
auto autoArgs = myArgs.getAutoArgs(*state);
@@ -289,7 +284,7 @@ static void main_nix_build(int argc, char * * argv)
else
for (auto i : left) {
if (fromArgs)
- exprs.push_back(state->parseExprFromString(std::move(i), absPath(".")));
+ exprs.push_back(state->parseExprFromString(std::move(i), state->rootPath(CanonPath::fromCwd())));
else {
auto absolute = i;
try {
@@ -385,7 +380,9 @@ static void main_nix_build(int argc, char * * argv)
if (!shell) {
try {
- auto expr = state->parseExprFromString("(import <nixpkgs> {}).bashInteractive", absPath("."));
+ auto expr = state->parseExprFromString(
+ "(import <nixpkgs> {}).bashInteractive",
+ state->rootPath(CanonPath::fromCwd()));
Value v;
state->eval(expr, v);
@@ -440,7 +437,7 @@ static void main_nix_build(int argc, char * * argv)
shell = store->printStorePath(shellDrvOutputs.at("out").value()) + "/bin/bash";
}
- if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
auto resolvedDrv = drv.tryResolve(*store);
assert(resolvedDrv && "Successfully resolved the derivation");
drv = *resolvedDrv;
diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc
index 338a7d18e..740737ffe 100755
--- a/src/nix-channel/nix-channel.cc
+++ b/src/nix-channel/nix-channel.cc
@@ -168,7 +168,8 @@ static int main_nix_channel(int argc, char ** argv)
nixDefExpr = settings.useXDGBaseDirectories ? createNixStateDir() + "/defexpr" : home + "/.nix-defexpr";
// Figure out the name of the channels profile.
- profile = profilesDir() + "/channels";
+ profile = profilesDir() + "/channels";
+ createDirs(dirOf(profile));
enum {
cNone,
diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc
index e413faffe..cb1f42e35 100644
--- a/src/nix-collect-garbage/nix-collect-garbage.cc
+++ b/src/nix-collect-garbage/nix-collect-garbage.cc
@@ -40,7 +40,7 @@ void removeOldGenerations(std::string dir)
throw;
}
if (link.find("link") != std::string::npos) {
- printInfo(format("removing old generations of profile %1%") % path);
+ printInfo("removing old generations of profile %s", path);
if (deleteOlderThan != "")
deleteGenerationsOlderThan(path, deleteOlderThan, dryRun);
else
@@ -77,8 +77,12 @@ static int main_nix_collect_garbage(int argc, char * * argv)
return true;
});
- auto profilesDir = settings.nixStateDir + "/profiles";
- if (removeOld) removeOldGenerations(profilesDir);
+ if (removeOld) {
+ std::set<Path> dirsToClean = {
+ profilesDir(), settings.nixStateDir + "/profiles", dirOf(getDefaultProfile())};
+ for (auto & dir : dirsToClean)
+ removeOldGenerations(dir);
+ }
// Run the actual garbage collector.
if (!dryRun) {
diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc
index 841d50fd3..7f2bb93b6 100755
--- a/src/nix-copy-closure/nix-copy-closure.cc
+++ b/src/nix-copy-closure/nix-copy-closure.cc
@@ -22,7 +22,7 @@ static int main_nix_copy_closure(int argc, char ** argv)
printVersion("nix-copy-closure");
else if (*arg == "--gzip" || *arg == "--bzip2" || *arg == "--xz") {
if (*arg != "--gzip")
- printMsg(lvlError, format("Warning: '%1%' is not implemented, falling back to gzip") % *arg);
+ warn("'%1%' is not implemented, falling back to gzip", *arg);
gzip = true;
} else if (*arg == "--from")
toMode = false;
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index 0daf374de..5e94f2d14 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -44,7 +44,7 @@ typedef enum {
struct InstallSourceInfo
{
InstallSourceType type;
- Path nixExprPath; /* for srcNixExprDrvs, srcNixExprs */
+ std::shared_ptr<SourcePath> nixExprPath; /* for srcNixExprDrvs, srcNixExprs */
Path profile; /* for srcProfile */
std::string systemFilter; /* for srcNixExprDrvs */
Bindings * autoArgs;
@@ -92,9 +92,11 @@ static bool parseInstallSourceOptions(Globals & globals,
}
-static bool isNixExpr(const Path & path, struct stat & st)
+static bool isNixExpr(const SourcePath & path, struct InputAccessor::Stat & st)
{
- return S_ISREG(st.st_mode) || (S_ISDIR(st.st_mode) && pathExists(path + "/default.nix"));
+ return
+ st.type == InputAccessor::tRegular
+ || (st.type == InputAccessor::tDirectory && (path + "default.nix").pathExists());
}
@@ -102,10 +104,10 @@ static constexpr size_t maxAttrs = 1024;
static void getAllExprs(EvalState & state,
- const Path & path, StringSet & seen, BindingsBuilder & attrs)
+ const SourcePath & path, StringSet & seen, BindingsBuilder & attrs)
{
StringSet namesSorted;
- for (auto & i : readDirectory(path)) namesSorted.insert(i.name);
+ for (auto & [name, _] : path.readDirectory()) namesSorted.insert(name);
for (auto & i : namesSorted) {
/* Ignore the manifest.nix used by profiles. This is
@@ -113,13 +115,16 @@ static void getAllExprs(EvalState & state,
are implemented using profiles). */
if (i == "manifest.nix") continue;
- Path path2 = path + "/" + i;
+ SourcePath path2 = path + i;
- struct stat st;
- if (stat(path2.c_str(), &st) == -1)
+ InputAccessor::Stat st;
+ try {
+ st = path2.resolveSymlinks().lstat();
+ } catch (Error &) {
continue; // ignore dangling symlinks in ~/.nix-defexpr
+ }
- if (isNixExpr(path2, st) && (!S_ISREG(st.st_mode) || hasSuffix(path2, ".nix"))) {
+ if (isNixExpr(path2, st) && (st.type != InputAccessor::tRegular || hasSuffix(path2.baseName(), ".nix"))) {
/* Strip off the `.nix' filename suffix (if applicable),
otherwise the attribute cannot be selected with the
`-A' option. Useful if you want to stick a Nix
@@ -129,21 +134,20 @@ static void getAllExprs(EvalState & state,
attrName = std::string(attrName, 0, attrName.size() - 4);
if (!seen.insert(attrName).second) {
std::string suggestionMessage = "";
- if (path2.find("channels") != std::string::npos && path.find("channels") != std::string::npos) {
+ if (path2.path.abs().find("channels") != std::string::npos && path.path.abs().find("channels") != std::string::npos)
suggestionMessage = fmt("\nsuggestion: remove '%s' from either the root channels or the user channels", attrName);
- }
printError("warning: name collision in input Nix expressions, skipping '%1%'"
"%2%", path2, suggestionMessage);
continue;
}
/* Load the expression on demand. */
auto vArg = state.allocValue();
- vArg->mkString(path2);
+ vArg->mkString(path2.path.abs());
if (seen.size() == maxAttrs)
throw Error("too many Nix expressions in directory '%1%'", path);
attrs.alloc(attrName).mkApp(&state.getBuiltin("import"), vArg);
}
- else if (S_ISDIR(st.st_mode))
+ else if (st.type == InputAccessor::tDirectory)
/* `path2' is a directory (with no default.nix in it);
recurse into it. */
getAllExprs(state, path2, seen, attrs);
@@ -152,11 +156,9 @@ static void getAllExprs(EvalState & state,
-static void loadSourceExpr(EvalState & state, const Path & path, Value & v)
+static void loadSourceExpr(EvalState & state, const SourcePath & path, Value & v)
{
- struct stat st;
- if (stat(path.c_str(), &st) == -1)
- throw SysError("getting information about '%1%'", path);
+ auto st = path.resolveSymlinks().lstat();
if (isNixExpr(path, st))
state.evalFile(path, v);
@@ -167,7 +169,7 @@ static void loadSourceExpr(EvalState & state, const Path & path, Value & v)
set flat, not nested, to make it easier for a user to have a
~/.nix-defexpr directory that includes some system-wide
directory). */
- else if (S_ISDIR(st.st_mode)) {
+ else if (st.type == InputAccessor::tDirectory) {
auto attrs = state.buildBindings(maxAttrs);
attrs.alloc("_combineChannels").mkList(0);
StringSet seen;
@@ -179,7 +181,7 @@ static void loadSourceExpr(EvalState & state, const Path & path, Value & v)
}
-static void loadDerivations(EvalState & state, Path nixExprPath,
+static void loadDerivations(EvalState & state, const SourcePath & nixExprPath,
std::string systemFilter, Bindings & autoArgs,
const std::string & pathPrefix, DrvInfos & elems)
{
@@ -390,7 +392,7 @@ static void queryInstSources(EvalState & state,
/* Load the derivations from the (default or specified)
Nix expression. */
DrvInfos allElems;
- loadDerivations(state, instSource.nixExprPath,
+ loadDerivations(state, *instSource.nixExprPath,
instSource.systemFilter, *instSource.autoArgs, "", allElems);
elems = filterBySelector(state, allElems, args, newestOnly);
@@ -407,10 +409,10 @@ static void queryInstSources(EvalState & state,
case srcNixExprs: {
Value vArg;
- loadSourceExpr(state, instSource.nixExprPath, vArg);
+ loadSourceExpr(state, *instSource.nixExprPath, vArg);
for (auto & i : args) {
- Expr * eFun = state.parseExprFromString(i, absPath("."));
+ Expr * eFun = state.parseExprFromString(i, state.rootPath(CanonPath::fromCwd()));
Value vFun, vTmp;
state.eval(eFun, vFun);
vTmp.mkApp(&vFun, &vArg);
@@ -462,7 +464,7 @@ static void queryInstSources(EvalState & state,
case srcAttrPath: {
Value vRoot;
- loadSourceExpr(state, instSource.nixExprPath, vRoot);
+ loadSourceExpr(state, *instSource.nixExprPath, vRoot);
for (auto & i : args) {
Value & v(*findAlongAttrPath(state, i, *instSource.autoArgs, vRoot).first);
getDerivations(state, v, "", *instSource.autoArgs, elems, true);
@@ -500,7 +502,7 @@ static bool keep(DrvInfo & drv)
static void installDerivations(Globals & globals,
const Strings & args, const Path & profile)
{
- debug(format("installing derivations"));
+ debug("installing derivations");
/* Get the set of user environment elements to be installed. */
DrvInfos newElems, newElemsTmp;
@@ -579,7 +581,7 @@ typedef enum { utLt, utLeq, utEq, utAlways } UpgradeType;
static void upgradeDerivations(Globals & globals,
const Strings & args, UpgradeType upgradeType)
{
- debug(format("upgrading derivations"));
+ debug("upgrading derivations");
/* Upgrade works as follows: we take all currently installed
derivations, and for any derivation matching any selector, look
@@ -768,7 +770,7 @@ static void opSet(Globals & globals, Strings opFlags, Strings opArgs)
if (globals.dryRun) return;
globals.state->store->buildPaths(paths, globals.state->repair ? bmRepair : bmNormal);
- debug(format("switching to new user environment"));
+ debug("switching to new user environment");
Path generation = createGeneration(
ref<LocalFSStore>(store2),
globals.profile,
@@ -960,7 +962,7 @@ static void queryJSON(Globals & globals, std::vector<DrvInfo> & elems, bool prin
printError("derivation '%s' has invalid meta attribute '%s'", i.queryName(), j);
metaObj[j] = nullptr;
} else {
- PathSet context;
+ NixStringContext context;
metaObj[j] = printValueAsJSON(*globals.state, true, *v, noPos, context);
}
}
@@ -1030,7 +1032,7 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs)
installedElems = queryInstalled(*globals.state, globals.profile);
if (source == sAvailable || compareVersions)
- loadDerivations(*globals.state, globals.instSource.nixExprPath,
+ loadDerivations(*globals.state, *globals.instSource.nixExprPath,
globals.instSource.systemFilter, *globals.instSource.autoArgs,
attrPath, availElems);
@@ -1093,7 +1095,7 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs)
try {
if (i.hasFailed()) continue;
- //Activity act(*logger, lvlDebug, format("outputting query result '%1%'") % i.attrPath);
+ //Activity act(*logger, lvlDebug, "outputting query result '%1%'", i.attrPath);
if (globals.prebuiltOnly &&
!validPaths.count(i.queryOutPath()) &&
@@ -1229,11 +1231,11 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs)
xml.writeEmptyElement("meta", attrs2);
} else if (v->type() == nInt) {
attrs2["type"] = "int";
- attrs2["value"] = (format("%1%") % v->integer).str();
+ attrs2["value"] = fmt("%1%", v->integer);
xml.writeEmptyElement("meta", attrs2);
} else if (v->type() == nFloat) {
attrs2["type"] = "float";
- attrs2["value"] = (format("%1%") % v->fpoint).str();
+ attrs2["value"] = fmt("%1%", v->fpoint);
xml.writeEmptyElement("meta", attrs2);
} else if (v->type() == nBool) {
attrs2["type"] = "bool";
@@ -1337,11 +1339,11 @@ static void opListGenerations(Globals & globals, Strings opFlags, Strings opArgs
for (auto & i : gens) {
tm t;
if (!localtime_r(&i.creationTime, &t)) throw Error("cannot convert time");
- cout << format("%|4| %|4|-%|02|-%|02| %|02|:%|02|:%|02| %||\n")
- % i.number
- % (t.tm_year + 1900) % (t.tm_mon + 1) % t.tm_mday
- % t.tm_hour % t.tm_min % t.tm_sec
- % (i.number == curGen ? "(current)" : "");
+ logger->cout("%|4| %|4|-%|02|-%|02| %|02|:%|02|:%|02| %||",
+ i.number,
+ t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
+ t.tm_hour, t.tm_min, t.tm_sec,
+ i.number == curGen ? "(current)" : "");
}
}
@@ -1387,28 +1389,27 @@ static int main_nix_env(int argc, char * * argv)
{
Strings opFlags, opArgs;
Operation op = 0;
- RepairFlag repair = NoRepair;
+ std::string opName;
+ bool showHelp = false;
std::string file;
Globals globals;
globals.instSource.type = srcUnknown;
- {
- Path nixExprPath = settings.useXDGBaseDirectories ? createNixStateDir() + "/defexpr" : getHome() + "/.nix-defexpr";
- globals.instSource.nixExprPath = nixExprPath;
- }
globals.instSource.systemFilter = "*";
- if (!pathExists(globals.instSource.nixExprPath)) {
+ Path nixExprPath = settings.useXDGBaseDirectories ? createNixStateDir() + "/defexpr" : getHome() + "/.nix-defexpr";
+
+ if (!pathExists(nixExprPath)) {
try {
- createDirs(globals.instSource.nixExprPath);
+ createDirs(nixExprPath);
replaceSymlink(
- fmt("%s/profiles/per-user/%s/channels", settings.nixStateDir, getUserName()),
- globals.instSource.nixExprPath + "/channels");
+ defaultChannelsDir(),
+ nixExprPath + "/channels");
if (getuid() != 0)
replaceSymlink(
- fmt("%s/profiles/per-user/root/channels", settings.nixStateDir),
- globals.instSource.nixExprPath + "/channels_root");
+ rootChannelsDir(),
+ nixExprPath + "/channels_root");
} catch (Error &) { }
}
@@ -1426,37 +1427,59 @@ static int main_nix_env(int argc, char * * argv)
Operation oldOp = op;
if (*arg == "--help")
- showManPage("nix-env");
+ showHelp = true;
else if (*arg == "--version")
op = opVersion;
- else if (*arg == "--install" || *arg == "-i")
+ else if (*arg == "--install" || *arg == "-i") {
op = opInstall;
+ opName = "-install";
+ }
else if (*arg == "--force-name") // undocumented flag for nix-install-package
globals.forceName = getArg(*arg, arg, end);
- else if (*arg == "--uninstall" || *arg == "-e")
+ else if (*arg == "--uninstall" || *arg == "-e") {
op = opUninstall;
- else if (*arg == "--upgrade" || *arg == "-u")
+ opName = "-uninstall";
+ }
+ else if (*arg == "--upgrade" || *arg == "-u") {
op = opUpgrade;
- else if (*arg == "--set-flag")
+ opName = "-upgrade";
+ }
+ else if (*arg == "--set-flag") {
op = opSetFlag;
- else if (*arg == "--set")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--set") {
op = opSet;
- else if (*arg == "--query" || *arg == "-q")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--query" || *arg == "-q") {
op = opQuery;
+ opName = "-query";
+ }
else if (*arg == "--profile" || *arg == "-p")
globals.profile = absPath(getArg(*arg, arg, end));
else if (*arg == "--file" || *arg == "-f")
file = getArg(*arg, arg, end);
- else if (*arg == "--switch-profile" || *arg == "-S")
+ else if (*arg == "--switch-profile" || *arg == "-S") {
op = opSwitchProfile;
- else if (*arg == "--switch-generation" || *arg == "-G")
+ opName = "-switch-profile";
+ }
+ else if (*arg == "--switch-generation" || *arg == "-G") {
op = opSwitchGeneration;
- else if (*arg == "--rollback")
+ opName = "-switch-generation";
+ }
+ else if (*arg == "--rollback") {
op = opRollback;
- else if (*arg == "--list-generations")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--list-generations") {
op = opListGenerations;
- else if (*arg == "--delete-generations")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--delete-generations") {
op = opDeleteGenerations;
+ opName = arg->substr(1);
+ }
else if (*arg == "--dry-run") {
printInfo("(dry run; not doing anything)");
globals.dryRun = true;
@@ -1465,8 +1488,6 @@ static int main_nix_env(int argc, char * * argv)
globals.instSource.systemFilter = getArg(*arg, arg, end);
else if (*arg == "--prebuilt-only" || *arg == "-b")
globals.prebuiltOnly = true;
- else if (*arg == "--repair")
- repair = Repair;
else if (*arg != "" && arg->at(0) == '-') {
opFlags.push_back(*arg);
/* FIXME: hacky */
@@ -1485,15 +1506,18 @@ static int main_nix_env(int argc, char * * argv)
myArgs.parseCmdline(argvToStrings(argc, argv));
+ if (showHelp) showManPage("nix-env" + opName);
if (!op) throw UsageError("no operation specified");
auto store = openStore();
globals.state = std::shared_ptr<EvalState>(new EvalState(myArgs.searchPath, store));
- globals.state->repair = repair;
+ globals.state->repair = myArgs.repair;
- if (file != "")
- globals.instSource.nixExprPath = lookupFileArg(*globals.state, file);
+ globals.instSource.nixExprPath = std::make_shared<SourcePath>(
+ file != ""
+ ? lookupFileArg(*globals.state, file)
+ : globals.state->rootPath(CanonPath(nixExprPath)));
globals.instSource.autoArgs = myArgs.getAutoArgs(*globals.state);
diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc
index cad7f9c88..9e916abc4 100644
--- a/src/nix-env/user-env.cc
+++ b/src/nix-env/user-env.cc
@@ -19,10 +19,10 @@ DrvInfos queryInstalled(EvalState & state, const Path & userEnv)
DrvInfos elems;
if (pathExists(userEnv + "/manifest.json"))
throw Error("profile '%s' is incompatible with 'nix-env'; please use 'nix profile' instead", userEnv);
- Path manifestFile = userEnv + "/manifest.nix";
+ auto manifestFile = userEnv + "/manifest.nix";
if (pathExists(manifestFile)) {
Value v;
- state.evalFile(manifestFile, v);
+ state.evalFile(state.rootPath(CanonPath(manifestFile)), v);
Bindings & bindings(*state.allocBindings(0));
getDerivations(state, v, "", bindings, elems, false);
}
@@ -41,7 +41,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
if (auto drvPath = i.queryDrvPath())
drvsToBuild.push_back({*drvPath});
- debug(format("building user environment dependencies"));
+ debug("building user environment dependencies");
state.store->buildPaths(
toDerivedPaths(drvsToBuild),
state.repair ? bmRepair : bmNormal);
@@ -114,14 +114,12 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
Value envBuilder;
state.eval(state.parseExprFromString(
#include "buildenv.nix.gen.hh"
- , "/"), envBuilder);
+ , state.rootPath(CanonPath::root)), envBuilder);
/* Construct a Nix expression that calls the user environment
builder with the manifest as argument. */
auto attrs = state.buildBindings(3);
- attrs.alloc("manifest").mkString(
- state.store->printStorePath(manifestFile),
- {state.store->printStorePath(manifestFile)});
+ state.mkStorePathString(manifestFile, attrs.alloc("manifest"));
attrs.insert(state.symbols.create("derivations"), &manifest);
Value args;
args.mkAttrs(attrs);
@@ -132,7 +130,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
/* Evaluate it. */
debug("evaluating user environment builder");
state.forceValue(topLevel, [&]() { return topLevel.determinePos(noPos); });
- PathSet context;
+ NixStringContext context;
Attr & aDrvPath(*topLevel.attrs->find(state.sDrvPath));
auto topLevelDrv = state.coerceToStorePath(aDrvPath.pos, *aDrvPath.value, context, "");
Attr & aOutPath(*topLevel.attrs->find(state.sOutPath));
@@ -159,7 +157,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
return false;
}
- debug(format("switching to new user environment"));
+ debug("switching to new user environment");
Path generation = createGeneration(ref<LocalFSStore>(store2), profile, topLevelOut);
switchLink(profile, generation);
}
diff --git a/src/nix-env/user-env.hh b/src/nix-env/user-env.hh
index 10646f713..af45d2d85 100644
--- a/src/nix-env/user-env.hh
+++ b/src/nix-env/user-env.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "get-drvs.hh"
diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc
index 6b5ba595d..446b27e66 100644
--- a/src/nix-instantiate/nix-instantiate.cc
+++ b/src/nix-instantiate/nix-instantiate.cc
@@ -43,7 +43,7 @@ void processExpr(EvalState & state, const Strings & attrPaths,
Value & v(*findAlongAttrPath(state, i, autoArgs, vRoot).first);
state.forceValue(v, [&]() { return v.determinePos(noPos); });
- PathSet context;
+ NixStringContext context;
if (evalOnly) {
Value vRes;
if (autoArgs.empty())
@@ -102,7 +102,6 @@ static int main_nix_instantiate(int argc, char * * argv)
bool strict = false;
Strings attrPaths;
bool wantsReadWrite = false;
- RepairFlag repair = NoRepair;
struct MyArgs : LegacyArgs, MixEvalArgs
{
@@ -140,8 +139,6 @@ static int main_nix_instantiate(int argc, char * * argv)
xmlOutputSourceLocation = false;
else if (*arg == "--strict")
strict = true;
- else if (*arg == "--repair")
- repair = Repair;
else if (*arg == "--dry-run")
settings.readOnlyMode = true;
else if (*arg != "" && arg->at(0) == '-')
@@ -160,7 +157,7 @@ static int main_nix_instantiate(int argc, char * * argv)
auto evalStore = myArgs.evalStoreUrl ? openStore(*myArgs.evalStoreUrl) : store;
auto state = std::make_unique<EvalState>(myArgs.searchPath, evalStore, store);
- state->repair = repair;
+ state->repair = myArgs.repair;
Bindings & autoArgs = *myArgs.getAutoArgs(*state);
@@ -168,9 +165,11 @@ static int main_nix_instantiate(int argc, char * * argv)
if (findFile) {
for (auto & i : files) {
- Path p = state->findFile(i);
- if (p == "") throw Error("unable to find '%1%'", i);
- std::cout << p << std::endl;
+ auto p = state->findFile(i);
+ if (auto fn = p.getPhysicalPath())
+ std::cout << fn->abs() << std::endl;
+ else
+ throw Error("'%s' has no physical path", p);
}
return 0;
}
@@ -184,7 +183,7 @@ static int main_nix_instantiate(int argc, char * * argv)
for (auto & i : files) {
Expr * e = fromArgs
- ? state->parseExprFromString(i, absPath("."))
+ ? state->parseExprFromString(i, state->rootPath(CanonPath::fromCwd()))
: state->parseExprFromFile(resolveExprPath(state->checkSourcePath(lookupFileArg(*state, i))));
processExpr(*state, attrPaths, parseOnly, strict, autoArgs,
evalOnly, outputKind, xmlOutputSourceLocation, e);
diff --git a/src/nix-store/dotgraph.hh b/src/nix-store/dotgraph.hh
index 73b8d06b9..4fd944080 100644
--- a/src/nix-store/dotgraph.hh
+++ b/src/nix-store/dotgraph.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "store-api.hh"
diff --git a/src/nix-store/graphml.cc b/src/nix-store/graphml.cc
index 425d61e53..439557658 100644
--- a/src/nix-store/graphml.cc
+++ b/src/nix-store/graphml.cc
@@ -57,7 +57,7 @@ void printGraphML(ref<Store> store, StorePathSet && roots)
<< "<graphml xmlns='http://graphml.graphdrawing.org/xmlns'\n"
<< " xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'\n"
<< " xsi:schemaLocation='http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd'>\n"
- << "<key id='narSize' for='node' attr.name='narSize' attr.type='int'/>"
+ << "<key id='narSize' for='node' attr.name='narSize' attr.type='long'/>"
<< "<key id='name' for='node' attr.name='name' attr.type='string'/>"
<< "<key id='type' for='node' attr.name='type' attr.type='string'/>"
<< "<graph id='G' edgedefault='directed'>\n";
diff --git a/src/nix-store/graphml.hh b/src/nix-store/graphml.hh
index 78be8a367..bd3a4a37c 100644
--- a/src/nix-store/graphml.hh
+++ b/src/nix-store/graphml.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "store-api.hh"
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index d4218550a..61c189efb 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -72,11 +72,13 @@ static PathSet realisePath(StorePathWithOutputs path, bool build = true)
Derivation drv = store->derivationFromPath(path.path);
rootNr++;
+ /* FIXME: Encode this empty special case explicitly in the type. */
if (path.outputs.empty())
for (auto & i : drv.outputs) path.outputs.insert(i.first);
PathSet outputs;
for (auto & j : path.outputs) {
+ /* Match outputs of a store path with outputs of the derivation that produces it. */
DerivationOutputs::iterator i = drv.outputs.find(j);
if (i == drv.outputs.end())
throw Error("derivation '%s' does not have an output named '%s'",
@@ -141,6 +143,7 @@ static void opRealise(Strings opFlags, Strings opArgs)
toDerivedPaths(paths),
willBuild, willSubstitute, unknown, downloadSize, narSize);
+ /* Filter out unknown paths from `paths`. */
if (ignoreUnknown) {
std::vector<StorePathWithOutputs> paths2;
for (auto & i : paths)
@@ -201,10 +204,10 @@ static void opAddFixed(Strings opFlags, Strings opArgs)
/* Hack to support caching in `nix-prefetch-url'. */
static void opPrintFixedPath(Strings opFlags, Strings opArgs)
{
- auto recursive = FileIngestionMethod::Flat;
+ auto method = FileIngestionMethod::Flat;
for (auto i : opFlags)
- if (i == "--recursive") recursive = FileIngestionMethod::Recursive;
+ if (i == "--recursive") method = FileIngestionMethod::Recursive;
else throw UsageError("unknown flag '%1%'", i);
if (opArgs.size() != 3)
@@ -215,7 +218,13 @@ static void opPrintFixedPath(Strings opFlags, Strings opArgs)
std::string hash = *i++;
std::string name = *i++;
- cout << fmt("%s\n", store->printStorePath(store->makeFixedOutputPath(recursive, Hash::parseAny(hash, hashAlgo), name)));
+ cout << fmt("%s\n", store->printStorePath(store->makeFixedOutputPath(name, FixedOutputInfo {
+ .hash = {
+ .method = method,
+ .hash = Hash::parseAny(hash, hashAlgo),
+ },
+ .references = {},
+ })));
}
@@ -274,17 +283,17 @@ static void printTree(const StorePath & path,
static void opQuery(Strings opFlags, Strings opArgs)
{
enum QueryType
- { qDefault, qOutputs, qRequisites, qReferences, qReferrers
+ { qOutputs, qRequisites, qReferences, qReferrers
, qReferrersClosure, qDeriver, qBinding, qHash, qSize
, qTree, qGraph, qGraphML, qResolve, qRoots };
- QueryType query = qDefault;
+ std::optional<QueryType> query;
bool useOutput = false;
bool includeOutputs = false;
bool forceRealise = false;
std::string bindingName;
for (auto & i : opFlags) {
- QueryType prev = query;
+ std::optional<QueryType> prev = query;
if (i == "--outputs") query = qOutputs;
else if (i == "--requisites" || i == "-R") query = qRequisites;
else if (i == "--references") query = qReferences;
@@ -309,15 +318,15 @@ static void opQuery(Strings opFlags, Strings opArgs)
else if (i == "--force-realise" || i == "--force-realize" || i == "-f") forceRealise = true;
else if (i == "--include-outputs") includeOutputs = true;
else throw UsageError("unknown flag '%1%'", i);
- if (prev != qDefault && prev != query)
+ if (prev && prev != query)
throw UsageError("query type '%1%' conflicts with earlier flag", i);
}
- if (query == qDefault) query = qOutputs;
+ if (!query) query = qOutputs;
RunPager pager;
- switch (query) {
+ switch (*query) {
case qOutputs: {
for (auto & i : opArgs) {
@@ -457,7 +466,7 @@ static void opPrintEnv(Strings opFlags, Strings opArgs)
/* Print each environment variable in the derivation in a format
* that can be sourced by the shell. */
for (auto & i : drv.env)
- cout << format("export %1%; %1%=%2%\n") % i.first % shellEscape(i.second);
+ logger->cout("export %1%; %1%=%2%\n", i.first, shellEscape(i.second));
/* Also output the arguments. This doesn't preserve whitespace in
arguments. */
@@ -840,7 +849,7 @@ static void opServe(Strings opFlags, Strings opArgs)
case cmdQueryValidPaths: {
bool lock = readInt(in);
bool substitute = readInt(in);
- auto paths = worker_proto::read(*store, in, Phantom<StorePathSet> {});
+ auto paths = WorkerProto<StorePathSet>::read(*store, in);
if (lock && writeAllowed)
for (auto & path : paths)
store->addTempRoot(path);
@@ -849,19 +858,19 @@ static void opServe(Strings opFlags, Strings opArgs)
store->substitutePaths(paths);
}
- worker_proto::write(*store, out, store->queryValidPaths(paths));
+ workerProtoWrite(*store, out, store->queryValidPaths(paths));
break;
}
case cmdQueryPathInfos: {
- auto paths = worker_proto::read(*store, in, Phantom<StorePathSet> {});
+ auto paths = WorkerProto<StorePathSet>::read(*store, in);
// !!! Maybe we want a queryPathInfos?
for (auto & i : paths) {
try {
auto info = store->queryPathInfo(i);
out << store->printStorePath(info->path)
<< (info->deriver ? store->printStorePath(*info->deriver) : "");
- worker_proto::write(*store, out, info->references);
+ workerProtoWrite(*store, out, info->references);
// !!! Maybe we want compression?
out << info->narSize // downloadSize
<< info->narSize;
@@ -889,7 +898,7 @@ static void opServe(Strings opFlags, Strings opArgs)
case cmdExportPaths: {
readInt(in); // obsolete
- store->exportPaths(worker_proto::read(*store, in, Phantom<StorePathSet> {}), out);
+ store->exportPaths(WorkerProto<StorePathSet>::read(*store, in), out);
break;
}
@@ -932,7 +941,10 @@ static void opServe(Strings opFlags, Strings opArgs)
if (GET_PROTOCOL_MINOR(clientVersion) >= 3)
out << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime;
if (GET_PROTOCOL_MINOR(clientVersion) >= 6) {
- worker_proto::write(*store, out, status.builtOutputs);
+ DrvOutputs builtOutputs;
+ for (auto & [output, realisation] : status.builtOutputs)
+ builtOutputs.insert_or_assign(realisation.id, realisation);
+ workerProtoWrite(*store, out, builtOutputs);
}
break;
@@ -941,9 +953,9 @@ static void opServe(Strings opFlags, Strings opArgs)
case cmdQueryClosure: {
bool includeOutputs = readInt(in);
StorePathSet closure;
- store->computeFSClosure(worker_proto::read(*store, in, Phantom<StorePathSet> {}),
+ store->computeFSClosure(WorkerProto<StorePathSet>::read(*store, in),
closure, false, includeOutputs);
- worker_proto::write(*store, out, closure);
+ workerProtoWrite(*store, out, closure);
break;
}
@@ -958,10 +970,10 @@ static void opServe(Strings opFlags, Strings opArgs)
};
if (deriver != "")
info.deriver = store->parseStorePath(deriver);
- info.references = worker_proto::read(*store, in, Phantom<StorePathSet> {});
+ info.references = WorkerProto<StorePathSet>::read(*store, in);
in >> info.registrationTime >> info.narSize >> info.ultimate;
info.sigs = readStrings<StringSet>(in);
- info.ca = parseContentAddressOpt(readString(in));
+ info.ca = ContentAddress::parseOpt(readString(in));
if (info.narSize == 0)
throw Error("narInfo is too old and missing the narSize field");
@@ -1020,63 +1032,105 @@ static int main_nix_store(int argc, char * * argv)
{
Strings opFlags, opArgs;
Operation op = 0;
- bool readFromStdIn;
+ bool readFromStdIn = false;
+ std::string opName;
+ bool showHelp = false;
parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
Operation oldOp = op;
if (*arg == "--help")
- showManPage("nix-store");
+ showHelp = true;
else if (*arg == "--version")
op = opVersion;
- else if (*arg == "--realise" || *arg == "--realize" || *arg == "-r")
+ else if (*arg == "--realise" || *arg == "--realize" || *arg == "-r") {
op = opRealise;
- else if (*arg == "--add" || *arg == "-A")
+ opName = "-realise";
+ }
+ else if (*arg == "--add" || *arg == "-A"){
op = opAdd;
- else if (*arg == "--add-fixed")
+ opName = "-add";
+ }
+ else if (*arg == "--add-fixed") {
op = opAddFixed;
+ opName = arg->substr(1);
+ }
else if (*arg == "--print-fixed-path")
op = opPrintFixedPath;
- else if (*arg == "--delete")
+ else if (*arg == "--delete") {
op = opDelete;
- else if (*arg == "--query" || *arg == "-q")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--query" || *arg == "-q") {
op = opQuery;
- else if (*arg == "--print-env")
+ opName = "-query";
+ }
+ else if (*arg == "--print-env") {
op = opPrintEnv;
- else if (*arg == "--read-log" || *arg == "-l")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--read-log" || *arg == "-l") {
op = opReadLog;
- else if (*arg == "--dump-db")
+ opName = "-read-log";
+ }
+ else if (*arg == "--dump-db") {
op = opDumpDB;
- else if (*arg == "--load-db")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--load-db") {
op = opLoadDB;
+ opName = arg->substr(1);
+ }
else if (*arg == "--register-validity")
op = opRegisterValidity;
else if (*arg == "--check-validity")
op = opCheckValidity;
- else if (*arg == "--gc")
+ else if (*arg == "--gc") {
op = opGC;
- else if (*arg == "--dump")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--dump") {
op = opDump;
- else if (*arg == "--restore")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--restore") {
op = opRestore;
- else if (*arg == "--export")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--export") {
op = opExport;
- else if (*arg == "--import")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--import") {
op = opImport;
+ opName = arg->substr(1);
+ }
else if (*arg == "--init")
op = opInit;
- else if (*arg == "--verify")
+ else if (*arg == "--verify") {
op = opVerify;
- else if (*arg == "--verify-path")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--verify-path") {
op = opVerifyPath;
- else if (*arg == "--repair-path")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--repair-path") {
op = opRepairPath;
- else if (*arg == "--optimise" || *arg == "--optimize")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--optimise" || *arg == "--optimize") {
op = opOptimise;
- else if (*arg == "--serve")
+ opName = "-optimise";
+ }
+ else if (*arg == "--serve") {
op = opServe;
- else if (*arg == "--generate-binary-cache-key")
+ opName = arg->substr(1);
+ }
+ else if (*arg == "--generate-binary-cache-key") {
op = opGenerateBinaryCacheKey;
+ opName = arg->substr(1);
+ }
else if (*arg == "--add-root")
gcRoot = absPath(getArg(*arg, arg, end));
else if (*arg == "--stdin" && !isatty(STDIN_FILENO))
@@ -1106,6 +1160,7 @@ static int main_nix_store(int argc, char * * argv)
return true;
});
+ if (showHelp) showManPage("nix-store" + opName);
if (!op) throw UsageError("no operation specified");
if (op != opDump && op != opRestore) /* !!! hack */
diff --git a/src/nix/add-to-store.cc b/src/nix/add-to-store.cc
index 5168413d2..16e48a39b 100644
--- a/src/nix/add-to-store.cc
+++ b/src/nix/add-to-store.cc
@@ -42,14 +42,18 @@ struct CmdAddToStore : MixDryRun, StoreCommand
}
ValidPathInfo info {
- store->makeFixedOutputPath(ingestionMethod, hash, *namePart),
+ *store,
+ std::move(*namePart),
+ FixedOutputInfo {
+ .hash = {
+ .method = std::move(ingestionMethod),
+ .hash = std::move(hash),
+ },
+ .references = {},
+ },
narHash,
};
info.narSize = sink.s.size();
- info.ca = std::optional { FixedOutputHash {
- .method = ingestionMethod,
- .hash = hash,
- } };
if (!dryRun) {
auto source = StringSource(sink.s);
diff --git a/src/nix/app.cc b/src/nix/app.cc
index 5cd65136f..e678b54f0 100644
--- a/src/nix/app.cc
+++ b/src/nix/app.cc
@@ -1,11 +1,13 @@
#include "installables.hh"
#include "installable-derived-path.hh"
+#include "installable-value.hh"
#include "store-api.hh"
#include "eval-inline.hh"
#include "eval-cache.hh"
#include "names.hh"
#include "command.hh"
#include "derivations.hh"
+#include "downstream-placeholder.hh"
namespace nix {
@@ -22,7 +24,7 @@ StringPairs resolveRewrites(
if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep.path))
for (auto & [ outputName, outputPath ] : drvDep->outputs)
res.emplace(
- downstreamPlaceholder(store, drvDep->drvPath, outputName),
+ DownstreamPlaceholder::unknownCaOutput(drvDep->drvPath, outputName).render(),
store.printStorePath(outputPath)
);
return res;
@@ -40,7 +42,7 @@ std::string resolveString(
return rewriteStrings(toResolve, rewrites);
}
-UnresolvedApp Installable::toApp(EvalState & state)
+UnresolvedApp InstallableValue::toApp(EvalState & state)
{
auto cursor = getCursor(state);
auto attrPath = cursor->getAttrPath();
@@ -119,11 +121,11 @@ App UnresolvedApp::resolve(ref<Store> evalStore, ref<Store> store)
{
auto res = unresolved;
- std::vector<std::shared_ptr<Installable>> installableContext;
+ Installables installableContext;
for (auto & ctxElt : unresolved.context)
installableContext.push_back(
- std::make_shared<InstallableDerivedPath>(store, DerivedPath { ctxElt }));
+ make_ref<InstallableDerivedPath>(store, DerivedPath { ctxElt }));
auto builtContext = Installable::build(evalStore, store, Realise::Outputs, installableContext);
res.program = resolveString(*store, unresolved.program, builtContext);
diff --git a/src/nix/build.cc b/src/nix/build.cc
index f4f2ec81d..ad1842a4e 100644
--- a/src/nix/build.cc
+++ b/src/nix/build.cc
@@ -1,4 +1,3 @@
-#include "eval.hh"
#include "command.hh"
#include "common-args.hh"
#include "shared.hh"
@@ -28,8 +27,10 @@ nlohmann::json builtPathsWithResultToJSON(const std::vector<BuiltPathWithResult>
std::visit([&](const auto & t) {
auto j = t.toJSON(store);
if (b.result) {
- j["startTime"] = b.result->startTime;
- j["stopTime"] = b.result->stopTime;
+ if (b.result->startTime)
+ j["startTime"] = b.result->startTime;
+ if (b.result->stopTime)
+ j["stopTime"] = b.result->stopTime;
if (b.result->cpuUser)
j["cpuUser"] = ((double) b.result->cpuUser->count()) / 1000000;
if (b.result->cpuSystem)
@@ -41,6 +42,29 @@ nlohmann::json builtPathsWithResultToJSON(const std::vector<BuiltPathWithResult>
return res;
}
+// TODO deduplicate with other code also setting such out links.
+static void createOutLinks(const Path& outLink, const std::vector<BuiltPathWithResult>& buildables, LocalFSStore& store2)
+{
+ for (const auto & [_i, buildable] : enumerate(buildables)) {
+ auto i = _i;
+ std::visit(overloaded {
+ [&](const BuiltPath::Opaque & bo) {
+ std::string symlink = outLink;
+ if (i) symlink += fmt("-%d", i);
+ store2.addPermRoot(bo.path, absPath(symlink));
+ },
+ [&](const BuiltPath::Built & bfd) {
+ for (auto & output : bfd.outputs) {
+ std::string symlink = outLink;
+ if (i) symlink += fmt("-%d", i);
+ if (output.first != "out") symlink += fmt("-%s", output.first);
+ store2.addPermRoot(output.second, absPath(symlink));
+ }
+ },
+ }, buildable.path.raw());
+ }
+}
+
struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
{
Path outLink = "result";
@@ -89,7 +113,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
;
}
- void run(ref<Store> store) override
+ void run(ref<Store> store, Installables && installables) override
{
if (dryRun) {
std::vector<DerivedPath> pathsToBuild;
@@ -109,30 +133,14 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
auto buildables = Installable::build(
getEvalStore(), store,
Realise::Outputs,
- installables, buildMode);
+ installables,
+ repair ? bmRepair : buildMode);
if (json) logger->cout("%s", builtPathsWithResultToJSON(buildables, store).dump());
if (outLink != "")
if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>())
- for (const auto & [_i, buildable] : enumerate(buildables)) {
- auto i = _i;
- std::visit(overloaded {
- [&](const BuiltPath::Opaque & bo) {
- std::string symlink = outLink;
- if (i) symlink += fmt("-%d", i);
- store2->addPermRoot(bo.path, absPath(symlink));
- },
- [&](const BuiltPath::Built & bfd) {
- for (auto & output : bfd.outputs) {
- std::string symlink = outLink;
- if (i) symlink += fmt("-%d", i);
- if (output.first != "out") symlink += fmt("-%s", output.first);
- store2->addPermRoot(output.second, absPath(symlink));
- }
- },
- }, buildable.path.raw());
- }
+ createOutLinks(outLink, buildables, *store2);
if (printOutputPaths) {
stopProgressBar();
diff --git a/src/nix/build.md b/src/nix/build.md
index 6a79f308c..0fbb39cc3 100644
--- a/src/nix/build.md
+++ b/src/nix/build.md
@@ -44,7 +44,7 @@ R""(
`release.nix`:
```console
- # nix build -f release.nix build.x86_64-linux
+ # nix build --file release.nix build.x86_64-linux
```
* Build a NixOS system configuration from a flake, and make a profile
@@ -82,7 +82,7 @@ R""(
# Description
-`nix build` builds the specified *installables*. Installables that
+`nix build` builds the specified *installables*. [Installables](./nix.md#installables) that
resolve to derivations are built (or substituted if possible). Store
path installables are substituted.
diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc
index dcf9a6f2d..bcc00d490 100644
--- a/src/nix/bundle.cc
+++ b/src/nix/bundle.cc
@@ -1,14 +1,15 @@
-#include "command.hh"
#include "installable-flake.hh"
+#include "command-installable-value.hh"
#include "common-args.hh"
#include "shared.hh"
#include "store-api.hh"
#include "local-fs-store.hh"
#include "fs-accessor.hh"
+#include "eval-inline.hh"
using namespace nix;
-struct CmdBundle : InstallableCommand
+struct CmdBundle : InstallableValueCommand
{
std::string bundler = "github:NixOS/bundlers";
std::optional<Path> outLink;
@@ -70,7 +71,7 @@ struct CmdBundle : InstallableCommand
return res;
}
- void run(ref<Store> store) override
+ void run(ref<Store> store, ref<InstallableValue> installable) override
{
auto evalState = getEvalState();
@@ -97,7 +98,7 @@ struct CmdBundle : InstallableCommand
if (!attr1)
throw Error("the bundler '%s' does not produce a derivation", bundler.what());
- PathSet context2;
+ NixStringContext context2;
auto drvPath = evalState->coerceToStorePath(attr1->pos, *attr1->value, context2, "");
auto attr2 = vRes->attrs->get(evalState->sOutPath);
diff --git a/src/nix/bundle.md b/src/nix/bundle.md
index a18161a3c..89458aaaa 100644
--- a/src/nix/bundle.md
+++ b/src/nix/bundle.md
@@ -29,7 +29,7 @@ R""(
# Description
-`nix bundle`, by default, packs the closure of the *installable* into a single
+`nix bundle`, by default, packs the closure of the [*installable*](./nix.md#installables) into a single
self-extracting executable. See the [`bundlers`
homepage](https://github.com/NixOS/bundlers) for more details.
diff --git a/src/nix/copy.cc b/src/nix/copy.cc
index 8730a9a5c..151d28277 100644
--- a/src/nix/copy.cc
+++ b/src/nix/copy.cc
@@ -10,8 +10,6 @@ struct CmdCopy : virtual CopyCommand, virtual BuiltPathsCommand
SubstituteFlag substitute = NoSubstitute;
- using BuiltPathsCommand::run;
-
CmdCopy()
: BuiltPathsCommand(true)
{
diff --git a/src/nix/copy.md b/src/nix/copy.md
index 25e0ddadc..199006436 100644
--- a/src/nix/copy.md
+++ b/src/nix/copy.md
@@ -15,7 +15,7 @@ R""(
SSH:
```console
- # nix copy -s --to ssh://server /run/current-system
+ # nix copy --substitute-on-destination --to ssh://server /run/current-system
```
The `-s` flag causes the remote machine to try to substitute missing
diff --git a/src/nix/daemon.cc b/src/nix/daemon.cc
index a22bccba1..c1a91c63d 100644
--- a/src/nix/daemon.cc
+++ b/src/nix/daemon.cc
@@ -1,3 +1,5 @@
+///@file
+
#include "command.hh"
#include "shared.hh"
#include "local-store.hh"
@@ -34,6 +36,19 @@
using namespace nix;
using namespace nix::daemon;
+/**
+ * Settings related to authenticating clients for the Nix daemon.
+ *
+ * For pipes we have little good information about the client side, but
+ * for Unix domain sockets we do. So currently these options implement
+ * mandatory access control based on user names and group names (looked
+ * up and translated to UID/GIDs in the CLI process that runs the code
+ * in this file).
+ *
+ * No code outside of this file knows about these settings (this is not
+ * exposed in a header); all authentication and authorization happens in
+ * `daemon.cc`.
+ */
struct AuthorizationSettings : Config {
Setting<Strings> trustedUsers{
@@ -54,7 +69,9 @@ struct AuthorizationSettings : Config {
           > directories that are otherwise inaccessible to them.
)"};
- /* ?Who we trust to use the daemon in safe ways */
+ /**
+ * Who we trust to use the daemon in safe ways
+ */
Setting<Strings> allowedUsers{
this, {"*"}, "allowed-users",
R"(
@@ -112,8 +129,36 @@ static void setSigChldAction(bool autoReap)
throw SysError("setting SIGCHLD handler");
}
+/**
+ * @return Is the given user a member of this group?
+ *
+ * @param user User specified by username.
+ *
+ * @param group Group the user might be a member of.
+ */
+static bool matchUser(std::string_view user, const struct group & gr)
+{
+ for (char * * mem = gr.gr_mem; *mem; mem++)
+ if (user == std::string_view(*mem)) return true;
+ return false;
+}
+
-bool matchUser(const std::string & user, const std::string & group, const Strings & users)
+/**
+ * Does the given user (specified by user name and primary group name)
+ * match the given user/group whitelist?
+ *
+ * If the list allows all users: Yes.
+ *
+ * If the username is in the set: Yes.
+ *
+ * If the groupname is in the set: Yes.
+ *
+ * If the user is in another group which is in the set: Yes.
+ *
+ * Otherwise: No.
+ */
+static bool matchUser(const std::string & user, const std::string & group, const Strings & users)
{
if (find(users.begin(), users.end(), "*") != users.end())
return true;
@@ -126,8 +171,7 @@ bool matchUser(const std::string & user, const std::string & group, const String
if (group == i.substr(1)) return true;
struct group * gr = getgrnam(i.c_str() + 1);
if (!gr) continue;
- for (char * * mem = gr->gr_mem; *mem; mem++)
- if (user == std::string(*mem)) return true;
+ if (matchUser(user, *gr)) return true;
}
return false;
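
To make the whitelist rule in the comment above concrete, here is a standalone sketch of the same matching logic (simplified and illustrative only: the `@group` entry is checked against the primary group here, whereas the daemon also consults `getgrnam` for secondary group membership):

```cpp
#include <algorithm>
#include <cassert>
#include <list>
#include <string>

using Strings = std::list<std::string>;

// Simplified sketch: "*" allows everyone, a plain entry matches the user
// name, and "@name" matches the (primary) group name.
static bool matchUserSketch(
    const std::string & user, const std::string & group, const Strings & users)
{
    if (std::find(users.begin(), users.end(), "*") != users.end()) return true;
    if (std::find(users.begin(), users.end(), user) != users.end()) return true;
    for (auto & i : users)
        if (!i.empty() && i[0] == '@' && group == i.substr(1)) return true;
    return false;
}

int main()
{
    assert(matchUserSketch("alice", "users", {"alice"}));   // user name listed
    assert(matchUserSketch("bob", "wheel", {"@wheel"}));     // group listed
    assert(matchUserSketch("carol", "users", {"*"}));        // wildcard
    assert(!matchUserSketch("dave", "users", {"@wheel"}));   // no match
}
```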
@@ -145,7 +189,9 @@ struct PeerInfo
};
-// Get the identity of the caller, if possible.
+/**
+ * Get the identity of the caller, if possible.
+ */
static PeerInfo getPeerInfo(int remote)
{
PeerInfo peer = { false, 0, false, 0, false, 0 };
@@ -179,6 +225,9 @@ static PeerInfo getPeerInfo(int remote)
#define SD_LISTEN_FDS_START 3
+/**
+ * Open a store without a path info cache.
+ */
static ref<Store> openUncachedStore()
{
Store::Params params; // FIXME: get params from somewhere
@@ -187,8 +236,49 @@ static ref<Store> openUncachedStore()
return openStore(settings.storeUri, params);
}
+/**
+ * Authenticate a potential client
+ *
+ * @param peer Information about the other end of the connection, the client which
+ * wants to communicate with us.
+ *
+ * @return A pair of a `TrustedFlag`, whether the potential client is trusted,
+ * and the name of the user (useful for printing messages).
+ *
+ * If the potential client is not allowed to talk to us, we throw an `Error`.
+ */
+static std::pair<TrustedFlag, std::string> authPeer(const PeerInfo & peer)
+{
+ TrustedFlag trusted = NotTrusted;
+
+ struct passwd * pw = peer.uidKnown ? getpwuid(peer.uid) : 0;
+ std::string user = pw ? pw->pw_name : std::to_string(peer.uid);
+
+ struct group * gr = peer.gidKnown ? getgrgid(peer.gid) : 0;
+ std::string group = gr ? gr->gr_name : std::to_string(peer.gid);
-static void daemonLoop()
+ const Strings & trustedUsers = authorizationSettings.trustedUsers;
+ const Strings & allowedUsers = authorizationSettings.allowedUsers;
+
+ if (matchUser(user, group, trustedUsers))
+ trusted = Trusted;
+
+ if ((!trusted && !matchUser(user, group, allowedUsers)) || group == settings.buildUsersGroup)
+ throw Error("user '%1%' is not allowed to connect to the Nix daemon", user);
+
+ return { trusted, std::move(user) };
+}
+
+
+/**
+ * Run a server. The loop opens a socket and accepts new connections from that
+ * socket.
+ *
+ * @param forceTrustClientOpt If present, force trusting or not trusting
+ * the client. Otherwise, decide based on the authentication settings
+ * and user credentials (from the Unix domain socket).
+ */
+static void daemonLoop(std::optional<TrustedFlag> forceTrustClientOpt)
{
if (chdir("/") == -1)
throw SysError("cannot change current directory");
@@ -231,27 +321,22 @@ static void daemonLoop()
closeOnExec(remote.get());
- TrustedFlag trusted = NotTrusted;
- PeerInfo peer = getPeerInfo(remote.get());
-
- struct passwd * pw = peer.uidKnown ? getpwuid(peer.uid) : 0;
- std::string user = pw ? pw->pw_name : std::to_string(peer.uid);
+ PeerInfo peer { .pidKnown = false };
+ TrustedFlag trusted;
+ std::string user;
- struct group * gr = peer.gidKnown ? getgrgid(peer.gid) : 0;
- std::string group = gr ? gr->gr_name : std::to_string(peer.gid);
+ if (forceTrustClientOpt)
+ trusted = *forceTrustClientOpt;
+ else {
+ peer = getPeerInfo(remote.get());
+ auto [_trusted, _user] = authPeer(peer);
+ trusted = _trusted;
+ user = _user;
+ };
- Strings trustedUsers = authorizationSettings.trustedUsers;
- Strings allowedUsers = authorizationSettings.allowedUsers;
-
- if (matchUser(user, group, trustedUsers))
- trusted = Trusted;
-
- if ((!trusted && !matchUser(user, group, allowedUsers)) || group == settings.buildUsersGroup)
- throw Error("user '%1%' is not allowed to connect to the Nix daemon", user);
-
- printInfo(format((std::string) "accepted connection from pid %1%, user %2%" + (trusted ? " (trusted)" : ""))
- % (peer.pidKnown ? std::to_string(peer.pid) : "<unknown>")
- % (peer.uidKnown ? user : "<unknown>"));
+ printInfo((std::string) "accepted connection from pid %1%, user %2%" + (trusted ? " (trusted)" : ""),
+ peer.pidKnown ? std::to_string(peer.pid) : "<unknown>",
+ peer.uidKnown ? user : "<unknown>");
// Fork a child to handle the connection.
ProcessOptions options;
@@ -294,53 +379,91 @@ static void daemonLoop()
}
}
-static void runDaemon(bool stdio)
+/**
+ * Forward a standard IO connection to the given remote store.
+ *
+ * We just act as a middleman, blindly ferrying data between the standard
+ * input/output and the remote store connection, not processing anything.
+ *
+ * Loops until standard input disconnects, or an error is encountered.
+ */
+static void forwardStdioConnection(RemoteStore & store) {
+ auto conn = store.openConnectionWrapper();
+ int from = conn->from.fd;
+ int to = conn->to.fd;
+
+ auto nfds = std::max(from, STDIN_FILENO) + 1;
+ while (true) {
+ fd_set fds;
+ FD_ZERO(&fds);
+ FD_SET(from, &fds);
+ FD_SET(STDIN_FILENO, &fds);
+ if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1)
+ throw SysError("waiting for data from client or server");
+ if (FD_ISSET(from, &fds)) {
+ auto res = splice(from, nullptr, STDOUT_FILENO, nullptr, SSIZE_MAX, SPLICE_F_MOVE);
+ if (res == -1)
+ throw SysError("splicing data from daemon socket to stdout");
+ else if (res == 0)
+ throw EndOfFile("unexpected EOF from daemon socket");
+ }
+ if (FD_ISSET(STDIN_FILENO, &fds)) {
+ auto res = splice(STDIN_FILENO, nullptr, to, nullptr, SSIZE_MAX, SPLICE_F_MOVE);
+ if (res == -1)
+ throw SysError("splicing data from stdin to daemon socket");
+ else if (res == 0)
+ return;
+ }
+ }
+}
+
+/**
+ * Process a client connecting to us via standard input/output
+ *
+ * Unlike `forwardStdioConnection()`, we do process commands ourselves in
+ * this case, not delegating to another daemon.
+ *
+ * @param trustClient Whether to trust the client. Forwarded directly to
+ * `processConnection()`.
+ */
+static void processStdioConnection(ref<Store> store, TrustedFlag trustClient)
+{
+ FdSource from(STDIN_FILENO);
+ FdSink to(STDOUT_FILENO);
+ processConnection(store, from, to, trustClient, NotRecursive);
+}
+
+/**
+ * Entry point shared between the new CLI `nix daemon` and old CLI
+ * `nix-daemon`.
+ *
+ * @param forceTrustClientOpt See `daemonLoop()` and the parameter with
+ * the same name over there for details.
+ */
+static void runDaemon(bool stdio, std::optional<TrustedFlag> forceTrustClientOpt)
{
if (stdio) {
- if (auto store = openUncachedStore().dynamic_pointer_cast<RemoteStore>()) {
- auto conn = store->openConnectionWrapper();
- int from = conn->from.fd;
- int to = conn->to.fd;
-
- auto nfds = std::max(from, STDIN_FILENO) + 1;
- while (true) {
- fd_set fds;
- FD_ZERO(&fds);
- FD_SET(from, &fds);
- FD_SET(STDIN_FILENO, &fds);
- if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1)
- throw SysError("waiting for data from client or server");
- if (FD_ISSET(from, &fds)) {
- auto res = splice(from, nullptr, STDOUT_FILENO, nullptr, SSIZE_MAX, SPLICE_F_MOVE);
- if (res == -1)
- throw SysError("splicing data from daemon socket to stdout");
- else if (res == 0)
- throw EndOfFile("unexpected EOF from daemon socket");
- }
- if (FD_ISSET(STDIN_FILENO, &fds)) {
- auto res = splice(STDIN_FILENO, nullptr, to, nullptr, SSIZE_MAX, SPLICE_F_MOVE);
- if (res == -1)
- throw SysError("splicing data from stdin to daemon socket");
- else if (res == 0)
- return;
- }
- }
- } else {
- FdSource from(STDIN_FILENO);
- FdSink to(STDOUT_FILENO);
- /* Auth hook is empty because in this mode we blindly trust the
- standard streams. Limiting access to those is explicitly
- not `nix-daemon`'s responsibility. */
- processConnection(openUncachedStore(), from, to, Trusted, NotRecursive);
- }
+ auto store = openUncachedStore();
+
+ // If --force-untrusted is passed, we cannot forward the connection and
+ // must process it ourselves (before delegating to the next store) to
+ // force untrusting the client.
+ if (auto remoteStore = store.dynamic_pointer_cast<RemoteStore>(); remoteStore && (!forceTrustClientOpt || *forceTrustClientOpt != NotTrusted))
+ forwardStdioConnection(*remoteStore);
+ else
+            // `Trusted` is passed in the auto (no override) case because we
+ // cannot see who is on the other side of a plain pipe. Limiting
+ // access to those is explicitly not `nix-daemon`'s responsibility.
+ processStdioConnection(store, forceTrustClientOpt.value_or(Trusted));
} else
- daemonLoop();
+ daemonLoop(forceTrustClientOpt);
}
static int main_nix_daemon(int argc, char * * argv)
{
{
auto stdio = false;
+ std::optional<TrustedFlag> isTrustedOpt = std::nullopt;
parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
if (*arg == "--daemon")
@@ -351,11 +474,20 @@ static int main_nix_daemon(int argc, char * * argv)
printVersion("nix-daemon");
else if (*arg == "--stdio")
stdio = true;
- else return false;
+ else if (*arg == "--force-trusted") {
+ experimentalFeatureSettings.require(Xp::DaemonTrustOverride);
+ isTrustedOpt = Trusted;
+ } else if (*arg == "--force-untrusted") {
+ experimentalFeatureSettings.require(Xp::DaemonTrustOverride);
+ isTrustedOpt = NotTrusted;
+ } else if (*arg == "--default-trust") {
+ experimentalFeatureSettings.require(Xp::DaemonTrustOverride);
+ isTrustedOpt = std::nullopt;
+ } else return false;
return true;
});
- runDaemon(stdio);
+ runDaemon(stdio, isTrustedOpt);
return 0;
}
@@ -381,7 +513,7 @@ struct CmdDaemon : StoreCommand
void run(ref<Store> store) override
{
- runDaemon(false);
+ runDaemon(false, std::nullopt);
}
};
diff --git a/src/nix/derivation-add.cc b/src/nix/derivation-add.cc
new file mode 100644
index 000000000..4d91d4538
--- /dev/null
+++ b/src/nix/derivation-add.cc
@@ -0,0 +1,45 @@
+// FIXME: rename to 'nix plan add' or 'nix derivation add'?
+
+#include "command.hh"
+#include "common-args.hh"
+#include "store-api.hh"
+#include "archive.hh"
+#include "derivations.hh"
+#include <nlohmann/json.hpp>
+
+using namespace nix;
+using json = nlohmann::json;
+
+struct CmdAddDerivation : MixDryRun, StoreCommand
+{
+ std::string description() override
+ {
+ return "Add a store derivation";
+ }
+
+ std::string doc() override
+ {
+ return
+ #include "derivation-add.md"
+ ;
+ }
+
+ Category category() override { return catUtility; }
+
+ void run(ref<Store> store) override
+ {
+ auto json = nlohmann::json::parse(drainFD(STDIN_FILENO));
+
+ auto drv = Derivation::fromJSON(*store, json);
+
+ auto drvPath = writeDerivation(*store, drv, NoRepair, /* read only */ dryRun);
+
+ drv.checkInvariants(*store, drvPath);
+
+ writeDerivation(*store, drv, NoRepair, dryRun);
+
+ logger->cout("%s", store->printStorePath(drvPath));
+ }
+};
+
+static auto rCmdAddDerivation = registerCommand2<CmdAddDerivation>({"derivation", "add"});
diff --git a/src/nix/derivation-add.md b/src/nix/derivation-add.md
new file mode 100644
index 000000000..f116681ab
--- /dev/null
+++ b/src/nix/derivation-add.md
@@ -0,0 +1,18 @@
+R""(
+
+# Description
+
+This command reads from standard input a JSON representation of a
+[store derivation] to which an [*installable*](./nix.md#installables) evaluates.
+
+Store derivations are used internally by Nix. They are store paths with
+extension `.drv` that represent the build-time dependency graph to which
+a Nix expression evaluates.
+
+[store derivation]: ../../glossary.md#gloss-store-derivation
+
+The JSON format is documented under the [`derivation show`] command.
+
+[`derivation show`]: ./nix3-derivation-show.md
+
+)""
diff --git a/src/nix/show-derivation.cc b/src/nix/derivation-show.cc
index 520e8b1ce..bf637246d 100644
--- a/src/nix/show-derivation.cc
+++ b/src/nix/derivation-show.cc
@@ -1,5 +1,5 @@
// FIXME: integrate this with nix path-info?
-// FIXME: rename to 'nix store show-derivation' or 'nix debug show-derivation'?
+// FIXME: rename to 'nix store derivation show' or 'nix debug derivation show'?
#include "command.hh"
#include "common-args.hh"
@@ -33,13 +33,13 @@ struct CmdShowDerivation : InstallablesCommand
std::string doc() override
{
return
- #include "show-derivation.md"
+ #include "derivation-show.md"
;
}
Category category() override { return catUtility; }
- void run(ref<Store> store) override
+ void run(ref<Store> store, Installables && installables) override
{
auto drvPaths = Installable::toDerivations(store, installables, true);
@@ -61,4 +61,4 @@ struct CmdShowDerivation : InstallablesCommand
}
};
-static auto rCmdShowDerivation = registerCommand<CmdShowDerivation>("show-derivation");
+static auto rCmdShowDerivation = registerCommand2<CmdShowDerivation>({"derivation", "show"});
diff --git a/src/nix/show-derivation.md b/src/nix/derivation-show.md
index 2cd93aa62..1296e2885 100644
--- a/src/nix/show-derivation.md
+++ b/src/nix/derivation-show.md
@@ -8,7 +8,7 @@ R""(
[store derivation]: ../../glossary.md#gloss-store-derivation
```console
- # nix show-derivation nixpkgs#hello
+ # nix derivation show nixpkgs#hello
{
"/nix/store/s6rn4jz1sin56rf4qj5b5v8jxjm32hlk-hello-2.10.drv": {
@@ -20,14 +20,14 @@ R""(
NixOS system:
```console
- # nix show-derivation -r /run/current-system
+ # nix derivation show -r /run/current-system
```
* Print all files fetched using `fetchurl` by Firefox's dependency
graph:
```console
- # nix show-derivation -r nixpkgs#firefox \
+ # nix derivation show -r nixpkgs#firefox \
| jq -r '.[] | select(.outputs.out.hash and .env.urls) | .env.urls' \
| uniq | sort
```
@@ -39,10 +39,11 @@ R""(
# Description
This command prints on standard output a JSON representation of the
-[store derivation]s to which *installables* evaluate. Store derivations
-are used internally by Nix. They are store paths with extension `.drv`
-that represent the build-time dependency graph to which a Nix
-expression evaluates.
+[store derivation]s to which [*installables*](./nix.md#installables) evaluate.
+
+Store derivations are used internally by Nix. They are store paths with
+extension `.drv` that represent the build-time dependency graph to which
+a Nix expression evaluates.
By default, this command only shows top-level derivations, but with
`--recursive`, it also shows their dependencies.
@@ -51,6 +52,9 @@ The JSON output is a JSON object whose keys are the store paths of the
derivations, and whose values are a JSON object with the following
fields:
+* `name`: The name of the derivation. This is used when calculating the
+ store paths of the derivation's outputs.
+
* `outputs`: Information about the output paths of the
derivation. This is a JSON object with one member per output, where
the key is the output name and the value is a JSON object with these
diff --git a/src/nix/derivation.cc b/src/nix/derivation.cc
new file mode 100644
index 000000000..cd3975a4f
--- /dev/null
+++ b/src/nix/derivation.cc
@@ -0,0 +1,25 @@
+#include "command.hh"
+
+using namespace nix;
+
+struct CmdDerivation : virtual NixMultiCommand
+{
+ CmdDerivation() : MultiCommand(RegisterCommand::getCommandsFor({"derivation"}))
+ { }
+
+ std::string description() override
+ {
+ return "Work with derivations, Nix's notion of a build plan.";
+ }
+
+ Category category() override { return catUtility; }
+
+ void run() override
+ {
+ if (!command)
+ throw UsageError("'nix derivation' requires a sub-command.");
+ command->second->run();
+ }
+};
+
+static auto rCmdDerivation = registerCommand<CmdDerivation>("derivation");
diff --git a/src/nix/describe-stores.cc b/src/nix/describe-stores.cc
deleted file mode 100644
index eafcedd1f..000000000
--- a/src/nix/describe-stores.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-#include "command.hh"
-#include "common-args.hh"
-#include "shared.hh"
-#include "store-api.hh"
-
-#include <nlohmann/json.hpp>
-
-using namespace nix;
-
-struct CmdDescribeStores : Command, MixJSON
-{
- std::string description() override
- {
- return "show registered store types and their available options";
- }
-
- Category category() override { return catUtility; }
-
- void run() override
- {
- auto res = nlohmann::json::object();
- for (auto & implem : *Implementations::registered) {
- auto storeConfig = implem.getConfig();
- auto storeName = storeConfig->name();
- res[storeName] = storeConfig->toJSON();
- }
- if (json) {
- logger->cout("%s", res);
- } else {
- for (auto & [storeName, storeConfig] : res.items()) {
- std::cout << "## " << storeName << std::endl << std::endl;
- for (auto & [optionName, optionDesc] : storeConfig.items()) {
- std::cout << "### " << optionName << std::endl << std::endl;
- std::cout << optionDesc["description"].get<std::string>() << std::endl;
- std::cout << "default: " << optionDesc["defaultValue"] << std::endl <<std::endl;
- if (!optionDesc["aliases"].empty())
- std::cout << "aliases: " << optionDesc["aliases"] << std::endl << std::endl;
- }
- }
- }
- }
-};
-
-static auto rDescribeStore = registerCommand<CmdDescribeStores>("describe-stores");
diff --git a/src/nix/develop.cc b/src/nix/develop.cc
index 9d07a7a85..195eeaa21 100644
--- a/src/nix/develop.cc
+++ b/src/nix/develop.cc
@@ -1,6 +1,6 @@
#include "eval.hh"
-#include "command.hh"
#include "installable-flake.hh"
+#include "command-installable-value.hh"
#include "common-args.hh"
#include "shared.hh"
#include "store-api.hh"
@@ -208,7 +208,7 @@ static StorePath getDerivationEnvironment(ref<Store> store, ref<Store> evalStore
drv.name += "-env";
drv.env.emplace("name", drv.name);
drv.inputSrcs.insert(std::move(getEnvShPath));
- if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
+ if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
for (auto & output : drv.outputs) {
output.second = DerivationOutput::Deferred {},
drv.env[output.first] = hashPlaceholder(output.first);
@@ -313,7 +313,7 @@ struct Common : InstallableCommand, MixProfile
buildEnvironment.toBash(out, ignoreVars);
for (auto & var : savedVars)
- out << fmt("%s=\"$%s:$nix_saved_%s\"\n", var, var, var);
+ out << fmt("%s=\"$%s${nix_saved_%s:+:$nix_saved_%s}\"\n", var, var, var, var);
out << "export NIX_BUILD_TOP=\"$(mktemp -d -t nix-shell.XXXXXX)\"\n";
for (auto & i : {"TMP", "TMPDIR", "TEMP", "TEMPDIR"})
@@ -374,7 +374,7 @@ struct Common : InstallableCommand, MixProfile
return res;
}
- StorePath getShellOutPath(ref<Store> store)
+ StorePath getShellOutPath(ref<Store> store, ref<Installable> installable)
{
auto path = installable->getStorePath();
if (path && hasSuffix(path->to_string(), "-env"))
@@ -392,9 +392,10 @@ struct Common : InstallableCommand, MixProfile
}
}
- std::pair<BuildEnvironment, std::string> getBuildEnvironment(ref<Store> store)
+ std::pair<BuildEnvironment, std::string>
+ getBuildEnvironment(ref<Store> store, ref<Installable> installable)
{
- auto shellOutPath = getShellOutPath(store);
+ auto shellOutPath = getShellOutPath(store, installable);
auto strPath = store->printStorePath(shellOutPath);
@@ -480,9 +481,9 @@ struct CmdDevelop : Common, MixEnvironment
;
}
- void run(ref<Store> store) override
+ void run(ref<Store> store, ref<Installable> installable) override
{
- auto [buildEnvironment, gcroot] = getBuildEnvironment(store);
+ auto [buildEnvironment, gcroot] = getBuildEnvironment(store, installable);
auto [rcFileFd, rcFilePath] = createTempFile("nix-shell");
@@ -537,10 +538,14 @@ struct CmdDevelop : Common, MixEnvironment
nixpkgsLockFlags.inputOverrides = {};
nixpkgsLockFlags.inputUpdates = {};
- auto bashInstallable = std::make_shared<InstallableFlake>(
+ auto nixpkgs = defaultNixpkgsFlakeRef();
+ if (auto * i = dynamic_cast<const InstallableFlake *>(&*installable))
+ nixpkgs = i->nixpkgsFlakeRef();
+
+ auto bashInstallable = make_ref<InstallableFlake>(
this,
state,
- installable->nixpkgsFlakeRef(),
+ std::move(nixpkgs),
"bashInteractive",
DefaultOutputs(),
Strings{},
@@ -573,7 +578,7 @@ struct CmdDevelop : Common, MixEnvironment
// Need to chdir since phases assume in flake directory
if (phase) {
// chdir if installable is a flake of type git+file or path
- auto installableFlake = std::dynamic_pointer_cast<InstallableFlake>(installable);
+ auto installableFlake = installable.dynamic_pointer_cast<InstallableFlake>();
if (installableFlake) {
auto sourcePath = installableFlake->getLockedFlake()->flake.resolvedRef.input.getSourcePath();
if (sourcePath) {
@@ -604,9 +609,9 @@ struct CmdPrintDevEnv : Common, MixJSON
Category category() override { return catUtility; }
- void run(ref<Store> store) override
+ void run(ref<Store> store, ref<Installable> installable) override
{
- auto buildEnvironment = getBuildEnvironment(store).first;
+ auto buildEnvironment = getBuildEnvironment(store, installable).first;
stopProgressBar();
diff --git a/src/nix/develop.md b/src/nix/develop.md
index 4e8542d1b..1b5a8aeba 100644
--- a/src/nix/develop.md
+++ b/src/nix/develop.md
@@ -69,14 +69,14 @@ R""(
* Run a series of script commands:
```console
- # nix develop --command bash -c "mkdir build && cmake .. && make"
+ # nix develop --command bash --command "mkdir build && cmake .. && make"
```
# Description
`nix develop` starts a `bash` shell that provides an interactive build
environment nearly identical to what Nix would use to build
-*installable*. Inside this shell, environment variables and shell
+[*installable*](./nix.md#installables). Inside this shell, environment variables and shell
functions are set up so that you can interactively and incrementally
build your package.
diff --git a/src/nix/doctor.cc b/src/nix/doctor.cc
index ea87e3d87..1aa6831d3 100644
--- a/src/nix/doctor.cc
+++ b/src/nix/doctor.cc
@@ -18,7 +18,7 @@ std::string formatProtocol(unsigned int proto)
if (proto) {
auto major = GET_PROTOCOL_MAJOR(proto) >> 8;
auto minor = GET_PROTOCOL_MINOR(proto);
- return (format("%1%.%2%") % major % minor).str();
+ return fmt("%1%.%2%", major, minor);
}
return "unknown";
}
@@ -33,12 +33,24 @@ bool checkFail(const std::string & msg) {
return false;
}
+void checkInfo(const std::string & msg) {
+ notice(ANSI_BLUE "[INFO] " ANSI_NORMAL + msg);
+}
+
}
struct CmdDoctor : StoreCommand
{
bool success = true;
+ /**
+ * This command is stable even though most other `nix` commands are still experimental.
+ */
+ std::optional<ExperimentalFeature> experimentalFeature() override
+ {
+ return std::nullopt;
+ }
+
std::string description() override
{
return "check your system for potential problems and print a PASS or FAIL for each check";
@@ -55,6 +67,7 @@ struct CmdDoctor : StoreCommand
success &= checkProfileRoots(store);
}
success &= checkStoreProtocol(store->getProtocol());
+ checkTrustedUser(store);
if (!success)
throw Exit(2);
@@ -130,6 +143,14 @@ struct CmdDoctor : StoreCommand
return checkPass("Client protocol matches store protocol.");
}
+
+ void checkTrustedUser(ref<Store> store)
+ {
+ std::string_view trusted = store->isTrustedClient()
+ ? "trusted"
+ : "not trusted";
+ checkInfo(fmt("You are %s by store uri: %s", trusted, store->getUri()));
+ }
};
static auto rCmdDoctor = registerCommand<CmdDoctor>("doctor");
diff --git a/src/nix/edit.cc b/src/nix/edit.cc
index dfe75fbdf..66629fab0 100644
--- a/src/nix/edit.cc
+++ b/src/nix/edit.cc
@@ -1,4 +1,4 @@
-#include "command.hh"
+#include "command-installable-value.hh"
#include "shared.hh"
#include "eval.hh"
#include "attr-path.hh"
@@ -9,7 +9,7 @@
using namespace nix;
-struct CmdEdit : InstallableCommand
+struct CmdEdit : InstallableValueCommand
{
std::string description() override
{
@@ -25,7 +25,7 @@ struct CmdEdit : InstallableCommand
Category category() override { return catSecondary; }
- void run(ref<Store> store) override
+ void run(ref<Store> store, ref<InstallableValue> installable) override
{
auto state = getEvalState();
diff --git a/src/nix/eval.cc b/src/nix/eval.cc
index 209fd3ed2..d880bef0a 100644
--- a/src/nix/eval.cc
+++ b/src/nix/eval.cc
@@ -1,4 +1,4 @@
-#include "command.hh"
+#include "command-installable-value.hh"
#include "common-args.hh"
#include "shared.hh"
#include "store-api.hh"
@@ -11,13 +11,13 @@
using namespace nix;
-struct CmdEval : MixJSON, InstallableCommand, MixReadOnlyOption
+struct CmdEval : MixJSON, InstallableValueCommand, MixReadOnlyOption
{
bool raw = false;
std::optional<std::string> apply;
std::optional<Path> writeTo;
- CmdEval() : InstallableCommand()
+ CmdEval() : InstallableValueCommand()
{
addFlag({
.longName = "raw",
@@ -54,7 +54,7 @@ struct CmdEval : MixJSON, InstallableCommand, MixReadOnlyOption
Category category() override { return catSecondary; }
- void run(ref<Store> store) override
+ void run(ref<Store> store, ref<InstallableValue> installable) override
{
if (raw && json)
throw UsageError("--raw and --json are mutually exclusive");
@@ -62,11 +62,11 @@ struct CmdEval : MixJSON, InstallableCommand, MixReadOnlyOption
auto state = getEvalState();
auto [v, pos] = installable->toValue(*state);
- PathSet context;
+ NixStringContext context;
if (apply) {
auto vApply = state->allocValue();
- state->eval(state->parseExprFromString(*apply, absPath(".")), *vApply);
+ state->eval(state->parseExprFromString(*apply, state->rootPath(CanonPath::fromCwd())), *vApply);
auto vRes = state->allocValue();
state->callFunction(*vApply, *v, *vRes, noPos);
v = vRes;
diff --git a/src/nix/eval.md b/src/nix/eval.md
index 61334cde1..48d5aa597 100644
--- a/src/nix/eval.md
+++ b/src/nix/eval.md
@@ -18,7 +18,7 @@ R""(
* Evaluate a Nix expression from a file:
```console
- # nix eval -f ./my-nixpkgs hello.name
+ # nix eval --file ./my-nixpkgs hello.name
```
* Get the current version of the `nixpkgs` flake:
@@ -50,7 +50,7 @@ R""(
# Description
-This command evaluates the Nix expression *installable* and prints the
+This command evaluates the given Nix expression and prints the
result on standard output.
# Output format
diff --git a/src/nix/flake-check.md b/src/nix/flake-check.md
index 07031c909..c8307f8d8 100644
--- a/src/nix/flake-check.md
+++ b/src/nix/flake-check.md
@@ -68,6 +68,6 @@ The following flake output attributes must be
In addition, the `hydraJobs` output is evaluated in the same way as
Hydra's `hydra-eval-jobs` (i.e. as an arbitrarily deeply nested
attribute set of derivations). Similarly, the
-`legacyPackages`.*system* output is evaluated like `nix-env -qa`.
+`legacyPackages`.*system* output is evaluated like `nix-env --query --available`.
)""
diff --git a/src/nix/flake.cc b/src/nix/flake.cc
index 3fe093fc7..1eea52e15 100644
--- a/src/nix/flake.cc
+++ b/src/nix/flake.cc
@@ -259,6 +259,7 @@ struct CmdFlakeInfo : CmdFlakeMetadata
struct CmdFlakeCheck : FlakeCommand
{
bool build = true;
+ bool checkAllSystems = false;
CmdFlakeCheck()
{
@@ -267,6 +268,11 @@ struct CmdFlakeCheck : FlakeCommand
.description = "Do not build checks.",
.handler = {&build, false}
});
+ addFlag({
+ .longName = "all-systems",
+ .description = "Check the outputs for all systems.",
+ .handler = {&checkAllSystems, true}
+ });
}
std::string description() override
@@ -292,6 +298,7 @@ struct CmdFlakeCheck : FlakeCommand
lockFlags.applyNixConfig = true;
auto flake = lockFlake();
+ auto localSystem = std::string(settings.thisSystem.get());
bool hasErrors = false;
auto reportError = [&](const Error & e) {
@@ -307,6 +314,8 @@ struct CmdFlakeCheck : FlakeCommand
}
};
+ std::set<std::string> omittedSystems;
+
// FIXME: rewrite to use EvalCache.
auto resolve = [&] (PosIdx p) {
@@ -327,6 +336,15 @@ struct CmdFlakeCheck : FlakeCommand
reportError(Error("'%s' is not a valid system type, at %s", system, resolve(pos)));
};
+ auto checkSystemType = [&](const std::string & system, const PosIdx pos) {
+ if (!checkAllSystems && system != localSystem) {
+ omittedSystems.insert(system);
+ return false;
+ } else {
+ return true;
+ }
+ };
+
auto checkDerivation = [&](const std::string & attrPath, Value & v, const PosIdx pos) -> std::optional<StorePath> {
try {
auto drvInfo = getDerivation(*state, v, false);
@@ -438,10 +456,10 @@ struct CmdFlakeCheck : FlakeCommand
if (auto attr = v.attrs->get(state->symbols.create("path"))) {
if (attr->name == state->symbols.create("path")) {
- PathSet context;
+ NixStringContext context;
auto path = state->coerceToPath(attr->pos, *attr->value, context, "");
- if (!store->isInStore(path))
- throw Error("template '%s' has a bad 'path' attribute");
+ if (!path.pathExists())
+ throw Error("template '%s' refers to a non-existent path '%s'", attrPath, path);
// TODO: recursively check the flake in 'path'.
}
} else
@@ -509,16 +527,18 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- state->forceAttrs(*attr.value, attr.pos, "");
- for (auto & attr2 : *attr.value->attrs) {
- auto drvPath = checkDerivation(
- fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
- *attr2.value, attr2.pos);
- if (drvPath && attr_name == settings.thisSystem.get()) {
- drvPaths.push_back(DerivedPath::Built {
- .drvPath = *drvPath,
- .outputs = OutputsSpec::All { },
- });
+ if (checkSystemType(attr_name, attr.pos)) {
+ state->forceAttrs(*attr.value, attr.pos, "");
+ for (auto & attr2 : *attr.value->attrs) {
+ auto drvPath = checkDerivation(
+ fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
+ *attr2.value, attr2.pos);
+ if (drvPath && attr_name == settings.thisSystem.get()) {
+ drvPaths.push_back(DerivedPath::Built {
+ .drvPath = *drvPath,
+ .outputs = OutputsSpec::All { },
+ });
+ }
}
}
}
@@ -529,9 +549,11 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- checkApp(
- fmt("%s.%s", name, attr_name),
- *attr.value, attr.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ checkApp(
+ fmt("%s.%s", name, attr_name),
+ *attr.value, attr.pos);
+ };
}
}
@@ -540,11 +562,13 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- state->forceAttrs(*attr.value, attr.pos, "");
- for (auto & attr2 : *attr.value->attrs)
- checkDerivation(
- fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
- *attr2.value, attr2.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ state->forceAttrs(*attr.value, attr.pos, "");
+ for (auto & attr2 : *attr.value->attrs)
+ checkDerivation(
+ fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
+ *attr2.value, attr2.pos);
+ };
}
}
@@ -553,11 +577,13 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- state->forceAttrs(*attr.value, attr.pos, "");
- for (auto & attr2 : *attr.value->attrs)
- checkApp(
- fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
- *attr2.value, attr2.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ state->forceAttrs(*attr.value, attr.pos, "");
+ for (auto & attr2 : *attr.value->attrs)
+ checkApp(
+ fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
+ *attr2.value, attr2.pos);
+ };
}
}
@@ -566,9 +592,11 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- checkDerivation(
- fmt("%s.%s", name, attr_name),
- *attr.value, attr.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ checkDerivation(
+ fmt("%s.%s", name, attr_name),
+ *attr.value, attr.pos);
+ };
}
}
@@ -577,9 +605,11 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- checkApp(
- fmt("%s.%s", name, attr_name),
- *attr.value, attr.pos);
+ if (checkSystemType(attr_name, attr.pos) ) {
+ checkApp(
+ fmt("%s.%s", name, attr_name),
+ *attr.value, attr.pos);
+ };
}
}
@@ -587,6 +617,7 @@ struct CmdFlakeCheck : FlakeCommand
state->forceAttrs(vOutput, pos, "");
for (auto & attr : *vOutput.attrs) {
checkSystemName(state->symbols[attr.name], attr.pos);
+ checkSystemType(state->symbols[attr.name], attr.pos);
// FIXME: do getDerivations?
}
}
@@ -636,9 +667,11 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- checkBundler(
- fmt("%s.%s", name, attr_name),
- *attr.value, attr.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ checkBundler(
+ fmt("%s.%s", name, attr_name),
+ *attr.value, attr.pos);
+ };
}
}
@@ -647,12 +680,14 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- state->forceAttrs(*attr.value, attr.pos, "");
- for (auto & attr2 : *attr.value->attrs) {
- checkBundler(
- fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
- *attr2.value, attr2.pos);
- }
+ if (checkSystemType(attr_name, attr.pos)) {
+ state->forceAttrs(*attr.value, attr.pos, "");
+ for (auto & attr2 : *attr.value->attrs) {
+ checkBundler(
+ fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
+ *attr2.value, attr2.pos);
+ }
+ };
}
}
@@ -685,7 +720,15 @@ struct CmdFlakeCheck : FlakeCommand
}
if (hasErrors)
throw Error("some errors were encountered during the evaluation");
- }
+
+ if (!omittedSystems.empty()) {
+ warn(
+ "The check omitted these incompatible systems: %s\n"
+ "Use '--all-systems' to check all.",
+ concatStringsSep(", ", omittedSystems)
+ );
+ };
+ };
};
static Strings defaultTemplateAttrPathsPrefixes{"templates."};
@@ -1026,36 +1069,43 @@ struct CmdFlakeShow : FlakeCommand, MixJSON
auto visitor2 = visitor.getAttr(attrName);
- if ((attrPathS[0] == "apps"
- || attrPathS[0] == "checks"
- || attrPathS[0] == "devShells"
- || attrPathS[0] == "legacyPackages"
- || attrPathS[0] == "packages")
- && (attrPathS.size() == 1 || attrPathS.size() == 2)) {
- for (const auto &subAttr : visitor2->getAttrs()) {
- if (hasContent(*visitor2, attrPath2, subAttr)) {
- return true;
+ try {
+ if ((attrPathS[0] == "apps"
+ || attrPathS[0] == "checks"
+ || attrPathS[0] == "devShells"
+ || attrPathS[0] == "legacyPackages"
+ || attrPathS[0] == "packages")
+ && (attrPathS.size() == 1 || attrPathS.size() == 2)) {
+ for (const auto &subAttr : visitor2->getAttrs()) {
+ if (hasContent(*visitor2, attrPath2, subAttr)) {
+ return true;
+ }
}
+ return false;
}
- return false;
- }
- if ((attrPathS.size() == 1)
- && (attrPathS[0] == "formatter"
- || attrPathS[0] == "nixosConfigurations"
- || attrPathS[0] == "nixosModules"
- || attrPathS[0] == "overlays"
- )) {
- for (const auto &subAttr : visitor2->getAttrs()) {
- if (hasContent(*visitor2, attrPath2, subAttr)) {
- return true;
+ if ((attrPathS.size() == 1)
+ && (attrPathS[0] == "formatter"
+ || attrPathS[0] == "nixosConfigurations"
+ || attrPathS[0] == "nixosModules"
+ || attrPathS[0] == "overlays"
+ )) {
+ for (const auto &subAttr : visitor2->getAttrs()) {
+ if (hasContent(*visitor2, attrPath2, subAttr)) {
+ return true;
+ }
}
+ return false;
}
- return false;
- }
- // If we don't recognize it, it's probably content
- return true;
+ // If we don't recognize it, it's probably content
+ return true;
+ } catch (EvalError & e) {
+ // Some attrs may contain errors, e.g. legacyPackages of
+ // nixpkgs. We still want to recurse into them instead of
+ // skipping them entirely.
+ return true;
+ }
};
std::function<nlohmann::json(
@@ -1328,8 +1378,7 @@ struct CmdFlake : NixMultiCommand
{
if (!command)
throw UsageError("'nix flake' requires a sub-command.");
- settings.requireExperimentalFeature(Xp::Flakes);
- command->second->prepare();
+ experimentalFeatureSettings.require(Xp::Flakes);
command->second->run();
}
};
diff --git a/src/nix/flake.md b/src/nix/flake.md
index 810e9ebea..456fd0ea1 100644
--- a/src/nix/flake.md
+++ b/src/nix/flake.md
@@ -54,7 +54,7 @@ output attribute). They are also allowed in the `inputs` attribute
of a flake, e.g.
```nix
-inputs.nixpkgs.url = github:NixOS/nixpkgs;
+inputs.nixpkgs.url = "github:NixOS/nixpkgs";
```
is equivalent to
@@ -221,11 +221,46 @@ Currently the `type` attribute can be one of the following:
commit hash (`rev`). Note that unlike Git, GitHub allows fetching by
commit hash without specifying a branch or tag.
+ You can also specify `host` as a parameter, to point to a custom GitHub
+ Enterprise server.
+
Some examples:
* `github:edolstra/dwarffs`
* `github:edolstra/dwarffs/unstable`
* `github:edolstra/dwarffs/d3f2baba8f425779026c6ec04021b2e927f61e31`
+ * `github:internal/project?host=company-github.example.org`
+
+* `gitlab`: Similar to `github`, is a more efficient way to fetch
+ GitLab repositories. The following attributes are required:
+
+ * `owner`: The owner of the repository.
+
+ * `repo`: The name of the repository.
+
+ Like `github`, these are downloaded as tarball archives.
+
+ The URL syntax for `gitlab` flakes is:
+
+ `gitlab:<owner>/<repo>(/<rev-or-ref>)?(\?<params>)?`
+
+ `<rev-or-ref>` works the same as `github`. Either a branch or tag name
+ (`ref`), or a commit hash (`rev`) can be specified.
+
+ Since GitLab allows for self-hosting, you can specify `host` as
+ a parameter, to point to any instance other than `gitlab.com`.
+
+ Some examples:
+
+ * `gitlab:veloren/veloren`
+ * `gitlab:veloren/veloren/master`
+ * `gitlab:veloren/veloren/80a4d7f13492d916e47d6195be23acae8001985a`
+ * `gitlab:openldap/openldap?host=git.openldap.org`
+
+ When accessing a project in a (nested) subgroup, make sure to URL-encode any
+ slashes, i.e. replace `/` with `%2F`:
+
+ * `gitlab:veloren%2Fdev/rfcs`
* `sourcehut`: Similar to `github`, is a more efficient way to fetch
SourceHut repositories. The following attributes are required:
@@ -275,14 +310,14 @@ Currently the `type` attribute can be one of the following:
# Flake format
As an example, here is a simple `flake.nix` that depends on the
-Nixpkgs flake and provides a single package (i.e. an installable
-derivation):
+Nixpkgs flake and provides a single package (i.e. an
+[installable](./nix.md#installables) derivation):
```nix
{
description = "A flake for building Hello World";
- inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-20.03;
+ inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-20.03";
outputs = { self, nixpkgs }: {
@@ -317,6 +352,8 @@ The following attributes are supported in `flake.nix`:
also contains some metadata about the inputs. These are:
* `outPath`: The path in the Nix store of the flake's source tree.
+ This way, the attribute set can be passed to `import` as if it was a path,
+ as in the example above (`import nixpkgs`).
* `rev`: The commit hash of the flake's repository, if applicable.
@@ -344,10 +381,12 @@ The following attributes are supported in `flake.nix`:
* `nixConfig`: a set of `nix.conf` options to be set when evaluating any
part of a flake. In the interests of security, only a small set of
- whitelisted options (currently `bash-prompt`, `bash-prompt-prefix`,
- `bash-prompt-suffix`, and `flake-registry`) are allowed to be set without
- confirmation so long as `accept-flake-config` is not set in the global
- configuration.
+ options is allowed to be set without confirmation so long as [`accept-flake-config`](@docroot@/command-ref/conf-file.md#conf-accept-flake-config) is not enabled in the global configuration:
+ - [`bash-prompt`](@docroot@/command-ref/conf-file.md#conf-bash-prompt)
+ - [`bash-prompt-prefix`](@docroot@/command-ref/conf-file.md#conf-bash-prompt-prefix)
+ - [`bash-prompt-suffix`](@docroot@/command-ref/conf-file.md#conf-bash-prompt-suffix)
+ - [`flake-registry`](@docroot@/command-ref/conf-file.md#conf-flake-registry)
+ - [`commit-lockfile-summary`](@docroot@/command-ref/conf-file.md#conf-commit-lockfile-summary)
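As an illustration of the allow list above (editorial example, not part of the diff; the prompt string is a placeholder), a flake might set one of these options like so:

```nix
{
  # bash-prompt is on the allow list, so it takes effect without the user
  # enabling accept-flake-config or confirming interactively.
  nixConfig.bash-prompt = "[dev] ";

  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-20.03";

  outputs = { self, nixpkgs }: { };
}
```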
## Flake inputs
@@ -374,7 +413,7 @@ inputs.nixpkgs = {
Alternatively, you can use the URL-like syntax:
```nix
-inputs.import-cargo.url = github:edolstra/import-cargo;
+inputs.import-cargo.url = "github:edolstra/import-cargo";
inputs.nixpkgs.url = "nixpkgs";
```
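For reference (editorial example, not part of the diff), the quoted URL-like form shown above is equivalent to spelling an input out as an attribute set:

```nix
{
  # URL-like form; the URL is now always a quoted string.
  inputs.import-cargo.url = "github:edolstra/import-cargo";

  # Equivalent attribute-set form.
  inputs.nixpkgs = {
    type = "github";
    owner = "NixOS";
    repo = "nixpkgs";
  };

  outputs = { self, import-cargo, nixpkgs }: { };
}
```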
diff --git a/src/nix/fmt.cc b/src/nix/fmt.cc
index 6f6a4a632..c85eacded 100644
--- a/src/nix/fmt.cc
+++ b/src/nix/fmt.cc
@@ -1,4 +1,5 @@
#include "command.hh"
+#include "installable-value.hh"
#include "run.hh"
using namespace nix;
@@ -31,8 +32,9 @@ struct CmdFmt : SourceExprCommand {
auto evalState = getEvalState();
auto evalStore = getEvalStore();
- auto installable = parseInstallable(store, ".");
- auto app = installable->toApp(*evalState).resolve(evalStore, store);
+ auto installable_ = parseInstallable(store, ".");
+ auto & installable = InstallableValue::require(*installable_);
+ auto app = installable.toApp(*evalState).resolve(evalStore, store);
Strings programArgs{app.program};
diff --git a/src/nix/hash.cc b/src/nix/hash.cc
index 60d9593a7..9feca9345 100644
--- a/src/nix/hash.cc
+++ b/src/nix/hash.cc
@@ -151,7 +151,6 @@ struct CmdHash : NixMultiCommand
{
if (!command)
throw UsageError("'nix hash' requires a sub-command.");
- command->second->prepare();
command->second->run();
}
};
@@ -161,11 +160,11 @@ static auto rCmdHash = registerCommand<CmdHash>("hash");
/* Legacy nix-hash command. */
static int compatNixHash(int argc, char * * argv)
{
- HashType ht = htMD5;
+ std::optional<HashType> ht;
bool flat = false;
- bool base32 = false;
+ Base base = Base16;
bool truncate = false;
- enum { opHash, opTo32, opTo16 } op = opHash;
+ enum { opHash, opTo } op = opHash;
std::vector<std::string> ss;
parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
@@ -174,14 +173,31 @@ static int compatNixHash(int argc, char * * argv)
else if (*arg == "--version")
printVersion("nix-hash");
else if (*arg == "--flat") flat = true;
- else if (*arg == "--base32") base32 = true;
+ else if (*arg == "--base16") base = Base16;
+ else if (*arg == "--base32") base = Base32;
+ else if (*arg == "--base64") base = Base64;
+ else if (*arg == "--sri") base = SRI;
else if (*arg == "--truncate") truncate = true;
else if (*arg == "--type") {
std::string s = getArg(*arg, arg, end);
ht = parseHashType(s);
}
- else if (*arg == "--to-base16") op = opTo16;
- else if (*arg == "--to-base32") op = opTo32;
+ else if (*arg == "--to-base16") {
+ op = opTo;
+ base = Base16;
+ }
+ else if (*arg == "--to-base32") {
+ op = opTo;
+ base = Base32;
+ }
+ else if (*arg == "--to-base64") {
+ op = opTo;
+ base = Base64;
+ }
+ else if (*arg == "--to-sri") {
+ op = opTo;
+ base = SRI;
+ }
else if (*arg != "" && arg->at(0) == '-')
return false;
else
@@ -191,17 +207,18 @@ static int compatNixHash(int argc, char * * argv)
if (op == opHash) {
CmdHashBase cmd(flat ? FileIngestionMethod::Flat : FileIngestionMethod::Recursive);
- cmd.ht = ht;
- cmd.base = base32 ? Base32 : Base16;
+ if (!ht.has_value()) ht = htMD5;
+ cmd.ht = ht.value();
+ cmd.base = base;
cmd.truncate = truncate;
cmd.paths = ss;
cmd.run();
}
else {
- CmdToBase cmd(op == opTo32 ? Base32 : Base16);
+ CmdToBase cmd(base);
cmd.args = ss;
- cmd.ht = ht;
+ if (ht.has_value()) cmd.ht = ht;
cmd.run();
}
diff --git a/src/nix/help-stores.md b/src/nix/help-stores.md
new file mode 100644
index 000000000..47ba9b94d
--- /dev/null
+++ b/src/nix/help-stores.md
@@ -0,0 +1,46 @@
+R"(
+
+Nix supports different types of stores. These are described below.
+
+## Store URL format
+
+Stores are specified using a URL-like syntax. For example, the command
+
+```console
+# nix path-info --store https://cache.nixos.org/ --json \
+ /nix/store/a7gvj343m05j2s32xcnwr35v31ynlypr-coreutils-9.1
+```
+
+fetches information about a store path in the HTTP binary cache
+located at https://cache.nixos.org/, which is a type of store.
+
+Store URLs can specify **store settings** using URL query strings,
+i.e. by appending `?name1=value1&name2=value2&...` to the URL. For
+instance,
+
+```
+--store ssh://machine.example.org?ssh-key=/path/to/my/key
+```
+
+tells Nix to access the store on a remote machine via the SSH
+protocol, using `/path/to/my/key` as the SSH private key. The
+supported settings for each store type are documented below.
+
+The special store URL `auto` causes Nix to automatically select a
+store as follows:
+
+* Use the [local store](#local-store) `/nix/store` if `/nix/var/nix`
+ is writable by the current user.
+
+* Otherwise, if `/nix/var/nix/daemon-socket/socket` exists, [connect
+ to the Nix daemon listening on that socket](#local-daemon-store).
+
+* Otherwise, on Linux only, use the [local chroot store](#local-store)
+ `~/.local/share/nix/root`, which will be created automatically if it
+ does not exist.
+
+* Otherwise, use the [local store](#local-store) `/nix/store`.
+
+@stores@
+
+)"
diff --git a/src/nix/local.mk b/src/nix/local.mk
index 0f2f016ec..20ea29d10 100644
--- a/src/nix/local.mk
+++ b/src/nix/local.mk
@@ -32,3 +32,9 @@ src/nix/develop.cc: src/nix/get-env.sh.gen.hh
src/nix-channel/nix-channel.cc: src/nix-channel/unpack-channel.nix.gen.hh
src/nix/main.cc: doc/manual/generate-manpage.nix.gen.hh doc/manual/utils.nix.gen.hh
+
+src/nix/doc/files/%.md: doc/manual/src/command-ref/files/%.md
+ @mkdir -p $$(dirname $@)
+ @cp $< $@
+
+src/nix/profile.cc: src/nix/profile.md src/nix/doc/files/profiles.md.gen.hh
diff --git a/src/nix/log.cc b/src/nix/log.cc
index 0c9f778f0..aaf829764 100644
--- a/src/nix/log.cc
+++ b/src/nix/log.cc
@@ -23,7 +23,7 @@ struct CmdLog : InstallableCommand
Category category() override { return catSecondary; }
- void run(ref<Store> store) override
+ void run(ref<Store> store, ref<Installable> installable) override
{
settings.readOnlyMode = true;
diff --git a/src/nix/log.md b/src/nix/log.md
index 1c76226a3..01e9801df 100644
--- a/src/nix/log.md
+++ b/src/nix/log.md
@@ -22,8 +22,7 @@ R""(
# Description
-This command prints the log of a previous build of the derivation
-*installable* on standard output.
+This command prints the log of a previous build of the [*installable*](./nix.md#installables) on standard output.
Nix looks for build logs in two places:
diff --git a/src/nix/main.cc b/src/nix/main.cc
index 53bf649d4..ce0bed2a3 100644
--- a/src/nix/main.cc
+++ b/src/nix/main.cc
@@ -54,17 +54,17 @@ static bool haveInternet()
std::string programPath;
-struct HelpRequested { };
-
struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
{
bool useNet = true;
bool refresh = false;
+ bool helpRequested = false;
bool showVersion = false;
NixArgs() : MultiCommand(RegisterCommand::getCommandsFor({})), MixCommonArgs("nix")
{
categories.clear();
+ categories[catHelp] = "Help commands";
categories[Command::catDefault] = "Main commands";
categories[catSecondary] = "Infrequently used commands";
categories[catUtility] = "Utility/scripting commands";
@@ -74,7 +74,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
.longName = "help",
.description = "Show usage information.",
.category = miscCategory,
- .handler = {[&]() { throw HelpRequested(); }},
+ .handler = {[this]() { this->helpRequested = true; }},
});
addFlag({
@@ -83,6 +83,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
.description = "Print full build logs on standard error.",
.category = loggingCategory,
.handler = {[&]() { logger->setPrintBuildLogs(true); }},
+ .experimentalFeature = Xp::NixCommand,
});
addFlag({
@@ -98,6 +99,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
.description = "Disable substituters and consider all previously downloaded files up-to-date.",
.category = miscCategory,
.handler = {[&]() { useNet = false; }},
+ .experimentalFeature = Xp::NixCommand,
});
addFlag({
@@ -105,6 +107,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
.description = "Consider all previously downloaded files out-of-date.",
.category = miscCategory,
.handler = {[&]() { refresh = true; }},
+ .experimentalFeature = Xp::NixCommand,
});
}
@@ -124,6 +127,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
{"optimise-store", {"store", "optimise"}},
{"ping-store", {"store", "ping"}},
{"sign-paths", {"store", "sign"}},
+ {"show-derivation", {"derivation", "show"}},
{"to-base16", {"hash", "to-base16"}},
{"to-base32", {"hash", "to-base32"}},
{"to-base64", {"hash", "to-base64"}},
@@ -164,11 +168,29 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
{
commands = RegisterCommand::getCommandsFor({});
}
+
+ std::string dumpCli()
+ {
+ auto res = nlohmann::json::object();
+
+ res["args"] = toJSON();
+
+ auto stores = nlohmann::json::object();
+ for (auto & implem : *Implementations::registered) {
+ auto storeConfig = implem.getConfig();
+ auto storeName = storeConfig->name();
+ stores[storeName]["doc"] = storeConfig->doc();
+ stores[storeName]["settings"] = storeConfig->toJSON();
+ }
+ res["stores"] = std::move(stores);
+
+ return res.dump();
+ }
};
/* Render the help for the specified subcommand to stdout using
lowdown. */
-static void showHelp(std::vector<std::string> subcommand, MultiCommand & toplevel)
+static void showHelp(std::vector<std::string> subcommand, NixArgs & toplevel)
{
auto mdName = subcommand.empty() ? "nix" : fmt("nix3-%s", concatStringsSep("-", subcommand));
@@ -179,21 +201,21 @@ static void showHelp(std::vector<std::string> subcommand, MultiCommand & topleve
auto vGenerateManpage = state.allocValue();
state.eval(state.parseExprFromString(
#include "generate-manpage.nix.gen.hh"
- , "/"), *vGenerateManpage);
+ , CanonPath::root), *vGenerateManpage);
auto vUtils = state.allocValue();
state.cacheFile(
- "/utils.nix", "/utils.nix",
+ CanonPath("/utils.nix"), CanonPath("/utils.nix"),
state.parseExprFromString(
#include "utils.nix.gen.hh"
- , "/"),
+ , CanonPath::root),
*vUtils);
- auto attrs = state.buildBindings(16);
- attrs.alloc("toplevel").mkString(toplevel.toJSON().dump());
+ auto vDump = state.allocValue();
+ vDump->mkString(toplevel.dumpCli());
auto vRes = state.allocValue();
- state.callFunction(*vGenerateManpage, state.allocValue()->mkAttrs(attrs), *vRes, noPos);
+ state.callFunction(*vGenerateManpage, *vDump, *vRes, noPos);
auto attr = vRes->attrs->get(state.symbols.create(mdName + ".md"));
if (!attr)
@@ -205,6 +227,14 @@ static void showHelp(std::vector<std::string> subcommand, MultiCommand & topleve
std::cout << renderMarkdownToTerminal(markdown) << "\n";
}
+static NixArgs & getNixArgs(Command & cmd)
+{
+ assert(cmd.parent);
+ MultiCommand * toplevel = cmd.parent;
+ while (toplevel->parent) toplevel = toplevel->parent;
+ return dynamic_cast<NixArgs &>(*toplevel);
+}
+
struct CmdHelp : Command
{
std::vector<std::string> subcommand;
@@ -229,17 +259,43 @@ struct CmdHelp : Command
;
}
+ Category category() override { return catHelp; }
+
void run() override
{
assert(parent);
MultiCommand * toplevel = parent;
while (toplevel->parent) toplevel = toplevel->parent;
- showHelp(subcommand, *toplevel);
+ showHelp(subcommand, getNixArgs(*this));
}
};
static auto rCmdHelp = registerCommand<CmdHelp>("help");
+struct CmdHelpStores : Command
+{
+ std::string description() override
+ {
+ return "show help about store types and their settings";
+ }
+
+ std::string doc() override
+ {
+ return
+ #include "help-stores.md"
+ ;
+ }
+
+ Category category() override { return catHelp; }
+
+ void run() override
+ {
+ showHelp({"help-stores"}, getNixArgs(*this));
+ }
+};
+
+static auto rCmdHelpStores = registerCommand<CmdHelpStores>("help-stores");
+
void mainWrapped(int argc, char * * argv)
{
savedArgv = argv;
@@ -291,13 +347,16 @@ void mainWrapped(int argc, char * * argv)
NixArgs args;
- if (argc == 2 && std::string(argv[1]) == "__dump-args") {
- logger->cout("%s", args.toJSON());
+ if (argc == 2 && std::string(argv[1]) == "__dump-cli") {
+ logger->cout(args.dumpCli());
return;
}
if (argc == 2 && std::string(argv[1]) == "__dump-builtins") {
- settings.experimentalFeatures = {Xp::Flakes, Xp::FetchClosure};
+ experimentalFeatureSettings.experimentalFeatures = {
+ Xp::Flakes,
+ Xp::FetchClosure,
+ };
evalSettings.pureEval = false;
EvalState state({}, openStore("dummy://"));
auto res = nlohmann::json::object();
@@ -316,6 +375,11 @@ void mainWrapped(int argc, char * * argv)
return;
}
+ if (argc == 2 && std::string(argv[1]) == "__dump-xp-features") {
+ logger->cout(documentExperimentalFeatures().dump());
+ return;
+ }
+
Finally printCompletions([&]()
{
if (completions) {
@@ -334,7 +398,11 @@ void mainWrapped(int argc, char * * argv)
try {
args.parseCmdline(argvToStrings(argc, argv));
- } catch (HelpRequested &) {
+ } catch (UsageError &) {
+ if (!args.helpRequested && !completions) throw;
+ }
+
+ if (args.helpRequested) {
std::vector<std::string> subcommand;
MultiCommand * command = &args;
while (command) {
@@ -346,8 +414,6 @@ void mainWrapped(int argc, char * * argv)
}
showHelp(subcommand, args);
return;
- } catch (UsageError &) {
- if (!completions) throw;
}
if (completions) {
@@ -363,10 +429,8 @@ void mainWrapped(int argc, char * * argv)
if (!args.command)
throw UsageError("no subcommand specified");
- if (args.command->first != "repl"
- && args.command->first != "doctor"
- && args.command->first != "upgrade-nix")
- settings.requireExperimentalFeature(Xp::NixCommand);
+ experimentalFeatureSettings.require(
+ args.command->second->experimentalFeature());
if (args.useNet && !haveInternet()) {
warn("you don't have Internet access; disabling some network-dependent features");
@@ -394,7 +458,6 @@ void mainWrapped(int argc, char * * argv)
if (args.command->second->forceImpureByDefault() && !evalSettings.pureEval.overridden) {
evalSettings.pureEval = false;
}
- args.command->second->prepare();
args.command->second->run();
}
diff --git a/src/nix/make-content-addressed.cc b/src/nix/make-content-addressed.cc
index 6693c55ac..d9c988a9f 100644
--- a/src/nix/make-content-addressed.cc
+++ b/src/nix/make-content-addressed.cc
@@ -28,7 +28,6 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand,
;
}
- using StorePathsCommand::run;
void run(ref<Store> srcStore, StorePaths && storePaths) override
{
auto dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
diff --git a/src/nix/make-content-addressed.md b/src/nix/make-content-addressed.md
index 32eecc880..b1f7da525 100644
--- a/src/nix/make-content-addressed.md
+++ b/src/nix/make-content-addressed.md
@@ -35,7 +35,9 @@ R""(
# Description
This command converts the closure of the store paths specified by
-*installables* to content-addressed form. Nix store paths are usually
+[*installables*](./nix.md#installables) to content-addressed form.
+
+Nix store paths are usually
*input-addressed*, meaning that the hash part of the store path is
computed from the contents of the derivation (i.e., the build-time
dependency graph). Input-addressed paths need to be signed by a
diff --git a/src/nix/nar-ls.md b/src/nix/nar-ls.md
index d373f9715..5a03c5d82 100644
--- a/src/nix/nar-ls.md
+++ b/src/nix/nar-ls.md
@@ -5,7 +5,7 @@ R""(
* To list a specific file in a NAR:
```console
- # nix nar ls -l ./hello.nar /bin/hello
+ # nix nar ls --long ./hello.nar /bin/hello
-r-xr-xr-x 38184 hello
```
@@ -13,7 +13,7 @@ R""(
format:
```console
- # nix nar ls --json -R ./hello.nar /bin
+ # nix nar ls --json --recursive ./hello.nar /bin
{"type":"directory","entries":{"hello":{"type":"regular","size":38184,"executable":true,"narOffset":400}}}
```
diff --git a/src/nix/nar.cc b/src/nix/nar.cc
index dbb043d9b..9815410cf 100644
--- a/src/nix/nar.cc
+++ b/src/nix/nar.cc
@@ -25,7 +25,6 @@ struct CmdNar : NixMultiCommand
{
if (!command)
throw UsageError("'nix nar' requires a sub-command.");
- command->second->prepare();
command->second->run();
}
};
diff --git a/src/nix/nix.md b/src/nix/nix.md
index db60c59ff..8a850ae83 100644
--- a/src/nix/nix.md
+++ b/src/nix/nix.md
@@ -48,102 +48,124 @@ manual](https://nixos.org/manual/nix/stable/).
# Installables
-Many `nix` subcommands operate on one or more *installables*. These are
-command line arguments that represent something that can be built in
-the Nix store. Here are the recognised types of installables:
-
-* **Flake output attributes**: `nixpkgs#hello`
-
- These have the form *flakeref*[`#`*attrpath*], where *flakeref* is a
- flake reference and *attrpath* is an optional attribute path. For
- more information on flakes, see [the `nix flake` manual
- page](./nix3-flake.md). Flake references are most commonly a flake
- identifier in the flake registry (e.g. `nixpkgs`), or a raw path
- (e.g. `/path/to/my-flake` or `.` or `../foo`), or a full URL
- (e.g. `github:nixos/nixpkgs` or `path:.`)
-
- When the flake reference is a raw path (a path without any URL
- scheme), it is interpreted as a `path:` or `git+file:` url in the following
- way:
-
- - If the path is within a Git repository, then the url will be of the form
- `git+file://[GIT_REPO_ROOT]?dir=[RELATIVE_FLAKE_DIR_PATH]`
- where `GIT_REPO_ROOT` is the path to the root of the git repository,
- and `RELATIVE_FLAKE_DIR_PATH` is the path (relative to the directory
- root) of the closest parent of the given path that contains a `flake.nix` within
- the git repository.
- If no such directory exists, then Nix will error-out.
-
- Note that the search will only include files indexed by git. In particular, files
- which are matched by `.gitignore` or have never been `git add`-ed will not be
- available in the flake. If this is undesirable, specify `path:<directory>` explicitly;
-
- For example, if `/foo/bar` is a git repository with the following structure:
- ```
- .
- └── baz
- ├── blah
- │  └── file.txt
- └── flake.nix
- ```
+> **Warning** \
+> Installables are part of the unstable
+> [`nix-command` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-nix-command),
+> and are subject to change without notice.
+
+Many `nix` subcommands operate on one or more *installables*.
+These are command line arguments that represent something that can be realised in the Nix store.
+
+The following types of installable are supported by most commands:
+
+- [Flake output attribute](#flake-output-attribute) (experimental)
+- [Store path](#store-path)
+- [Nix file](#nix-file), optionally qualified by an attribute path
+- [Nix expression](#nix-expression), optionally qualified by an attribute path
+
+For most commands, if no installable is specified, `.` is assumed.
+That is, Nix will operate on the default flake output attribute of the flake in the current directory.
+
+### Flake output attribute
+
+> **Warning** \
+> Flake output attribute installables depend on both the
+> [`flakes`](@docroot@/contributing/experimental-features.md#xp-feature-flakes)
+> and
+> [`nix-command`](@docroot@/contributing/experimental-features.md#xp-feature-nix-command)
+> experimental features, and are subject to change without notice.
+
+Example: `nixpkgs#hello`
+
+These have the form *flakeref*[`#`*attrpath*], where *flakeref* is a
+[flake reference](./nix3-flake.md#flake-references) and *attrpath* is an optional attribute path. For
+more information on flakes, see [the `nix flake` manual
+page](./nix3-flake.md). Flake references are most commonly a flake
+identifier in the flake registry (e.g. `nixpkgs`), or a raw path
+(e.g. `/path/to/my-flake` or `.` or `../foo`), or a full URL
+(e.g. `github:nixos/nixpkgs` or `path:.`)
+
+When the flake reference is a raw path (a path without any URL
+scheme), it is interpreted as a `path:` or `git+file:` url in the following
+way:
+
+- If the path is within a Git repository, then the url will be of the form
+ `git+file://[GIT_REPO_ROOT]?dir=[RELATIVE_FLAKE_DIR_PATH]`
+ where `GIT_REPO_ROOT` is the path to the root of the git repository,
+ and `RELATIVE_FLAKE_DIR_PATH` is the path (relative to the directory
+ root) of the closest parent of the given path that contains a `flake.nix` within
+ the git repository.
+ If no such directory exists, then Nix will error-out.
+
+ Note that the search will only include files indexed by git. In particular, files
+ which are matched by `.gitignore` or have never been `git add`-ed will not be
+ available in the flake. If this is undesirable, specify `path:<directory>` explicitly;
+
+ For example, if `/foo/bar` is a git repository with the following structure:
+ ```
+ .
+ └── baz
+ ├── blah
+ │  └── file.txt
+ └── flake.nix
+ ```
Then `/foo/bar/baz/blah` will resolve to `git+file:///foo/bar?dir=baz`
- - If the supplied path is not a git repository, then the url will have the form
- `path:FLAKE_DIR_PATH` where `FLAKE_DIR_PATH` is the closest parent
- of the supplied path that contains a `flake.nix` file (within the same file-system).
- If no such directory exists, then Nix will error-out.
-
- For example, if `/foo/bar/flake.nix` exists, then `/foo/bar/baz/` will resolve to
- `path:/foo/bar`
+- If the supplied path is not a git repository, then the url will have the form
+ `path:FLAKE_DIR_PATH` where `FLAKE_DIR_PATH` is the closest parent
+ of the supplied path that contains a `flake.nix` file (within the same file-system).
+ If no such directory exists, then Nix will error-out.
- If *attrpath* is omitted, Nix tries some default values; for most
- subcommands, the default is `packages.`*system*`.default`
- (e.g. `packages.x86_64-linux.default`), but some subcommands have
- other defaults. If *attrpath* *is* specified, *attrpath* is
- interpreted as relative to one or more prefixes; for most
- subcommands, these are `packages.`*system*,
- `legacyPackages.*system*` and the empty prefix. Thus, on
- `x86_64-linux` `nix build nixpkgs#hello` will try to build the
- attributes `packages.x86_64-linux.hello`,
- `legacyPackages.x86_64-linux.hello` and `hello`.
+ For example, if `/foo/bar/flake.nix` exists, then `/foo/bar/baz/` will resolve to
+ `path:/foo/bar`
-* **Store paths**: `/nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10`
+If *attrpath* is omitted, Nix tries some default values; for most
+subcommands, the default is `packages.`*system*`.default`
+(e.g. `packages.x86_64-linux.default`), but some subcommands have
+other defaults. If *attrpath* *is* specified, *attrpath* is
+interpreted as relative to one or more prefixes; for most
+subcommands, these are `packages.`*system*,
+`legacyPackages.*system*` and the empty prefix. Thus, on
+`x86_64-linux` `nix build nixpkgs#hello` will try to build the
+attributes `packages.x86_64-linux.hello`,
+`legacyPackages.x86_64-linux.hello` and `hello`.
- These are paths inside the Nix store, or symlinks that resolve to a
- path in the Nix store.
+### Store path
-* **Store derivations**: `/nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv`
+Example: `/nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10`
- By default, if you pass a [store derivation] path to a `nix` subcommand, the command will operate on the [output path]s of the derivation.
+These are paths inside the Nix store, or symlinks that resolve to a path in the Nix store.
- [output path]: ../../glossary.md#gloss-output-path
+A [store derivation] is also addressed by store path.
- For example, `nix path-info` prints information about the output paths:
+Example: `/nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv`
- ```console
- # nix path-info --json /nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv
- [{"path":"/nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10",…}]
- ```
+If you want to refer to an output path of that store derivation, add the output name preceded by a caret (`^`).
+
+Example: `/nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv^out`
- If you want to operate on the store derivation itself, pass the
- `--derivation` flag.
+All outputs can be referred to at once with the special syntax `^*`.
-* **Nix attributes**: `--file /path/to/nixpkgs hello`
+Example: `/nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv^*`
- When the `-f` / `--file` *path* option is given, installables are
- interpreted as attribute paths referencing a value returned by
- evaluating the Nix file *path*.
+### Nix file
-* **Nix expressions**: `--expr '(import <nixpkgs> {}).hello.overrideDerivation (prev: { name = "my-hello"; })'`.
+Example: `--file /path/to/nixpkgs hello`
- When the `--expr` option is given, all installables are interpreted
- as Nix expressions. You may need to specify `--impure` if the
- expression references impure inputs (such as `<nixpkgs>`).
+When the option `-f` / `--file` *path* \[*attrpath*...\] is given, installables are interpreted as the value of the expression in the Nix file at *path*.
+If attribute paths are provided, commands will operate on the corresponding values accessible at these paths.
+The Nix expression in that file, or any selected attribute, must evaluate to a derivation.
-For most commands, if no installable is specified, the default is `.`,
-i.e. Nix will operate on the default flake output attribute of the
-flake in the current directory.
+### Nix expression
+
+Example: `--expr 'import <nixpkgs> {}' hello`
+
+When the option `--expr` *expression* \[*attrpath*...\] is given, installables are interpreted as the value of the Nix expression.
+If attribute paths are provided, commands will operate on the corresponding values accessible at these paths.
+The Nix expression, or any selected attribute, must evaluate to a derivation.
+
+You may need to specify `--impure` if the expression references impure inputs (such as `<nixpkgs>`).
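As a sketch of such an expression (editorial example; the `nix build` invocation in the comment is hypothetical, and `hello` stands for whichever attribute path is passed on the command line), the value selected by the attribute path must be a derivation:

```nix
# Passed as a single-quoted string to: nix build --impure --expr '…' hello
let
  pkgs = import <nixpkgs> { };
in {
  hello = pkgs.hello;
}
```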
## Derivation output selection
@@ -175,7 +197,7 @@ operate are determined as follows:
of all outputs of the `glibc` package in the binary cache:
```console
- # nix path-info -S --eval-store auto --store https://cache.nixos.org 'nixpkgs#glibc^*'
+ # nix path-info --closure-size --eval-store auto --store https://cache.nixos.org 'nixpkgs#glibc^*'
/nix/store/g02b1lpbddhymmcjb923kf0l7s9nww58-glibc-2.33-123 33208200
/nix/store/851dp95qqiisjifi639r0zzg5l465ny4-glibc-2.33-123-bin 36142896
/nix/store/kdgs3q6r7xdff1p7a9hnjr43xw2404z7-glibc-2.33-123-debug 155787312
@@ -186,7 +208,7 @@ operate are determined as follows:
and likewise, using a store path to a "drv" file to specify the derivation:
```console
- # nix path-info -S '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^*'
+ # nix path-info --closure-size '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^*'
```
* If you didn't specify the desired outputs, but the derivation has an
@@ -210,8 +232,7 @@ operate are determined as follows:
# Nix stores
-Most `nix` subcommands operate on a *Nix store*.
-
-TODO: list store types, options
+Most `nix` subcommands operate on a *Nix store*. These are documented
+in [`nix help-stores`](./nix3-help-stores.md).
)""
diff --git a/src/nix/path-info.md b/src/nix/path-info.md
index b30898ac0..2dda866d0 100644
--- a/src/nix/path-info.md
+++ b/src/nix/path-info.md
@@ -13,7 +13,7 @@ R""(
closure, sorted by size:
```console
- # nix path-info -rS /run/current-system | sort -nk2
+ # nix path-info --recursive --closure-size /run/current-system | sort -nk2
/nix/store/hl5xwp9kdrd1zkm0idm3kkby9q66z404-empty 96
/nix/store/27324qvqhnxj3rncazmxc4mwy79kz8ha-nameservers 112
@@ -25,7 +25,7 @@ R""(
readable sizes:
```console
- # nix path-info -rsSh nixpkgs#rustc
+ # nix path-info --recursive --size --closure-size --human-readable nixpkgs#rustc
/nix/store/01rrgsg5zk3cds0xgdsq40zpk6g51dz9-ncurses-6.2-dev 386.7K 69.1M
/nix/store/0q783wnvixpqz6dxjp16nw296avgczam-libpfm-4.11.0 5.9M 37.4M
@@ -34,7 +34,7 @@ R""(
* Check the existence of a path in a binary cache:
```console
- # nix path-info -r /nix/store/blzxgyvrk32ki6xga10phr4sby2xf25q-geeqie-1.5.1 --store https://cache.nixos.org/
+ # nix path-info --recursive /nix/store/blzxgyvrk32ki6xga10phr4sby2xf25q-geeqie-1.5.1 --store https://cache.nixos.org/
path '/nix/store/blzxgyvrk32ki6xga10phr4sby2xf25q-geeqie-1.5.1' is not valid
```
@@ -57,7 +57,7 @@ R""(
size:
```console
- # nix path-info --json --all -S \
+ # nix path-info --json --all --closure-size \
| jq 'map(select(.closureSize > 1e9)) | sort_by(.closureSize) | map([.path, .closureSize])'
[
…,
@@ -80,7 +80,7 @@ R""(
# Description
This command shows information about the store paths produced by
-*installables*, or about all paths in the store if you pass `--all`.
+[*installables*](./nix.md#installables), or about all paths in the store if you pass `--all`.
By default, this command only prints the store paths. You can get
additional information by passing flags such as `--closure-size`,
diff --git a/src/nix/ping-store.cc b/src/nix/ping-store.cc
index 5c44510ab..ec450e8e0 100644
--- a/src/nix/ping-store.cc
+++ b/src/nix/ping-store.cc
@@ -28,15 +28,20 @@ struct CmdPingStore : StoreCommand, MixJSON
store->connect();
if (auto version = store->getVersion())
notice("Version: %s", *version);
+ if (auto trusted = store->isTrustedClient())
+ notice("Trusted: %s", *trusted);
} else {
nlohmann::json res;
Finally printRes([&]() {
logger->cout("%s", res);
});
+
res["url"] = store->getUri();
store->connect();
if (auto version = store->getVersion())
res["version"] = *version;
+ if (auto trusted = store->isTrustedClient())
+ res["trusted"] = *trusted;
}
}
};
diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc
index 51c8a3319..3b2e225f6 100644
--- a/src/nix/prefetch.cc
+++ b/src/nix/prefetch.cc
@@ -27,7 +27,10 @@ std::string resolveMirrorUrl(EvalState & state, const std::string & url)
Value vMirrors;
// FIXME: use nixpkgs flake
- state.eval(state.parseExprFromString("import <nixpkgs/pkgs/build-support/fetchurl/mirrors.nix>", "."), vMirrors);
+ state.eval(state.parseExprFromString(
+ "import <nixpkgs/pkgs/build-support/fetchurl/mirrors.nix>",
+ state.rootPath(CanonPath::root)),
+ vMirrors);
state.forceAttrs(vMirrors, noPos, "while evaluating the set of all mirrors");
auto mirrorList = vMirrors.attrs->find(state.symbols.create(mirrorName));
@@ -67,7 +70,13 @@ std::tuple<StorePath, Hash> prefetchFile(
the store. */
if (expectedHash) {
hashType = expectedHash->type;
- storePath = store->makeFixedOutputPath(ingestionMethod, *expectedHash, *name);
+ storePath = store->makeFixedOutputPath(*name, FixedOutputInfo {
+ .hash = {
+ .method = ingestionMethod,
+ .hash = *expectedHash,
+ },
+ .references = {},
+ });
if (store->isValidPath(*storePath))
hash = expectedHash;
else
@@ -118,7 +127,7 @@ std::tuple<StorePath, Hash> prefetchFile(
auto info = store->addToStoreSlow(*name, tmpFile, ingestionMethod, hashType, expectedHash);
storePath = info.path;
assert(info.ca);
- hash = getContentAddressHash(*info.ca);
+ hash = info.ca->getHash();
}
return {storePath.value(), hash.value()};
@@ -192,9 +201,11 @@ static int main_nix_prefetch_url(int argc, char * * argv)
throw UsageError("you must specify a URL");
url = args[0];
} else {
- Path path = resolveExprPath(lookupFileArg(*state, args.empty() ? "." : args[0]));
Value vRoot;
- state->evalFile(path, vRoot);
+ state->evalFile(
+ resolveExprPath(
+ lookupFileArg(*state, args.empty() ? "." : args[0])),
+ vRoot);
Value & v(*findAlongAttrPath(*state, attrPath, autoArgs, vRoot).first);
state->forceAttrs(v, noPos, "while evaluating the source attribute to prefetch");
diff --git a/src/nix/print-dev-env.md b/src/nix/print-dev-env.md
index 2aad491de..a8ce9d36a 100644
--- a/src/nix/print-dev-env.md
+++ b/src/nix/print-dev-env.md
@@ -40,7 +40,7 @@ R""(
This command prints a shell script that can be sourced by `bash` and
that sets the variables and shell functions defined by the build
-process of *installable*. This allows you to get a similar build
+process of [*installable*](./nix.md#installables). This allows you to get a similar build
environment in your current shell rather than in a subshell (as with
`nix develop`).
diff --git a/src/nix/profile-install.md b/src/nix/profile-install.md
index aed414963..4c0f82c09 100644
--- a/src/nix/profile-install.md
+++ b/src/nix/profile-install.md
@@ -29,6 +29,6 @@ R""(
# Description
-This command adds *installables* to a Nix profile.
+This command adds [*installables*](./nix.md#installables) to a Nix profile.
)""
diff --git a/src/nix/profile.cc b/src/nix/profile.cc
index eef33b3d9..7cea616d2 100644
--- a/src/nix/profile.cc
+++ b/src/nix/profile.cc
@@ -31,6 +31,11 @@ struct ProfileElementSource
std::tuple(originalRef.to_string(), attrPath, outputs) <
std::tuple(other.originalRef.to_string(), other.attrPath, other.outputs);
}
+
+ std::string to_string() const
+ {
+ return fmt("%s#%s%s", originalRef, attrPath, outputs.to_string());
+ }
};
const int defaultPriority = 5;
@@ -42,16 +47,30 @@ struct ProfileElement
bool active = true;
int priority = defaultPriority;
- std::string describe() const
+ std::string identifier() const
{
if (source)
- return fmt("%s#%s%s", source->originalRef, source->attrPath, source->outputs.to_string());
+ return source->to_string();
StringSet names;
for (auto & path : storePaths)
names.insert(DrvName(path.name()).name);
return concatStringsSep(", ", names);
}
+ /**
+ * Return the set of strings representing the installables that correspond to the
+ * current element: either a single flake reference, or the plain store paths
+ */
+ std::set<std::string> toInstallables(Store & store)
+ {
+ if (source)
+ return {source->to_string()};
+ StringSet rawPaths;
+ for (auto & path : storePaths)
+ rawPaths.insert(store.printStorePath(path));
+ return rawPaths;
+ }
+
std::string versions() const
{
StringSet versions;
@@ -62,7 +81,7 @@ struct ProfileElement
bool operator < (const ProfileElement & other) const
{
- return std::tuple(describe(), storePaths) < std::tuple(other.describe(), other.storePaths);
+ return std::tuple(identifier(), storePaths) < std::tuple(other.identifier(), other.storePaths);
}
void updateStorePaths(
@@ -200,12 +219,22 @@ struct ProfileManifest
auto narHash = hashString(htSHA256, sink.s);
ValidPathInfo info {
- store->makeFixedOutputPath(FileIngestionMethod::Recursive, narHash, "profile", references),
+ *store,
+ "profile",
+ FixedOutputInfo {
+ .hash = {
+ .method = FileIngestionMethod::Recursive,
+ .hash = narHash,
+ },
+ .references = {
+ .others = std::move(references),
+ // profiles never refer to themselves
+ .self = false,
+ },
+ },
narHash,
};
- info.references = std::move(references);
info.narSize = sink.s.size();
- info.ca = FixedOutputHash { .method = FileIngestionMethod::Recursive, .hash = info.narHash };
StringSource source(sink.s);
store->addToStore(info, source);
@@ -227,13 +256,13 @@ struct ProfileManifest
bool changes = false;
while (i != prevElems.end() || j != curElems.end()) {
- if (j != curElems.end() && (i == prevElems.end() || i->describe() > j->describe())) {
- logger->cout("%s%s: ∅ -> %s", indent, j->describe(), j->versions());
+ if (j != curElems.end() && (i == prevElems.end() || i->identifier() > j->identifier())) {
+ logger->cout("%s%s: ∅ -> %s", indent, j->identifier(), j->versions());
changes = true;
++j;
}
- else if (i != prevElems.end() && (j == curElems.end() || i->describe() < j->describe())) {
- logger->cout("%s%s: %s -> ∅", indent, i->describe(), i->versions());
+ else if (i != prevElems.end() && (j == curElems.end() || i->identifier() < j->identifier())) {
+ logger->cout("%s%s: %s -> ∅", indent, i->identifier(), i->versions());
changes = true;
++i;
}
@@ -241,7 +270,7 @@ struct ProfileManifest
auto v1 = i->versions();
auto v2 = j->versions();
if (v1 != v2) {
- logger->cout("%s%s: %s -> %s", indent, i->describe(), v1, v2);
+ logger->cout("%s%s: %s -> %s", indent, i->identifier(), v1, v2);
changes = true;
}
++i;
@@ -254,13 +283,19 @@ struct ProfileManifest
}
};
-static std::map<Installable *, std::pair<BuiltPaths, ExtraPathInfo>>
+static std::map<Installable *, std::pair<BuiltPaths, ref<ExtraPathInfo>>>
builtPathsPerInstallable(
- const std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> & builtPaths)
+ const std::vector<std::pair<ref<Installable>, BuiltPathWithResult>> & builtPaths)
{
- std::map<Installable *, std::pair<BuiltPaths, ExtraPathInfo>> res;
+ std::map<Installable *, std::pair<BuiltPaths, ref<ExtraPathInfo>>> res;
for (auto & [installable, builtPath] : builtPaths) {
- auto & r = res[installable.get()];
+ auto & r = res.insert({
+ &*installable,
+ {
+ {},
+ make_ref<ExtraPathInfo>(),
+ }
+ }).first->second;
/* Note that there could be conflicting info
(e.g. meta.priority fields) if the installable returned
multiple derivations. So pick one arbitrarily. FIXME:
@@ -296,7 +331,7 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
;
}
- void run(ref<Store> store) override
+ void run(ref<Store> store, Installables && installables) override
{
ProfileManifest manifest(*getEvalState(), *profile);
@@ -307,14 +342,16 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
for (auto & installable : installables) {
ProfileElement element;
- auto & [res, info] = builtPaths[installable.get()];
+ auto iter = builtPaths.find(&*installable);
+ if (iter == builtPaths.end()) continue;
+ auto & [res, info] = iter->second;
- if (info.originalRef && info.resolvedRef && info.attrPath && info.extendedOutputsSpec) {
+ if (auto * info2 = dynamic_cast<ExtraPathInfoFlake *>(&*info)) {
element.source = ProfileElementSource {
- .originalRef = *info.originalRef,
- .resolvedRef = *info.resolvedRef,
- .attrPath = *info.attrPath,
- .outputs = *info.extendedOutputsSpec,
+ .originalRef = info2->flake.originalRef,
+ .resolvedRef = info2->flake.resolvedRef,
+ .attrPath = info2->value.attrPath,
+ .outputs = info2->value.extendedOutputsSpec,
};
}
@@ -323,7 +360,12 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
element.priority =
priority
? *priority
- : info.priority.value_or(defaultPriority);
+ : ({
+ auto * info2 = dynamic_cast<ExtraPathInfoValue *>(&*info);
+ info2
+ ? info2->value.priority.value_or(defaultPriority)
+ : defaultPriority;
+ });
element.updateStorePaths(getEvalStore(), store, res);
@@ -340,10 +382,10 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
auto profileElement = *it;
for (auto & storePath : profileElement.storePaths) {
if (conflictError.fileA.starts_with(store->printStorePath(storePath))) {
- return std::pair(conflictError.fileA, profileElement.source->originalRef);
+ return std::pair(conflictError.fileA, profileElement.toInstallables(*store));
}
if (conflictError.fileB.starts_with(store->printStorePath(storePath))) {
- return std::pair(conflictError.fileB, profileElement.source->originalRef);
+ return std::pair(conflictError.fileB, profileElement.toInstallables(*store));
}
}
}
@@ -352,9 +394,9 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
// There are 2 conflicting files. We need to find out which one is from the already installed package and
// which one is from the new package that is being installed.
// The first matching package is the one that was already installed (original).
- auto [originalConflictingFilePath, originalConflictingRef] = findRefByFilePath(manifest.elements.begin(), manifest.elements.end());
+ auto [originalConflictingFilePath, originalConflictingRefs] = findRefByFilePath(manifest.elements.begin(), manifest.elements.end());
// The last matching package is the one that was going to be installed (new).
- auto [newConflictingFilePath, newConflictingRef] = findRefByFilePath(manifest.elements.rbegin(), manifest.elements.rend());
+ auto [newConflictingFilePath, newConflictingRefs] = findRefByFilePath(manifest.elements.rbegin(), manifest.elements.rend());
throw Error(
"An existing package already provides the following file:\n"
@@ -380,8 +422,8 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
" nix profile install %4% --priority %7%\n",
originalConflictingFilePath,
newConflictingFilePath,
- originalConflictingRef.to_string(),
- newConflictingRef.to_string(),
+ concatStringsSep(" ", originalConflictingRefs),
+ concatStringsSep(" ", newConflictingRefs),
conflictError.priority,
conflictError.priority - 1,
conflictError.priority + 1
@@ -468,7 +510,7 @@ struct CmdProfileRemove : virtual EvalCommand, MixDefaultProfile, MixProfileElem
if (!matches(*store, element, i, matchers)) {
newManifest.elements.push_back(std::move(element));
} else {
- notice("removing '%s'", element.describe());
+ notice("removing '%s'", element.identifier());
}
}
@@ -513,7 +555,7 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf
auto matchers = getMatchers(store);
- std::vector<std::shared_ptr<Installable>> installables;
+ Installables installables;
std::vector<size_t> indices;
auto upgradedCount = 0;
@@ -529,7 +571,7 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf
Activity act(*logger, lvlChatty, actUnknown,
fmt("checking '%s' for updates", element.source->attrPath));
- auto installable = std::make_shared<InstallableFlake>(
+ auto installable = make_ref<InstallableFlake>(
this,
getEvalState(),
FlakeRef(element.source->originalRef),
@@ -541,19 +583,20 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf
auto derivedPaths = installable->toDerivedPaths();
if (derivedPaths.empty()) continue;
- auto & info = derivedPaths[0].info;
-
- assert(info.resolvedRef && info.attrPath);
+ auto * infop = dynamic_cast<ExtraPathInfoFlake *>(&*derivedPaths[0].info);
+ // `InstallableFlake` should use `ExtraPathInfoFlake`.
+ assert(infop);
+ auto & info = *infop;
- if (element.source->resolvedRef == info.resolvedRef) continue;
+ if (element.source->resolvedRef == info.flake.resolvedRef) continue;
printInfo("upgrading '%s' from flake '%s' to '%s'",
- element.source->attrPath, element.source->resolvedRef, *info.resolvedRef);
+ element.source->attrPath, element.source->resolvedRef, info.flake.resolvedRef);
element.source = ProfileElementSource {
.originalRef = installable->flakeRef,
- .resolvedRef = *info.resolvedRef,
- .attrPath = *info.attrPath,
+ .resolvedRef = info.flake.resolvedRef,
+ .attrPath = info.value.attrPath,
.outputs = installable->extendedOutputsSpec,
};
@@ -582,7 +625,10 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf
for (size_t i = 0; i < installables.size(); ++i) {
auto & installable = installables.at(i);
auto & element = manifest.elements[indices.at(i)];
- element.updateStorePaths(getEvalStore(), store, builtPaths[installable.get()].first);
+ element.updateStorePaths(
+ getEvalStore(),
+ store,
+ builtPaths.find(&*installable)->second.first);
}
updateProfile(manifest.build(store));
@@ -798,7 +844,6 @@ struct CmdProfile : NixMultiCommand
{
if (!command)
throw UsageError("'nix profile' requires a sub-command.");
- command->second->prepare();
command->second->run();
}
};
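The new `ProfileElementSource::to_string()`, and the `identifier()` and `toInstallables()` helpers built on it, render a flake-installed element as *originalRef*`#`*attrPath* followed by the outputs spec. A purely illustrative identifier for an element installed from nixpkgs might be:

```console
flake:nixpkgs#legacyPackages.x86_64-linux.hello^out
```

Elements without a flake source fall back to their plain store paths, which is what the reworked conflict error message now prints via `concatStringsSep`.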
diff --git a/src/nix/profile.md b/src/nix/profile.md
index 273e02280..bd13f906f 100644
--- a/src/nix/profile.md
+++ b/src/nix/profile.md
@@ -7,100 +7,39 @@ profile is a set of packages that can be installed and upgraded
independently from each other. Nix profiles are versioned, allowing
them to be rolled back easily.
-# Default profile
+# Files
-The default profile used by `nix profile` is `$HOME/.nix-profile`,
-which, if it does not exist, is created as a symlink to
-`/nix/var/nix/profiles/default` if Nix is invoked by the
-`root` user, or `/nix/var/nix/profiles/per-user/`*username* otherwise.
+)""
-You can specify another profile location using `--profile` *path*.
+#include "doc/files/profiles.md.gen.hh"
-# Filesystem layout
+R""(
-Profiles are versioned as follows. When using profile *path*, *path*
-is a symlink to *path*`-`*N*, where *N* is the current *version* of
-the profile. In turn, *path*`-`*N* is a symlink to a path in the Nix
-store. For example:
+### Profile compatibility
-```console
-$ ls -l /nix/var/nix/profiles/per-user/alice/profile*
-lrwxrwxrwx 1 alice users 14 Nov 25 14:35 /nix/var/nix/profiles/per-user/alice/profile -> profile-7-link
-lrwxrwxrwx 1 alice users 51 Oct 28 16:18 /nix/var/nix/profiles/per-user/alice/profile-5-link -> /nix/store/q69xad13ghpf7ir87h0b2gd28lafjj1j-profile
-lrwxrwxrwx 1 alice users 51 Oct 29 13:20 /nix/var/nix/profiles/per-user/alice/profile-6-link -> /nix/store/6bvhpysd7vwz7k3b0pndn7ifi5xr32dg-profile
-lrwxrwxrwx 1 alice users 51 Nov 25 14:35 /nix/var/nix/profiles/per-user/alice/profile-7-link -> /nix/store/mp0x6xnsg0b8qhswy6riqvimai4gm677-profile
-```
+> **Warning**
+>
+> Once you have used [`nix profile`] you can no longer use [`nix-env`] without first deleting `$XDG_STATE_HOME/nix/profiles/profile`.
-Each of these symlinks is a root for the Nix garbage collector.
+[`nix-env`]: @docroot@/command-ref/nix-env.md
+[`nix profile`]: @docroot@/command-ref/new-cli/nix3-profile.md
-The contents of the store path corresponding to each version of the
-profile is a tree of symlinks to the files of the installed packages,
-e.g.
+Once you have installed a package with [`nix profile`], you will get the following error message when using [`nix-env`]:
```console
-$ ll -R /nix/var/nix/profiles/per-user/eelco/profile-7-link/
-/nix/var/nix/profiles/per-user/eelco/profile-7-link/:
-total 20
-dr-xr-xr-x 2 root root 4096 Jan 1 1970 bin
--r--r--r-- 2 root root 1402 Jan 1 1970 manifest.json
-dr-xr-xr-x 4 root root 4096 Jan 1 1970 share
-
-/nix/var/nix/profiles/per-user/eelco/profile-7-link/bin:
-total 20
-lrwxrwxrwx 5 root root 79 Jan 1 1970 chromium -> /nix/store/ijm5k0zqisvkdwjkc77mb9qzb35xfi4m-chromium-86.0.4240.111/bin/chromium
-lrwxrwxrwx 7 root root 87 Jan 1 1970 spotify -> /nix/store/w9182874m1bl56smps3m5zjj36jhp3rn-spotify-1.1.26.501.gbe11e53b-15/bin/spotify
-lrwxrwxrwx 3 root root 79 Jan 1 1970 zoom-us -> /nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927/bin/zoom-us
-
-/nix/var/nix/profiles/per-user/eelco/profile-7-link/share/applications:
-total 12
-lrwxrwxrwx 4 root root 120 Jan 1 1970 chromium-browser.desktop -> /nix/store/4cf803y4vzfm3gyk3vzhzb2327v0kl8a-chromium-unwrapped-86.0.4240.111/share/applications/chromium-browser.desktop
-lrwxrwxrwx 7 root root 110 Jan 1 1970 spotify.desktop -> /nix/store/w9182874m1bl56smps3m5zjj36jhp3rn-spotify-1.1.26.501.gbe11e53b-15/share/applications/spotify.desktop
-lrwxrwxrwx 3 root root 107 Jan 1 1970 us.zoom.Zoom.desktop -> /nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927/share/applications/us.zoom.Zoom.desktop
-
-…
+$ nix-env -f '<nixpkgs>' -iA 'hello'
+error: nix-env
+profile '/home/alice/.local/state/nix/profiles/profile' is incompatible with 'nix-env'; please use 'nix profile' instead
```
-The file `manifest.json` records the provenance of the packages that
-are installed in this version of the profile. It looks like this:
+To migrate back to `nix-env` you can delete your current profile:
-```json
-{
- "version": 1,
- "elements": [
- {
- "active": true,
- "attrPath": "legacyPackages.x86_64-linux.zoom-us",
- "originalUrl": "flake:nixpkgs",
- "storePaths": [
- "/nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927"
- ],
- "uri": "github:NixOS/nixpkgs/13d0c311e3ae923a00f734b43fd1d35b47d8943a"
- },
- …
- ]
-}
-```
+> **Warning**
+>
+> This will delete packages that have been installed before, so you may want to back up this information before running the command.
-Each object in the array `elements` denotes an installed package and
-has the following fields:
-
-* `originalUrl`: The [flake reference](./nix3-flake.md) specified by
- the user at the time of installation (e.g. `nixpkgs`). This is also
- the flake reference that will be used by `nix profile upgrade`.
-
-* `uri`: The locked flake reference to which `originalUrl` resolved.
-
-* `attrPath`: The flake output attribute that provided this
- package. Note that this is not necessarily the attribute that the
- user specified, but the one resulting from applying the default
- attribute paths and prefixes; for instance, `hello` might resolve to
- `packages.x86_64-linux.hello` and the empty string to
- `packages.x86_64-linux.default`.
-
-* `storePath`: The paths in the Nix store containing the package.
-
-* `active`: Whether the profile contains symlinks to the files of this
- package. If set to false, the package is kept in the Nix store, but
- is not "visible" in the profile's symlink tree.
+```console
+ $ rm -rf "${XDG_STATE_HOME-$HOME/.local/state}/nix/profiles/profile"
+```
)""
diff --git a/src/nix/realisation.cc b/src/nix/realisation.cc
index 0d3466515..e19e93219 100644
--- a/src/nix/realisation.cc
+++ b/src/nix/realisation.cc
@@ -21,7 +21,6 @@ struct CmdRealisation : virtual NixMultiCommand
{
if (!command)
throw UsageError("'nix realisation' requires a sub-command.");
- command->second->prepare();
command->second->run();
}
};
@@ -46,7 +45,7 @@ struct CmdRealisationInfo : BuiltPathsCommand, MixJSON
void run(ref<Store> store, BuiltPaths && paths) override
{
- settings.requireExperimentalFeature(Xp::CaDerivations);
+ experimentalFeatureSettings.require(Xp::CaDerivations);
RealisedPath::Set realisations;
for (auto & builtPath : paths) {
diff --git a/src/nix/registry.cc b/src/nix/registry.cc
index b5bdfba95..cb94bbd31 100644
--- a/src/nix/registry.cc
+++ b/src/nix/registry.cc
@@ -224,10 +224,9 @@ struct CmdRegistry : virtual NixMultiCommand
void run() override
{
- settings.requireExperimentalFeature(Xp::Flakes);
+ experimentalFeatureSettings.require(Xp::Flakes);
if (!command)
throw UsageError("'nix registry' requires a sub-command.");
- command->second->prepare();
command->second->run();
}
};
diff --git a/src/nix/repl.cc b/src/nix/repl.cc
index 679bdea77..bb14f3f99 100644
--- a/src/nix/repl.cc
+++ b/src/nix/repl.cc
@@ -1,28 +1,23 @@
#include "eval.hh"
#include "globals.hh"
#include "command.hh"
+#include "installable-value.hh"
#include "repl.hh"
namespace nix {
-struct CmdRepl : InstallablesCommand
+struct CmdRepl : RawInstallablesCommand
{
CmdRepl() {
evalSettings.pureEval = false;
}
- void prepare() override
+ /**
+ * This command is stable before the others
+ */
+ std::optional<ExperimentalFeature> experimentalFeature() override
{
- if (!settings.isExperimentalFeatureEnabled(Xp::ReplFlake) && !(file) && this->_installables.size() >= 1) {
- warn("future versions of Nix will require using `--file` to load a file");
- if (this->_installables.size() > 1)
- warn("more than one input file is not currently supported");
- auto filePath = this->_installables[0].data();
- file = std::optional(filePath);
- _installables.front() = _installables.back();
- _installables.pop_back();
- }
- installables = InstallablesCommand::load();
+ return std::nullopt;
}
std::vector<std::string> files;
@@ -32,11 +27,6 @@ struct CmdRepl : InstallablesCommand
return {""};
}
- bool useDefaultInstallables() override
- {
- return file.has_value() or expr.has_value();
- }
-
bool forceImpureByDefault() override
{
return true;
@@ -54,17 +44,34 @@ struct CmdRepl : InstallablesCommand
;
}
- void run(ref<Store> store) override
+ void applyDefaultInstallables(std::vector<std::string> & rawInstallables) override
+ {
+ if (!experimentalFeatureSettings.isEnabled(Xp::ReplFlake) && !(file) && rawInstallables.size() >= 1) {
+ warn("future versions of Nix will require using `--file` to load a file");
+ if (rawInstallables.size() > 1)
+ warn("more than one input file is not currently supported");
+ auto filePath = rawInstallables[0].data();
+ file = std::optional(filePath);
+ rawInstallables.front() = rawInstallables.back();
+ rawInstallables.pop_back();
+ }
+ if (rawInstallables.empty() && (file.has_value() || expr.has_value())) {
+ rawInstallables.push_back(".");
+ }
+ }
+
+ void run(ref<Store> store, std::vector<std::string> && rawInstallables) override
{
auto state = getEvalState();
auto getValues = [&]()->AbstractNixRepl::AnnotatedValues{
- auto installables = load();
+ auto installables = parseInstallables(store, rawInstallables);
AbstractNixRepl::AnnotatedValues values;
- for (auto & installable: installables){
- auto what = installable->what();
+ for (auto & installable_: installables){
+ auto & installable = InstallableValue::require(*installable_);
+ auto what = installable.what();
if (file){
- auto [val, pos] = installable->toValue(*state);
- auto what = installable->what();
+ auto [val, pos] = installable.toValue(*state);
+ auto what = installable.what();
state->forceValue(*val, pos);
auto autoArgs = getAutoArgs(*state);
auto valPost = state->allocValue();
@@ -72,7 +79,7 @@ struct CmdRepl : InstallablesCommand
state->forceValue(*valPost, pos);
values.push_back( {valPost, what });
} else {
- auto [val, pos] = installable->toValue(*state);
+ auto [val, pos] = installable.toValue(*state);
values.push_back( {val, what} );
}
}
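The `applyDefaultInstallables` override preserves the legacy behaviour: until the `repl-flake` experimental feature is enabled, a bare argument is still treated as a file to load, now with the deprecation warning shown in the code above. Assuming a local `shell.nix` (a hypothetical file; output not captured verbatim), a session would start roughly like:

```console
# nix repl ./shell.nix
warning: future versions of Nix will require using `--file` to load a file
```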
diff --git a/src/nix/run.cc b/src/nix/run.cc
index 6fca68047..1baf299ab 100644
--- a/src/nix/run.cc
+++ b/src/nix/run.cc
@@ -1,5 +1,5 @@
#include "run.hh"
-#include "command.hh"
+#include "command-installable-value.hh"
#include "common-args.hh"
#include "shared.hh"
#include "store-api.hh"
@@ -97,7 +97,7 @@ struct CmdShell : InstallablesCommand, MixEnvironment
;
}
- void run(ref<Store> store) override
+ void run(ref<Store> store, Installables && installables) override
{
auto outPaths = Installable::toStorePaths(getEvalStore(), store, Realise::Outputs, OperateOn::Output, installables);
@@ -137,7 +137,7 @@ struct CmdShell : InstallablesCommand, MixEnvironment
static auto rCmdShell = registerCommand<CmdShell>("shell");
-struct CmdRun : InstallableCommand
+struct CmdRun : InstallableValueCommand
{
using InstallableCommand::run;
@@ -183,7 +183,7 @@ struct CmdRun : InstallableCommand
return res;
}
- void run(ref<Store> store) override
+ void run(ref<Store> store, ref<InstallableValue> installable) override
{
auto state = getEvalState();
diff --git a/src/nix/run.hh b/src/nix/run.hh
index fed360158..97ddef19b 100644
--- a/src/nix/run.hh
+++ b/src/nix/run.hh
@@ -1,4 +1,5 @@
#pragma once
+///@file
#include "store-api.hh"
diff --git a/src/nix/run.md b/src/nix/run.md
index a0f362076..250ea65aa 100644
--- a/src/nix/run.md
+++ b/src/nix/run.md
@@ -35,7 +35,7 @@ R""(
# Description
-`nix run` builds and runs *installable*, which must evaluate to an
+`nix run` builds and runs [*installable*](./nix.md#installables), which must evaluate to an
*app* or a regular Nix derivation.
If *installable* evaluates to an *app* (see below), it executes the
diff --git a/src/nix/search.cc b/src/nix/search.cc
index 2e38f7e4b..c92ed1663 100644
--- a/src/nix/search.cc
+++ b/src/nix/search.cc
@@ -1,4 +1,4 @@
-#include "command.hh"
+#include "command-installable-value.hh"
#include "globals.hh"
#include "eval.hh"
#include "eval-inline.hh"
@@ -22,7 +22,7 @@ std::string wrap(std::string prefix, std::string s)
return concatStrings(prefix, s, ANSI_NORMAL);
}
-struct CmdSearch : InstallableCommand, MixJSON
+struct CmdSearch : InstallableValueCommand, MixJSON
{
std::vector<std::string> res;
std::vector<std::string> excludeRes;
@@ -61,7 +61,7 @@ struct CmdSearch : InstallableCommand, MixJSON
};
}
- void run(ref<Store> store) override
+ void run(ref<Store> store, ref<InstallableValue> installable) override
{
settings.readOnlyMode = true;
evalSettings.enableImportFromDerivation.setDefault(false);
diff --git a/src/nix/search.md b/src/nix/search.md
index 5a5b5ae05..0c5d22549 100644
--- a/src/nix/search.md
+++ b/src/nix/search.md
@@ -52,20 +52,20 @@ R""(
* Search for packages containing `neovim` but hide ones containing either `gui` or `python`:
```console
- # nix search nixpkgs neovim -e 'python|gui'
+ # nix search nixpkgs neovim --exclude 'python|gui'
```
or
```console
- # nix search nixpkgs neovim -e 'python' -e 'gui'
+ # nix search nixpkgs neovim --exclude 'python' --exclude 'gui'
```
# Description
-`nix search` searches *installable* (which must be evaluatable, e.g. a
-flake) for packages whose name or description matches all of the
+`nix search` searches [*installable*](./nix.md#installables) (which can be evaluated, that is, a
+flake or Nix expression, but not a store path or store derivation path) for packages whose name or description matches all of the
regular expressions *regex*. For each matching package, it prints the
-full attribute name (from the root of the installable), the version
+full attribute name (from the root of the [installable](./nix.md#installables)), the version
and the `meta.description` field, highlighting the substrings that
were matched by the regular expressions. If no regular expressions are
specified, all packages are shown.
diff --git a/src/nix/shell.md b/src/nix/shell.md
index 9fa1031f5..1668104b1 100644
--- a/src/nix/shell.md
+++ b/src/nix/shell.md
@@ -19,26 +19,26 @@ R""(
* Run GNU Hello:
```console
- # nix shell nixpkgs#hello -c hello --greeting 'Hi everybody!'
+ # nix shell nixpkgs#hello --command hello --greeting 'Hi everybody!'
Hi everybody!
```
* Run multiple commands in a shell environment:
```console
- # nix shell nixpkgs#gnumake -c sh -c "cd src && make"
+ # nix shell nixpkgs#gnumake --command sh --command "cd src && make"
```
* Run GNU Hello in a chroot store:
```console
- # nix shell --store ~/my-nix nixpkgs#hello -c hello
+ # nix shell --store ~/my-nix nixpkgs#hello --command hello
```
* Start a shell providing GNU Hello in a chroot store:
```console
- # nix shell --store ~/my-nix nixpkgs#hello nixpkgs#bashInteractive -c bash
+ # nix shell --store ~/my-nix nixpkgs#hello nixpkgs#bashInteractive --command bash
```
Note that it's necessary to specify `bash` explicitly because your
@@ -48,7 +48,7 @@ R""(
# Description
`nix shell` runs a command in an environment in which the `$PATH` variable
-provides the specified *installables*. If no command is specified, it starts the
+provides the specified [*installables*](./nix.md#installables). If no command is specified, it starts the
default shell of your user account specified by `$SHELL`.
)""
diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc
index ee27e3725..45cd2e1a6 100644
--- a/src/nix/sigs.cc
+++ b/src/nix/sigs.cc
@@ -45,7 +45,7 @@ struct CmdCopySigs : StorePathsCommand
//logger->setExpected(doneLabel, storePaths.size());
auto doPath = [&](const Path & storePathS) {
- //Activity act(*logger, lvlInfo, format("getting signatures for '%s'") % storePath);
+ //Activity act(*logger, lvlInfo, "getting signatures for '%s'", storePath);
checkInterrupt();
@@ -219,7 +219,6 @@ struct CmdKey : NixMultiCommand
{
if (!command)
throw UsageError("'nix key' requires a sub-command.");
- command->second->prepare();
command->second->run();
}
};
diff --git a/src/nix/store-copy-log.cc b/src/nix/store-copy-log.cc
index d5fab5f2f..a6e8aeff7 100644
--- a/src/nix/store-copy-log.cc
+++ b/src/nix/store-copy-log.cc
@@ -24,9 +24,7 @@ struct CmdCopyLog : virtual CopyCommand, virtual InstallablesCommand
;
}
- Category category() override { return catUtility; }
-
- void run(ref<Store> srcStore) override
+ void run(ref<Store> srcStore, Installables && installables) override
{
auto & srcLogStore = require<LogStore>(*srcStore);
diff --git a/src/nix/store-delete.cc b/src/nix/store-delete.cc
index ca43f1530..6719227df 100644
--- a/src/nix/store-delete.cc
+++ b/src/nix/store-delete.cc
@@ -32,7 +32,7 @@ struct CmdStoreDelete : StorePathsCommand
;
}
- void run(ref<Store> store, std::vector<StorePath> && storePaths) override
+ void run(ref<Store> store, StorePaths && storePaths) override
{
auto & gcStore = require<GcStore>(*store);
diff --git a/src/nix/store-delete.md b/src/nix/store-delete.md
index db535f87c..431bc5f5e 100644
--- a/src/nix/store-delete.md
+++ b/src/nix/store-delete.md
@@ -10,7 +10,7 @@ R""(
# Description
-This command deletes the store paths specified by *installables*. ,
+This command deletes the store paths specified by [*installables*](./nix.md#installables),
but only if it is safe to do so; that is, when the path is not
reachable from a root of the garbage collector. This means that you
can only delete paths that would also be deleted by `nix store
diff --git a/src/nix/store-dump-path.md b/src/nix/store-dump-path.md
index 4ef563526..56e2174b6 100644
--- a/src/nix/store-dump-path.md
+++ b/src/nix/store-dump-path.md
@@ -18,6 +18,6 @@ R""(
# Description
This command generates a NAR file containing the serialisation of the
-store path *installable*. The NAR is written to standard output.
+store path [*installable*](./nix.md#installables). The NAR is written to standard output.
)""
diff --git a/src/nix/store-ls.md b/src/nix/store-ls.md
index 836efce42..14c4627c9 100644
--- a/src/nix/store-ls.md
+++ b/src/nix/store-ls.md
@@ -5,7 +5,7 @@ R""(
* To list the contents of a store path in a binary cache:
```console
- # nix store ls --store https://cache.nixos.org/ -lR /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10
+ # nix store ls --store https://cache.nixos.org/ --long --recursive /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10
dr-xr-xr-x 0 ./bin
-r-xr-xr-x 38184 ./bin/hello
dr-xr-xr-x 0 ./share
@@ -15,7 +15,7 @@ R""(
* To show information about a specific file in a binary cache:
```console
- # nix store ls --store https://cache.nixos.org/ -l /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10/bin/hello
+ # nix store ls --store https://cache.nixos.org/ --long /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10/bin/hello
-r-xr-xr-x 38184 hello
```
diff --git a/src/nix/store-repair.cc b/src/nix/store-repair.cc
index 8fcb3639a..895e39685 100644
--- a/src/nix/store-repair.cc
+++ b/src/nix/store-repair.cc
@@ -17,7 +17,7 @@ struct CmdStoreRepair : StorePathsCommand
;
}
- void run(ref<Store> store, std::vector<StorePath> && storePaths) override
+ void run(ref<Store> store, StorePaths && storePaths) override
{
for (auto & path : storePaths)
store->repairPath(path);
diff --git a/src/nix/store-repair.md b/src/nix/store-repair.md
index 92d2205a9..180c577ac 100644
--- a/src/nix/store-repair.md
+++ b/src/nix/store-repair.md
@@ -17,7 +17,7 @@ R""(
# Description
This command attempts to "repair" the store paths specified by
-*installables* by redownloading them using the available
+[*installables*](./nix.md#installables) by redownloading them using the available
substituters. If no substitutes are available, then repair is not
possible.
diff --git a/src/nix/store.cc b/src/nix/store.cc
index 44e53c7c7..2879e03b3 100644
--- a/src/nix/store.cc
+++ b/src/nix/store.cc
@@ -18,7 +18,6 @@ struct CmdStore : virtual NixMultiCommand
{
if (!command)
throw UsageError("'nix store' requires a sub-command.");
- command->second->prepare();
command->second->run();
}
};
diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc
index 17796d6b8..3997c98bf 100644
--- a/src/nix/upgrade-nix.cc
+++ b/src/nix/upgrade-nix.cc
@@ -32,6 +32,14 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand
});
}
+ /**
+ * This command is stable before the others
+ */
+ std::optional<ExperimentalFeature> experimentalFeature() override
+ {
+ return std::nullopt;
+ }
+
std::string description() override
{
return "upgrade Nix to the stable version declared in Nixpkgs";
@@ -140,7 +148,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand
auto state = std::make_unique<EvalState>(Strings(), store);
auto v = state->allocValue();
- state->eval(state->parseExprFromString(res.data, "/no-such-path"), *v);
+ state->eval(state->parseExprFromString(res.data, state->rootPath(CanonPath("/no-such-path"))), *v);
Bindings & bindings(*state->allocBindings(0));
auto v2 = findAlongAttrPath(*state, settings.thisSystem, bindings, *v).first;
diff --git a/src/nix/upgrade-nix.md b/src/nix/upgrade-nix.md
index 084c80ba2..cce88c397 100644
--- a/src/nix/upgrade-nix.md
+++ b/src/nix/upgrade-nix.md
@@ -11,7 +11,7 @@ R""(
* Upgrade Nix in a specific profile:
```console
- # nix upgrade-nix -p /nix/var/nix/profiles/per-user/alice/profile
+ # nix upgrade-nix --profile ~alice/.local/state/nix/profiles/profile
```
# Description
diff --git a/src/nix/verify.md b/src/nix/verify.md
index 1c43792e7..e1d55eab4 100644
--- a/src/nix/verify.md
+++ b/src/nix/verify.md
@@ -12,7 +12,7 @@ R""(
signatures:
```console
- # nix store verify -r -n2 --no-contents $(type -p firefox)
+ # nix store verify --recursive --sigs-needed 2 --no-contents $(type -p firefox)
```
* Verify a store path in the binary cache `https://cache.nixos.org/`:
@@ -24,7 +24,7 @@ R""(
# Description
-This command verifies the integrity of the store paths *installables*,
+This command verifies the integrity of the store paths [*installables*](./nix.md#installables),
or, if `--all` is given, the entire Nix store. For each path, it
checks that
diff --git a/src/resolve-system-dependencies/resolve-system-dependencies.cc b/src/resolve-system-dependencies/resolve-system-dependencies.cc
index c6023eb03..4ea268d24 100644
--- a/src/resolve-system-dependencies/resolve-system-dependencies.cc
+++ b/src/resolve-system-dependencies/resolve-system-dependencies.cc
@@ -157,13 +157,9 @@ int main(int argc, char ** argv)
uname(&_uname);
- auto cacheParentDir = (format("%1%/dependency-maps") % settings.nixStateDir).str();
+ auto cacheParentDir = fmt("%1%/dependency-maps", settings.nixStateDir);
- cacheDir = (format("%1%/%2%-%3%-%4%")
- % cacheParentDir
- % _uname.machine
- % _uname.sysname
- % _uname.release).str();
+ cacheDir = fmt("%1%/%2%-%3%-%4%", cacheParentDir, _uname.machine, _uname.sysname, _uname.release);
mkdir(cacheParentDir.c_str(), 0755);
mkdir(cacheDir.c_str(), 0755);
diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh
index b38db8a15..7c64a115c 100644
--- a/tests/binary-cache.sh
+++ b/tests/binary-cache.sh
@@ -15,11 +15,11 @@ outPath=$(nix-build dependencies.nix --no-out-link)
nix copy --to file://$cacheDir $outPath
# Test copying build logs to the binary cache.
-nix log --store file://$cacheDir $outPath 2>&1 | grep 'is not available'
+expect 1 nix log --store file://$cacheDir $outPath 2>&1 | grep 'is not available'
nix store copy-log --to file://$cacheDir $outPath
nix log --store file://$cacheDir $outPath | grep FOO
rm -rf $TEST_ROOT/var/log/nix
-nix log $outPath 2>&1 | grep 'is not available'
+expect 1 nix log $outPath 2>&1 | grep 'is not available'
nix log --substituters file://$cacheDir $outPath | grep FOO
# Test copying build logs from the binary cache.
@@ -78,8 +78,8 @@ mv $nar $nar.good
mkdir -p $TEST_ROOT/empty
nix-store --dump $TEST_ROOT/empty | xz > $nar
-nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
-grep -q "hash mismatch" $TEST_ROOT/log
+expect 1 nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
+grepQuiet "hash mismatch" $TEST_ROOT/log
mv $nar.good $nar
@@ -126,9 +126,9 @@ clearStore
rm -v $(grep -l "StorePath:.*dependencies-input-2" $cacheDir/*.narinfo)
nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
-grep -q "copying path.*input-0" $TEST_ROOT/log
-grep -q "copying path.*input-2" $TEST_ROOT/log
-grep -q "copying path.*top" $TEST_ROOT/log
+grepQuiet "copying path.*input-0" $TEST_ROOT/log
+grepQuiet "copying path.*input-2" $TEST_ROOT/log
+grepQuiet "copying path.*top" $TEST_ROOT/log
# Idem, but without cached .narinfo.
@@ -136,11 +136,11 @@ clearStore
clearCacheCache
nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
-grep -q "don't know how to build" $TEST_ROOT/log
-grep -q "building.*input-1" $TEST_ROOT/log
-grep -q "building.*input-2" $TEST_ROOT/log
-grep -q "copying path.*input-0" $TEST_ROOT/log
-grep -q "copying path.*top" $TEST_ROOT/log
+grepQuiet "don't know how to build" $TEST_ROOT/log
+grepQuiet "building.*input-1" $TEST_ROOT/log
+grepQuiet "building.*input-2" $TEST_ROOT/log
+grepQuiet "copying path.*input-0" $TEST_ROOT/log
+grepQuiet "copying path.*top" $TEST_ROOT/log
# Create a signed binary cache.
diff --git a/tests/build-delete.sh b/tests/build-delete.sh
index 636681f64..9c56b00e8 100644
--- a/tests/build-delete.sh
+++ b/tests/build-delete.sh
@@ -2,8 +2,6 @@ source common.sh
clearStore
-set -o pipefail
-
# https://github.com/NixOS/nix/issues/6572
issue_6572_independent_outputs() {
nix build -f multiple-outputs.nix --json independent --no-link > $TEST_ROOT/independent.json
diff --git a/tests/build-dry.sh b/tests/build-dry.sh
index 5f29239dc..6d1754af5 100644
--- a/tests/build-dry.sh
+++ b/tests/build-dry.sh
@@ -54,7 +54,7 @@ clearCache
RES=$(nix build -f dependencies.nix --dry-run --json)
-if [[ -z "$NIX_TESTS_CA_BY_DEFAULT" ]]; then
+if [[ -z "${NIX_TESTS_CA_BY_DEFAULT-}" ]]; then
echo "$RES" | jq '.[0] | [
(.drvPath | test("'$NIX_STORE_DIR'.*\\.drv")),
(.outputs.out | test("'$NIX_STORE_DIR'"))
diff --git a/tests/build-remote-trustless-after.sh b/tests/build-remote-trustless-after.sh
new file mode 100644
index 000000000..19f59e6ae
--- /dev/null
+++ b/tests/build-remote-trustless-after.sh
@@ -0,0 +1,2 @@
+outPath=$(readlink -f $TEST_ROOT/result)
+grep 'FOO BAR BAZ' ${remoteDir}/${outPath}
diff --git a/tests/build-remote-trustless-should-fail-0.sh b/tests/build-remote-trustless-should-fail-0.sh
new file mode 100644
index 000000000..fad1def59
--- /dev/null
+++ b/tests/build-remote-trustless-should-fail-0.sh
@@ -0,0 +1,29 @@
+source common.sh
+
+enableFeatures "daemon-trust-override"
+
+restartDaemon
+
+[[ $busybox =~ busybox ]] || skipTest "no busybox"
+
+unset NIX_STORE_DIR
+unset NIX_STATE_DIR
+
+# We first build a dependency of the derivation we eventually want to
+# build.
+nix-build build-hook.nix -A passthru.input2 \
+ -o "$TEST_ROOT/input2" \
+ --arg busybox "$busybox" \
+ --store "$TEST_ROOT/local" \
+ --option system-features bar
+
+# Now when we go to build that downstream derivation, Nix will try to
+# copy our already-built `input2` to the remote store. That store object
+# is input-addressed, so this will fail.
+
+file=build-hook.nix
+prog=$(readlink -e ./nix-daemon-untrusting.sh)
+proto=ssh-ng
+
+expectStderr 1 source build-remote-trustless.sh \
+ | grepQuiet "cannot add path '[^ ]*' because it lacks a signature by a trusted key"
diff --git a/tests/build-remote-trustless-should-pass-0.sh b/tests/build-remote-trustless-should-pass-0.sh
new file mode 100644
index 000000000..2a7ebd8c6
--- /dev/null
+++ b/tests/build-remote-trustless-should-pass-0.sh
@@ -0,0 +1,9 @@
+source common.sh
+
+# Remote trusts us
+file=build-hook.nix
+prog=nix-store
+proto=ssh
+
+source build-remote-trustless.sh
+source build-remote-trustless-after.sh
diff --git a/tests/build-remote-trustless-should-pass-1.sh b/tests/build-remote-trustless-should-pass-1.sh
new file mode 100644
index 000000000..516bdf092
--- /dev/null
+++ b/tests/build-remote-trustless-should-pass-1.sh
@@ -0,0 +1,9 @@
+source common.sh
+
+# Remote trusts us
+file=build-hook.nix
+prog=nix-daemon
+proto=ssh-ng
+
+source build-remote-trustless.sh
+source build-remote-trustless-after.sh
diff --git a/tests/build-remote-trustless-should-pass-2.sh b/tests/build-remote-trustless-should-pass-2.sh
new file mode 100644
index 000000000..b769a88f0
--- /dev/null
+++ b/tests/build-remote-trustless-should-pass-2.sh
@@ -0,0 +1,13 @@
+source common.sh
+
+enableFeatures "daemon-trust-override"
+
+restartDaemon
+
+# Remote doesn't trust us
+file=build-hook.nix
+prog=$(readlink -e ./nix-daemon-untrusting.sh)
+proto=ssh-ng
+
+source build-remote-trustless.sh
+source build-remote-trustless-after.sh
diff --git a/tests/build-remote-trustless-should-pass-3.sh b/tests/build-remote-trustless-should-pass-3.sh
new file mode 100644
index 000000000..40f81da5a
--- /dev/null
+++ b/tests/build-remote-trustless-should-pass-3.sh
@@ -0,0 +1,14 @@
+source common.sh
+
+enableFeatures "daemon-trust-override"
+
+restartDaemon
+
+# Remote doesn't trust us, but this is fine because we are only
+# building (fixed) CA derivations.
+file=build-hook-ca-fixed.nix
+prog=$(readlink -e ./nix-daemon-untrusting.sh)
+proto=ssh-ng
+
+source build-remote-trustless.sh
+source build-remote-trustless-after.sh
diff --git a/tests/build-remote-trustless.sh b/tests/build-remote-trustless.sh
new file mode 100644
index 000000000..9df44e0c5
--- /dev/null
+++ b/tests/build-remote-trustless.sh
@@ -0,0 +1,14 @@
+requireSandboxSupport
+[[ $busybox =~ busybox ]] || skipTest "no busybox"
+
+unset NIX_STORE_DIR
+unset NIX_STATE_DIR
+
+remoteDir=$TEST_ROOT/remote
+
+# Note: ssh{-ng}://localhost bypasses ssh. See tests/build-remote.sh for
+# more details.
+nix-build $file -o $TEST_ROOT/result --max-jobs 0 \
+ --arg busybox $busybox \
+ --store $TEST_ROOT/local \
+ --builders "$proto://localhost?remote-program=$prog&remote-store=${remoteDir}%3Fsystem-features=foo%20bar%20baz - - 1 1 foo,bar,baz"
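As an informal aid (not part of the patch), the `--builders` value above can be unpacked as follows; the trailing columns follow the usual builder-specification format:

```bash
# $proto://localhost          connect to "ourselves", bypassing ssh (see the note above)
#   ?remote-program=$prog     run the untrusting nix-daemon wrapper instead of the default
#   &remote-store=$remoteDir?system-features=foo bar baz   (%3F and %20 decode to ? and space)
# - -                         no platform restriction, no SSH identity file
# 1 1                         max jobs and speed factor
# foo,bar,baz                 supported system features advertised by this builder
```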
diff --git a/tests/build-remote.sh b/tests/build-remote.sh
index 25a482003..78e12b477 100644
--- a/tests/build-remote.sh
+++ b/tests/build-remote.sh
@@ -1,5 +1,5 @@
-if ! canUseSandbox; then exit 99; fi
-if ! [[ $busybox =~ busybox ]]; then exit 99; fi
+requireSandboxSupport
+[[ $busybox =~ busybox ]] || skipTest "no busybox"
unset NIX_STORE_DIR
unset NIX_STATE_DIR
@@ -7,7 +7,7 @@ unset NIX_STATE_DIR
function join_by { local d=$1; shift; echo -n "$1"; shift; printf "%s" "${@/#/$d}"; }
EXTRA_SYSTEM_FEATURES=()
-if [[ -n "$CONTENT_ADDRESSED" ]]; then
+if [[ -n "${CONTENT_ADDRESSED-}" ]]; then
EXTRA_SYSTEM_FEATURES=("ca-derivations")
fi
@@ -42,25 +42,26 @@ testPrintOutPath=$(nix build -L -v -f $file --no-link --print-out-paths --max-jo
[[ $testPrintOutPath =~ store.*build-remote ]]
-set -o pipefail
-
# Ensure that input1 was built on store1 due to the required feature.
-nix path-info --store $TEST_ROOT/machine1 --all \
- | grep builder-build-remote-input-1.sh \
- | grep -v builder-build-remote-input-2.sh \
- | grep -v builder-build-remote-input-3.sh
+output=$(nix path-info --store $TEST_ROOT/machine1 --all)
+echo "$output" | grepQuiet builder-build-remote-input-1.sh
+echo "$output" | grepQuietInverse builder-build-remote-input-2.sh
+echo "$output" | grepQuietInverse builder-build-remote-input-3.sh
+unset output
# Ensure that input2 was built on store2 due to the required feature.
-nix path-info --store $TEST_ROOT/machine2 --all \
- | grep -v builder-build-remote-input-1.sh \
- | grep builder-build-remote-input-2.sh \
- | grep -v builder-build-remote-input-3.sh
+output=$(nix path-info --store $TEST_ROOT/machine2 --all)
+echo "$output" | grepQuietInverse builder-build-remote-input-1.sh
+echo "$output" | grepQuiet builder-build-remote-input-2.sh
+echo "$output" | grepQuietInverse builder-build-remote-input-3.sh
+unset output
# Ensure that input3 was built on store3 due to the required feature.
-nix path-info --store $TEST_ROOT/machine3 --all \
- | grep -v builder-build-remote-input-1.sh \
- | grep -v builder-build-remote-input-2.sh \
- | grep builder-build-remote-input-3.sh
+output=$(nix path-info --store $TEST_ROOT/machine3 --all)
+echo "$output" | grepQuietInverse builder-build-remote-input-1.sh
+echo "$output" | grepQuietInverse builder-build-remote-input-2.sh
+echo "$output" | grepQuiet builder-build-remote-input-3.sh
+unset output
for i in input1 input3; do
diff --git a/tests/build.sh b/tests/build.sh
index 2dfd43b65..697aff0f9 100644
--- a/tests/build.sh
+++ b/tests/build.sh
@@ -2,8 +2,6 @@ source common.sh
clearStore
-set -o pipefail
-
# Make sure that 'nix build' returns all outputs by default.
nix build -f multiple-outputs.nix --json a b --no-link | jq --exit-status '
(.[0] |
@@ -59,6 +57,30 @@ nix build -f multiple-outputs.nix --json 'e^*' --no-link | jq --exit-status '
(.outputs | keys == ["a_a", "b", "c"]))
'
+# Test building from a non-drv attr path
+
+nix build -f multiple-outputs.nix --json 'e.a_a.outPath' --no-link | jq --exit-status '
+ (.[0] |
+ (.drvPath | match(".*multiple-outputs-e.drv")) and
+ (.outputs | keys == ["a_a"]))
+'
+
+# Illegal type of string context
+expectStderr 1 nix build -f multiple-outputs.nix 'e.a_a.drvPath' \
+ | grepQuiet "has a context which refers to a complete source and binary closure."
+
+# No string context
+expectStderr 1 nix build --expr '""' --no-link \
+ | grepQuiet "has 0 entries in its context. It should only have exactly one entry"
+
+# Too much string context
+expectStderr 1 nix build --impure --expr 'with (import ./multiple-outputs.nix).e.a_a; "${drvPath}${outPath}"' --no-link \
+ | grepQuiet "has 2 entries in its context. It should only have exactly one entry"
+
+nix build --impure --json --expr 'builtins.unsafeDiscardOutputDependency (import ./multiple-outputs.nix).e.a_a.drvPath' --no-link | jq --exit-status '
+ (.[0] | .path | match(".*multiple-outputs-e.drv"))
+'
+
# Test building from raw store path to drv not expression.
drv=$(nix eval -f multiple-outputs.nix --raw a.drvPath)
diff --git a/tests/ca/build.sh b/tests/ca/build.sh
index 8783525e7..7754ad276 100644
--- a/tests/ca/build.sh
+++ b/tests/ca/build.sh
@@ -3,7 +3,7 @@
source common.sh
drv=$(nix-instantiate ./content-addressed.nix -A rootCA --arg seed 1)
-nix show-derivation "$drv" --arg seed 1
+nix derivation show "$drv" --arg seed 1
buildAttr () {
local derivationPath=$1
@@ -19,7 +19,7 @@ testRemoteCache () {
local outPath=$(buildAttr dependentNonCA 1)
nix copy --to file://$cacheDir $outPath
clearStore
- buildAttr dependentNonCA 1 --option substituters file://$cacheDir --no-require-sigs |& (! grep "building dependent-non-ca")
+ buildAttr dependentNonCA 1 --option substituters file://$cacheDir --no-require-sigs |& grepQuietInverse "building dependent-non-ca"
}
testDeterministicCA () {
diff --git a/tests/ca/derivation-json.sh b/tests/ca/derivation-json.sh
new file mode 100644
index 000000000..c1480fd17
--- /dev/null
+++ b/tests/ca/derivation-json.sh
@@ -0,0 +1,29 @@
+source common.sh
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+
+drvPath=$(nix-instantiate ../simple.nix)
+
+nix derivation show $drvPath | jq .[] > $TEST_HOME/simple.json
+
+drvPath2=$(nix derivation add < $TEST_HOME/simple.json)
+
+[[ "$drvPath" = "$drvPath2" ]]
+
+# Content-addressed derivations can be renamed.
+jq '.name = "foo"' < $TEST_HOME/simple.json > $TEST_HOME/foo.json
+drvPath3=$(nix derivation add --dry-run < $TEST_HOME/foo.json)
+# With --dry-run nothing is actually written
+[[ ! -e "$drvPath3" ]]
+
+# But the JSON is rejected without the experimental feature
+expectStderr 1 nix derivation add < $TEST_HOME/foo.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'ca-derivations' is disabled"
+
+# Without --dry-run it is actually written
+drvPath4=$(nix derivation add < $TEST_HOME/foo.json)
+[[ "$drvPath4" = "$drvPath3" ]]
+[[ -e "$drvPath3" ]]
+
+# The modified derivation read back as JSON matches
+nix derivation show $drvPath3 | jq .[] > $TEST_HOME/foo-read.json
+diff $TEST_HOME/foo.json $TEST_HOME/foo-read.json
diff --git a/tests/check-refs.sh b/tests/check-refs.sh
index 65a72552a..2778e491d 100644
--- a/tests/check-refs.sh
+++ b/tests/check-refs.sh
@@ -8,14 +8,14 @@ dep=$(nix-build -o $RESULT check-refs.nix -A dep)
# test1 references dep, not itself.
test1=$(nix-build -o $RESULT check-refs.nix -A test1)
-(! nix-store -q --references $test1 | grep -q $test1)
-nix-store -q --references $test1 | grep -q $dep
+nix-store -q --references $test1 | grepQuietInverse $test1
+nix-store -q --references $test1 | grepQuiet $dep
# test2 references src, not itself nor dep.
test2=$(nix-build -o $RESULT check-refs.nix -A test2)
-(! nix-store -q --references $test2 | grep -q $test2)
-(! nix-store -q --references $test2 | grep -q $dep)
-nix-store -q --references $test2 | grep -q aux-ref
+nix-store -q --references $test2 | grepQuietInverse $test2
+nix-store -q --references $test2 | grepQuietInverse $dep
+nix-store -q --references $test2 | grepQuiet aux-ref
# test3 should fail (unallowed ref).
(! nix-build -o $RESULT check-refs.nix -A test3)
diff --git a/tests/check-reqs.sh b/tests/check-reqs.sh
index e9f65fc2a..856c94cec 100644
--- a/tests/check-reqs.sh
+++ b/tests/check-reqs.sh
@@ -8,8 +8,8 @@ nix-build -o $RESULT check-reqs.nix -A test1
(! nix-build -o $RESULT check-reqs.nix -A test2)
(! nix-build -o $RESULT check-reqs.nix -A test3)
-(! nix-build -o $RESULT check-reqs.nix -A test4) 2>&1 | grep -q 'check-reqs-dep1'
-(! nix-build -o $RESULT check-reqs.nix -A test4) 2>&1 | grep -q 'check-reqs-dep2'
+(! nix-build -o $RESULT check-reqs.nix -A test4) 2>&1 | grepQuiet 'check-reqs-dep1'
+(! nix-build -o $RESULT check-reqs.nix -A test4) 2>&1 | grepQuiet 'check-reqs-dep2'
(! nix-build -o $RESULT check-reqs.nix -A test5)
(! nix-build -o $RESULT check-reqs.nix -A test6)
diff --git a/tests/check.sh b/tests/check.sh
index e77c0405d..645b90222 100644
--- a/tests/check.sh
+++ b/tests/check.sh
@@ -37,7 +37,7 @@ checkBuildTempDirRemoved $TEST_ROOT/log
nix-build check.nix -A deterministic --argstr checkBuildId $checkBuildId \
--no-out-link --check --keep-failed 2> $TEST_ROOT/log
-if grep -q 'may not be deterministic' $TEST_ROOT/log; then false; fi
+if grepQuiet 'may not be deterministic' $TEST_ROOT/log; then false; fi
checkBuildTempDirRemoved $TEST_ROOT/log
nix-build check.nix -A nondeterministic --argstr checkBuildId $checkBuildId \
diff --git a/tests/common.sh b/tests/common.sh
index 68b90a85f..8941671d6 100644
--- a/tests/common.sh
+++ b/tests/common.sh
@@ -1,4 +1,4 @@
-set -e
+set -eu -o pipefail
if [[ -z "${COMMON_SH_SOURCED-}" ]]; then
diff --git a/tests/common/vars-and-functions.sh.in b/tests/common/vars-and-functions.sh.in
index 0deef4c1c..a9e6c802f 100644
--- a/tests/common/vars-and-functions.sh.in
+++ b/tests/common/vars-and-functions.sh.in
@@ -1,4 +1,4 @@
-set -e
+set -eu -o pipefail
if [[ -z "${COMMON_VARS_AND_FUNCTIONS_SH_SOURCED-}" ]]; then
@@ -152,31 +152,64 @@ isDaemonNewer () {
[[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]]
}
+skipTest () {
+ echo "$1, skipping this test..." >&2
+ exit 99
+}
+
requireDaemonNewerThan () {
- isDaemonNewer "$1" || exit 99
+ isDaemonNewer "$1" || skipTest "Daemon is too old"
}
canUseSandbox() {
- if [[ ! $_canUseSandbox ]]; then
- echo "Sandboxing not supported, skipping this test..."
- return 1
- fi
+ [[ ${_canUseSandbox-} ]]
+}
- return 0
+requireSandboxSupport () {
+ canUseSandbox || skipTest "Sandboxing not supported"
+}
+
+requireGit() {
+ [[ $(type -p git) ]] || skipTest "Git not installed"
}
fail() {
- echo "$1"
+ echo "$1" >&2
exit 1
}
+# Run a command, failing if it does not exit with the expected exit code.
+#
+# Has two advantages over the built-in `!`:
+#
+# 1. `!` conflates all non-0 codes. `expect` allows testing for an exact
+# code.
+#
+# 2. `!` unexpectedly negates `set -e`, and cannot be used on individual
+# pipeline stages with `set -o pipefail`. It only works on the entire
+# pipeline, which is useless if we want, say, a `nix ...` invocation to
+# *fail*, but a grep on the error message it outputs to *succeed*.
expect() {
local expected res
expected="$1"
shift
- "$@" || res="$?"
+ "$@" && res=0 || res="$?"
if [[ $res -ne $expected ]]; then
- echo "Expected '$expected' but got '$res' while running '$*'"
+ echo "Expected '$expected' but got '$res' while running '${*@Q}'" >&2
+ return 1
+ fi
+ return 0
+}
+
+# Better than just doing `expect ... >&2` because the "Expected..."
+# message below will *not* be redirected.
+expectStderr() {
+ local expected res
+ expected="$1"
+ shift
+ "$@" 2>&1 && res=0 || res="$?"
+ if [[ $res -ne $expected ]]; then
+ echo "Expected '$expected' but got '$res' while running '${*@Q}'" >&2
return 1
fi
return 0
@@ -184,14 +217,13 @@ expect() {
needLocalStore() {
if [[ "$NIX_REMOTE" == "daemon" ]]; then
- echo "Can’t run through the daemon ($1), skipping this test..."
- return 99
+ skipTest "Can’t run through the daemon ($1)"
fi
}
# Just to make it easy to find which tests should be fixed
buggyNeedLocalStore() {
- needLocalStore
+ needLocalStore "$1"
}
enableFeatures() {
@@ -210,6 +242,35 @@ onError() {
done
}
+# `grep -v` doesn't work well for exit codes. We want `!(exists line l. l
+# matches)`. It gives us `exists line l. !(l matches)`.
+#
+# `!` normally doesn't work well with `set -e`, but when we wrap in a
+# function it *does*.
+grepInverse() {
+ ! grep "$@"
+}
+
+# A shorthand, `> /dev/null` is a bit noisy.
+#
+# `grep -q` would seem to do this, no function necessary, but it is a
+# bad fit with pipes and `set -o pipefail`: `-q` will exit after the
+# first match, and then subsequent writes will result in broken pipes.
+#
+# Note that reproducing the above is a bit tricky as it depends on
+# non-deterministic properties such as the timing between the match and
+# the closing of the pipe, the buffering of the pipe, and the speed of
+# the producer into the pipe. But rest assured we've seen it happen in
+# CI reliably.
+grepQuiet() {
+ grep "$@" > /dev/null
+}
+
+# The previous two, combined
+grepQuietInverse() {
+ ! grep "$@" > /dev/null
+}
+
trap onError ERR
fi # COMMON_VARS_AND_FUNCTIONS_SH_SOURCED
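Taken together, `expect`, `expectStderr`, `grepQuiet`, and `grepQuietInverse` let the tests assert on failing commands and on the absence of output without fighting `set -eu -o pipefail`. A minimal sketch of how they combine (the first pipeline is taken from `tests/build.sh` in this patch; the second is illustrative):

```bash
set -eu -o pipefail
source common.sh

# The command must exit with code 1, and its stderr must mention the problem.
expectStderr 1 nix build --expr '""' --no-link \
    | grepQuiet "It should only have exactly one entry"

# The log must NOT mention a hash mismatch; wrapping the negation in a
# function keeps it compatible with `set -e`, unlike a bare `!`.
grepQuietInverse "hash mismatch" "$TEST_ROOT/log"
```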
diff --git a/tests/compute-levels.sh b/tests/compute-levels.sh
index e4322dfa1..de3da2ebd 100644
--- a/tests/compute-levels.sh
+++ b/tests/compute-levels.sh
@@ -3,5 +3,5 @@ source common.sh
if [[ $(uname -ms) = "Linux x86_64" ]]; then
# x86_64 CPUs must always support the baseline
# microarchitecture level.
- nix -vv --version | grep -q "x86_64-v1-linux"
+ nix -vv --version | grepQuiet "x86_64-v1-linux"
fi
diff --git a/tests/db-migration.sh b/tests/db-migration.sh
index 92dd4f3ba..44cd16bc0 100644
--- a/tests/db-migration.sh
+++ b/tests/db-migration.sh
@@ -1,18 +1,18 @@
# Test that we can successfully migrate from an older db schema
+source common.sh
+
# Only run this if we have an older Nix available
# XXX: This assumes that the `daemon` package is older than the `client` one
-if [[ -z "$NIX_DAEMON_PACKAGE" ]]; then
- exit 99
+if [[ -z "${NIX_DAEMON_PACKAGE-}" ]]; then
+ skipTest "not using the Nix daemon"
fi
-source common.sh
-
killDaemon
# Fill the db using the older Nix
PATH_WITH_NEW_NIX="$PATH"
-export PATH="$NIX_DAEMON_PACKAGE/bin:$PATH"
+export PATH="${NIX_DAEMON_PACKAGE}/bin:$PATH"
clearStore
nix-build simple.nix --no-out-link
nix-store --generate-binary-cache-key cache1.example.org $TEST_ROOT/sk1 $TEST_ROOT/pk1
diff --git a/tests/dependencies.sh b/tests/dependencies.sh
index 092950aa7..f9da0c6bc 100644
--- a/tests/dependencies.sh
+++ b/tests/dependencies.sh
@@ -36,10 +36,10 @@ deps=$(nix-store -quR "$drvPath")
echo "output closure contains $deps"
# The output path should be in the closure.
-echo "$deps" | grep -q "$outPath"
+echo "$deps" | grepQuiet "$outPath"
# Input-1 is not retained.
-if echo "$deps" | grep -q "dependencies-input-1"; then exit 1; fi
+if echo "$deps" | grepQuiet "dependencies-input-1"; then exit 1; fi
# Input-2 is retained.
input2OutPath=$(echo "$deps" | grep "dependencies-input-2")
@@ -49,4 +49,4 @@ nix-store -q --referrers-closure "$input2OutPath" | grep "$outPath"
# Check that the derivers are set properly.
test $(nix-store -q --deriver "$outPath") = "$drvPath"
-nix-store -q --deriver "$input2OutPath" | grep -q -- "-input-2.drv"
+nix-store -q --deriver "$input2OutPath" | grepQuiet -- "-input-2.drv"
diff --git a/tests/derivation-json.sh b/tests/derivation-json.sh
new file mode 100644
index 000000000..b6be5d977
--- /dev/null
+++ b/tests/derivation-json.sh
@@ -0,0 +1,12 @@
+source common.sh
+
+drvPath=$(nix-instantiate simple.nix)
+
+nix derivation show $drvPath | jq .[] > $TEST_HOME/simple.json
+
+drvPath2=$(nix derivation add < $TEST_HOME/simple.json)
+
+[[ "$drvPath" = "$drvPath2" ]]
+
+# Input-addressed derivations cannot be renamed.
+jq '.name = "foo"' < $TEST_HOME/simple.json | expectStderr 1 nix derivation add | grepQuiet "has incorrect output"
diff --git a/tests/describe-stores.sh b/tests/describe-stores.sh
deleted file mode 100644
index 3fea61483..000000000
--- a/tests/describe-stores.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-source common.sh
-
-# Query an arbitrary value in `nix describe-stores --json`'s output just to
-# check that it has the right structure
-[[ $(nix --experimental-features 'nix-command flakes' describe-stores --json | jq '.["SSH Store"]["compress"]["defaultValue"]') == false ]]
-
-# Ensure that the output of `nix describe-stores` isn't empty
-[[ -n $(nix --experimental-features 'nix-command flakes' describe-stores) ]]
diff --git a/tests/dyn-drv/common.sh b/tests/dyn-drv/common.sh
new file mode 100644
index 000000000..c786f6925
--- /dev/null
+++ b/tests/dyn-drv/common.sh
@@ -0,0 +1,8 @@
+source ../common.sh
+
+# Need backend to support text-hashing too
+requireDaemonNewerThan "2.16.0pre20230419"
+
+enableFeatures "ca-derivations dynamic-derivations"
+
+restartDaemon
diff --git a/tests/dyn-drv/config.nix.in b/tests/dyn-drv/config.nix.in
new file mode 120000
index 000000000..af24ddb30
--- /dev/null
+++ b/tests/dyn-drv/config.nix.in
@@ -0,0 +1 @@
+../config.nix.in
\ No newline at end of file
diff --git a/tests/dyn-drv/recursive-mod-json.nix b/tests/dyn-drv/recursive-mod-json.nix
new file mode 100644
index 000000000..c6a24ca4f
--- /dev/null
+++ b/tests/dyn-drv/recursive-mod-json.nix
@@ -0,0 +1,33 @@
+with import ./config.nix;
+
+let innerName = "foo"; in
+
+mkDerivation rec {
+ name = "${innerName}.drv";
+ SHELL = shell;
+
+ requiredSystemFeatures = [ "recursive-nix" ];
+
+ drv = builtins.unsafeDiscardOutputDependency (import ./text-hashed-output.nix).hello.drvPath;
+
+ buildCommand = ''
+ export NIX_CONFIG='experimental-features = nix-command ca-derivations'
+
+ PATH=${builtins.getEnv "EXTRA_PATH"}:$PATH
+
+ # JSON of pre-existing drv
+ nix derivation show $drv | jq .[] > drv0.json
+
+ # Fix name
+ jq < drv0.json '.name = "${innerName}"' > drv1.json
+
+ # Extend `buildCommand`
+ jq < drv1.json '.env.buildCommand += "echo \"I am alive!\" >> $out/hello\n"' > drv0.json
+
+ # Used as our output
+ cp $(nix derivation add < drv0.json) $out
+ '';
+ __contentAddressed = true;
+ outputHashMode = "text";
+ outputHashAlgo = "sha256";
+}
diff --git a/tests/dyn-drv/recursive-mod-json.sh b/tests/dyn-drv/recursive-mod-json.sh
new file mode 100644
index 000000000..070c5c2cb
--- /dev/null
+++ b/tests/dyn-drv/recursive-mod-json.sh
@@ -0,0 +1,25 @@
+source common.sh
+
+# FIXME
+if [[ $(uname) != Linux ]]; then skipTest "Not running Linux"; fi
+
+enableFeatures 'recursive-nix'
+restartDaemon
+
+clearStore
+
+rm -f $TEST_ROOT/result
+
+EXTRA_PATH=$(dirname $(type -p nix)):$(dirname $(type -p jq))
+export EXTRA_PATH
+
+# Will produce a drv
+metaDrv=$(nix-instantiate ./recursive-mod-json.nix)
+
+# computed "dynamic" derivation
+drv=$(nix-store -r $metaDrv)
+
+# build that dyn drv
+res=$(nix-store -r $drv)
+
+grep 'I am alive!' $res/hello
diff --git a/tests/dyn-drv/text-hashed-output.nix b/tests/dyn-drv/text-hashed-output.nix
new file mode 100644
index 000000000..a700fd102
--- /dev/null
+++ b/tests/dyn-drv/text-hashed-output.nix
@@ -0,0 +1,29 @@
+with import ./config.nix;
+
+# Two content-addressed derivations: `hello` is a simple CA derivation,
+# and `producingDrv` is a text-hashed derivation whose output is a copy
+# of hello's .drv file.
+rec {
+ hello = mkDerivation {
+ name = "hello";
+ buildCommand = ''
+ set -x
+ echo "Building a CA derivation"
+ mkdir -p $out
+ echo "Hello World" > $out/hello
+ '';
+ __contentAddressed = true;
+ outputHashMode = "recursive";
+ outputHashAlgo = "sha256";
+ };
+ producingDrv = mkDerivation {
+ name = "hello.drv";
+ buildCommand = ''
+ echo "Copying the derivation"
+ cp ${builtins.unsafeDiscardOutputDependency hello.drvPath} $out
+ '';
+ __contentAddressed = true;
+ outputHashMode = "text";
+ outputHashAlgo = "sha256";
+ };
+}
diff --git a/tests/dyn-drv/text-hashed-output.sh b/tests/dyn-drv/text-hashed-output.sh
new file mode 100644
index 000000000..f3e5aa93b
--- /dev/null
+++ b/tests/dyn-drv/text-hashed-output.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+# In the corresponding nix file, we have two derivations: the first, named
+# `hello`, is a normal recursive content-addressed derivation, while the second,
+# named `producingDrv`, has the new outputHashMode "text". Note that
+# `producingDrv` doesn't refer to the build output of `hello`, but only to the
+# path of its drv file. For this reason, we only need to:
+#
+# - instantiate the `hello` derivation
+# - build the `producingDrv` derivation
+# - check that `producingDrv`'s output path coincides with `hello`'s drv path
+
+drv=$(nix-instantiate ./text-hashed-output.nix -A hello)
+nix show-derivation "$drv"
+
+drvProducingDrv=$(nix-instantiate ./text-hashed-output.nix -A producingDrv)
+nix show-derivation "$drvProducingDrv"
+
+out1=$(nix-build ./text-hashed-output.nix -A producingDrv --no-out-link)
+
+nix path-info $drv --derivation --json | jq
+nix path-info $out1 --derivation --json | jq
+
+test $out1 == $drv
diff --git a/tests/eval-store.sh b/tests/eval-store.sh
index 679da5741..8fc859730 100644
--- a/tests/eval-store.sh
+++ b/tests/eval-store.sh
@@ -2,7 +2,7 @@ source common.sh
# Using `--eval-store` with the daemon will eventually copy everything
# to the build store, invalidating most of the tests here
-needLocalStore
+needLocalStore "“--eval-store” doesn't achieve much with the daemon"
eval_store=$TEST_ROOT/eval-store
diff --git a/tests/eval.sh b/tests/eval.sh
index ffae08a6a..066d8fc36 100644
--- a/tests/eval.sh
+++ b/tests/eval.sh
@@ -16,9 +16,10 @@ nix eval --expr 'assert 1 + 2 == 3; true'
[[ $(nix eval int -f "./eval.nix") == 123 ]]
[[ $(nix eval str -f "./eval.nix") == '"foo"' ]]
[[ $(nix eval str --raw -f "./eval.nix") == 'foo' ]]
-[[ $(nix eval attr -f "./eval.nix") == '{ foo = "bar"; }' ]]
+[[ "$(nix eval attr -f "./eval.nix")" == '{ foo = "bar"; }' ]]
[[ $(nix eval attr --json -f "./eval.nix") == '{"foo":"bar"}' ]]
[[ $(nix eval int -f - < "./eval.nix") == 123 ]]
+[[ "$(nix eval --expr '{"assert"=1;bar=2;}')" == '{ "assert" = 1; bar = 2; }' ]]
# Check if toFile can be utilized during restricted eval
[[ $(nix eval --restrict-eval --expr 'import (builtins.toFile "source" "42")') == 42 ]]
@@ -26,9 +27,10 @@ nix eval --expr 'assert 1 + 2 == 3; true'
nix-instantiate --eval -E 'assert 1 + 2 == 3; true'
[[ $(nix-instantiate -A int --eval "./eval.nix") == 123 ]]
[[ $(nix-instantiate -A str --eval "./eval.nix") == '"foo"' ]]
-[[ $(nix-instantiate -A attr --eval "./eval.nix") == '{ foo = "bar"; }' ]]
+[[ "$(nix-instantiate -A attr --eval "./eval.nix")" == '{ foo = "bar"; }' ]]
[[ $(nix-instantiate -A attr --eval --json "./eval.nix") == '{"foo":"bar"}' ]]
[[ $(nix-instantiate -A int --eval - < "./eval.nix") == 123 ]]
+[[ "$(nix-instantiate --eval -E '{"assert"=1;bar=2;}')" == '{ "assert" = 1; bar = 2; }' ]]
# Check that symlink cycles don't cause a hang.
ln -sfn cycle.nix $TEST_ROOT/cycle.nix
diff --git a/tests/experimental-features.sh b/tests/experimental-features.sh
new file mode 100644
index 000000000..607bf0a8e
--- /dev/null
+++ b/tests/experimental-features.sh
@@ -0,0 +1,86 @@
+source common.sh
+
+# Skipping these two for now, because we actually *do* want flags and
+# config settings to always show up in the manual, just be marked
+# experimental. Will reenable once the manual generation takes advantage
+# of the JSON metadata on this.
+#
+# # Without flakes, flake options should not show up
+# # With flakes, flake options should show up
+#
+# function grep_both_ways {
+# nix --experimental-features 'nix-command' "$@" | grepQuietInverse flake
+# nix --experimental-features 'nix-command flakes' "$@" | grepQuiet flake
+#
+# # Also, the order should not matter
+# nix "$@" --experimental-features 'nix-command' | grepQuietInverse flake
+# nix "$@" --experimental-features 'nix-command flakes' | grepQuiet flake
+# }
+#
+# # Simple case: the configuration affects the running command
+# grep_both_ways show-config
+#
+# # Medium case: the configuration affects --help
+# grep_both_ways store gc --help
+
+# Test settings that are gated on experimental features; the setting is ignored
+# with a warning if the experimental feature is not enabled. The order of the
+# `setting = value` lines in the configuration should not matter.
+
+# 'flakes' experimental-feature is disabled before, ignore and warn
+NIX_CONFIG='
+ experimental-features = nix-command
+ accept-flake-config = true
+' nix show-config accept-flake-config 1>$TEST_ROOT/stdout 2>$TEST_ROOT/stderr
+grepQuiet "false" $TEST_ROOT/stdout
+grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" $TEST_ROOT/stderr
+
+# 'flakes' experimental-feature is disabled after, ignore and warn
+NIX_CONFIG='
+ accept-flake-config = true
+ experimental-features = nix-command
+' nix show-config accept-flake-config 1>$TEST_ROOT/stdout 2>$TEST_ROOT/stderr
+grepQuiet "false" $TEST_ROOT/stdout
+grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" $TEST_ROOT/stderr
+
+# 'flakes' experimental-feature is enabled before, process
+NIX_CONFIG='
+ experimental-features = nix-command flakes
+ accept-flake-config = true
+' nix show-config accept-flake-config 1>$TEST_ROOT/stdout 2>$TEST_ROOT/stderr
+grepQuiet "true" $TEST_ROOT/stdout
+grepQuietInverse "Ignoring setting 'accept-flake-config'" $TEST_ROOT/stderr
+
+# 'flakes' experimental-feature is enabled after, process
+NIX_CONFIG='
+ accept-flake-config = true
+ experimental-features = nix-command flakes
+' nix show-config accept-flake-config 1>$TEST_ROOT/stdout 2>$TEST_ROOT/stderr
+grepQuiet "true" $TEST_ROOT/stdout
+grepQuietInverse "Ignoring setting 'accept-flake-config'" $TEST_ROOT/stderr
+
+function exit_code_both_ways {
+ expect 1 nix --experimental-features 'nix-command' "$@" 1>/dev/null
+ nix --experimental-features 'nix-command flakes' "$@" 1>/dev/null
+
+ # Also, the order should not matter
+ expect 1 nix "$@" --experimental-features 'nix-command' 1>/dev/null
+ nix "$@" --experimental-features 'nix-command flakes' 1>/dev/null
+}
+
+exit_code_both_ways show-config --flake-registry 'https://no'
+
+# Double check these are stable
+nix --experimental-features '' --help 1>/dev/null
+nix --experimental-features '' doctor --help 1>/dev/null
+nix --experimental-features '' repl --help 1>/dev/null
+nix --experimental-features '' upgrade-nix --help 1>/dev/null
+
+# These 3 arguments are currently given to all commands, which is wrong (as not
+# all commands care about them). Until that is fixed, we simply make them
+# require the nix-command experimental feature; it so happens that the commands
+# we wish to stabilize do not need them anyway.
+for arg in '--print-build-logs' '--offline' '--refresh'; do
+ nix --experimental-features 'nix-command' "$arg" --help 1>/dev/null
+ expect 1 nix --experimental-features '' "$arg" --help 1>/dev/null
+done
diff --git a/tests/export-graph.sh b/tests/export-graph.sh
index 4954a6cbc..1f6232a40 100644
--- a/tests/export-graph.sh
+++ b/tests/export-graph.sh
@@ -4,7 +4,7 @@ clearStore
clearProfiles
checkRef() {
- nix-store -q --references $TEST_ROOT/result | grep -q "$1"'$' || fail "missing reference $1"
+ nix-store -q --references $TEST_ROOT/result | grepQuiet "$1"'$' || fail "missing reference $1"
}
# Test the export of the runtime dependency graph.
diff --git a/tests/fetchClosure.sh b/tests/fetchClosure.sh
index d88c55c3c..a207f647c 100644
--- a/tests/fetchClosure.sh
+++ b/tests/fetchClosure.sh
@@ -42,13 +42,13 @@ if [[ "$NIX_REMOTE" != "daemon" ]]; then
fi
# 'toPath' set to empty string should fail but print the expected path.
-nix eval -v --json --expr "
+expectStderr 1 nix eval -v --json --expr "
builtins.fetchClosure {
fromStore = \"file://$cacheDir\";
fromPath = $nonCaPath;
toPath = \"\";
}
-" 2>&1 | grep "error: rewriting.*$nonCaPath.*yielded.*$caPath"
+" | grep "error: rewriting.*$nonCaPath.*yielded.*$caPath"
# If fromPath is CA, then toPath isn't needed.
nix copy --to file://$cacheDir $caPath
diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh
index a7a8df186..e2ccb0e97 100644
--- a/tests/fetchGit.sh
+++ b/tests/fetchGit.sh
@@ -1,9 +1,6 @@
source common.sh
-if [[ -z $(type -p git) ]]; then
- echo "Git not installed; skipping Git tests"
- exit 99
-fi
+requireGit
clearStore
diff --git a/tests/fetchGitRefs.sh b/tests/fetchGitRefs.sh
index 52926040b..d643fea04 100644
--- a/tests/fetchGitRefs.sh
+++ b/tests/fetchGitRefs.sh
@@ -1,9 +1,6 @@
source common.sh
-if [[ -z $(type -p git) ]]; then
- echo "Git not installed; skipping Git tests"
- exit 99
-fi
+requireGit
clearStore
@@ -56,7 +53,7 @@ invalid_ref() {
else
(! git check-ref-format --branch "$1" >/dev/null 2>&1)
fi
- nix --debug eval --raw --impure --expr "(builtins.fetchGit { url = $repo; ref = ''$1''; }).outPath" 2>&1 | grep 'invalid Git branch/tag name' >/dev/null
+ expect 1 nix --debug eval --raw --impure --expr "(builtins.fetchGit { url = $repo; ref = ''$1''; }).outPath" 2>&1 | grep 'invalid Git branch/tag name' >/dev/null
}
diff --git a/tests/fetchGitSubmodules.sh b/tests/fetchGitSubmodules.sh
index 08ccaa3cd..df81232e5 100644
--- a/tests/fetchGitSubmodules.sh
+++ b/tests/fetchGitSubmodules.sh
@@ -2,10 +2,7 @@ source common.sh
set -u
-if [[ -z $(type -p git) ]]; then
- echo "Git not installed; skipping Git submodule tests"
- exit 99
-fi
+requireGit
clearStore
diff --git a/tests/fetchMercurial.sh b/tests/fetchMercurial.sh
index 5c64ffd26..e6f8525c6 100644
--- a/tests/fetchMercurial.sh
+++ b/tests/fetchMercurial.sh
@@ -1,9 +1,6 @@
source common.sh
-if [[ -z $(type -p hg) ]]; then
- echo "Mercurial not installed; skipping Mercurial tests"
- exit 99
-fi
+[[ $(type -p hg) ]] || skipTest "Mercurial not installed"
clearStore
diff --git a/tests/fetchTree-file.sh b/tests/fetchTree-file.sh
index f0c530466..fe569cfb8 100644
--- a/tests/fetchTree-file.sh
+++ b/tests/fetchTree-file.sh
@@ -79,7 +79,7 @@ EOF
EOF
popd
- [[ -z "${NIX_DAEMON_PACKAGE}" ]] && return 0
+ [[ -z "${NIX_DAEMON_PACKAGE-}" ]] && return 0
# Ensure that a lockfile generated by the current Nix for tarball inputs
# can still be read by an older Nix
@@ -91,7 +91,7 @@ EOF
flake = false;
};
outputs = { self, tarball }: {
- foo = builtins.readFile "${tarball}/test_input_file";
+ foo = builtins.readFile "\${tarball}/test_input_file";
};
}
nix flake update
diff --git a/tests/fetchurl.sh b/tests/fetchurl.sh
index b41d8c4b7..8cd40c09f 100644
--- a/tests/fetchurl.sh
+++ b/tests/fetchurl.sh
@@ -62,7 +62,7 @@ hash=$(nix-hash --flat --type sha256 $nar)
outPath=$(nix-build -vvvvv --expr 'import <nix/fetchurl.nix>' --argstr url file://$nar --argstr sha256 $hash \
--arg unpack true --argstr name xyzzy --no-out-link)
-echo $outPath | grep -q 'xyzzy'
+echo $outPath | grepQuiet 'xyzzy'
test -x $outPath/fetchurl.sh
test -L $outPath/symlink
diff --git a/tests/flakes/build-paths.sh b/tests/flakes/build-paths.sh
index 08b4d1763..ff012e1b3 100644
--- a/tests/flakes/build-paths.sh
+++ b/tests/flakes/build-paths.sh
@@ -35,16 +35,33 @@ cat > $flake1Dir/flake.nix <<EOF
a6 = flake2.outPath;
# FIXME
- a7 = "${flake2}/config.nix";
+ a7 = "\${flake2}/config.nix";
# This is only allowed in impure mode.
a8 = builtins.storePath $dep;
a9 = "$dep";
+
+ drvCall = with import ./config.nix; mkDerivation {
+ name = "simple";
+ builder = ./simple.builder.sh;
+ PATH = "";
+ goodPath = path;
+ };
+
+ a10 = builtins.unsafeDiscardOutputDependency self.drvCall.drvPath;
+
+ a11 = self.drvCall.drvPath;
+
+ a12 = self.drvCall.outPath;
+
+ a13 = "\${self.drvCall.drvPath}\${self.drvCall.outPath}";
};
}
EOF
+cp ../simple.nix ../simple.builder.sh ../config.nix $flake1Dir/
+
echo bar > $flake1Dir/foo
nix build --json --out-link $TEST_ROOT/result $flake1Dir#a1
@@ -63,4 +80,17 @@ nix build --json --out-link $TEST_ROOT/result $flake1Dir#a6
nix build --impure --json --out-link $TEST_ROOT/result $flake1Dir#a8
diff common.sh $TEST_ROOT/result
-(! nix build --impure --json --out-link $TEST_ROOT/result $flake1Dir#a9)
+expectStderr 1 nix build --impure --json --out-link $TEST_ROOT/result $flake1Dir#a9 \
+ | grepQuiet "has 0 entries in its context. It should only have exactly one entry"
+
+nix build --json --out-link $TEST_ROOT/result $flake1Dir#a10
+[[ $(readlink -e $TEST_ROOT/result) = *simple.drv ]]
+
+expectStderr 1 nix build --json --out-link $TEST_ROOT/result $flake1Dir#a11 \
+ | grepQuiet "has a context which refers to a complete source and binary closure"
+
+nix build --json --out-link $TEST_ROOT/result $flake1Dir#a12
+[[ -e $TEST_ROOT/result/hello ]]
+
+expectStderr 1 nix build --impure --json --out-link $TEST_ROOT/result $flake1Dir#a13 \
+ | grepQuiet "has 2 entries in its context. It should only have exactly one entry"
diff --git a/tests/flakes/check.sh b/tests/flakes/check.sh
index 278ac7fa4..34b82c61c 100644
--- a/tests/flakes/check.sh
+++ b/tests/flakes/check.sh
@@ -72,6 +72,8 @@ cat > $flakeDir/flake.nix <<EOF
}
EOF
-checkRes=$(nix flake check --keep-going $flakeDir 2>&1 && fail "nix flake check should have failed" || true)
-echo "$checkRes" | grep -q "packages.system-1.default"
-echo "$checkRes" | grep -q "packages.system-2.default"
+nix flake check $flakeDir
+
+checkRes=$(nix flake check --all-systems --keep-going $flakeDir 2>&1 && fail "nix flake check --all-systems should have failed" || true)
+echo "$checkRes" | grepQuiet "packages.system-1.default"
+echo "$checkRes" | grepQuiet "packages.system-2.default"
diff --git a/tests/flakes/common.sh b/tests/flakes/common.sh
index 9d79080cd..427abcdde 100644
--- a/tests/flakes/common.sh
+++ b/tests/flakes/common.sh
@@ -2,13 +2,6 @@ source ../common.sh
registry=$TEST_ROOT/registry.json
-requireGit() {
- if [[ -z $(type -p git) ]]; then
- echo "Git not installed; skipping flake tests"
- exit 99
- fi
-}
-
writeSimpleFlake() {
local flakeDir="$1"
cat > $flakeDir/flake.nix <<EOF
@@ -66,7 +59,7 @@ EOF
createGitRepo() {
local repo="$1"
- local extraArgs="$2"
+ local extraArgs="${2-}"
rm -rf $repo $repo.tmp
mkdir -p $repo
diff --git a/tests/flakes/flake-in-submodule.sh b/tests/flakes/flake-in-submodule.sh
new file mode 100644
index 000000000..21a4b52de
--- /dev/null
+++ b/tests/flakes/flake-in-submodule.sh
@@ -0,0 +1,52 @@
+source common.sh
+
+# Tests that:
+# - flake.nix may reside inside of a git submodule
+# - the flake can access content outside of the submodule
+#
+# rootRepo
+# ├── root.nix
+# └── submodule
+# ├── flake.nix
+# └── sub.nix
+
+
+requireGit
+
+clearStore
+
+# Submodules can't be fetched locally by default.
+# See fetchGitSubmodules.sh
+export XDG_CONFIG_HOME=$TEST_HOME/.config
+git config --global protocol.file.allow always
+
+
+rootRepo=$TEST_ROOT/rootRepo
+subRepo=$TEST_ROOT/submodule
+
+
+createGitRepo $subRepo
+cat > $subRepo/flake.nix <<EOF
+{
+ outputs = { self }: {
+ sub = import ./sub.nix;
+ root = import ../root.nix;
+ };
+}
+EOF
+echo '"expression in submodule"' > $subRepo/sub.nix
+git -C $subRepo add flake.nix sub.nix
+git -C $subRepo commit -m Initial
+
+createGitRepo $rootRepo
+
+git -C $rootRepo submodule init
+git -C $rootRepo submodule add $subRepo submodule
+echo '"expression in root repo"' > $rootRepo/root.nix
+git -C $rootRepo add root.nix
+git -C $rootRepo commit -m "Add root.nix"
+
+# Flake can live inside a submodule and can be accessed via ?dir=submodule
+[[ $(nix eval --json git+file://$rootRepo\?submodules=1\&dir=submodule#sub ) = '"expression in submodule"' ]]
+# The flake can access content outside of the submodule
+[[ $(nix eval --json git+file://$rootRepo\?submodules=1\&dir=submodule#root ) = '"expression in root repo"' ]]
diff --git a/tests/flakes/flakes.sh b/tests/flakes/flakes.sh
index 07f1e6698..f2e216435 100644
--- a/tests/flakes/flakes.sh
+++ b/tests/flakes/flakes.sh
@@ -76,17 +76,17 @@ nix registry add --registry $registry nixpkgs flake1
# Test 'nix registry list'.
[[ $(nix registry list | wc -l) == 5 ]]
-nix registry list | grep -q '^global'
-nix registry list | grep -q -v '^user' # nothing in user registry
+nix registry list | grep '^global'
+nix registry list | grepInverse '^user' # nothing in user registry
# Test 'nix flake metadata'.
nix flake metadata flake1
-nix flake metadata flake1 | grep -q 'Locked URL:.*flake1.*'
+nix flake metadata flake1 | grepQuiet 'Locked URL:.*flake1.*'
# Test 'nix flake metadata' on a local flake.
-(cd $flake1Dir && nix flake metadata) | grep -q 'URL:.*flake1.*'
-(cd $flake1Dir && nix flake metadata .) | grep -q 'URL:.*flake1.*'
-nix flake metadata $flake1Dir | grep -q 'URL:.*flake1.*'
+(cd $flake1Dir && nix flake metadata) | grepQuiet 'URL:.*flake1.*'
+(cd $flake1Dir && nix flake metadata .) | grepQuiet 'URL:.*flake1.*'
+nix flake metadata $flake1Dir | grepQuiet 'URL:.*flake1.*'
# Test 'nix flake metadata --json'.
json=$(nix flake metadata flake1 --json | jq .)
@@ -96,7 +96,9 @@ json=$(nix flake metadata flake1 --json | jq .)
hash1=$(echo "$json" | jq -r .revision)
echo -n '# foo' >> $flake1Dir/flake.nix
+flake1OriginalCommit=$(git -C $flake1Dir rev-parse HEAD)
git -C $flake1Dir commit -a -m 'Foo'
+flake1NewCommit=$(git -C $flake1Dir rev-parse HEAD)
hash2=$(nix flake metadata flake1 --json --refresh | jq -r .revision)
[[ $hash1 != $hash2 ]]
@@ -134,11 +136,11 @@ nix build -o $TEST_ROOT/result flake2#bar --impure --no-write-lock-file
nix eval --expr "builtins.getFlake \"$flake2Dir\"" --impure
# Building a local flake with an unlocked dependency should fail with --no-update-lock-file.
-nix build -o $TEST_ROOT/result $flake2Dir#bar --no-update-lock-file 2>&1 | grep 'requires lock file changes'
+expect 1 nix build -o $TEST_ROOT/result $flake2Dir#bar --no-update-lock-file 2>&1 | grep 'requires lock file changes'
# But it should succeed without that flag.
nix build -o $TEST_ROOT/result $flake2Dir#bar --no-write-lock-file
-nix build -o $TEST_ROOT/result $flake2Dir#bar --no-update-lock-file 2>&1 | grep 'requires lock file changes'
+expect 1 nix build -o $TEST_ROOT/result $flake2Dir#bar --no-update-lock-file 2>&1 | grep 'requires lock file changes'
nix build -o $TEST_ROOT/result $flake2Dir#bar --commit-lock-file
[[ -e $flake2Dir/flake.lock ]]
[[ -z $(git -C $flake2Dir diff main || echo failed) ]]
@@ -196,10 +198,10 @@ git -C $flake3Dir add flake.lock
git -C $flake3Dir commit -m 'Add lockfile'
# Test whether registry caching works.
-nix registry list --flake-registry file://$registry | grep -q flake3
+nix registry list --flake-registry file://$registry | grepQuiet flake3
mv $registry $registry.tmp
nix store gc
-nix registry list --flake-registry file://$registry --refresh | grep -q flake3
+nix registry list --flake-registry file://$registry --refresh | grepQuiet flake3
mv $registry.tmp $registry
# Test whether flakes are registered as GC roots for offline use.
@@ -346,8 +348,8 @@ nix registry remove flake1
nix registry add user-flake1 git+file://$flake1Dir
nix registry add user-flake2 git+file://$flake2Dir
[[ $(nix --flake-registry "" registry list | wc -l) == 2 ]]
-nix --flake-registry "" registry list | grep -q -v '^global' # nothing in global registry
-nix --flake-registry "" registry list | grep -q '^user'
+nix --flake-registry "" registry list | grepQuietInverse '^global' # nothing in global registry
+nix --flake-registry "" registry list | grepQuiet '^user'
nix registry remove user-flake1
nix registry remove user-flake2
[[ $(nix registry list | wc -l) == 5 ]]
@@ -454,7 +456,7 @@ url=$(nix flake metadata --json file://$TEST_ROOT/flake.tar.gz | jq -r .url)
nix build -o $TEST_ROOT/result $url
# Building with an incorrect SRI hash should fail.
-nix build -o $TEST_ROOT/result "file://$TEST_ROOT/flake.tar.gz?narHash=sha256-qQ2Zz4DNHViCUrp6gTS7EE4+RMqFQtUfWF2UNUtJKS0=" 2>&1 | grep 'NAR hash mismatch'
+expectStderr 102 nix build -o $TEST_ROOT/result "file://$TEST_ROOT/flake.tar.gz?narHash=sha256-qQ2Zz4DNHViCUrp6gTS7EE4+RMqFQtUfWF2UNUtJKS0=" | grep 'NAR hash mismatch'
# Test --override-input.
git -C $flake3Dir reset --hard
@@ -491,3 +493,14 @@ nix store delete $(nix store add-path $badFlakeDir)
[[ $(nix-instantiate --eval flake:git+file://$flake3Dir -A x) = 123 ]]
[[ $(nix-instantiate -I flake3=flake:flake3 --eval '<flake3>' -A x) = 123 ]]
[[ $(NIX_PATH=flake3=flake:flake3 nix-instantiate --eval '<flake3>' -A x) = 123 ]]
+
+# Test alternate lockfile paths.
+nix flake lock $flake2Dir --output-lock-file $TEST_ROOT/flake2.lock
+cmp $flake2Dir/flake.lock $TEST_ROOT/flake2.lock >/dev/null # lockfiles should be identical, since we're referencing flake2's original one
+
+nix flake lock $flake2Dir --output-lock-file $TEST_ROOT/flake2-overridden.lock --override-input flake1 git+file://$flake1Dir?rev=$flake1OriginalCommit
+expectStderr 1 cmp $flake2Dir/flake.lock $TEST_ROOT/flake2-overridden.lock
+nix flake metadata $flake2Dir --reference-lock-file $TEST_ROOT/flake2-overridden.lock | grepQuiet $flake1OriginalCommit
+
+# reference-lock-file can only be used if allow-dirty is set.
+expectStderr 1 nix flake metadata $flake2Dir --no-allow-dirty --reference-lock-file $TEST_ROOT/flake2-overridden.lock
diff --git a/tests/flakes/follow-paths.sh b/tests/flakes/follow-paths.sh
index 19cc1bafa..fe9b51c65 100644
--- a/tests/flakes/follow-paths.sh
+++ b/tests/flakes/follow-paths.sh
@@ -128,7 +128,7 @@ EOF
git -C $flakeFollowsA add flake.nix
-nix flake lock $flakeFollowsA 2>&1 | grep 'points outside'
+expect 1 nix flake lock $flakeFollowsA 2>&1 | grep 'points outside'
# Non-existant follows should print a warning.
cat >$flakeFollowsA/flake.nix <<EOF
diff --git a/tests/flakes/mercurial.sh b/tests/flakes/mercurial.sh
index 2614006c8..0622c79b7 100644
--- a/tests/flakes/mercurial.sh
+++ b/tests/flakes/mercurial.sh
@@ -1,9 +1,6 @@
source ./common.sh
-if [[ -z $(type -p hg) ]]; then
- echo "Mercurial not installed; skipping"
- exit 99
-fi
+[[ $(type -p hg) ]] || skipTest "Mercurial not installed"
flake1Dir=$TEST_ROOT/flake-hg1
mkdir -p $flake1Dir
diff --git a/tests/flakes/show.sh b/tests/flakes/show.sh
index dd13264b9..a3d300552 100644
--- a/tests/flakes/show.sh
+++ b/tests/flakes/show.sh
@@ -64,3 +64,24 @@ in
assert show_output == { };
true
'
+
+# Test that attributes with errors are handled correctly.
+# nixpkgs.legacyPackages is a particularly prominent instance of this.
+cat >flake.nix <<EOF
+{
+ outputs = inputs: {
+ legacyPackages.$system = {
+ AAAAAASomeThingsFailToEvaluate = throw "nooo";
+ simple = import ./simple.nix;
+ };
+ };
+}
+EOF
+nix flake show --json --legacy --all-systems > show-output.json
+nix eval --impure --expr '
+let show_output = builtins.fromJSON (builtins.readFile ./show-output.json);
+in
+assert show_output.legacyPackages.${builtins.currentSystem}.AAAAAASomeThingsFailToEvaluate == { };
+assert show_output.legacyPackages.${builtins.currentSystem}.simple.name == "simple";
+true
+'
diff --git a/tests/fmt.sh b/tests/fmt.sh
index 254681ca2..3c1bd9989 100644
--- a/tests/fmt.sh
+++ b/tests/fmt.sh
@@ -1,7 +1,5 @@
source common.sh
-set -o pipefail
-
clearStore
rm -rf $TEST_HOME/.cache $TEST_HOME/.config $TEST_HOME/.local
diff --git a/tests/function-trace.sh b/tests/function-trace.sh
index b0d6c9d59..bd804bf18 100755
--- a/tests/function-trace.sh
+++ b/tests/function-trace.sh
@@ -10,17 +10,15 @@ expect_trace() {
--trace-function-calls \
--expr "$expr" 2>&1 \
| grep "function-trace" \
- | sed -e 's/ [0-9]*$//'
+ | sed -e 's/ [0-9]*$//' \
+ || true
)
echo -n "Tracing expression '$expr'"
- set +e
msg=$(diff -swB \
<(echo "$expect") \
<(echo "$actual")
- );
- result=$?
- set -e
+ ) && result=0 || result=$?
if [ $result -eq 0 ]; then
echo " ok."
else
@@ -67,5 +65,3 @@ expect_trace '1 2' "
function-trace entered «string»:1:1 at
function-trace exited «string»:1:1 at
"
-
-set -e
diff --git a/tests/gc-runtime.sh b/tests/gc-runtime.sh
index 6094959cb..dc1826a55 100644
--- a/tests/gc-runtime.sh
+++ b/tests/gc-runtime.sh
@@ -4,7 +4,7 @@ case $system in
*linux*)
;;
*)
- exit 99;
+ skipTest "Not running Linux";
esac
set -m # enable job control, needed for kill
diff --git a/tests/gc.sh b/tests/gc.sh
index ad09a8b39..95669e25c 100644
--- a/tests/gc.sh
+++ b/tests/gc.sh
@@ -50,3 +50,31 @@ if test -e $outPath/foobar; then false; fi
# Check that the store is empty.
rmdir $NIX_STORE_DIR/.links
rmdir $NIX_STORE_DIR
+
+## Test `nix-collect-garbage -d`
+testCollectGarbageD () {
+ clearProfiles
+ # Run two `nix-env` commands, should create two generations of
+ # the profile
+ nix-env -f ./user-envs.nix -i foo-1.0
+ nix-env -f ./user-envs.nix -i foo-2.0pre1
+ [[ $(nix-env --list-generations | wc -l) -eq 2 ]]
+
+ # Clear the profile history. There should be only one generation
+ # left
+ nix-collect-garbage -d
+ [[ $(nix-env --list-generations | wc -l) -eq 1 ]]
+}
+# `nix-env` doesn't work with CA derivations, so skip this bit if we're
+# using them.
+if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then
+ testCollectGarbageD
+
+ # Run the same test, but forcing the profiles at their legacy location under
+ # /nix/var/nix.
+ #
+ # Regression test for #8294
+ rm ~/.nix-profile
+ ln -s $NIX_STATE_DIR/profiles/per-user/me ~/.nix-profile
+ testCollectGarbageD
+fi
diff --git a/tests/hash.sh b/tests/hash.sh
index e5f75e2cf..34c1bb38a 100644
--- a/tests/hash.sh
+++ b/tests/hash.sh
@@ -2,13 +2,19 @@ source common.sh
try () {
printf "%s" "$2" > $TEST_ROOT/vector
- hash=$(nix hash file --base16 $EXTRA --type "$1" $TEST_ROOT/vector)
- if test "$hash" != "$3"; then
- echo "hash $1, expected $3, got $hash"
+ hash="$(nix-hash --flat ${FORMAT_FLAG-} --type "$1" "$TEST_ROOT/vector")"
+ if ! (( "${NO_TEST_CLASSIC-}" )) && test "$hash" != "$3"; then
+ echo "try nix-hash: hash $1, expected $3, got $hash"
+ exit 1
+ fi
+ hash="$(nix hash file ${FORMAT_FLAG-} --type "$1" "$TEST_ROOT/vector")"
+ if ! (( "${NO_TEST_NIX_COMMAND-}" )) && test "$hash" != "$3"; then
+ echo "try nix hash: hash $1, expected $3, got $hash"
exit 1
fi
}
+FORMAT_FLAG=--base16
try md5 "" "d41d8cd98f00b204e9800998ecf8427e"
try md5 "a" "0cc175b9c0f1b6a831c399e269772661"
try md5 "abc" "900150983cd24fb0d6963f7d28e17f72"
@@ -28,16 +34,24 @@ try sha256 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "248d6a61d
try sha512 "" "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
try sha512 "abc" "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f"
try sha512 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "204a8fc6dda82f0a0ced7beb8e08a41657c16ef468b228a8279be331a703c33596fd15c13b1b07f9aa1d3bea57789ca031ad85c7a71dd70354ec631238ca3445"
+unset FORMAT_FLAG
-EXTRA=--base32
+FORMAT_FLAG=--base32
try sha256 "abc" "1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s"
-EXTRA=
+unset FORMAT_FLAG
-EXTRA=--sri
+FORMAT_FLAG=--sri
try sha512 "" "sha512-z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXcg/SpIdNs6c5H0NE8XYXysP+DGNKHfuwvY7kxvUdBeoGlODJ6+SfaPg=="
try sha512 "abc" "sha512-3a81oZNherrMQXNJriBBMRLm+k6JqX6iCp7u5ktV05ohkpkqJ0/BqDa6PCOj/uu9RU1EI2Q86A4qmslPpUyknw=="
try sha512 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "sha512-IEqPxt2oLwoM7XvrjgikFlfBbvRosiioJ5vjMacDwzWW/RXBOxsH+aodO+pXeJygMa2Fx6cd1wNU7GMSOMo0RQ=="
try sha256 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "sha256-JI1qYdIGOLjlwCaTDD5gOaM85Flk/yFn9uzt1BnbBsE="
+unset FORMAT_FLAG
+
+# nix-hash [--flat] defaults to the Base16 format
+NO_TEST_NIX_COMMAND=1 try sha512 "abc" "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f"
+
+# nix hash [file|path] defaults to the SRI format
+NO_TEST_CLASSIC=1 try sha512 "abc" "sha512-3a81oZNherrMQXNJriBBMRLm+k6JqX6iCp7u5ktV05ohkpkqJ0/BqDa6PCOj/uu9RU1EI2Q86A4qmslPpUyknw=="
try2 () {
hash=$(nix-hash --type "$1" $TEST_ROOT/hash-path)
@@ -69,12 +83,18 @@ try2 md5 "f78b733a68f5edbdf9413899339eaa4a"
# Conversion.
try3() {
+ h64=$(nix-hash --type "$1" --to-base64 "$2")
+ [ "$h64" = "$4" ]
h64=$(nix hash to-base64 --type "$1" "$2")
[ "$h64" = "$4" ]
+ sri=$(nix-hash --type "$1" --to-sri "$2")
+ [ "$sri" = "$1-$4" ]
sri=$(nix hash to-sri --type "$1" "$2")
[ "$sri" = "$1-$4" ]
h32=$(nix-hash --type "$1" --to-base32 "$2")
[ "$h32" = "$3" ]
+ h32=$(nix hash to-base32 --type "$1" "$2")
+ [ "$h32" = "$3" ]
h16=$(nix-hash --type "$1" --to-base16 "$h32")
[ "$h16" = "$2" ]
h16=$(nix hash to-base16 --type "$1" "$h64")
diff --git a/tests/impure-derivations.sh b/tests/impure-derivations.sh
index 23a193833..39d053a04 100644
--- a/tests/impure-derivations.sh
+++ b/tests/impure-derivations.sh
@@ -5,13 +5,20 @@ requireDaemonNewerThan "2.8pre20220311"
enableFeatures "ca-derivations impure-derivations"
restartDaemon
-set -o pipefail
-
clearStore
# Basic test of impure derivations: building one a second time should not use the previous result.
printf 0 > $TEST_ROOT/counter
+# `nix derivation add` with impure derivations work
+drvPath=$(nix-instantiate ./impure-derivations.nix -A impure)
+nix derivation show $drvPath | jq .[] > $TEST_HOME/impure-drv.json
+drvPath2=$(nix derivation add < $TEST_HOME/impure-drv.json)
+[[ "$drvPath" = "$drvPath2" ]]
+
+# But only with the experimental feature!
+expectStderr 1 nix derivation add < $TEST_HOME/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled"
+
nix build --dry-run --json --file ./impure-derivations.nix impure.all
json=$(nix build -L --no-link --json --file ./impure-derivations.nix impure.all)
path1=$(echo $json | jq -r .[].outputs.out)
@@ -39,8 +46,8 @@ path4=$(nix build -L --no-link --json --file ./impure-derivations.nix impureOnIm
(! nix build -L --no-link --json --file ./impure-derivations.nix inputAddressed 2>&1) | grep 'depends on impure derivation'
drvPath=$(nix eval --json --file ./impure-derivations.nix impure.drvPath | jq -r .)
-[[ $(nix show-derivation $drvPath | jq ".[\"$drvPath\"].outputs.out.impure") = true ]]
-[[ $(nix show-derivation $drvPath | jq ".[\"$drvPath\"].outputs.stuff.impure") = true ]]
+[[ $(nix derivation show $drvPath | jq ".[\"$drvPath\"].outputs.out.impure") = true ]]
+[[ $(nix derivation show $drvPath | jq ".[\"$drvPath\"].outputs.stuff.impure") = true ]]
# Fixed-output derivations *can* depend on impure derivations.
path5=$(nix build -L --no-link --json --file ./impure-derivations.nix contentAddressed | jq -r .[].outputs.out)
diff --git a/tests/init.sh b/tests/init.sh
index fea659516..c420e8c9f 100755
--- a/tests/init.sh
+++ b/tests/init.sh
@@ -1,5 +1,3 @@
-set -eu -o pipefail
-
# Don't start the daemon
source common/vars-and-functions.sh
diff --git a/tests/install-darwin.sh b/tests/install-darwin.sh
index 7e44e54c4..ea2b75323 100755
--- a/tests/install-darwin.sh
+++ b/tests/install-darwin.sh
@@ -4,7 +4,7 @@ set -eux
cleanup() {
PLIST="/Library/LaunchDaemons/org.nixos.nix-daemon.plist"
- if sudo launchctl list | grep -q nix-daemon; then
+ if sudo launchctl list | grepQuiet nix-daemon; then
sudo launchctl unload "$PLIST"
fi
diff --git a/tests/installer/default.nix b/tests/installer/default.nix
index 31d83699d..49cfd2bcc 100644
--- a/tests/installer/default.nix
+++ b/tests/installer/default.nix
@@ -17,7 +17,7 @@ let
script = ''
tar -xf ./nix.tar.xz
mv ./nix-* nix
- ./nix/install --no-daemon
+ ./nix/install --no-daemon --no-channel-add
'';
};
@@ -30,6 +30,14 @@ let
};
};
+ mockChannel = pkgs:
+ pkgs.runCommandNoCC "mock-channel" {} ''
+ mkdir nixexprs
+ mkdir -p $out/channel
+ echo -n 'someContent' > nixexprs/someFile
+ tar cvf - nixexprs | bzip2 > $out/channel/nixexprs.tar.bz2
+ '';
+
disableSELinux = "sudo setenforce 0";
images = {
@@ -189,6 +197,11 @@ let
echo "Running installer..."
$ssh "set -eux; $installScript"
+ echo "Copying the mock channel"
+ # `scp -r` doesn't seem to work properly on some rhel instances, so let's
+ # use a plain tarpipe instead
+ tar -C ${mockChannel pkgs} -c channel | ssh -p 20022 $ssh_opts vagrant@localhost tar x -f-
+
echo "Testing Nix installation..."
$ssh <<EOF
set -ex
@@ -204,6 +217,17 @@ let
out=\$(nix-build --no-substitute -E 'derivation { name = "foo"; system = "x86_64-linux"; builder = "/bin/sh"; args = ["-c" "echo foobar > \$out"]; }')
[[ \$(cat \$out) = foobar ]]
+
+ if pgrep nix-daemon; then
+ MAYBESUDO="sudo"
+ else
+ MAYBESUDO=""
+ fi
+
+
+ $MAYBESUDO \$(which nix-channel) --add file://\$HOME/channel myChannel
+ $MAYBESUDO \$(which nix-channel) --update
+ [[ \$(nix-instantiate --eval --expr 'builtins.readFile <myChannel/someFile>') = '"someContent"' ]]
EOF
echo "Done!"
diff --git a/tests/lang.sh b/tests/lang.sh
index 95e795e2e..8170cb39d 100644
--- a/tests/lang.sh
+++ b/tests/lang.sh
@@ -4,12 +4,19 @@ export TEST_VAR=foo # for eval-okay-getenv.nix
export NIX_REMOTE=dummy://
export NIX_STORE_DIR=/nix/store
-nix-instantiate --eval -E 'builtins.trace "Hello" 123' 2>&1 | grep -q Hello
+nix-instantiate --eval -E 'builtins.trace "Hello" 123' 2>&1 | grepQuiet Hello
+nix-instantiate --eval -E 'builtins.trace "Hello" 123' 2>/dev/null | grepQuiet 123
nix-instantiate --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1
-nix-instantiate --trace-verbose --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grep -q Hello
-(! nix-instantiate --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grep -q Hello)
-(! nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1 | grep -q Hello)
-nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" (throw "Foo")' 2>&1 | grep -q Hello
+nix-instantiate --trace-verbose --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grepQuiet Hello
+nix-instantiate --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grepQuietInverse Hello
+nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1 | grepQuietInverse Hello
+expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" (throw "Foo")' | grepQuiet Hello
+
+nix-instantiate --eval -E 'let x = builtins.trace { x = x; } true; in x' \
+ 2>&1 | grepQuiet -E 'trace: { x = «potential infinite recursion»; }'
+
+nix-instantiate --eval -E 'let x = { repeating = x; tracing = builtins.trace x true; }; in x.tracing'\
+ 2>&1 | grepQuiet -F 'trace: { repeating = «repeated»; tracing = «potential infinite recursion»; }'
set +x
diff --git a/tests/legacy-ssh-store.sh b/tests/legacy-ssh-store.sh
new file mode 100644
index 000000000..71b716b84
--- /dev/null
+++ b/tests/legacy-ssh-store.sh
@@ -0,0 +1,4 @@
+source common.sh
+
+# Check that store ping trusted doesn't yet work with ssh://
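+# (`jq -e` sets its exit status from the last output value, so this pipeline
+# fails if the JSON does contain a "trusted" field.)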
+nix --store ssh://localhost?remote-store=$TEST_ROOT/other-store store ping --json | jq -e 'has("trusted") | not'
diff --git a/tests/linux-sandbox-cert-test.nix b/tests/linux-sandbox-cert-test.nix
new file mode 100644
index 000000000..2b86dad2e
--- /dev/null
+++ b/tests/linux-sandbox-cert-test.nix
@@ -0,0 +1,29 @@
+{ fixed-output }:
+
+with import ./config.nix;
+
+mkDerivation ({
+ name = "ssl-export";
+ buildCommand = ''
+ # Add some indirection, otherwise grepping into the debug output finds the string.
+ report () { echo CERT_$1_IN_SANDBOX; }
+
+ if [ -f /etc/ssl/certs/ca-certificates.crt ]; then
+ content=$(</etc/ssl/certs/ca-certificates.crt)
+ if [ "$content" == CERT_CONTENT ]; then
+ report present
+ fi
+ else
+ report missing
+ fi
+
+ # Always fail, because we do not want to bother with fixed-output
+ # derivations being cached, and do not want to compute the right hash.
+ false;
+ '';
+} // (
+ if fixed-output == "fixed-output"
+ then { outputHash = "sha256:0000000000000000000000000000000000000000000000000000000000000000"; }
+ else { }
+))
+
diff --git a/tests/linux-sandbox.sh b/tests/linux-sandbox.sh
index e62039567..45f0ce7a4 100644
--- a/tests/linux-sandbox.sh
+++ b/tests/linux-sandbox.sh
@@ -4,13 +4,13 @@ needLocalStore "the sandbox only runs on the builder side, so it makes no sense
clearStore
-if ! canUseSandbox; then exit 99; fi
+requireSandboxSupport
# Note: we need to bind-mount $SHELL into the chroot. Currently we
# only support the case where $SHELL is in the Nix store, because
# otherwise things get complicated (e.g. if it's in /bin, do we need
# /lib as well?).
-if [[ ! $SHELL =~ /nix/store ]]; then exit 99; fi
+if [[ ! $SHELL =~ /nix/store ]]; then skipTest "Shell is not from Nix store"; fi
chmod -R u+w $TEST_ROOT/store0 || true
rm -rf $TEST_ROOT/store0
@@ -35,8 +35,32 @@ nix-build dependencies.nix --no-out-link --check --sandbox-paths /nix/store
nix-build check.nix -A nondeterministic --sandbox-paths /nix/store --no-out-link
(! nix-build check.nix -A nondeterministic --sandbox-paths /nix/store --no-out-link --check -K 2> $TEST_ROOT/log)
-if grep -q 'error: renaming' $TEST_ROOT/log; then false; fi
-grep -q 'may not be deterministic' $TEST_ROOT/log
+if grepQuiet 'error: renaming' $TEST_ROOT/log; then false; fi
+grepQuiet 'may not be deterministic' $TEST_ROOT/log
# Test that sandboxed builds cannot write to /etc easily
(! nix-build -E 'with import ./config.nix; mkDerivation { name = "etc-write"; buildCommand = "echo > /etc/test"; }' --no-out-link --sandbox-paths /nix/store)
+
+
+## Test mounting of SSL certificates into the sandbox
+testCert () {
+ (! nix-build linux-sandbox-cert-test.nix --argstr fixed-output "$2" --no-out-link --sandbox-paths /nix/store --option ssl-cert-file "$3" 2> $TEST_ROOT/log)
+ cat $TEST_ROOT/log
+ grepQuiet "CERT_${1}_IN_SANDBOX" $TEST_ROOT/log
+}
+
+nocert=$TEST_ROOT/no-cert-file.pem
+cert=$TEST_ROOT/some-cert-file.pem
+echo -n "CERT_CONTENT" > $cert
+
+# No cert in sandbox when not a fixed-output derivation
+testCert missing normal "$cert"
+
+# No cert in sandbox when ssl-cert-file is empty
+testCert missing fixed-output ""
+
+# No cert in sandbox when ssl-cert-file is a nonexistent file
+testCert missing fixed-output "$nocert"
+
+# Cert in sandbox when ssl-cert-file is set to an existing file
+testCert present fixed-output "$cert"
diff --git a/tests/local-store.sh b/tests/local-store.sh
index 0247346f1..89502f864 100644
--- a/tests/local-store.sh
+++ b/tests/local-store.sh
@@ -17,3 +17,6 @@ PATH2=$(nix path-info --store "$PWD/x" $CORRECT_PATH)
PATH3=$(nix path-info --store "local?root=$PWD/x" $CORRECT_PATH)
[ $CORRECT_PATH == $PATH3 ]
+
+# Ensure store ping trusted works with local store
+nix --store ./x store ping --json | jq -e '.trusted'
diff --git a/tests/local.mk b/tests/local.mk
index 4a620f18b..9e340e2e2 100644
--- a/tests/local.mk
+++ b/tests/local.mk
@@ -1,4 +1,5 @@
nix_tests = \
+ test-infra.sh \
init.sh \
flakes/flakes.sh \
flakes/run.sh \
@@ -12,10 +13,13 @@ nix_tests = \
flakes/unlocked-override.sh \
flakes/absolute-paths.sh \
flakes/build-paths.sh \
+ flakes/flake-in-submodule.sh \
ca/gc.sh \
gc.sh \
remote-store.sh \
+ legacy-ssh-store.sh \
lang.sh \
+ experimental-features.sh \
fetchMercurial.sh \
gc-auto.sh \
user-envs.sh \
@@ -66,6 +70,11 @@ nix_tests = \
check-reqs.sh \
build-remote-content-addressed-fixed.sh \
build-remote-content-addressed-floating.sh \
+ build-remote-trustless-should-pass-0.sh \
+ build-remote-trustless-should-pass-1.sh \
+ build-remote-trustless-should-pass-2.sh \
+ build-remote-trustless-should-pass-3.sh \
+ build-remote-trustless-should-fail-0.sh \
nar-access.sh \
pure-eval.sh \
eval.sh \
@@ -98,8 +107,12 @@ nix_tests = \
eval-store.sh \
why-depends.sh \
ca/why-depends.sh \
+ derivation-json.sh \
+ ca/derivation-json.sh \
import-derivation.sh \
ca/import-derivation.sh \
+ dyn-drv/text-hashed-output.sh \
+ dyn-drv/recursive-mod-json.sh \
nix_path.sh \
case-hack.sh \
placeholders.sh \
@@ -113,7 +126,6 @@ nix_tests = \
db-migration.sh \
bash-profile.sh \
pass-as-file.sh \
- describe-stores.sh \
nix-profile.sh \
suggestions.sh \
store-ping.sh \
@@ -128,11 +140,19 @@ ifeq ($(HAVE_LIBCPUID), 1)
nix_tests += compute-levels.sh
endif
-install-tests += $(foreach x, $(nix_tests), tests/$(x))
+install-tests += $(foreach x, $(nix_tests), $(d)/$(x))
-clean-files += $(d)/tests/common/vars-and-functions.sh $(d)/config.nix $(d)/ca/config.nix
+clean-files += \
+ $(d)/common/vars-and-functions.sh \
+ $(d)/config.nix \
+ $(d)/ca/config.nix \
+ $(d)/dyn-drv/config.nix
-test-deps += tests/common/vars-and-functions.sh tests/config.nix tests/ca/config.nix tests/plugins/libplugintest.$(SO_EXT)
+test-deps += \
+ tests/common/vars-and-functions.sh \
+ tests/config.nix \
+ tests/ca/config.nix \
+ tests/dyn-drv/config.nix
ifeq ($(BUILD_SHARED_LIBS), 1)
test-deps += tests/plugins/libplugintest.$(SO_EXT)
diff --git a/tests/misc.sh b/tests/misc.sh
index 2830856ae..60d58310e 100644
--- a/tests/misc.sh
+++ b/tests/misc.sh
@@ -3,17 +3,17 @@ source common.sh
# Tests miscellaneous commands.
# Do all commands have help?
-#nix-env --help | grep -q install
-#nix-store --help | grep -q realise
-#nix-instantiate --help | grep -q eval
-#nix-hash --help | grep -q base32
+#nix-env --help | grepQuiet install
+#nix-store --help | grepQuiet realise
+#nix-instantiate --help | grepQuiet eval
+#nix-hash --help | grepQuiet base32
# Can we ask for the version number?
nix-env --version | grep "$version"
# Usage errors.
-nix-env --foo 2>&1 | grep "no operation"
-nix-env -q --foo 2>&1 | grep "unknown flag"
+expect 1 nix-env --foo 2>&1 | grep "no operation"
+expect 1 nix-env -q --foo 2>&1 | grep "unknown flag"
# Eval Errors.
eval_arg_res=$(nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 || true)
diff --git a/tests/multiple-outputs.sh b/tests/multiple-outputs.sh
index 66be6fa64..330600d08 100644
--- a/tests/multiple-outputs.sh
+++ b/tests/multiple-outputs.sh
@@ -19,8 +19,8 @@ echo "evaluating c..."
# outputs.
drvPath=$(nix-instantiate multiple-outputs.nix -A c)
#[ "$drvPath" = "$drvPath2" ]
-grep -q 'multiple-outputs-a.drv",\["first","second"\]' $drvPath
-grep -q 'multiple-outputs-b.drv",\["out"\]' $drvPath
+grepQuiet 'multiple-outputs-a.drv",\["first","second"\]' $drvPath
+grepQuiet 'multiple-outputs-b.drv",\["out"\]' $drvPath
# While we're at it, test the ‘unsafeDiscardOutputDependency’ primop.
outPath=$(nix-build multiple-outputs.nix -A d --no-out-link)
@@ -84,5 +84,5 @@ nix-store --gc --print-roots
rm -rf $NIX_STORE_DIR/.links
rmdir $NIX_STORE_DIR
-nix build -f multiple-outputs.nix invalid-output-name-1 2>&1 | grep 'contains illegal character'
-nix build -f multiple-outputs.nix invalid-output-name-2 2>&1 | grep 'contains illegal character'
+expect 1 nix build -f multiple-outputs.nix invalid-output-name-1 2>&1 | grep 'contains illegal character'
+expect 1 nix build -f multiple-outputs.nix invalid-output-name-2 2>&1 | grep 'contains illegal character'
diff --git a/tests/nar-access.sh b/tests/nar-access.sh
index dcc2e8a36..d487d58d2 100644
--- a/tests/nar-access.sh
+++ b/tests/nar-access.sh
@@ -46,8 +46,8 @@ diff -u \
<(echo '{"type":"regular","size":0}' | jq -S)
# Test missing files.
-nix store ls --json -R $storePath/xyzzy 2>&1 | grep 'does not exist in NAR'
-nix store ls $storePath/xyzzy 2>&1 | grep 'does not exist'
+expect 1 nix store ls --json -R $storePath/xyzzy 2>&1 | grep 'does not exist in NAR'
+expect 1 nix store ls $storePath/xyzzy 2>&1 | grep 'does not exist'
# Test failure to dump.
if nix-store --dump $storePath >/dev/full ; then
diff --git a/tests/nix-channel.sh b/tests/nix-channel.sh
index b64283f48..dbb3114f1 100644
--- a/tests/nix-channel.sh
+++ b/tests/nix-channel.sh
@@ -6,7 +6,7 @@ rm -f $TEST_HOME/.nix-channels $TEST_HOME/.nix-profile
# Test add/list/remove.
nix-channel --add http://foo/bar xyzzy
-nix-channel --list | grep -q http://foo/bar
+nix-channel --list | grepQuiet http://foo/bar
nix-channel --remove xyzzy
[ -e $TEST_HOME/.nix-channels ]
@@ -17,7 +17,7 @@ nix-channel --remove xyzzy
export NIX_CONFIG="use-xdg-base-directories = true"
nix-channel --add http://foo/bar xyzzy
-nix-channel --list | grep -q http://foo/bar
+nix-channel --list | grepQuiet http://foo/bar
nix-channel --remove xyzzy
unset NIX_CONFIG
@@ -41,8 +41,8 @@ nix-channel --update
# Do a query.
nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml
-grep -q 'meta.*description.*Random test package' $TEST_ROOT/meta.xml
-grep -q 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml
+grepQuiet 'meta.*description.*Random test package' $TEST_ROOT/meta.xml
+grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml
# Do an install.
nix-env -i dependencies-top
@@ -54,9 +54,9 @@ nix-channel --update
# Do a query.
nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml
-grep -q 'meta.*description.*Random test package' $TEST_ROOT/meta.xml
-grep -q 'item.*attrPath="bar".*name="dependencies-top"' $TEST_ROOT/meta.xml
-grep -q 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml
+grepQuiet 'meta.*description.*Random test package' $TEST_ROOT/meta.xml
+grepQuiet 'item.*attrPath="bar".*name="dependencies-top"' $TEST_ROOT/meta.xml
+grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml
# Do an install.
nix-env -i dependencies-top
diff --git a/tests/nix-daemon-untrusting.sh b/tests/nix-daemon-untrusting.sh
new file mode 100755
index 000000000..bcdb70989
--- /dev/null
+++ b/tests/nix-daemon-untrusting.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
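+
+# Wrapper that runs nix-daemon with every client treated as untrusted,
+# so tests can exercise the untrusted-client code paths.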
+
+exec nix-daemon --force-untrusted "$@"
diff --git a/tests/nix-profile.sh b/tests/nix-profile.sh
index 652e8a8f2..9da3f802b 100644
--- a/tests/nix-profile.sh
+++ b/tests/nix-profile.sh
@@ -144,6 +144,7 @@ expect 1 nix profile install $flake2Dir
diff -u <(
nix --offline profile install $flake2Dir 2>&1 1> /dev/null \
| grep -vE "^warning: " \
+ | grep -vE "^error \(ignored\): " \
|| true
) <(cat << EOF
error: An existing package already provides the following file:
@@ -156,17 +157,17 @@ error: An existing package already provides the following file:
To remove the existing package:
- nix profile remove path:${flake1Dir}
+ nix profile remove path:${flake1Dir}#packages.${system}.default
The new package can also be installed next to the existing one by assigning a different priority.
The conflicting packages have a priority of 5.
To prioritise the new package:
- nix profile install path:${flake2Dir} --priority 4
+ nix profile install path:${flake2Dir}#packages.${system}.default --priority 4
To prioritise the existing package:
- nix profile install path:${flake2Dir} --priority 6
+ nix profile install path:${flake2Dir}#packages.${system}.default --priority 6
EOF
)
[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]]
@@ -176,3 +177,10 @@ nix profile install $flake2Dir --priority 0
[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World2" ]]
# nix profile install $flake1Dir --priority 100
# [[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]]
+
+# Ensure that conflicts are handled properly even when the installables aren't
+# flake references.
+# Regression test for https://github.com/NixOS/nix/issues/8284
+clearProfiles
+nix profile install $(nix build $flake1Dir --no-link --print-out-paths)
+expect 1 nix profile install --impure --expr "(builtins.getFlake ''$flake2Dir'').packages.$system.default"
diff --git a/tests/nix-shell.sh b/tests/nix-shell.sh
index f291c6f79..edaa1249b 100644
--- a/tests/nix-shell.sh
+++ b/tests/nix-shell.sh
@@ -88,20 +88,55 @@ output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.rb abc ruby)
nix develop -f "$shellDotNix" shellDrv -c bash -c '[[ -n $stdenv ]]'
# Ensure `nix develop -c` preserves stdin
-echo foo | nix develop -f "$shellDotNix" shellDrv -c cat | grep -q foo
+echo foo | nix develop -f "$shellDotNix" shellDrv -c cat | grepQuiet foo
# Ensure `nix develop -c` actually executes the command if stdout isn't a terminal
-nix develop -f "$shellDotNix" shellDrv -c echo foo |& grep -q foo
+nix develop -f "$shellDotNix" shellDrv -c echo foo |& grepQuiet foo
# Test 'nix print-dev-env'.
-[[ $(nix print-dev-env -f "$shellDotNix" shellDrv --json | jq -r .variables.arr1.value[2]) = '3 4' ]]
-
-source <(nix print-dev-env -f "$shellDotNix" shellDrv)
-[[ -n $stdenv ]]
-[[ ${arr1[2]} = "3 4" ]]
-[[ ${arr2[1]} = $'\n' ]]
-[[ ${arr2[2]} = $'x\ny' ]]
-[[ $(fun) = blabla ]]
+
+nix print-dev-env -f "$shellDotNix" shellDrv > $TEST_ROOT/dev-env.sh
+nix print-dev-env -f "$shellDotNix" shellDrv --json > $TEST_ROOT/dev-env.json
+
+# Test with raw drv
+
+shellDrv=$(nix-instantiate "$shellDotNix" -A shellDrv.out)
+
+nix develop $shellDrv -c bash -c '[[ -n $stdenv ]]'
+
+nix print-dev-env $shellDrv > $TEST_ROOT/dev-env2.sh
+nix print-dev-env $shellDrv --json > $TEST_ROOT/dev-env2.json
+
+diff $TEST_ROOT/dev-env{,2}.sh
+diff $TEST_ROOT/dev-env{,2}.json
+
+# Ensure `nix print-dev-env --json` contains variable assignments.
+[[ $(jq -r .variables.arr1.value[2] $TEST_ROOT/dev-env.json) = '3 4' ]]
+
+# Run tests involving `source <(nix print-dev-env)` in subshells to avoid modifying the current
+# environment.
+
+set +u # FIXME: Make print-dev-env `set -u` compliant (issue #7951)
+
+# Ensure `source <(nix print-dev-env)` modifies the environment.
+(
+ path=$PATH
+ source $TEST_ROOT/dev-env.sh
+ [[ -n $stdenv ]]
+ [[ ${arr1[2]} = "3 4" ]]
+ [[ ${arr2[1]} = $'\n' ]]
+ [[ ${arr2[2]} = $'x\ny' ]]
+ [[ $(fun) = blabla ]]
+ [[ $PATH = $(jq -r .variables.PATH.value $TEST_ROOT/dev-env.json):$path ]]
+)
+
+# Ensure `source <(nix print-dev-env)` handles the case when PATH is empty.
+(
+ path=$PATH
+ PATH=
+ source $TEST_ROOT/dev-env.sh
+ [[ $PATH = $(PATH=$path jq -r .variables.PATH.value $TEST_ROOT/dev-env.json) ]]
+)
# Test nix-shell with ellipsis and no `inNixShell` argument (for backwards compat with old nixpkgs)
cat >$TEST_ROOT/shell-ellipsis.nix <<EOF
diff --git a/tests/nixos/nix-copy.nix b/tests/nixos/nix-copy.nix
new file mode 100644
index 000000000..16c477bf9
--- /dev/null
+++ b/tests/nixos/nix-copy.nix
@@ -0,0 +1,95 @@
+# Test that ‘nix copy’ works over ssh.
+
+{ lib, config, nixpkgs, hostPkgs, ... }:
+
+let
+ pkgs = config.nodes.client.nixpkgs.pkgs;
+
+ pkgA = pkgs.cowsay;
+ pkgB = pkgs.wget;
+ pkgC = pkgs.hello;
+ pkgD = pkgs.tmux;
+
+in {
+ name = "nix-copy";
+
+ enableOCR = true;
+
+ nodes =
+ { client =
+ { config, lib, pkgs, ... }:
+ { virtualisation.writableStore = true;
+ virtualisation.additionalPaths = [ pkgA pkgD.drvPath ];
+ nix.settings.substituters = lib.mkForce [ ];
+ nix.settings.experimental-features = [ "nix-command" ];
+ services.getty.autologinUser = "root";
+ programs.ssh.extraConfig = ''
+ Host *
+ ControlMaster auto
+ ControlPath ~/.ssh/master-%h:%r@%n:%p
+ ControlPersist 15m
+ '';
+ };
+
+ server =
+ { config, pkgs, ... }:
+ { services.openssh.enable = true;
+ services.openssh.permitRootLogin = "yes";
+ users.users.root.password = "foobar";
+ virtualisation.writableStore = true;
+ virtualisation.additionalPaths = [ pkgB pkgC ];
+ };
+ };
+
+ testScript = { nodes }: ''
+ # fmt: off
+ import subprocess
+
+ # Create an SSH key on the client.
+ subprocess.run([
+ "${pkgs.openssh}/bin/ssh-keygen", "-t", "ed25519", "-f", "key", "-N", ""
+ ], capture_output=True, check=True)
+
+ start_all()
+
+ server.wait_for_unit("sshd")
+ client.wait_for_unit("network.target")
+ client.wait_for_unit("getty@tty1.service")
+ client.wait_for_text("]#")
+
+ # Copy the closure of package A from the client to the server using password authentication,
+ # and check that all prompts are visible
+ server.fail("nix-store --check-validity ${pkgA}")
+ client.send_chars("nix copy --to ssh://server ${pkgA} >&2; echo done\n")
+ client.wait_for_text("continue connecting")
+ client.send_chars("yes\n")
+ client.wait_for_text("Password:")
+ client.send_chars("foobar\n")
+ client.wait_for_text("done")
+ server.succeed("nix-store --check-validity ${pkgA}")
+
+ # Check that ControlMaster is working
+ client.send_chars("nix copy --to ssh://server ${pkgA} >&2; echo done\n")
+ client.wait_for_text("done")
+
+ client.copy_from_host("key", "/root/.ssh/id_ed25519")
+ client.succeed("chmod 600 /root/.ssh/id_ed25519")
+
+ # Install the SSH key on the server.
+ server.copy_from_host("key.pub", "/root/.ssh/authorized_keys")
+ server.succeed("systemctl restart sshd")
+ client.succeed(f"ssh -o StrictHostKeyChecking=no {server.name} 'echo hello world'")
+
+ # Copy the closure of package B from the server to the client, using ssh-ng.
+ client.fail("nix-store --check-validity ${pkgB}")
+ # Shouldn't download untrusted paths by default
+ client.fail("nix copy --from ssh-ng://server ${pkgB} >&2")
+ client.succeed("nix copy --no-check-sigs --from ssh-ng://server ${pkgB} >&2")
+ client.succeed("nix-store --check-validity ${pkgB}")
+
+ # Copy the derivation of package D's derivation from the client to the server.
+ server.fail("nix-store --check-validity ${pkgD.drvPath}")
+ client.succeed("nix copy --derivation --to ssh://server ${pkgD.drvPath} >&2")
+ server.succeed("nix-store --check-validity ${pkgD.drvPath}")
+ '';
+}
diff --git a/tests/plugins.sh b/tests/plugins.sh
index 6e278ad9d..baf71a362 100644
--- a/tests/plugins.sh
+++ b/tests/plugins.sh
@@ -1,10 +1,7 @@
source common.sh
-set -o pipefail
-
if [[ $BUILD_SHARED_LIBS != 1 ]]; then
- echo "plugins are not supported"
- exit 99
+ skipTest "Plugins are not supported"
fi
res=$(nix --option setting-set true --option plugin-files $PWD/plugins/libplugintest* eval --expr builtins.anotherNull)
diff --git a/tests/plugins/local.mk b/tests/plugins/local.mk
index 8182a6a83..40350aa96 100644
--- a/tests/plugins/local.mk
+++ b/tests/plugins/local.mk
@@ -8,4 +8,4 @@ libplugintest_ALLOW_UNDEFINED := 1
libplugintest_EXCLUDE_FROM_LIBRARY_LIST := 1
-libplugintest_CXXFLAGS := -I src/libutil -I src/libstore -I src/libexpr
+libplugintest_CXXFLAGS := -I src/libutil -I src/libstore -I src/libexpr -I src/libfetchers
diff --git a/tests/post-hook.sh b/tests/post-hook.sh
index 0266eb15d..752f8220c 100644
--- a/tests/post-hook.sh
+++ b/tests/post-hook.sh
@@ -17,6 +17,10 @@ fi
# Build the dependencies and push them to the remote store.
nix-build -o $TEST_ROOT/result dependencies.nix --post-build-hook "$pushToStore"
+# Check that all outputs are passed to the post-build hook even when only one is specified.
+# This can't be verified for the CA-derivations test runs, so it is disabled there.
+export BUILD_HOOK_ONLY_OUT_PATHS=$([ ! $NIX_TESTS_CA_BY_DEFAULT ])
+nix-build -o $TEST_ROOT/result-mult multiple-outputs.nix -A a.first --post-build-hook "$pushToStore"
clearStore
@@ -24,3 +28,4 @@ clearStore
# closure of what we've just built.
nix copy --from "$REMOTE_STORE" --no-require-sigs -f dependencies.nix
nix copy --from "$REMOTE_STORE" --no-require-sigs -f dependencies.nix input1_drv
+nix copy --from "$REMOTE_STORE" --no-require-sigs -f multiple-outputs.nix a^second
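The hunk above and the push-to-store hooks below use the `^` output selector on installables (`a^second`, `"$DRV_PATH"^'*'`). As a rough illustration of that syntax, assuming a multiple-outputs.nix that defines an attribute `a` with outputs `first` and `second`:

    nix build -f multiple-outputs.nix 'a^first'         # only the "first" output
    nix build -f multiple-outputs.nix 'a^first,second'  # both named outputs
    nix build -f multiple-outputs.nix 'a^*'             # all outputs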
diff --git a/tests/pure-eval.sh b/tests/pure-eval.sh
index b83ab8afe..5334bf28e 100644
--- a/tests/pure-eval.sh
+++ b/tests/pure-eval.sh
@@ -8,7 +8,7 @@ nix eval --expr 'assert 1 + 2 == 3; true'
missingImpureErrorMsg=$(! nix eval --expr 'builtins.readFile ./pure-eval.sh' 2>&1)
-echo "$missingImpureErrorMsg" | grep -q -- --impure || \
+echo "$missingImpureErrorMsg" | grepQuiet -- --impure || \
fail "The error message should mention the “--impure” flag to unblock users"
[[ $(nix eval --expr 'builtins.pathExists ./pure-eval.sh') == false ]] || \
diff --git a/tests/push-to-store-old.sh b/tests/push-to-store-old.sh
index b1495c9e2..4187958b2 100755
--- a/tests/push-to-store-old.sh
+++ b/tests/push-to-store-old.sh
@@ -7,4 +7,8 @@ set -e
[ -n "$DRV_PATH" ]
echo Pushing "$OUT_PATHS" to "$REMOTE_STORE"
-printf "%s" "$DRV_PATH" | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs
+if [ -n "$BUILD_HOOK_ONLY_OUT_PATHS" ]; then
+ printf "%s" "$OUT_PATHS" | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs
+else
+ printf "%s" "$DRV_PATH" | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs
+fi
diff --git a/tests/push-to-store.sh b/tests/push-to-store.sh
index 0b090e1b3..9e4e475e0 100755
--- a/tests/push-to-store.sh
+++ b/tests/push-to-store.sh
@@ -7,4 +7,8 @@ set -e
[ -n "$DRV_PATH" ]
echo Pushing "$OUT_PATHS" to "$REMOTE_STORE"
-printf "%s" "$DRV_PATH"^'*' | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs
+if [ -n "$BUILD_HOOK_ONLY_OUT_PATHS" ]; then
+ printf "%s" "$OUT_PATHS" | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs
+else
+ printf "%s" "$DRV_PATH"^'*' | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs
+fi
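push-to-store.sh and push-to-store-old.sh are post-build hooks: Nix invokes them after each successful build with `DRV_PATH` and `OUT_PATHS` exported in the environment. A minimal standalone hook along the same lines, assuming a local file:// binary cache as the destination, might look like:

    #!/bin/sh
    set -eu
    set -f   # disable globbing; OUT_PATHS is a space-separated list of store paths
    echo "Uploading $OUT_PATHS" >&2
    exec nix copy --to "file:///var/tmp/binary-cache" $OUT_PATHS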
diff --git a/tests/recursive.nix b/tests/recursive.nix
new file mode 100644
index 000000000..fa8cc04db
--- /dev/null
+++ b/tests/recursive.nix
@@ -0,0 +1,56 @@
+with import ./config.nix;
+
+mkDerivation rec {
+ name = "recursive";
+ dummy = builtins.toFile "dummy" "bla bla";
+ SHELL = shell;
+
+ # Note: this is a string without context.
+ unreachable = builtins.getEnv "unreachable";
+
+ NIX_TESTS_CA_BY_DEFAULT = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT";
+
+ requiredSystemFeatures = [ "recursive-nix" ];
+
+ buildCommand = ''
+ mkdir $out
+ opts="--experimental-features nix-command ${if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else ""}"
+
+ PATH=${builtins.getEnv "NIX_BIN_DIR"}:$PATH
+
+ # Check that we can query/build paths in our input closure.
+ nix $opts path-info $dummy
+ nix $opts build $dummy
+
+    # Make sure we cannot query/build paths not in our input closure.
+ [[ -e $unreachable ]]
+ (! nix $opts path-info $unreachable)
+ (! nix $opts build $unreachable)
+
+ # Add something to the store.
+ echo foobar > foobar
+ foobar=$(nix $opts store add-path ./foobar)
+
+ nix $opts path-info $foobar
+ nix $opts build $foobar
+
+ # Add it to our closure.
+ ln -s $foobar $out/foobar
+
+ [[ $(nix $opts path-info --all | wc -l) -eq 4 ]]
+
+ # Build a derivation.
+ nix $opts build -L --impure --expr '
+ with import ${./config.nix};
+ mkDerivation {
+ name = "inner1";
+ buildCommand = "echo $fnord blaat > $out";
+ fnord = builtins.toFile "fnord" "fnord";
+ }
+ '
+
+ [[ $(nix $opts path-info --json ./result) =~ fnord ]]
+
+ ln -s $(nix $opts path-info ./result) $out/inner1
+ '';
+}
diff --git a/tests/recursive.sh b/tests/recursive.sh
index 91518d67d..ffeb44e50 100644
--- a/tests/recursive.sh
+++ b/tests/recursive.sh
@@ -1,10 +1,10 @@
source common.sh
-sed -i 's/experimental-features .*/& recursive-nix/' "$NIX_CONF_DIR"/nix.conf
-restartDaemon
-
# FIXME
-if [[ $(uname) != Linux ]]; then exit 99; fi
+if [[ $(uname) != Linux ]]; then skipTest "Not running Linux"; fi
+
+enableFeatures 'recursive-nix'
+restartDaemon
clearStore
@@ -12,63 +12,7 @@ rm -f $TEST_ROOT/result
export unreachable=$(nix store add-path ./recursive.sh)
-NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr '
- with import ./config.nix;
- mkDerivation rec {
- name = "recursive";
- dummy = builtins.toFile "dummy" "bla bla";
- SHELL = shell;
-
- # Note: this is a string without context.
- unreachable = builtins.getEnv "unreachable";
-
- NIX_TESTS_CA_BY_DEFAULT = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT";
-
- requiredSystemFeatures = [ "recursive-nix" ];
-
- buildCommand = '\'\''
- mkdir $out
- opts="--experimental-features nix-command ${if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else ""}"
-
- PATH=${builtins.getEnv "NIX_BIN_DIR"}:$PATH
-
- # Check that we can query/build paths in our input closure.
- nix $opts path-info $dummy
- nix $opts build $dummy
-
- # Make sure we cannot query/build paths not in out input closure.
- [[ -e $unreachable ]]
- (! nix $opts path-info $unreachable)
- (! nix $opts build $unreachable)
-
- # Add something to the store.
- echo foobar > foobar
- foobar=$(nix $opts store add-path ./foobar)
-
- nix $opts path-info $foobar
- nix $opts build $foobar
-
- # Add it to our closure.
- ln -s $foobar $out/foobar
-
- [[ $(nix $opts path-info --all | wc -l) -eq 4 ]]
-
- # Build a derivation.
- nix $opts build -L --impure --expr '\''
- with import ${./config.nix};
- mkDerivation {
- name = "inner1";
- buildCommand = "echo $fnord blaat > $out";
- fnord = builtins.toFile "fnord" "fnord";
- }
- '\''
-
- [[ $(nix $opts path-info --json ./result) =~ fnord ]]
-
- ln -s $(nix $opts path-info ./result) $out/inner1
- '\'\'';
- }
-'
+NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --file ./recursive.nix
[[ $(cat $TEST_ROOT/result/inner1) =~ blaat ]]
diff --git a/tests/remote-store.sh b/tests/remote-store.sh
index 1ae126794..ea32a20d3 100644
--- a/tests/remote-store.sh
+++ b/tests/remote-store.sh
@@ -5,8 +5,19 @@ clearStore
# Ensure "fake ssh" remote store works just as legacy fake ssh would.
nix --store ssh-ng://localhost?remote-store=$TEST_ROOT/other-store doctor
+# Ensure that `store ping` reports trusted when using ssh-ng://
+nix --store ssh-ng://localhost?remote-store=$TEST_ROOT/other-store store ping --json | jq -e '.trusted'
+
startDaemon
+if isDaemonNewer "2.15pre0"; then
+    # Ensure that ping reports trusted with a new daemon
+ nix store ping --json | jq -e '.trusted'
+else
+    # And that the field is absent with an old daemon
+ nix store ping --json | jq -e 'has("trusted") | not'
+fi
+
# Test import-from-derivation through the daemon.
[[ $(nix eval --impure --raw --expr '
with import ./config.nix;
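The checks above rely on `jq -e` for their exit status: `jq -e` exits non-zero when its last output is `false` or `null`, which is what lets `.trusted` and `has("trusted") | not` act as assertions. For instance:

    echo '{"trusted": 1}' | jq -e '.trusted'              # exit 0
    echo '{}'             | jq -e '.trusted'              # exit 1 (null)
    echo '{}'             | jq -e 'has("trusted") | not'  # exit 0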
diff --git a/tests/repl.sh b/tests/repl.sh
index c555560cc..2b3789521 100644
--- a/tests/repl.sh
+++ b/tests/repl.sh
@@ -33,14 +33,14 @@ testRepl () {
nix repl "${nixArgs[@]}" <<< "$replCmds" || fail "nix repl does not work twice with the same inputs"
# simple.nix prints a PATH during build
- echo "$replOutput" | grep -qs 'PATH=' || fail "nix repl :log doesn't output logs"
+ echo "$replOutput" | grepQuiet -s 'PATH=' || fail "nix repl :log doesn't output logs"
local replOutput="$(nix repl "${nixArgs[@]}" <<< "$replFailingCmds" 2>&1)"
echo "$replOutput"
- echo "$replOutput" | grep -qs 'This should fail' \
+ echo "$replOutput" | grepQuiet -s 'This should fail' \
|| fail "nix repl :log doesn't output logs for a failed derivation"
local replOutput="$(nix repl --show-trace "${nixArgs[@]}" <<< "$replUndefinedVariable" 2>&1)"
echo "$replOutput"
- echo "$replOutput" | grep -qs "while evaluating the file" \
+ echo "$replOutput" | grepQuiet -s "while evaluating the file" \
|| fail "nix repl --show-trace doesn't show the trace"
nix repl "${nixArgs[@]}" --option pure-eval true 2>&1 <<< "builtins.currentSystem" \
@@ -58,7 +58,7 @@ testReplResponse () {
local commands="$1"; shift
local expectedResponse="$1"; shift
local response="$(nix repl "$@" <<< "$commands")"
- echo "$response" | grep -qs "$expectedResponse" \
+ echo "$response" | grepQuiet -s "$expectedResponse" \
|| fail "repl command set:
$commands
@@ -79,6 +79,14 @@ testReplResponse '
"result: ${a}"
' "result: 2"
+# Check dollar escaping (https://github.com/NixOS/nix/issues/4909).
+# Note the escaped backslash in the expected response,
+#   \\
+# because the second argument of testReplResponse is a regex.
+testReplResponse '
+"$" + "{hi}"
+' '"\\${hi}"'
+
testReplResponse '
drvPath
' '".*-simple.drv"' \
@@ -121,5 +129,5 @@ sed -i 's/beforeChange/afterChange/' flake/flake.nix
echo ":reload"
echo "changingThing"
) | nix repl ./flake --experimental-features 'flakes repl-flake')
-echo "$replResult" | grep -qs beforeChange
-echo "$replResult" | grep -qs afterChange
+echo "$replResult" | grepQuiet -s beforeChange
+echo "$replResult" | grepQuiet -s afterChange
diff --git a/tests/restricted.sh b/tests/restricted.sh
index 9bd16cf51..776893a56 100644
--- a/tests/restricted.sh
+++ b/tests/restricted.sh
@@ -48,4 +48,4 @@ output="$(nix eval --raw --restrict-eval -I "$traverseDir" \
--expr "builtins.readFile \"$traverseDir/$goUp$(pwd)/restricted-innocent\"" \
2>&1 || :)"
echo "$output" | grep "is forbidden"
-! echo "$output" | grep -F restricted-secret
+echo "$output" | grepInverse -F restricted-secret
diff --git a/tests/search.sh b/tests/search.sh
index 1a98f5b49..8742f8736 100644
--- a/tests/search.sh
+++ b/tests/search.sh
@@ -20,9 +20,9 @@ clearCache
## Search expressions
# Check that empty search string matches all
-nix search -f search.nix '' |grep -q foo
-nix search -f search.nix '' |grep -q bar
-nix search -f search.nix '' |grep -q hello
+nix search -f search.nix '' |grepQuiet foo
+nix search -f search.nix '' |grepQuiet bar
+nix search -f search.nix '' |grepQuiet hello
## Tests for multiple regex/match highlighting
diff --git a/tests/shell.sh b/tests/shell.sh
index 6a80e8385..d2f7cf14e 100644
--- a/tests/shell.sh
+++ b/tests/shell.sh
@@ -10,7 +10,7 @@ nix shell -f shell-hello.nix hello -c hello NixOS | grep 'Hello NixOS'
nix shell -f shell-hello.nix hello^dev -c hello2 | grep 'Hello2'
nix shell -f shell-hello.nix 'hello^*' -c hello2 | grep 'Hello2'
-if ! canUseSandbox; then exit 99; fi
+requireSandboxSupport
chmod -R u+w $TEST_ROOT/store0 || true
rm -rf $TEST_ROOT/store0
diff --git a/tests/tarball.sh b/tests/tarball.sh
index d5cab879c..5f39658c9 100644
--- a/tests/tarball.sh
+++ b/tests/tarball.sh
@@ -19,7 +19,7 @@ test_tarball() {
tarball=$TEST_ROOT/tarball.tar$ext
(cd $TEST_ROOT && tar cf - tarball) | $compressor > $tarball
- nix-env -f file://$tarball -qa --out-path | grep -q dependencies
+ nix-env -f file://$tarball -qa --out-path | grepQuiet dependencies
nix-build -o $TEST_ROOT/result file://$tarball
@@ -34,7 +34,7 @@ test_tarball() {
nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })"
# Do not re-fetch paths already present
nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file:///does-not-exist/must-remain-unused/$tarball; narHash = \"$hash\"; })"
- nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"sha256-xdKv2pq/IiwLSnBBJXW8hNowI4MrdZfW+SYqDQs7Tzc=\"; })" 2>&1 | grep 'NAR hash mismatch in input'
+ expectStderr 102 nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"sha256-xdKv2pq/IiwLSnBBJXW8hNowI4MrdZfW+SYqDQs7Tzc=\"; })" | grep 'NAR hash mismatch in input'
nix-instantiate --strict --eval -E "!((import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })) ? submodules)" >&2
nix-instantiate --strict --eval -E "!((import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })) ? submodules)" 2>&1 | grep 'true'
diff --git a/tests/test-infra.sh b/tests/test-infra.sh
new file mode 100644
index 000000000..54ae120e7
--- /dev/null
+++ b/tests/test-infra.sh
@@ -0,0 +1,85 @@
+# Test the functions for testing themselves!
+# Also test some assumptions on how bash works that they rely on.
+source common.sh
+
+# `true` should exit with 0
+expect 0 true
+
+# `false` should exit with 1
+expect 1 false
+
+# `expect` will fail when we get it wrong
+expect 1 expect 0 false
+
+noisyTrue () {
+ echo YAY! >&2
+ true
+}
+
+noisyFalse () {
+ echo NAY! >&2
+ false
+}
+
+# These should redirect standard error to standard output
+expectStderr 0 noisyTrue | grepQuiet YAY
+expectStderr 1 noisyFalse | grepQuiet NAY
+
+# `set -o pipefail` is enabled
+
+pipefailure () {
+ # shellcheck disable=SC2216
+ true | false | true
+}
+expect 1 pipefailure
+unset pipefailure
+
+pipefailure () {
+ # shellcheck disable=SC2216
+ false | true | true
+}
+expect 1 pipefailure
+unset pipefailure
+
+commandSubstitutionPipeFailure () {
+ # shellcheck disable=SC2216
+ res=$(set -eu -o pipefail; false | true | echo 0)
+}
+expect 1 commandSubstitutionPipeFailure
+
+# `set -u` is enabled
+
+# Note the (...): it makes the function run in a subshell, since unbound
+# variable errors in the outer shell are *rightly* not recoverable.
+useUnbound () (
+ set -eu
+ # shellcheck disable=SC2154
+ echo "$thisVariableIsNotBound"
+)
+expect 1 useUnbound
+
+# ! alone unfortunately negates `set -e`, but it works in functions:
+# shellcheck disable=SC2251
+! true
+funBang () {
+ ! true
+}
+expect 1 funBang
+unset funBang
+
+# `grep -v -q` is not what we want for exit codes; `grepInverse` is.
+# The following line proves the point: it succeeds even though `foo` is present.
+# If it ever fails, `grep` had a breaking change or `-v -q` is not portable.
+{ echo foo; echo bar; } | grep -v -q foo
+{ echo foo; echo bar; } | expect 1 grepInverse foo
+
+# `grepQuiet` is quiet
+res=$(set -eu -o pipefail; echo foo | grepQuiet foo | wc -c)
+(( res == 0 ))
+unset res
+
+# `grepQuietInverse` is both quiet and inverted
+{ echo foo; echo bar; } | expect 1 grepQuietInverse foo
+res=$(set -eu -o pipefail; echo foo | expect 1 grepQuietInverse foo | wc -c)
+(( res == 0 ))
+unset res
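The helpers exercised here (`expect`, `expectStderr`, `grepQuiet`, `grepInverse`, `grepQuietInverse`) come from tests/common.sh and are not part of this diff. A rough sketch of definitions consistent with how they are used above (an approximation, not the actual common.sh code):

    expect() {
        local expected res
        expected="$1"; shift
        "$@" && res=0 || res="$?"
        [[ "$res" -eq "$expected" ]]
    }

    expectStderr() {
        # Same as expect, but with the command's standard error sent to standard output.
        local expected res
        expected="$1"; shift
        "$@" 2>&1 && res=0 || res="$?"
        [[ "$res" -eq "$expected" ]]
    }

    grepQuiet()        { grep "$@" > /dev/null; }
    grepInverse()      { ! grep "$@"; }
    grepQuietInverse() { ! grep "$@" > /dev/null; }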
diff --git a/tests/timeout.sh b/tests/timeout.sh
index e3fb3ebcc..b179b79a2 100644
--- a/tests/timeout.sh
+++ b/tests/timeout.sh
@@ -5,17 +5,14 @@ source common.sh
# XXX: This shouldn’t be necessary, but #4813 causes this test to fail
needLocalStore "see #4813"
-set +e
-messages=$(nix-build -Q timeout.nix -A infiniteLoop --timeout 2 2>&1)
-status=$?
-set -e
+messages=$(nix-build -Q timeout.nix -A infiniteLoop --timeout 2 2>&1) && status=0 || status=$?
if [ $status -ne 101 ]; then
echo "error: 'nix-store' exited with '$status'; should have exited 101"
exit 1
fi
-if ! echo "$messages" | grep -q "timed out"; then
+if echo "$messages" | grepQuietInverse "timed out"; then
echo "error: build may have failed for reasons other than timeout; output:"
echo "$messages" >&2
exit 1
diff --git a/tests/user-envs-migration.sh b/tests/user-envs-migration.sh
index 467c28fbb..187372b16 100644
--- a/tests/user-envs-migration.sh
+++ b/tests/user-envs-migration.sh
@@ -4,7 +4,7 @@
source common.sh
if isDaemonNewer "2.4pre20211005"; then
- exit 99
+ skipTest "Daemon is too new"
fi
diff --git a/tests/user-envs.sh b/tests/user-envs.sh
index d63fe780a..d1260ba04 100644
--- a/tests/user-envs.sh
+++ b/tests/user-envs.sh
@@ -1,6 +1,6 @@
source common.sh
-if [ -z "$storeCleared" ]; then
+if [ -z "${storeCleared-}" ]; then
clearStore
fi
@@ -28,13 +28,13 @@ nix-env -f ./user-envs.nix -qa --json --out-path | jq -e '.[] | select(.name ==
] | all'
# Query descriptions.
-nix-env -f ./user-envs.nix -qa '*' --description | grep -q silly
+nix-env -f ./user-envs.nix -qa '*' --description | grepQuiet silly
rm -rf $HOME/.nix-defexpr
ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr
-nix-env -qa '*' --description | grep -q silly
+nix-env -qa '*' --description | grepQuiet silly
# Query the system.
-nix-env -qa '*' --system | grep -q $system
+nix-env -qa '*' --system | grepQuiet $system
# Install "foo-1.0".
nix-env -i foo-1.0
@@ -42,19 +42,19 @@ nix-env -i foo-1.0
# Query installed: should contain foo-1.0 now (which should be
# executable).
test "$(nix-env -q '*' | wc -l)" -eq 1
-nix-env -q '*' | grep -q foo-1.0
+nix-env -q '*' | grepQuiet foo-1.0
test "$($profiles/test/bin/foo)" = "foo-1.0"
# Test nix-env -qc to compare installed against available packages, and vice versa.
-nix-env -qc '*' | grep -q '< 2.0'
-nix-env -qac '*' | grep -q '> 1.0'
+nix-env -qc '*' | grepQuiet '< 2.0'
+nix-env -qac '*' | grepQuiet '> 1.0'
# Test the -b flag to filter out source-only packages.
[ "$(nix-env -qab | wc -l)" -eq 1 ]
# Test the -s flag to get package status.
-nix-env -qas | grep -q 'IP- foo-1.0'
-nix-env -qas | grep -q -- '--- bar-0.1'
+nix-env -qas | grepQuiet 'IP- foo-1.0'
+nix-env -qas | grepQuiet -- '--- bar-0.1'
# Disable foo.
nix-env --set-flag active false foo
@@ -74,15 +74,15 @@ nix-env -i foo-2.0pre1
# Query installed: should contain foo-2.0pre1 now.
test "$(nix-env -q '*' | wc -l)" -eq 1
-nix-env -q '*' | grep -q foo-2.0pre1
+nix-env -q '*' | grepQuiet foo-2.0pre1
test "$($profiles/test/bin/foo)" = "foo-2.0pre1"
# Upgrade "foo": should install foo-2.0.
-NIX_PATH=nixpkgs=./user-envs.nix:$NIX_PATH nix-env -f '<nixpkgs>' -u foo
+NIX_PATH=nixpkgs=./user-envs.nix:${NIX_PATH-} nix-env -f '<nixpkgs>' -u foo
# Query installed: should contain foo-2.0 now.
test "$(nix-env -q '*' | wc -l)" -eq 1
-nix-env -q '*' | grep -q foo-2.0
+nix-env -q '*' | grepQuiet foo-2.0
test "$($profiles/test/bin/foo)" = "foo-2.0"
# Store the path of foo-2.0.
@@ -94,20 +94,20 @@ nix-env -i bar-0.1
nix-env -e foo
# Query installed: should only contain bar-0.1 now.
-if nix-env -q '*' | grep -q foo; then false; fi
-nix-env -q '*' | grep -q bar
+if nix-env -q '*' | grepQuiet foo; then false; fi
+nix-env -q '*' | grepQuiet bar
# Rollback: should bring "foo" back.
oldGen="$(nix-store -q --resolve $profiles/test)"
nix-env --rollback
[ "$(nix-store -q --resolve $profiles/test)" != "$oldGen" ]
-nix-env -q '*' | grep -q foo-2.0
-nix-env -q '*' | grep -q bar
+nix-env -q '*' | grepQuiet foo-2.0
+nix-env -q '*' | grepQuiet bar
# Rollback again: should remove "bar".
nix-env --rollback
-nix-env -q '*' | grep -q foo-2.0
-if nix-env -q '*' | grep -q bar; then false; fi
+nix-env -q '*' | grepQuiet foo-2.0
+if nix-env -q '*' | grepQuiet bar; then false; fi
# Count generations.
nix-env --list-generations
@@ -129,7 +129,7 @@ nix-env --switch-generation 7
# Install foo-1.0, now using its store path.
nix-env -i "$outPath10"
-nix-env -q '*' | grep -q foo-1.0
+nix-env -q '*' | grepQuiet foo-1.0
nix-store -qR $profiles/test | grep "$outPath10"
nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve $profiles/test)"
[ "$(nix-store -q --deriver "$outPath10")" = $drvPath10 ]
@@ -137,12 +137,12 @@ nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve
# Uninstall foo-1.0, using a symlink to its store path.
ln -sfn $outPath10/bin/foo $TEST_ROOT/symlink
nix-env -e $TEST_ROOT/symlink
-if nix-env -q '*' | grep -q foo; then false; fi
-(! nix-store -qR $profiles/test | grep "$outPath10")
+if nix-env -q '*' | grepQuiet foo; then false; fi
+nix-store -qR $profiles/test | grepInverse "$outPath10"
# Install foo-1.0, now using a symlink to its store path.
nix-env -i $TEST_ROOT/symlink
-nix-env -q '*' | grep -q foo
+nix-env -q '*' | grepQuiet foo
# Delete all old generations.
nix-env --delete-generations old
@@ -160,7 +160,7 @@ test "$(nix-env -q '*' | wc -l)" -eq 0
# Installing "foo" should only install the newest foo.
nix-env -i foo
test "$(nix-env -q '*' | grep foo- | wc -l)" -eq 1
-nix-env -q '*' | grep -q foo-2.0
+nix-env -q '*' | grepQuiet foo-2.0
# On the other hand, this should install both (and should fail due to
# a collision).
@@ -171,8 +171,8 @@ nix-env -e '*'
nix-env -e '*'
nix-env -i '*'
test "$(nix-env -q '*' | wc -l)" -eq 2
-nix-env -q '*' | grep -q foo-2.0
-nix-env -q '*' | grep -q bar-0.1.1
+nix-env -q '*' | grepQuiet foo-2.0
+nix-env -q '*' | grepQuiet bar-0.1.1
# Test priorities: foo-0.1 has a lower priority than foo-1.0, so it
# should be possible to install both without a collision. Also test
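The `${storeCleared-}` and `${NIX_PATH-}` changes in the hunks above guard against the `set -u` now enabled by the test infrastructure: with `-u`, referencing an unset variable is a fatal error, while the `${var-}` form expands to an empty string instead. Roughly:

    set -u
    echo "$maybeUnset"      # error: maybeUnset: unbound variable
    echo "${maybeUnset-}"   # expands to the empty string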
diff --git a/tests/why-depends.sh b/tests/why-depends.sh
index a04d529b5..b35a0d1cf 100644
--- a/tests/why-depends.sh
+++ b/tests/why-depends.sh
@@ -16,9 +16,9 @@ FAST_WHY_DEPENDS_OUTPUT=$(nix why-depends ./toplevel ./dep)
PRECISE_WHY_DEPENDS_OUTPUT=$(nix why-depends ./toplevel ./dep --precise)
# Both outputs should show that `input-2` is in the dependency chain
-echo "$FAST_WHY_DEPENDS_OUTPUT" | grep -q input-2
-echo "$PRECISE_WHY_DEPENDS_OUTPUT" | grep -q input-2
+echo "$FAST_WHY_DEPENDS_OUTPUT" | grepQuiet input-2
+echo "$PRECISE_WHY_DEPENDS_OUTPUT" | grepQuiet input-2
-# But only the “precise” one should refere to `reference-to-input-2`
-echo "$FAST_WHY_DEPENDS_OUTPUT" | (! grep -q reference-to-input-2)
-echo "$PRECISE_WHY_DEPENDS_OUTPUT" | grep -q reference-to-input-2
+# But only the “precise” one should refer to `reference-to-input-2`
+echo "$FAST_WHY_DEPENDS_OUTPUT" | grepQuietInverse reference-to-input-2
+echo "$PRECISE_WHY_DEPENDS_OUTPUT" | grepQuiet reference-to-input-2