-rw-r--r--  .github/STALE-BOT.md | 2
-rw-r--r--  .github/workflows/backport.yml | 26
-rw-r--r--  .github/workflows/test.yml | 25
-rw-r--r--  .gitignore | 9
-rw-r--r--  .version | 2
-rw-r--r--  Makefile | 5
-rw-r--r--  Makefile.config.in | 1
-rw-r--r--  README.md | 3
-rw-r--r--  boehmgc-coroutine-sp-fallback.diff | 45
-rwxr-xr-x  config/config.guess | 20
-rwxr-xr-x  config/config.sub | 20
-rw-r--r--  configure.ac | 84
-rw-r--r--  doc/manual/generate-builtins.nix | 10
-rw-r--r--  doc/manual/generate-manpage.nix | 10
-rw-r--r--  doc/manual/local.mk | 30
-rw-r--r--  doc/manual/src/SUMMARY.md.in | 3
-rw-r--r--  doc/manual/src/command-ref/conf-file-prefix.md | 5
-rw-r--r--  doc/manual/src/command-ref/env-common.md | 42
-rw-r--r--  doc/manual/src/command-ref/nix-env.md | 42
-rw-r--r--  doc/manual/src/command-ref/nix-shell.md | 20
-rw-r--r--  doc/manual/src/command-ref/nix-store.md | 2
-rw-r--r--  doc/manual/src/command-ref/opt-common.md | 4
-rw-r--r--  doc/manual/src/contributing/cli-guideline.md | 52
-rw-r--r--  doc/manual/src/expressions/advanced-attributes.md | 2
-rw-r--r--  doc/manual/src/expressions/builtins-prefix.md | 9
-rw-r--r--  doc/manual/src/expressions/builtins-suffix.md | 1
-rw-r--r--  doc/manual/src/expressions/expression-syntax.md | 2
-rw-r--r--  doc/manual/src/expressions/language-operators.md | 12
-rw-r--r--  doc/manual/src/expressions/language-values.md | 9
-rw-r--r--  doc/manual/src/expressions/simple-building-testing.md | 2
-rw-r--r--  doc/manual/src/installation/building-source.md | 11
-rw-r--r--  doc/manual/src/installation/env-variables.md | 2
-rw-r--r--  doc/manual/src/installation/installing-binary.md | 209
-rw-r--r--  doc/manual/src/installation/installing-docker.md | 59
-rw-r--r--  doc/manual/src/installation/installing-source.md | 4
-rw-r--r--  doc/manual/src/installation/obtaining-source.md | 15
-rw-r--r--  doc/manual/src/installation/prerequisites-source.md | 17
-rw-r--r--  doc/manual/src/introduction.md | 6
-rw-r--r--  doc/manual/src/package-management/basic-package-mgmt.md | 63
-rw-r--r--  doc/manual/src/package-management/binary-cache-substituter.md | 4
-rw-r--r--  doc/manual/src/package-management/garbage-collection.md | 2
-rw-r--r--  doc/manual/src/package-management/profiles.md | 6
-rw-r--r--  doc/manual/src/package-management/ssh-substituter.md | 2
-rw-r--r--  doc/manual/src/quick-start.md | 14
-rw-r--r--  doc/manual/src/release-notes/rl-2.4.md | 547
-rw-r--r--  doc/manual/src/release-notes/rl-next.md | 7
-rw-r--r--  doc/manual/theme/highlight.js (renamed from doc/manual/highlight.pack.js) | 0
-rw-r--r--  docker.nix | 251
-rw-r--r--  flake.lock | 15
-rw-r--r--  flake.nix | 333
-rwxr-xr-x  maintainers/upload-release.pl | 78
-rw-r--r--  misc/fish/completion.fish | 37
-rw-r--r--  misc/fish/local.mk | 1
-rw-r--r--  misc/launchd/local.mk | 2
-rw-r--r--  misc/launchd/org.nixos.nix-daemon.plist.in | 2
-rw-r--r--  misc/systemd/local.mk | 2
-rw-r--r--  misc/upstart/local.mk | 2
-rw-r--r--  misc/zsh/completion.zsh | 4
-rw-r--r--  misc/zsh/local.mk | 1
-rw-r--r--  mk/lib.mk | 27
-rw-r--r--  mk/libraries.mk | 22
-rw-r--r--  mk/tests.mk | 4
-rw-r--r--  nix-rust/local.mk | 15
-rw-r--r--  perl/Makefile | 2
-rw-r--r--  perl/Makefile.config.in | 1
-rw-r--r--  perl/configure.ac | 2
-rw-r--r--  perl/lib/Nix/Config.pm.in | 1
-rw-r--r--  perl/lib/Nix/Store.pm | 1
-rw-r--r--  perl/lib/Nix/Store.xs | 13
-rw-r--r--  perl/local.mk | 2
-rwxr-xr-x  scripts/create-darwin-volume.sh | 926
-rw-r--r--  scripts/install-darwin-multi-user.sh | 123
-rw-r--r--  scripts/install-multi-user.sh | 338
-rw-r--r--  scripts/install-nix-from-closure.sh | 116
-rwxr-xr-x  scripts/install-systemd-multi-user.sh | 27
-rwxr-xr-x  scripts/install.in | 33
-rw-r--r--  scripts/local.mk | 4
-rwxr-xr-x  scripts/nix-http-export.cgi.in | 51
-rw-r--r--  scripts/nix-profile-daemon.sh.in | 6
-rwxr-xr-x  scripts/nix-reduce-build.in | 171
-rwxr-xr-x  scripts/prepare-installer-for-github-actions | 2
-rw-r--r--  src/build-remote/build-remote.cc | 31
-rw-r--r--  src/libcmd/command.cc | 69
-rw-r--r--  src/libcmd/command.hh | 64
-rw-r--r--  src/libcmd/installables.cc | 285
-rw-r--r--  src/libcmd/installables.hh | 14
-rw-r--r--  src/libcmd/local.mk | 5
-rw-r--r--  src/libcmd/markdown.cc | 8
-rw-r--r--  src/libcmd/repl.cc | 108
-rw-r--r--  src/libexpr/attr-path.cc | 8
-rw-r--r--  src/libexpr/attr-path.hh | 2
-rw-r--r--  src/libexpr/attr-set.hh | 8
-rw-r--r--  src/libexpr/common-eval-args.cc | 8
-rw-r--r--  src/libexpr/common-eval-args.hh | 3
-rw-r--r--  src/libexpr/eval.cc | 494
-rw-r--r--  src/libexpr/eval.hh | 59
-rw-r--r--  src/libexpr/flake/config.cc | 29
-rw-r--r--  src/libexpr/flake/flake.cc | 181
-rw-r--r--  src/libexpr/flake/flake.hh | 18
-rw-r--r--  src/libexpr/flake/flakeref.cc | 6
-rw-r--r--  src/libexpr/flake/lockfile.cc | 21
-rw-r--r--  src/libexpr/lexer.l | 101
-rw-r--r--  src/libexpr/local.mk | 6
-rw-r--r--  src/libexpr/nixexpr.cc | 66
-rw-r--r--  src/libexpr/nixexpr.hh | 52
-rw-r--r--  src/libexpr/parser.y | 86
-rw-r--r--  src/libexpr/primops.cc | 336
-rw-r--r--  src/libexpr/primops.hh | 4
-rw-r--r--  src/libexpr/primops/fetchMercurial.cc | 6
-rw-r--r--  src/libexpr/primops/fetchTree.cc | 166
-rw-r--r--  src/libexpr/value-to-json.cc | 24
-rw-r--r--  src/libexpr/value-to-json.hh | 4
-rw-r--r--  src/libexpr/value-to-xml.cc | 29
-rw-r--r--  src/libexpr/value-to-xml.hh | 4
-rw-r--r--  src/libexpr/value.hh | 3
-rw-r--r--  src/libfetchers/attrs.hh | 2
-rw-r--r--  src/libfetchers/fetchers.cc | 7
-rw-r--r--  src/libfetchers/fetchers.hh | 5
-rw-r--r--  src/libfetchers/git.cc | 52
-rw-r--r--  src/libfetchers/github.cc | 12
-rw-r--r--  src/libfetchers/local.mk | 2
-rw-r--r--  src/libfetchers/mercurial.cc | 52
-rw-r--r--  src/libfetchers/path.cc | 26
-rw-r--r--  src/libfetchers/registry.cc | 7
-rw-r--r--  src/libfetchers/registry.hh | 3
-rw-r--r--  src/libfetchers/tarball.cc | 7
-rw-r--r--  src/libmain/local.mk | 4
-rw-r--r--  src/libmain/progress-bar.cc | 18
-rw-r--r--  src/libmain/shared.cc | 32
-rw-r--r--  src/libstore/binary-cache-store.cc | 65
-rw-r--r--  src/libstore/binary-cache-store.hh | 17
-rw-r--r--  src/libstore/build/derivation-goal.cc | 215
-rw-r--r--  src/libstore/build/drv-output-substitution-goal.cc | 71
-rw-r--r--  src/libstore/build/drv-output-substitution-goal.hh | 14
-rw-r--r--  src/libstore/build/entry-points.cc | 19
-rw-r--r--  src/libstore/build/goal.cc | 13
-rw-r--r--  src/libstore/build/goal.hh | 2
-rw-r--r--  src/libstore/build/local-derivation-goal.cc | 321
-rw-r--r--  src/libstore/build/local-derivation-goal.hh | 8
-rw-r--r--  src/libstore/build/substitution-goal.cc | 2
-rw-r--r--  src/libstore/build/worker.cc | 5
-rw-r--r--  src/libstore/build/worker.hh | 3
-rw-r--r--  src/libstore/ca-specific-schema.sql | 11
-rw-r--r--  src/libstore/content-address.cc | 22
-rw-r--r--  src/libstore/daemon.cc | 82
-rw-r--r--  src/libstore/derivations.cc | 48
-rw-r--r--  src/libstore/derivations.hh | 4
-rw-r--r--  src/libstore/derived-path.cc | 51
-rw-r--r--  src/libstore/derived-path.hh | 42
-rw-r--r--  src/libstore/dummy-store.cc | 10
-rw-r--r--  src/libstore/filetransfer.cc | 27
-rw-r--r--  src/libstore/gc.cc | 767
-rw-r--r--  src/libstore/globals.cc | 18
-rw-r--r--  src/libstore/globals.hh | 28
-rw-r--r--  src/libstore/http-binary-cache-store.cc | 4
-rw-r--r--  src/libstore/legacy-ssh-store.cc | 35
-rw-r--r--  src/libstore/local-binary-cache-store.cc | 2
-rw-r--r--  src/libstore/local-fs-store.hh | 5
-rw-r--r--  src/libstore/local-store.cc | 350
-rw-r--r--  src/libstore/local-store.hh | 56
-rw-r--r--  src/libstore/local.mk | 10
-rw-r--r--  src/libstore/machines.cc | 113
-rw-r--r--  src/libstore/misc.cc | 191
-rw-r--r--  src/libstore/nar-info-disk-cache.cc | 99
-rw-r--r--  src/libstore/nar-info-disk-cache.hh | 10
-rw-r--r--  src/libstore/optimise-store.cc | 2
-rw-r--r--  src/libstore/parsed-derivations.cc | 107
-rw-r--r--  src/libstore/parsed-derivations.hh | 4
-rw-r--r--  src/libstore/path-info.cc | 46
-rw-r--r--  src/libstore/path-info.hh | 5
-rw-r--r--  src/libstore/path-with-outputs.cc | 4
-rw-r--r--  src/libstore/pathlocks.cc | 13
-rw-r--r--  src/libstore/pathlocks.hh | 14
-rw-r--r--  src/libstore/profiles.cc | 42
-rw-r--r--  src/libstore/profiles.hh | 9
-rw-r--r--  src/libstore/realisation.cc | 66
-rw-r--r--  src/libstore/realisation.hh | 13
-rw-r--r--  src/libstore/references.cc | 101
-rw-r--r--  src/libstore/references.hh | 24
-rw-r--r--  src/libstore/remote-store.cc | 139
-rw-r--r--  src/libstore/remote-store.hh | 21
-rw-r--r--  src/libstore/s3-binary-cache-store.cc | 6
-rw-r--r--  src/libstore/sandbox-defaults.sb | 9
-rw-r--r--  src/libstore/serve-protocol.hh | 2
-rw-r--r--  src/libstore/sqlite.cc | 7
-rw-r--r--  src/libstore/ssh-store.cc | 5
-rw-r--r--  src/libstore/store-api.cc | 332
-rw-r--r--  src/libstore/store-api.hh | 99
-rw-r--r--  src/libstore/tests/local.mk | 15
-rw-r--r--  src/libstore/tests/machines.cc | 169
-rw-r--r--  src/libstore/tests/references.cc | 45
-rw-r--r--  src/libstore/tests/test-data/machines.bad_format | 1
-rw-r--r--  src/libstore/tests/test-data/machines.valid | 3
-rw-r--r--  src/libstore/uds-remote-store.cc | 28
-rw-r--r--  src/libstore/uds-remote-store.hh | 6
-rw-r--r--  src/libstore/worker-protocol.hh | 3
-rw-r--r--  src/libutil/ansicolor.hh | 2
-rw-r--r--  src/libutil/archive.cc | 4
-rw-r--r--  src/libutil/args.cc | 1
-rw-r--r--  src/libutil/args.hh | 16
-rw-r--r--  src/libutil/closure.hh | 69
-rw-r--r--  src/libutil/comparator.hh | 4
-rw-r--r--  src/libutil/compression.cc | 26
-rw-r--r--  src/libutil/compression.hh | 4
-rw-r--r--  src/libutil/config.cc | 52
-rw-r--r--  src/libutil/config.hh | 13
-rw-r--r--  src/libutil/error.cc | 10
-rw-r--r--  src/libutil/experimental-features.cc | 59
-rw-r--r--  src/libutil/experimental-features.hh | 56
-rw-r--r--  src/libutil/fmt.hh | 2
-rw-r--r--  src/libutil/local.mk | 2
-rw-r--r--  src/libutil/logging.cc | 6
-rw-r--r--  src/libutil/ref.hh | 55
-rw-r--r--  src/libutil/serialise.cc | 3
-rw-r--r--  src/libutil/tarfile.cc | 33
-rw-r--r--  src/libutil/tarfile.hh | 3
-rw-r--r--  src/libutil/tests/closure.cc | 70
-rw-r--r--  src/libutil/tests/logging.cc | 4
-rw-r--r--  src/libutil/tests/tests.cc | 17
-rw-r--r--  src/libutil/url.cc | 2
-rw-r--r--  src/libutil/util.cc | 184
-rw-r--r--  src/libutil/util.hh | 65
-rwxr-xr-x  src/nix-build/nix-build.cc | 93
-rwxr-xr-x  src/nix-copy-closure/nix-copy-closure.cc | 5
-rw-r--r--  src/nix-env/nix-env.cc | 43
-rw-r--r--  src/nix-env/user-env.cc | 4
-rw-r--r--  src/nix-instantiate/nix-instantiate.cc | 7
-rw-r--r--  src/nix-store/nix-store.cc | 3
-rw-r--r--  src/nix/app.cc | 89
-rw-r--r--  src/nix/build.cc | 16
-rw-r--r--  src/nix/bundle.cc | 5
-rw-r--r--  src/nix/copy.cc | 20
-rw-r--r--  src/nix/daemon.cc | 6
-rw-r--r--  src/nix/develop.cc | 323
-rw-r--r--  src/nix/develop.md | 12
-rw-r--r--  src/nix/diff-closures.cc | 4
-rw-r--r--  src/nix/edit.cc | 2
-rw-r--r--  src/nix/eval.cc | 2
-rw-r--r--  src/nix/flake-check.md | 23
-rw-r--r--  src/nix/flake-show.md | 3
-rw-r--r--  src/nix/flake.cc | 211
-rw-r--r--  src/nix/flake.md | 6
-rw-r--r--  src/nix/get-env.sh | 119
-rw-r--r--  src/nix/local.mk | 4
-rw-r--r--  src/nix/log.cc | 6
-rw-r--r--  src/nix/main.cc | 61
-rw-r--r--  src/nix/make-content-addressable.cc | 2
-rw-r--r--  src/nix/path-info.cc | 2
-rw-r--r--  src/nix/path-info.md | 2
-rw-r--r--  src/nix/prefetch.cc | 24
-rw-r--r--  src/nix/print-dev-env.md | 37
-rw-r--r--  src/nix/profile-history.md | 4
-rw-r--r--  src/nix/profile-remove.md | 1
-rw-r--r--  src/nix/profile-rollback.md | 26
-rw-r--r--  src/nix/profile-upgrade.md | 2
-rw-r--r--  src/nix/profile-wipe-history.md | 20
-rw-r--r--  src/nix/profile.cc | 96
-rw-r--r--  src/nix/realisation.cc | 17
-rw-r--r--  src/nix/registry-add.md | 7
-rw-r--r--  src/nix/registry-pin.md | 7
-rw-r--r--  src/nix/registry-remove.md | 6
-rw-r--r--  src/nix/registry.cc | 88
-rw-r--r--  src/nix/registry.md | 2
-rw-r--r--  src/nix/run.cc | 71
-rw-r--r--  src/nix/run.hh | 11
-rw-r--r--  src/nix/search.cc | 1
-rw-r--r--  src/nix/show-config.cc | 5
-rw-r--r--  src/nix/show-derivation.cc | 8
-rw-r--r--  src/nix/sigs.cc | 7
-rw-r--r--  src/nix/store-delete.cc | 2
-rw-r--r--  src/nix/store-repair.cc | 2
-rw-r--r--  src/nix/verify.cc | 12
-rw-r--r--  src/nix/why-depends.cc | 4
-rw-r--r--  src/nlohmann/json.hpp | 15501
-rw-r--r--  src/nlohmann/json_fwd.hpp | 78
-rw-r--r--  src/resolve-system-dependencies/local.mk | 2
-rw-r--r--  tests/add.sh | 2
-rw-r--r--  tests/binary-cache.sh | 2
-rw-r--r--  tests/build-remote-content-addressed-floating.sh | 2
-rw-r--r--  tests/build-remote.sh | 24
-rw-r--r--  tests/build.sh | 9
-rwxr-xr-x  tests/ca/build-with-garbage-path.sh | 21
-rw-r--r--  tests/ca/build.sh | 8
-rw-r--r--  tests/ca/common.sh | 4
-rwxr-xr-x  tests/ca/concurrent-builds.sh | 18
l---------  tests/ca/config.nix.in | 1
-rw-r--r--  tests/ca/content-addressed.nix | 34
-rw-r--r--  tests/ca/duplicate-realisation-in-closure.sh | 28
-rw-r--r--  tests/ca/flake.nix | 3
-rwxr-xr-x  tests/ca/gc.sh | 10
-rwxr-xr-x  tests/ca/nix-run.sh | 9
-rwxr-xr-x  tests/ca/nix-shell.sh | 10
-rw-r--r--  tests/ca/nondeterministic.nix | 35
-rwxr-xr-x  tests/ca/post-hook.sh | 13
-rw-r--r--  tests/ca/racy.nix | 15
-rwxr-xr-x  tests/ca/recursive.sh | 13
-rw-r--r--  tests/ca/repl.sh | 5
-rw-r--r--  tests/ca/signatures.sh | 4
-rw-r--r--  tests/ca/substitute.sh | 23
-rw-r--r--  tests/check.nix | 2
-rw-r--r--  tests/check.sh | 8
-rw-r--r--  tests/common.sh.in | 66
-rw-r--r--  tests/compression-levels.sh | 22
-rw-r--r--  tests/config.nix.in | 11
-rw-r--r--  tests/config.sh | 2
-rw-r--r--  tests/db-migration.sh | 5
-rw-r--r--  tests/dummy | 1
-rw-r--r--  tests/dump-db.sh | 2
-rw-r--r--  tests/eval-store.sh | 30
-rw-r--r--  tests/failing.nix | 22
-rw-r--r--  tests/fetchGit.sh | 4
-rw-r--r--  tests/fetchMercurial.sh | 5
-rw-r--r--  tests/fetchurl.sh | 14
-rw-r--r--  tests/fixed.nix | 8
-rw-r--r--  tests/fixed.sh | 8
-rw-r--r--  tests/flake-local-settings.sh | 34
-rw-r--r--  tests/flakes.sh | 282
-rwxr-xr-x  tests/function-trace.sh | 2
-rw-r--r--  tests/gc-auto.sh | 2
-rw-r--r--  tests/gc-non-blocking.sh | 33
-rw-r--r--  tests/gc.sh | 14
-rw-r--r--  tests/init.sh | 3
-rw-r--r--  tests/lang/eval-fail-nonexist-path.nix (renamed from tests/lang/eval-fail-antiquoted-path.nix) | 0
-rw-r--r--  tests/lang/eval-okay-floor-ceil.exp | 1
-rw-r--r--  tests/lang/eval-okay-floor-ceil.nix | 9
-rw-r--r--  tests/lang/eval-okay-path-antiquotation.nix | 12
-rw-r--r--  tests/lang/eval-okay-sort.exp | 2
-rw-r--r--  tests/lang/eval-okay-sort.nix | 14
-rw-r--r--  tests/lang/parse-okay-url.nix | 1
-rw-r--r--  tests/linux-sandbox.sh | 2
-rw-r--r--  tests/local-store.sh | 5
-rw-r--r--  tests/local.mk | 20
-rw-r--r--  tests/multiple-outputs.sh | 5
-rw-r--r--  tests/nix-shell.sh | 36
-rw-r--r--  tests/nss-preload.nix | 123
-rw-r--r--  tests/optimise-store.sh | 3
-rw-r--r--  tests/post-hook.sh | 5
-rw-r--r--  tests/recursive.sh | 20
-rw-r--r--  tests/referrers.sh | 2
-rw-r--r--  tests/repair.sh | 4
-rw-r--r--  tests/repl.sh | 20
-rw-r--r--  tests/shell.nix | 34
-rw-r--r--  tests/structured-attrs-shell.nix | 21
-rw-r--r--  tests/structured-attrs.nix | 2
-rw-r--r--  tests/structured-attrs.sh | 13
-rw-r--r--  tests/substitute-with-invalid-ca.sh | 38
-rw-r--r--  tests/tarball.sh | 5
-rw-r--r--  tests/timeout.sh | 2
348 files changed, 20411 insertions, 9436 deletions
diff --git a/.github/STALE-BOT.md b/.github/STALE-BOT.md
index 5e8f5d929..383717bfc 100644
--- a/.github/STALE-BOT.md
+++ b/.github/STALE-BOT.md
@@ -3,7 +3,7 @@
- Thanks for your contribution!
- To remove the stale label, just leave a new comment.
- _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.)
-- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on the [#nixos IRC channel](https://webchat.freenode.net/#nixos).
+- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org).
## Suggestions for PRs
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
new file mode 100644
index 000000000..ec7ab4516
--- /dev/null
+++ b/.github/workflows/backport.yml
@@ -0,0 +1,26 @@
+name: Backport
+on:
+ pull_request_target:
+ types: [closed, labeled]
+jobs:
+ backport:
+ name: Backport Pull Request
+ if: github.repository_owner == 'NixOS' && github.event.pull_request.merged == true && (github.event_name != 'labeled' || startsWith('backport', github.event.label.name))
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ # required to find all branches
+ fetch-depth: 0
+ - name: Create backport PRs
+ # should be kept in sync with `version`
+ uses: zeebe-io/backport-action@v0.0.7
+ with:
+ # Config README: https://github.com/zeebe-io/backport-action#backport-action
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ github_workspace: ${{ github.workspace }}
+ pull_description: |-
+ Bot-based backport to `${target_branch}`, triggered by a label in #${pull_number}.
+ # should be kept in sync with `uses`
+ version: v0.0.5
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 17a79dc97..1b655e27d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,27 +1,32 @@
name: "Test"
+
on:
pull_request:
push:
+
jobs:
+
tests:
+ needs: [check_cachix]
strategy:
matrix:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
-
+ timeout-minutes: 60
steps:
- - uses: actions/checkout@v2.3.4
+ - uses: actions/checkout@v2.4.0
with:
fetch-depth: 0
- - uses: cachix/install-nix-action@v13
+ - uses: cachix/install-nix-action@v16
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v10
+ if: needs.check_cachix.outputs.secret == 'true'
with:
name: '${{ env.CACHIX_NAME }}'
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
- #- run: nix flake check
- - run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
+ - run: nix-build -A checks.$(nix-instantiate --eval -E '(builtins.currentSystem)')
+
check_cachix:
name: Cachix secret present for installer tests
runs-on: ubuntu-latest
@@ -33,6 +38,7 @@ jobs:
env:
_CACHIX_SECRETS: ${{ secrets.CACHIX_SIGNING_KEY }}${{ secrets.CACHIX_AUTH_TOKEN }}
run: echo "::set-output name=secret::${{ env._CACHIX_SECRETS != '' }}"
+
installer:
needs: [tests, check_cachix]
if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
@@ -40,11 +46,11 @@ jobs:
outputs:
installerURL: ${{ steps.prepare-installer.outputs.installerURL }}
steps:
- - uses: actions/checkout@v2.3.4
+ - uses: actions/checkout@v2.4.0
with:
fetch-depth: 0
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- - uses: cachix/install-nix-action@v13
+ - uses: cachix/install-nix-action@v16
- uses: cachix/cachix-action@v10
with:
name: '${{ env.CACHIX_NAME }}'
@@ -52,6 +58,7 @@ jobs:
authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
- id: prepare-installer
run: scripts/prepare-installer-for-github-actions
+
installer_test:
needs: [installer, check_cachix]
if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
@@ -60,9 +67,9 @@ jobs:
os: [ubuntu-latest, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- - uses: actions/checkout@v2.3.4
+ - uses: actions/checkout@v2.4.0
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- - uses: cachix/install-nix-action@v13
+ - uses: cachix/install-nix-action@v16
with:
install_url: '${{needs.installer.outputs.installerURL}}'
install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"
diff --git a/.gitignore b/.gitignore
index 37aada307..2889a56eb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,7 @@ perl/Makefile.config
/doc/manual/*.1
/doc/manual/*.5
/doc/manual/*.8
+/doc/manual/generated/*
/doc/manual/nix.json
/doc/manual/conf-file.json
/doc/manual/builtins.json
@@ -25,8 +26,6 @@ perl/Makefile.config
# /scripts/
/scripts/nix-profile.sh
-/scripts/nix-reduce-build
-/scripts/nix-http-export.cgi
/scripts/nix-profile-daemon.sh
# /src/libexpr/
@@ -39,6 +38,7 @@ perl/Makefile.config
# /src/libstore/
*.gen.*
+/src/libstore/tests/libstore-tests
# /src/libutil/
/src/libutil/tests/libutil-tests
@@ -56,9 +56,6 @@ perl/Makefile.config
/src/nix-prefetch-url/nix-prefetch-url
-# /src/nix-daemon/
-/src/nix-daemon/nix-daemon
-
/src/nix-collect-garbage/nix-collect-garbage
# /src/nix-channel/
@@ -76,12 +73,12 @@ perl/Makefile.config
# /tests/
/tests/test-tmp
/tests/common.sh
-/tests/dummy
/tests/result*
/tests/restricted-innocent
/tests/shell
/tests/shell.drv
/tests/config.nix
+/tests/ca/config.nix
# /tests/lang/
/tests/lang/*.out
diff --git a/.version b/.version
index 7208c2182..68151b2e1 100644
--- a/.version
+++ b/.version
@@ -1 +1 @@
-2.4
\ No newline at end of file
+2.5
\ No newline at end of file
diff --git a/Makefile b/Makefile
index b0636cf49..02228910a 100644
--- a/Makefile
+++ b/Makefile
@@ -4,6 +4,7 @@ makefiles = \
src/libutil/local.mk \
src/libutil/tests/local.mk \
src/libstore/local.mk \
+ src/libstore/tests/local.mk \
src/libfetchers/local.mk \
src/libmain/local.mk \
src/libexpr/local.mk \
@@ -12,6 +13,8 @@ makefiles = \
src/resolve-system-dependencies/local.mk \
scripts/local.mk \
misc/bash/local.mk \
+ misc/fish/local.mk \
+ misc/zsh/local.mk \
misc/systemd/local.mk \
misc/launchd/local.mk \
misc/upstart/local.mk \
@@ -32,4 +35,4 @@ endif
include mk/lib.mk
# GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17 -fstack-usage
-GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17
+GLOBAL_CXXFLAGS += -g -Wall -include config.h -std=c++17 -I src
diff --git a/Makefile.config.in b/Makefile.config.in
index fd92365eb..c8c4446b4 100644
--- a/Makefile.config.in
+++ b/Makefile.config.in
@@ -1,3 +1,4 @@
+HOST_OS = @host_os@
AR = @AR@
BDW_GC_LIBS = @BDW_GC_LIBS@
BOOST_LDFLAGS = @BOOST_LDFLAGS@
diff --git a/README.md b/README.md
index 4686010ef..80d6f128c 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,8 @@ build nix from source with nix-build or how to get a development environment.
- [Nix manual](https://nixos.org/nix/manual)
- [Nix jobsets on hydra.nixos.org](https://hydra.nixos.org/project/nix)
- [NixOS Discourse](https://discourse.nixos.org/)
-- [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
+- [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org)
+- [IRC - #nixos on libera.chat](irc://irc.libera.chat/#nixos)
## License
diff --git a/boehmgc-coroutine-sp-fallback.diff b/boehmgc-coroutine-sp-fallback.diff
new file mode 100644
index 000000000..e659bf470
--- /dev/null
+++ b/boehmgc-coroutine-sp-fallback.diff
@@ -0,0 +1,45 @@
+diff --git a/pthread_stop_world.c b/pthread_stop_world.c
+index 4b2c429..1fb4c52 100644
+--- a/pthread_stop_world.c
++++ b/pthread_stop_world.c
+@@ -673,6 +673,8 @@ GC_INNER void GC_push_all_stacks(void)
+ struct GC_traced_stack_sect_s *traced_stack_sect;
+ pthread_t self = pthread_self();
+ word total_size = 0;
++ size_t stack_limit;
++ pthread_attr_t pattr;
+
+ if (!EXPECT(GC_thr_initialized, TRUE))
+ GC_thr_init();
+@@ -722,6 +724,31 @@ GC_INNER void GC_push_all_stacks(void)
+ hi = p->altstack + p->altstack_size;
+ /* FIXME: Need to scan the normal stack too, but how ? */
+ /* FIXME: Assume stack grows down */
++ } else {
++ if (pthread_getattr_np(p->id, &pattr)) {
++ ABORT("GC_push_all_stacks: pthread_getattr_np failed!");
++ }
++ if (pthread_attr_getstacksize(&pattr, &stack_limit)) {
++ ABORT("GC_push_all_stacks: pthread_attr_getstacksize failed!");
++ }
++ if (pthread_attr_destroy(&pattr)) {
++ ABORT("GC_push_all_stacks: pthread_attr_destroy failed!");
++ }
++ // When a thread goes into a coroutine, we lose its original sp until
++ // control flow returns to the thread.
++ // While in the coroutine, the sp points outside the thread stack,
++ // so we can detect this and push the entire thread stack instead,
++ // as an approximation.
++ // We assume that the coroutine has similarly added its entire stack.
++ // This could be made accurate by cooperating with the application
++ // via new functions and/or callbacks.
++ #ifndef STACK_GROWS_UP
++ if (lo >= hi || lo < hi - stack_limit) { // sp outside stack
++ lo = hi - stack_limit;
++ }
++ #else
++ #error "STACK_GROWS_UP not supported in boost_coroutine2 (as of june 2021), so we don't support it in Nix."
++ #endif
+ }
+ GC_push_all_stack_sections(lo, hi, traced_stack_sect);
+ # ifdef STACK_GROWS_UP
diff --git a/config/config.guess b/config/config.guess
index 699b3a10b..1972fda8e 100755
--- a/config/config.guess
+++ b/config/config.guess
@@ -1,8 +1,8 @@
#! /bin/sh
# Attempt to guess a canonical system name.
-# Copyright 1992-2020 Free Software Foundation, Inc.
+# Copyright 1992-2021 Free Software Foundation, Inc.
-timestamp='2020-11-19'
+timestamp='2021-01-25'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -50,7 +50,7 @@ version="\
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright 1992-2020 Free Software Foundation, Inc.
+Copyright 1992-2021 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -188,10 +188,9 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
#
# Note: NetBSD doesn't particularly care about the vendor
# portion of the name. We always set it to "unknown".
- sysctl="sysctl -n hw.machine_arch"
UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \
- "/sbin/$sysctl" 2>/dev/null || \
- "/usr/sbin/$sysctl" 2>/dev/null || \
+ /sbin/sysctl -n hw.machine_arch 2>/dev/null || \
+ /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \
echo unknown))
case "$UNAME_MACHINE_ARCH" in
aarch64eb) machine=aarch64_be-unknown ;;
@@ -996,6 +995,9 @@ EOF
k1om:Linux:*:*)
echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
+ loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
m32r*:Linux:*:*)
echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
@@ -1084,7 +1086,7 @@ EOF
ppcle:Linux:*:*)
echo powerpcle-unknown-linux-"$LIBC"
exit ;;
- riscv32:Linux:*:* | riscv64:Linux:*:*)
+ riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*)
echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
exit ;;
s390:Linux:*:* | s390x:Linux:*:*)
@@ -1480,8 +1482,8 @@ EOF
i*86:rdos:*:*)
echo "$UNAME_MACHINE"-pc-rdos
exit ;;
- i*86:AROS:*:*)
- echo "$UNAME_MACHINE"-pc-aros
+ *:AROS:*:*)
+ echo "$UNAME_MACHINE"-unknown-aros
exit ;;
x86_64:VMkernel:*:*)
echo "$UNAME_MACHINE"-unknown-esx
diff --git a/config/config.sub b/config/config.sub
index 19c9553b1..63c1f1c8b 100755
--- a/config/config.sub
+++ b/config/config.sub
@@ -1,8 +1,8 @@
#! /bin/sh
# Configuration validation subroutine script.
-# Copyright 1992-2020 Free Software Foundation, Inc.
+# Copyright 1992-2021 Free Software Foundation, Inc.
-timestamp='2020-12-02'
+timestamp='2021-01-08'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -67,7 +67,7 @@ Report bugs and patches to <config-patches@gnu.org>."
version="\
GNU config.sub ($timestamp)
-Copyright 1992-2020 Free Software Foundation, Inc.
+Copyright 1992-2021 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -1185,6 +1185,7 @@ case $cpu-$vendor in
| k1om \
| le32 | le64 \
| lm32 \
+ | loongarch32 | loongarch64 | loongarchx32 \
| m32c | m32r | m32rle \
| m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \
| m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \
@@ -1229,7 +1230,7 @@ case $cpu-$vendor in
| powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \
| pru \
| pyramid \
- | riscv | riscv32 | riscv64 \
+ | riscv | riscv32 | riscv32be | riscv64 | riscv64be \
| rl78 | romp | rs6000 | rx \
| s390 | s390x \
| score \
@@ -1682,11 +1683,14 @@ fi
# Now, validate our (potentially fixed-up) OS.
case $os in
- # Sometimes we do "kernel-abi", so those need to count as OSes.
+ # Sometimes we do "kernel-libc", so those need to count as OSes.
musl* | newlib* | uclibc*)
;;
- # Likewise for "kernel-libc"
- eabi | eabihf | gnueabi | gnueabihf)
+ # Likewise for "kernel-abi"
+ eabi* | gnueabi*)
+ ;;
+ # VxWorks passes extra cpu info in the 4th filed.
+ simlinux | simwindows | spe)
;;
# Now accept the basic system types.
# The portable systems comes first.
@@ -1750,6 +1754,8 @@ case $kernel-$os in
;;
kfreebsd*-gnu* | kopensolaris*-gnu*)
;;
+ vxworks-simlinux | vxworks-simwindows | vxworks-spe)
+ ;;
nto-qnx*)
;;
os2-emx)
diff --git a/configure.ac b/configure.ac
index 6c36787f3..65478ecc5 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,4 +1,4 @@
-AC_INIT(nix, m4_esyscmd([bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX"]))
+AC_INIT([nix],[m4_esyscmd(bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX")])
AC_CONFIG_MACRO_DIRS([m4])
AC_CONFIG_SRCDIR(README.md)
AC_CONFIG_AUX_DIR(config)
@@ -9,8 +9,7 @@ AC_PROG_SED
AC_CANONICAL_HOST
AC_MSG_CHECKING([for the canonical Nix system name])
-AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
- [Platform identifier (e.g., `i686-linux').]),
+AC_ARG_WITH(system, AS_HELP_STRING([--with-system=SYSTEM],[Platform identifier (e.g., `i686-linux').]),
[system=$withval],
[case "$host_cpu" in
i*86)
@@ -33,14 +32,6 @@ AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
esac])
-sys_name=$(uname -s | tr 'A-Z ' 'a-z_')
-
-case $sys_name in
- cygwin*)
- sys_name=cygwin
- ;;
-esac
-
AC_MSG_RESULT($system)
AC_SUBST(system)
AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier ('cpu-os')])
@@ -64,10 +55,12 @@ AC_SYS_LARGEFILE
# Solaris-specific stuff.
AC_STRUCT_DIRENT_D_TYPE
-if test "$sys_name" = sunos; then
+case "$host_os" in
+ solaris*)
# Solaris requires -lsocket -lnsl for network functions
- LIBS="-lsocket -lnsl $LIBS"
-fi
+ LDFLAGS="-lsocket -lnsl $LDFLAGS"
+ ;;
+esac
# Check for pubsetbuf.
@@ -127,8 +120,7 @@ NEED_PROG(jq, jq)
AC_SUBST(coreutils, [$(dirname $(type -p cat))])
-AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
- [path of the Nix store (defaults to /nix/store)]),
+AC_ARG_WITH(store-dir, AS_HELP_STRING([--with-store-dir=PATH],[path of the Nix store (defaults to /nix/store)]),
storedir=$withval, storedir='/nix/store')
AC_SUBST(storedir)
@@ -152,13 +144,12 @@ int main() {
}]])], GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=no, GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes)
AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC)
if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then
- LIBS="-latomic $LIBS"
+ LDFLAGS="-latomic $LDFLAGS"
fi
PKG_PROG_PKG_CONFIG
-AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared],
- [Build shared libraries for Nix [default=yes]]),
+AC_ARG_ENABLE(shared, AS_HELP_STRING([--enable-shared],[Build shared libraries for Nix [default=yes]]),
shared=$enableval, shared=yes)
if test "$shared" = yes; then
AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.])
@@ -213,30 +204,32 @@ AC_SUBST(HAVE_LIBCPUID, [$have_libcpuid])
# Look for libseccomp, required for Linux sandboxing.
-if test "$sys_name" = linux; then
- AC_ARG_ENABLE([seccomp-sandboxing],
- AC_HELP_STRING([--disable-seccomp-sandboxing],
- [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)]
- ))
- if test "x$enable_seccomp_sandboxing" != "xno"; then
- PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp],
- [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"])
- have_seccomp=1
- AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.])
- else
+case "$host_os" in
+ linux*)
+ AC_ARG_ENABLE([seccomp-sandboxing],
+ AS_HELP_STRING([--disable-seccomp-sandboxing],[Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)
+ ]))
+ if test "x$enable_seccomp_sandboxing" != "xno"; then
+ PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp],
+ [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"])
+ have_seccomp=1
+ AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.])
+ else
+ have_seccomp=
+ fi
+ ;;
+ *)
have_seccomp=
- fi
-else
- have_seccomp=
-fi
+ ;;
+esac
AC_SUBST(HAVE_SECCOMP, [$have_seccomp])
# Look for aws-cpp-sdk-s3.
AC_LANG_PUSH(C++)
AC_CHECK_HEADERS([aws/s3/S3Client.h],
- [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.])
- enable_s3=1], [enable_s3=])
+ [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1],
+ [AC_DEFINE([ENABLE_S3], [0], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=])
AC_SUBST(ENABLE_S3, [$enable_s3])
AC_LANG_POP(C++)
@@ -249,8 +242,7 @@ fi
# Whether to use the Boehm garbage collector.
-AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc],
- [enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
+AC_ARG_ENABLE(gc, AS_HELP_STRING([--enable-gc],[enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
gc=$enableval, gc=yes)
if test "$gc" = yes; then
PKG_CHECK_MODULES([BDW_GC], [bdw-gc])
@@ -264,11 +256,12 @@ PKG_CHECK_MODULES([GTEST], [gtest_main])
# documentation generation switch
-AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen],
- [disable documentation generation]),
+AC_ARG_ENABLE(doc-gen, AS_HELP_STRING([--disable-doc-gen],[disable documentation generation]),
doc_generate=$enableval, doc_generate=yes)
AC_SUBST(doc_generate)
+# Look for lowdown library.
+PKG_CHECK_MODULES([LOWDOWN], [lowdown >= 0.8.0], [CXXFLAGS="$LOWDOWN_CFLAGS $CXXFLAGS"])
# Setuid installations.
AC_CHECK_FUNCS([setresuid setreuid lchown])
@@ -280,13 +273,14 @@ AC_CHECK_FUNCS([strsignal posix_fallocate sysconf])
# This is needed if bzip2 is a static library, and the Nix libraries
# are dynamic.
-if test "$(uname)" = "Darwin"; then
+case "${host_os}" in
+ darwin*)
LDFLAGS="-all_load $LDFLAGS"
-fi
+ ;;
+esac
-AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH],
- [path of a statically-linked shell to use as /bin/sh in sandboxes]),
+AC_ARG_WITH(sandbox-shell, AS_HELP_STRING([--with-sandbox-shell=PATH],[path of a statically-linked shell to use as /bin/sh in sandboxes]),
sandbox_shell=$withval)
AC_SUBST(sandbox_shell)
@@ -301,6 +295,6 @@ done
rm -f Makefile.config
-AC_CONFIG_HEADER([config.h])
+AC_CONFIG_HEADERS([config.h])
AC_CONFIG_FILES([])
AC_OUTPUT
diff --git a/doc/manual/generate-builtins.nix b/doc/manual/generate-builtins.nix
index 416a7fdba..92c7b1a31 100644
--- a/doc/manual/generate-builtins.nix
+++ b/doc/manual/generate-builtins.nix
@@ -6,9 +6,11 @@ builtins:
concatStrings (map
(name:
let builtin = builtins.${name}; in
- " - `builtins.${name}` " + concatStringsSep " " (map (s: "*${s}*") builtin.args)
- + " \n\n"
- + concatStrings (map (s: " ${s}\n") (splitLines builtin.doc)) + "\n\n"
+ "<dt><code>${name} "
+ + concatStringsSep " " (map (s: "<var>${s}</var>") builtin.args)
+ + "</code></dt>"
+ + "<dd>\n\n"
+ + builtin.doc
+ + "\n\n</dd>"
)
(attrNames builtins))
-
diff --git a/doc/manual/generate-manpage.nix b/doc/manual/generate-manpage.nix
index 964b57086..244cfa0c2 100644
--- a/doc/manual/generate-manpage.nix
+++ b/doc/manual/generate-manpage.nix
@@ -1,4 +1,4 @@
-command:
+{ command, renderLinks ? false }:
with builtins;
with import ./utils.nix;
@@ -20,7 +20,11 @@ let
categories = sort (x: y: x.id < y.id) (unique (map (cmd: cmd.category) (attrValues def.commands)));
listCommands = cmds:
concatStrings (map (name:
- "* [`${command} ${name}`](./${appendName filename name}.md) - ${cmds.${name}.description}\n")
+ "* "
+ + (if renderLinks
+ then "[`${command} ${name}`](./${appendName filename name}.md)"
+ else "`${command} ${name}`")
+ + " - ${cmds.${name}.description}\n")
(attrNames cmds));
in
"where *subcommand* is one of the following:\n\n"
@@ -89,7 +93,7 @@ let
in
let
- manpages = processCommand { filename = "nix"; command = "nix"; def = command; };
+ manpages = processCommand { filename = "nix"; command = "nix"; def = builtins.fromJSON command; };
summary = concatStrings (map (manpage: " - [${manpage.command}](command-ref/new-cli/${manpage.name})\n") manpages);
in
(listToAttrs manpages) // { "SUMMARY.md" = summary; }
diff --git a/doc/manual/local.mk b/doc/manual/local.mk
index 271529b38..e43d9f2fb 100644
--- a/doc/manual/local.mk
+++ b/doc/manual/local.mk
@@ -1,7 +1,5 @@
ifeq ($(doc_generate),yes)
-MANUAL_SRCS := $(call rwildcard, $(d)/src, *.md)
-
# Generate man pages.
man-pages := $(foreach n, \
nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 \
@@ -46,7 +44,7 @@ $(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
$(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix
@rm -rf $@
- $(trace-gen) $(nix-eval) --write-to $@ --expr 'import doc/manual/generate-manpage.nix (builtins.fromJSON (builtins.readFile $<))'
+ $(trace-gen) $(nix-eval) --write-to $@ --expr 'import doc/manual/generate-manpage.nix { command = builtins.readFile $<; renderLinks = true; }'
$(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/generate-options.nix $(d)/src/command-ref/conf-file-prefix.md $(bindir)/nix
@cat doc/manual/src/command-ref/conf-file-prefix.md > $@.tmp
@@ -64,6 +62,7 @@ $(d)/conf-file.json: $(bindir)/nix
$(d)/src/expressions/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/expressions/builtins-prefix.md $(bindir)/nix
@cat doc/manual/src/expressions/builtins-prefix.md > $@.tmp
$(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp
+ @cat doc/manual/src/expressions/builtins-suffix.md >> $@.tmp
@mv $@.tmp $@
$(d)/builtins.json: $(bindir)/nix
@@ -74,17 +73,28 @@ $(d)/builtins.json: $(bindir)/nix
install: $(docdir)/manual/index.html
# Generate 'nix' manpages.
-install: $(d)/src/command-ref/new-cli
+install: $(mandir)/man1/nix3-manpages
+man: doc/manual/generated/man1/nix3-manpages
+all: doc/manual/generated/man1/nix3-manpages
+
+$(mandir)/man1/nix3-manpages: doc/manual/generated/man1/nix3-manpages
+ @mkdir -p $(DESTDIR)$$(dirname $@)
+ $(trace-install) install -m 0644 $$(dirname $<)/* $(DESTDIR)$$(dirname $@)
+
+doc/manual/generated/man1/nix3-manpages: $(d)/src/command-ref/new-cli
+ @mkdir -p $(DESTDIR)$$(dirname $@)
$(trace-gen) for i in doc/manual/src/command-ref/new-cli/*.md; do \
name=$$(basename $$i .md); \
+ tmpFile=$$(mktemp); \
if [[ $$name = SUMMARY ]]; then continue; fi; \
- printf "Title: %s\n\n" "$$name" > $$i.tmp; \
- cat $$i >> $$i.tmp; \
- lowdown -sT man -M section=1 $$i.tmp -o $(mandir)/man1/$$name.1; \
+ printf "Title: %s\n\n" "$$name" > $$tmpFile; \
+ cat $$i >> $$tmpFile; \
+ lowdown -sT man -M section=1 $$tmpFile -o $(DESTDIR)$$(dirname $@)/$$name.1; \
+ rm $$tmpFile; \
done
+ @touch $@
-$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md
- $(trace-gen) RUST_LOG=warn mdbook build doc/manual -d $(docdir)/manual
- @cp doc/manual/highlight.pack.js $(docdir)/manual/highlight.js
+$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md $(call rwildcard, $(d)/src, *.md)
+ $(trace-gen) RUST_LOG=warn mdbook build doc/manual -d $(DESTDIR)$(docdir)/manual
endif
diff --git a/doc/manual/src/SUMMARY.md.in b/doc/manual/src/SUMMARY.md.in
index 448fee803..8d9b061ba 100644
--- a/doc/manual/src/SUMMARY.md.in
+++ b/doc/manual/src/SUMMARY.md.in
@@ -9,6 +9,7 @@
- [Prerequisites](installation/prerequisites-source.md)
- [Obtaining a Source Distribution](installation/obtaining-source.md)
- [Building Nix from Source](installation/building-source.md)
+ - [Using Nix within Docker](installation/installing-docker.md)
- [Security](installation/nix-security.md)
- [Single-User Mode](installation/single-user.md)
- [Multi-User Mode](installation/multi-user.md)
@@ -70,6 +71,8 @@
- [Hacking](contributing/hacking.md)
- [CLI guideline](contributing/cli-guideline.md)
- [Release Notes](release-notes/release-notes.md)
+ - [Release X.Y (202?-??-??)](release-notes/rl-next.md)
+ - [Release 2.4 (2021-11-01)](release-notes/rl-2.4.md)
- [Release 2.3 (2019-09-04)](release-notes/rl-2.3.md)
- [Release 2.2 (2019-01-11)](release-notes/rl-2.2.md)
- [Release 2.1 (2018-09-02)](release-notes/rl-2.1.md)
diff --git a/doc/manual/src/command-ref/conf-file-prefix.md b/doc/manual/src/command-ref/conf-file-prefix.md
index 3140170ab..44b7ba86d 100644
--- a/doc/manual/src/command-ref/conf-file-prefix.md
+++ b/doc/manual/src/command-ref/conf-file-prefix.md
@@ -16,8 +16,9 @@ By default Nix reads settings from the following places:
will be loaded in reverse order.
Otherwise it will look for `nix/nix.conf` files in `XDG_CONFIG_DIRS`
- and `XDG_CONFIG_HOME`. If these are unset, it will look in
- `$HOME/.config/nix.conf`.
+ and `XDG_CONFIG_HOME`. If unset, `XDG_CONFIG_DIRS` defaults to
+ `/etc/xdg`, and `XDG_CONFIG_HOME` defaults to `$HOME/.config`
+ as per [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html).
- If `NIX_CONFIG` is set, its contents is treated as the contents of
a configuration file.
diff --git a/doc/manual/src/command-ref/env-common.md b/doc/manual/src/command-ref/env-common.md
index b709ca9d1..6e2403461 100644
--- a/doc/manual/src/command-ref/env-common.md
+++ b/doc/manual/src/command-ref/env-common.md
@@ -10,35 +10,39 @@ Most Nix commands interpret the following environment variables:
A colon-separated list of directories used to look up Nix
expressions enclosed in angle brackets (i.e., `<path>`). For
instance, the value
-
+
/home/eelco/Dev:/etc/nixos
-
+
will cause Nix to look for paths relative to `/home/eelco/Dev` and
`/etc/nixos`, in this order. It is also possible to match paths
against a prefix. For example, the value
-
+
nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos
-
+
will cause Nix to search for `<nixpkgs/path>` in
`/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
-
+
If a path in the Nix search path starts with `http://` or
`https://`, it is interpreted as the URL of a tarball that will be
downloaded and unpacked to a temporary location. The tarball must
consist of a single top-level directory. For example, setting
`NIX_PATH` to
-
- nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-15.09.tar.gz
-
- tells Nix to download the latest revision in the Nixpkgs/NixOS 15.09
- channel.
-
- A following shorthand can be used to refer to the official channels:
-
- nixpkgs=channel:nixos-15.09
-
- The search path can be extended using the `-I` option, which takes
- precedence over `NIX_PATH`.
+
+ nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
+
+ tells Nix to download and use the current contents of the
+ `master` branch in the `nixpkgs` repository.
+
+ The URLs of the tarballs from the official nixos.org channels (see
+ [the manual for `nix-channel`](nix-channel.md)) can be abbreviated
+ as `channel:<channel-name>`. For instance, the following two
+ values of `NIX_PATH` are equivalent:
+
+ nixpkgs=channel:nixos-21.05
+ nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
+
+ The Nix search path can also be extended using the `-I` option to
+ many Nix commands, which takes precedence over `NIX_PATH`.
- `NIX_IGNORE_SYMLINK_STORE`\
Normally, the Nix store directory (typically `/nix/store`) is not
@@ -50,7 +54,7 @@ Most Nix commands interpret the following environment variables:
builds are deployed to machines where `/nix/store` resolves
differently. If you are sure that you’re not going to do that, you
can set `NIX_IGNORE_SYMLINK_STORE` to `1`.
-
+
Note that if you’re symlinking the Nix store so that you can put it
on another file system than the root file system, on Linux you’re
better off using `bind` mount points, e.g.,
@@ -59,7 +63,7 @@ Most Nix commands interpret the following environment variables:
$ mkdir /nix
$ mount -o bind /mnt/otherdisk/nix /nix
```
-
+
Consult the mount 8 manual page for details.
- `NIX_STORE_DIR`\
diff --git a/doc/manual/src/command-ref/nix-env.md b/doc/manual/src/command-ref/nix-env.md
index 9138fa05a..8d6abaf52 100644
--- a/doc/manual/src/command-ref/nix-env.md
+++ b/doc/manual/src/command-ref/nix-env.md
@@ -238,7 +238,16 @@ a number of possible ways:
## Examples
-To install a specific version of `gcc` from the active Nix expression:
+To install a package using a specific attribute path from the active Nix expression:
+
+```console
+$ nix-env -iA gcc40mips
+installing `gcc-4.0.2'
+$ nix-env -iA xorg.xorgserver
+installing `xorg-server-1.2.0'
+```
+
+To install a specific version of `gcc` using the derivation name:
```console
$ nix-env --install gcc-3.3.2
@@ -246,6 +255,9 @@ installing `gcc-3.3.2'
uninstalling `gcc-3.1'
```
+Using attribute path for selecting a package is preferred,
+as it is much faster and there will not be multiple matches.
+
Note the previously installed version is removed, since
`--preserve-installed` was not specified.
@@ -256,13 +268,6 @@ $ nix-env --install gcc
installing `gcc-3.3.2'
```
-To install using a specific attribute:
-
-```console
-$ nix-env -i -A gcc40mips
-$ nix-env -i -A xorg.xorgserver
-```
-
To install all derivations in the Nix expression `foo.nix`:
```console
@@ -374,22 +379,29 @@ For the other flags, see `--install`.
## Examples
```console
-$ nix-env --upgrade gcc
+$ nix-env --upgrade -A nixpkgs.gcc
upgrading `gcc-3.3.1' to `gcc-3.4'
```
+When there are no updates available, nothing will happen:
+
```console
-$ nix-env -u gcc-3.3.2 --always (switch to a specific version)
-upgrading `gcc-3.4' to `gcc-3.3.2'
+$ nix-env --upgrade -A nixpkgs.pan
```
+Using `-A` is preferred when possible, as it is faster and unambiguous but
+it is also possible to upgrade to a specific version by matching the derivation name:
+
```console
-$ nix-env --upgrade pan
-(no upgrades available, so nothing happens)
+$ nix-env -u gcc-3.3.2 --always
+upgrading `gcc-3.4' to `gcc-3.3.2'
```
+To try to upgrade everything
+(matching packages based on the part of the derivation name without version):
+
```console
-$ nix-env -u (try to upgrade everything)
+$ nix-env -u
upgrading `hello-2.1.2' to `hello-2.1.3'
upgrading `mozilla-1.2' to `mozilla-1.4'
```
@@ -401,7 +413,7 @@ of a derivation `x` by looking at their respective `name` attributes.
The names (e.g., `gcc-3.3.1` are split into two parts: the package name
(`gcc`), and the version (`3.3.1`). The version part starts after the
first dash not followed by a letter. `x` is considered an upgrade of `y`
-if their package names match, and the version of `y` is higher that that
+if their package names match, and the version of `y` is higher than that
of `x`.
The versions are compared by splitting them into contiguous components
diff --git a/doc/manual/src/command-ref/nix-shell.md b/doc/manual/src/command-ref/nix-shell.md
index dcd7cc70c..873311649 100644
--- a/doc/manual/src/command-ref/nix-shell.md
+++ b/doc/manual/src/command-ref/nix-shell.md
@@ -11,8 +11,8 @@
[`--command` *cmd*]
[`--run` *cmd*]
[`--exclude` *regexp*]
- [--pure]
- [--keep *name*]
+ [`--pure`]
+ [`--keep` *name*]
{{`--packages` | `-p`} {*packages* | *expressions*} … | [*path*]}
# Description
@@ -78,9 +78,7 @@ All options not listed here are passed to `nix-store
cleared before the interactive shell is started, so you get an
environment that more closely corresponds to the “real” Nix build. A
few variables, in particular `HOME`, `USER` and `DISPLAY`, are
- retained. Note that (depending on your Bash
- installation) `/etc/bashrc` is still sourced, so any variables set
- there will affect the interactive shell.
+ retained.
- `--packages` / `-p` *packages*…\
Set up an environment in which the specified packages are present.
@@ -112,13 +110,19 @@ shell in which to build it:
```console
$ nix-shell '<nixpkgs>' -A pan
-[nix-shell]$ unpackPhase
+[nix-shell]$ eval ${unpackPhase:-unpackPhase}
[nix-shell]$ cd pan-*
-[nix-shell]$ configurePhase
-[nix-shell]$ buildPhase
+[nix-shell]$ eval ${configurePhase:-configurePhase}
+[nix-shell]$ eval ${buildPhase:-buildPhase}
[nix-shell]$ ./pan/gui/pan
```
+The reason we use form `eval ${configurePhase:-configurePhase}` here is because
+those packages that override these phases do so by exporting the overridden
+values in the environment variable of the same name.
+Here bash is being told to either evaluate the contents of 'configurePhase',
+if it exists as a variable, otherwise evaluate the configurePhase function.
+
To clear the environment first, and do some additional automatic
initialisation of the interactive shell:
diff --git a/doc/manual/src/command-ref/nix-store.md b/doc/manual/src/command-ref/nix-store.md
index 7a131dc02..26292f1bb 100644
--- a/doc/manual/src/command-ref/nix-store.md
+++ b/doc/manual/src/command-ref/nix-store.md
@@ -125,7 +125,7 @@ Special exit codes:
- `104`\
Not deterministic, the build succeeded in check mode but the
- resulting output is not binary reproducable.
+ resulting output is not binary reproducible.
With the `--keep-going` flag it's possible for multiple failures to
occur, in this case the 1xx status codes are or combined using binary
diff --git a/doc/manual/src/command-ref/opt-common.md b/doc/manual/src/command-ref/opt-common.md
index 47862bc09..7ee1a26bc 100644
--- a/doc/manual/src/command-ref/opt-common.md
+++ b/doc/manual/src/command-ref/opt-common.md
@@ -162,11 +162,11 @@ Most Nix commands accept the following command-line options:
}: ...
```
- So if you call this Nix expression (e.g., when you do `nix-env -i
+ So if you call this Nix expression (e.g., when you do `nix-env -iA
pkgname`), the function will be called automatically using the
value [`builtins.currentSystem`](../expressions/builtins.md) for
the `system` argument. You can override this using `--arg`, e.g.,
- `nix-env -i pkgname --arg system \"i686-freebsd\"`. (Note that
+ `nix-env -iA pkgname --arg system \"i686-freebsd\"`. (Note that
since the argument is a Nix string literal, you have to escape the
quotes.)
diff --git a/doc/manual/src/contributing/cli-guideline.md b/doc/manual/src/contributing/cli-guideline.md
index 0132867c8..01a1b1e73 100644
--- a/doc/manual/src/contributing/cli-guideline.md
+++ b/doc/manual/src/contributing/cli-guideline.md
@@ -3,7 +3,7 @@
## Goals
Purpose of this document is to provide a clear direction to **help design
-delightful command line** experience. This document contain guidelines to
+delightful command line** experience. This document contains guidelines to
follow to ensure a consistent and approachable user experience.
## Overview
@@ -103,7 +103,7 @@ impacted the most by bad user experience.
# Help is essential
Help should be built into your command line so that new users can gradually
-discover new features when they need them.
+discover new features when they need them.
## Looking for help
@@ -115,7 +115,7 @@ The rules are:
- Help is shown by using `--help` or `help` command (eg `nix` `--``help` or
`nix help`).
-- For non-COMMANDs (eg. `nix` `--``help` and `nix store` `--``help`) we **show
+- For non-COMMANDs (eg. `nix` `--``help` and `nix store` `--``help`) we **show
a summary** of most common use cases. Summary is presented on the STDOUT
without any use of PAGER.
- For COMMANDs (eg. `nix init` `--``help` or `nix help init`) we display the
@@ -176,7 +176,7 @@ $ nix init --template=template#pyton
------------------------------------------------------------------------
Initializing Nix project at `/path/to/here`.
Select a template for you new project:
- |> template#pyton
+ |> template#python
template#python-pip
template#python-poetry
```
@@ -230,17 +230,17 @@ Now **Learn** part of the output is where you educate users. You should only
show it when you know that a build will take some time and not annoy users of
the builds that take only few seconds.
-Every feature like this should go though a intensive review and testing to
-collect as much a feedback as possible and to fine tune every little detail. If
+Every feature like this should go through an intensive review and testing to
+collect as much feedback as possible and to fine tune every little detail. If
done right this can be an awesome features beginners and advance users will
love, but if not done perfectly it will annoy users and leave bad impression.
# Input
-Input to a command is provided via `ARGUMENTS` and `OPTIONS`.
+Input to a command is provided via `ARGUMENTS` and `OPTIONS`.
`ARGUMENTS` represent a required input for a function. When choosing to use
-`ARGUMENT` over function please be aware of the downsides that come with it:
+`ARGUMENTS` over `OPTIONS` please be aware of the downsides that come with it:
- User will need to remember the order of `ARGUMENTS`. This is not a problem if
there is only one `ARGUMENT`.
@@ -253,7 +253,7 @@ developer consider the downsides and choose wisely.
## Naming the `OPTIONS`
-Then only naming convention - apart from the ones mentioned in Naming the
+The only naming convention - apart from the ones mentioned in Naming the
`COMMANDS` section is how flags are named.
Flags are a type of `OPTION` that represent an option that can be turned ON of
@@ -271,12 +271,12 @@ to improve the discoverability of possible input. A new user will most likely
not know which `ARGUMENTS` and `OPTIONS` are required or which values are
possible for those options.
-In cases, the user might not provide the input or they provide wrong input,
-rather then show the error, prompt a user with an option to find and select
+In case the user does not provide the input or they provide wrong input,
+rather than show the error, prompt a user with an option to find and select
correct input (see examples).
Prompting is of course not required when TTY is not attached to STDIN. This
-would mean that scripts wont need to handle prompt, but rather handle errors.
+would mean that scripts won't need to handle prompt, but rather handle errors.
A place to use prompt and provide user with interactive select
@@ -300,9 +300,9 @@ going to happen.
```shell
$ nix build --option substitutors https://cache.example.org
------------------------------------------------------------------------
- Warning! A security related question need to be answered.
+ Warning! A security related question needs to be answered.
------------------------------------------------------------------------
- The following substitutors will be used to in `my-project`:
+ The following substitutors will be used to in `my-project`:
- https://cache.example.org
Do you allow `my-project` to use above mentioned substitutors?
@@ -311,14 +311,14 @@ $ nix build --option substitutors https://cache.example.org
# Output
-Terminal output can be quite limiting in many ways. Which should forces us to
+Terminal output can be quite limiting in many ways. Which should force us to
think about the experience even more. As with every design the output is a
compromise between being terse and being verbose, between showing help to
beginners and annoying advance users. For this it is important that we know
what are the priorities.
Nix command line should be first and foremost written with beginners in mind.
-But users wont stay beginners for long and what was once useful might quickly
+But users won't stay beginners for long and what was once useful might quickly
become annoying. There is no golden rule that we can give in this guideline
that would make it easier how to draw a line and find best compromise.
@@ -342,7 +342,7 @@ also allowing them to redirect content to a file. For example:
```shell
$ nix build > build.txt
------------------------------------------------------------------------
- Error! Atrribute `bin` missing at (1:94) from string.
+ Error! Attribute `bin` missing at (1:94) from string.
------------------------------------------------------------------------
1| with import <nixpkgs> { }; (pkgs.runCommandCC or pkgs.runCommand) "shell" { buildInputs = [ (surge.bin) ]; } ""
@@ -408,7 +408,7 @@ Above command clearly states that command successfully completed. And in case
of `nix build`, which is a command that might take some time to complete, it is
equally important to also show that a command started.
-## Text alignment
+## Text alignment
Text alignment is the number one design element that will present all of the
Nix commands as a family and not as separate tools glued together.
@@ -419,7 +419,7 @@ The format we should follow is:
$ nix COMMAND
VERB_1 NOUN and other words
VERB__1 NOUN and other words
- |> Some details
+ |> Some details
```
Few rules that we can extract from above example:
@@ -444,13 +444,13 @@ is not even notable, therefore relying on it wouldn’t make much sense.
**The bright text is much better supported** across terminals and color
schemes. Most of the time the difference is perceived as if the bright text
-would be bold.
+would be bold.
## Colors
Humans are already conditioned by society to attach certain meaning to certain
colors. While the meaning is not universal, a simple collection of colors is
-used to represent basic emotions.
+used to represent basic emotions.
Colors that can be used in output
@@ -508,7 +508,7 @@ can, with a few key strokes, be changed into and advance introspection tool.
### Progress
-For longer running commands we should provide and overview of the progress.
+For longer running commands we should provide and overview the progress.
This is shown best in `nix build` example:
```shell
@@ -553,9 +553,9 @@ going to happen.
```shell
$ nix build --option substitutors https://cache.example.org
------------------------------------------------------------------------
- Warning! A security related question need to be answered.
+ Warning! A security related question needs to be answered.
------------------------------------------------------------------------
- The following substitutors will be used to in `my-project`:
+ The following substitutors will be used to in `my-project`:
- https://cache.example.org
Do you allow `my-project` to use above mentioned substitutors?
@@ -566,7 +566,7 @@ $ nix build --option substitutors https://cache.example.org
There are many ways that you can control verbosity.
-Verbosity levels are:
+Verbosity levels are:
- `ERROR` (level 0)
- `WARN` (level 1)
@@ -586,4 +586,4 @@ There are also two shortcuts, `--debug` to run in `DEBUG` verbosity level and
# Appendix 1: Commands naming exceptions
-`nix init` and `nix repl` are well established
+`nix init` and `nix repl` are well established
diff --git a/doc/manual/src/expressions/advanced-attributes.md b/doc/manual/src/expressions/advanced-attributes.md
index 5b208df67..000595815 100644
--- a/doc/manual/src/expressions/advanced-attributes.md
+++ b/doc/manual/src/expressions/advanced-attributes.md
@@ -237,7 +237,7 @@ Derivations can declare some infrequently used optional attributes.
- `preferLocalBuild`\
If this attribute is set to `true` and [distributed building is
enabled](../advanced-topics/distributed-builds.md), then, if
- possible, the derivaton will be built locally instead of forwarded
+ possible, the derivation will be built locally instead of forwarded
to a remote machine. This is appropriate for trivial builders
where the cost of doing a download or remote build would exceed
the cost of building locally.
diff --git a/doc/manual/src/expressions/builtins-prefix.md b/doc/manual/src/expressions/builtins-prefix.md
index c16b2805f..c631a8453 100644
--- a/doc/manual/src/expressions/builtins-prefix.md
+++ b/doc/manual/src/expressions/builtins-prefix.md
@@ -9,7 +9,8 @@ scope. Instead, you can access them through the `builtins` built-in
value, which is a set that contains all built-in functions and values.
For instance, `derivation` is also available as `builtins.derivation`.
- - `derivation` *attrs*; `builtins.derivation` *attrs*\
-
- `derivation` is described in [its own section](derivations.md).
-
+<dl>
+ <dt><code>derivation <var>attrs</var></code>;
+ <code>builtins.derivation <var>attrs</var></code></dt>
+ <dd><p><var>derivation</var> is described in
+ <a href="derivations.md">its own section</a>.</p></dd>
diff --git a/doc/manual/src/expressions/builtins-suffix.md b/doc/manual/src/expressions/builtins-suffix.md
new file mode 100644
index 000000000..a74db2857
--- /dev/null
+++ b/doc/manual/src/expressions/builtins-suffix.md
@@ -0,0 +1 @@
+</dl>
diff --git a/doc/manual/src/expressions/expression-syntax.md b/doc/manual/src/expressions/expression-syntax.md
index 2a1306e32..6b93e692c 100644
--- a/doc/manual/src/expressions/expression-syntax.md
+++ b/doc/manual/src/expressions/expression-syntax.md
@@ -26,7 +26,7 @@ elements (referenced from the figure by number):
called with three arguments: `stdenv`, `fetchurl`, and `perl`. They
are needed to build Hello, but we don't know how to build them here;
that's why they are function arguments. `stdenv` is a package that
- is used by almost all Nix Packages packages; it provides a
+ is used by almost all Nix Packages; it provides a
“standard” environment consisting of the things you would expect
in a basic Unix environment: a C/C++ compiler (GCC, to be precise),
the Bash shell, fundamental Unix tools such as `cp`, `grep`, `tar`,
diff --git a/doc/manual/src/expressions/language-operators.md b/doc/manual/src/expressions/language-operators.md
index b7fd6f4c6..268b44f4c 100644
--- a/doc/manual/src/expressions/language-operators.md
+++ b/doc/manual/src/expressions/language-operators.md
@@ -17,12 +17,12 @@ order of precedence (from strongest to weakest binding).
| String Concatenation | *string1* `+` *string2* | left | String concatenation. | 7 |
| Not | `!` *e* | none | Boolean negation. | 8 |
| Update | *e1* `//` *e2* | right | Return a set consisting of the attributes in *e1* and *e2* (with the latter taking precedence over the former in case of equally named attributes). | 9 |
-| Less Than | *e1* `<` *e2*, | none | Arithmetic comparison. | 10 |
-| Less Than or Equal To | *e1* `<=` *e2* | none | Arithmetic comparison. | 10 |
-| Greater Than | *e1* `>` *e2* | none | Arithmetic comparison. | 10 |
-| Greater Than or Equal To | *e1* `>=` *e2* | none | Arithmetic comparison. | 10 |
+| Less Than | *e1* `<` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
+| Less Than or Equal To | *e1* `<=` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
+| Greater Than | *e1* `>` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
+| Greater Than or Equal To | *e1* `>=` *e2* | none | Arithmetic/lexicographic comparison. | 10 |
| Equality | *e1* `==` *e2* | none | Equality. | 11 |
| Inequality | *e1* `!=` *e2* | none | Inequality. | 11 |
| Logical AND | *e1* `&&` *e2* | left | Logical AND. | 12 |
-| Logical OR | *e1* `\|\|` *e2* | left | Logical OR. | 13 |
-| Logical Implication | *e1* `->` *e2* | none | Logical implication (equivalent to `!e1 \|\| e2`). | 14 |
+| Logical OR | *e1* <code>&#124;&#124;</code> *e2* | left | Logical OR. | 13 |
+| Logical Implication | *e1* `->` *e2* | none | Logical implication (equivalent to <code>!e1 &#124;&#124; e2</code>). | 14 |
diff --git a/doc/manual/src/expressions/language-values.md b/doc/manual/src/expressions/language-values.md
index ce31029cc..75ae9f2eb 100644
--- a/doc/manual/src/expressions/language-values.md
+++ b/doc/manual/src/expressions/language-values.md
@@ -64,7 +64,7 @@ Nix has the following basic data types:
the start of each line. To be precise, it strips from each line a
number of spaces equal to the minimal indentation of the string as a
whole (disregarding the indentation of empty lines). For instance,
- the first and second line are indented two space, while the third
+ the first and second line are indented two spaces, while the third
line is indented four spaces. Thus, two spaces are stripped from
each line, so the resulting string is
@@ -139,6 +139,13 @@ Nix has the following basic data types:
environment variable `NIX_PATH` will be searched for the given file
or directory name.
+ Antiquotation is supported in any path except those in angle brackets.
+ `./${foo}-${bar}.nix` is a more convenient way of writing
+ `./. + "/" + foo + "-" + bar + ".nix"` or `./. + "/${foo}-${bar}.nix"`. At
+ least one slash must appear *before* any antiquotations for this to be
+ recognized as a path. `a.${foo}/b.${bar}` is a syntactically valid division
+ operation. `./a.${foo}/b.${bar}` is a path.
+
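+ As a minimal sketch (the variable `foo` here is hypothetical), the
+ following expression evaluates to a path such as
+ `/home/alice/project/lib/utils.nix`:
+
+ ```nix
+ let foo = "utils"; in
+ # at least one slash precedes the antiquotation, so this is a path
+ ./lib/${foo}.nix
+ ```
+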
- *Booleans* with values `true` and `false`.
- The null value, denoted as `null`.
diff --git a/doc/manual/src/expressions/simple-building-testing.md b/doc/manual/src/expressions/simple-building-testing.md
index 6f730a936..7f0d8f841 100644
--- a/doc/manual/src/expressions/simple-building-testing.md
+++ b/doc/manual/src/expressions/simple-building-testing.md
@@ -1,6 +1,6 @@
# Building and Testing
-You can now try to build Hello. Of course, you could do `nix-env -i
+You can now try to build Hello. Of course, you could do `nix-env -f . -iA
hello`, but you may not want to install a possibly broken package just
yet. The best way to test the package is by using the command
`nix-build`, which builds a Nix expression and creates a symlink named
diff --git a/doc/manual/src/installation/building-source.md b/doc/manual/src/installation/building-source.md
index d21a51a82..ed1efffd8 100644
--- a/doc/manual/src/installation/building-source.md
+++ b/doc/manual/src/installation/building-source.md
@@ -1,9 +1,9 @@
# Building Nix from Source
-After unpacking or checking out the Nix sources, issue the following
-commands:
+After cloning Nix's Git repository, issue the following commands:
```console
+$ ./bootstrap.sh
$ ./configure options...
$ make
$ make install
@@ -11,13 +11,6 @@ $ make install
Nix requires GNU Make, so you may need to invoke `gmake` instead.
-When building from the Git repository, these should be preceded by the
-command:
-
-```console
-$ ./bootstrap.sh
-```
-
The installation path can be specified by passing `--prefix=prefix`
to `configure`. The default installation directory is `/usr/local`. You
can change this to any location you like. You must have write permission
diff --git a/doc/manual/src/installation/env-variables.md b/doc/manual/src/installation/env-variables.md
index 4a49897e4..bb35c0e9f 100644
--- a/doc/manual/src/installation/env-variables.md
+++ b/doc/manual/src/installation/env-variables.md
@@ -40,7 +40,7 @@ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt
> **Note**
>
> You must not add the export and then do the install, as the Nix
-> installer will detect the presense of Nix configuration, and abort.
+> installer will detect the presence of Nix configuration, and abort.
## `NIX_SSL_CERT_FILE` with macOS and the Nix daemon
diff --git a/doc/manual/src/installation/installing-binary.md b/doc/manual/src/installation/installing-binary.md
index ae7fd458b..96fa34635 100644
--- a/doc/manual/src/installation/installing-binary.md
+++ b/doc/manual/src/installation/installing-binary.md
@@ -1,18 +1,26 @@
# Installing a Binary Distribution
-If you are using Linux or macOS versions up to 10.14 (Mojave), the
-easiest way to install Nix is to run the following command:
+The easiest way to install Nix is to run the following command:
```console
$ sh <(curl -L https://nixos.org/nix/install)
```
-If you're using macOS 10.15 (Catalina) or newer, consult [the macOS
-installation instructions](#macos-installation) before installing.
+This will run the installer interactively (causing it to explain what
+it is doing more explicitly), and perform the default "type" of install
+for your platform:
+- single-user on Linux
+- multi-user on macOS
-As of Nix 2.1.0, the Nix installer will always default to creating a
-single-user installation, however opting in to the multi-user
-installation is highly recommended.
+ > **Notes on read-only filesystem root in macOS 10.15 Catalina +**
+ >
+ > - It took some time to support this cleanly. You may see posts,
+ > examples, and tutorials using obsolete workarounds.
+ > - Supporting it cleanly made macOS installs too complex to qualify
+ > as single-user, so this type is no longer supported on macOS.
+
+We recommend the multi-user install if it supports your platform and
+you can authenticate with `sudo`.
# Single User Installation
@@ -50,9 +58,9 @@ $ rm -rf /nix
The multi-user Nix installation creates system users, and a system
service for the Nix daemon.
- - Linux running systemd, with SELinux disabled
-
- - macOS
+**Supported Systems**
+- Linux running systemd, with SELinux disabled
+- macOS
You can instruct the installer to perform a multi-user installation on
your system:
@@ -96,165 +104,28 @@ sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist
There may also be references to Nix in `/etc/profile`, `/etc/bashrc`,
and `/etc/zshrc` which you may remove.
-# macOS Installation
-
-Starting with macOS 10.15 (Catalina), the root filesystem is read-only.
-This means `/nix` can no longer live on your system volume, and that
-you'll need a workaround to install Nix.
-
-The recommended approach, which creates an unencrypted APFS volume for
-your Nix store and a "synthetic" empty directory to mount it over at
-`/nix`, is least likely to impair Nix or your system.
-
-> **Note**
->
-> With all separate-volume approaches, it's possible something on your
-> system (particularly daemons/services and restored apps) may need
-> access to your Nix store before the volume is mounted. Adding
-> additional encryption makes this more likely.
-
-If you're using a recent Mac with a [T2
-chip](https://www.apple.com/euro/mac/shared/docs/Apple_T2_Security_Chip_Overview.pdf),
-your drive will still be encrypted at rest (in which case "unencrypted"
-is a bit of a misnomer). To use this approach, just install Nix with:
-
-```console
-$ sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume
-```
-
-If you don't like the sound of this, you'll want to weigh the other
-approaches and tradeoffs detailed in this section.
-
-> **Note**
->
-> All of the known workarounds have drawbacks, but we hope better
-> solutions will be available in the future. Some that we have our eye
-> on are:
->
-> 1. A true firmlink would enable the Nix store to live on the primary
-> data volume without the build problems caused by the symlink
-> approach. End users cannot currently create true firmlinks.
->
-> 2. If the Nix store volume shared FileVault encryption with the
-> primary data volume (probably by using the same volume group and
-> role), FileVault encryption could be easily supported by the
-> installer without requiring manual setup by each user.
-
-## Change the Nix store path prefix
-
-Changing the default prefix for the Nix store is a simple approach which
-enables you to leave it on your root volume, where it can take full
-advantage of FileVault encryption if enabled. Unfortunately, this
-approach also opts your device out of some benefits that are enabled by
-using the same prefix across systems:
-
- - Your system won't be able to take advantage of the binary cache
- (unless someone is able to stand up and support duplicate caching
- infrastructure), which means you'll spend more time waiting for
- builds.
-
- - It's harder to build and deploy packages to Linux systems.
-
-It would also possible (and often requested) to just apply this change
-ecosystem-wide, but it's an intrusive process that has side effects we
-want to avoid for now.
-
-## Use a separate encrypted volume
-
-If you like, you can also add encryption to the recommended approach
-taken by the installer. You can do this by pre-creating an encrypted
-volume before you run the installer--or you can run the installer and
-encrypt the volume it creates later.
-
-In either case, adding encryption to a second volume isn't quite as
-simple as enabling FileVault for your boot volume. Before you dive in,
-there are a few things to weigh:
-
-1. The additional volume won't be encrypted with your existing
- FileVault key, so you'll need another mechanism to decrypt the
- volume.
-
-2. You can store the password in Keychain to automatically decrypt the
- volume on boot--but it'll have to wait on Keychain and may not mount
- before your GUI apps restore. If any of your launchd agents or apps
- depend on Nix-installed software (for example, if you use a
- Nix-installed login shell), the restore may fail or break.
-
- On a case-by-case basis, you may be able to work around this problem
- by using `wait4path` to block execution until your executable is
- available.
-
- It's also possible to decrypt and mount the volume earlier with a
- login hook--but this mechanism appears to be deprecated and its
- future is unclear.
-
-3. You can hard-code the password in the clear, so that your store
- volume can be decrypted before Keychain is available.
-
-If you are comfortable navigating these tradeoffs, you can encrypt the
-volume with something along the lines of:
-
-```console
-$ diskutil apfs enableFileVault /nix -user disk
-```
-
-## Symlink the Nix store to a custom location
-
-Another simple approach is using `/etc/synthetic.conf` to symlink the
-Nix store to the data volume. This option also enables your store to
-share any configured FileVault encryption. Unfortunately, builds that
-resolve the symlink may leak the canonical path or even fail.
-
-Because of these downsides, we can't recommend this approach.
-
-## Notes on the recommended approach
-
-This section goes into a little more detail on the recommended approach.
-You don't need to understand it to run the installer, but it can serve
-as a helpful reference if you run into trouble.
-
-1. In order to compose user-writable locations into the new read-only
- system root, Apple introduced a new concept called `firmlinks`,
- which it describes as a "bi-directional wormhole" between two
- filesystems. You can see the current firmlinks in
- `/usr/share/firmlinks`. Unfortunately, firmlinks aren't (currently?)
- user-configurable.
-
- For special cases like NFS mount points or package manager roots,
- [synthetic.conf(5)](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man5/synthetic.conf.5.html)
- supports limited user-controlled file-creation (of symlinks, and
- synthetic empty directories) at `/`. To create a synthetic empty
- directory for mounting at `/nix`, add the following line to
- `/etc/synthetic.conf` (create it if necessary):
-
- nix
-
-2. This configuration is applied at boot time, but you can use
- `apfs.util` to trigger creation (not deletion) of new entries
- without a reboot:
-
- ```console
- $ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B
- ```
-
-3. Create the new APFS volume with diskutil:
-
- ```console
- $ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix
- ```
-
-4. Using `vifs`, add the new mount to `/etc/fstab`. If it doesn't
- already have other entries, it should look something like:
-
- #
- # Warning - this file should only be modified with vifs(8)
- #
- # Failure to do so is unsupported and may be destructive.
- #
- LABEL=Nix\040Store /nix apfs rw,nobrowse
-
- The nobrowse setting will keep Spotlight from indexing this volume,
- and keep it from showing up on your desktop.
+# macOS Installation <a name="sect-macos-installation-change-store-prefix"></a><a name="sect-macos-installation-encrypted-volume"></a><a name="sect-macos-installation-symlink"></a><a name="sect-macos-installation-recommended-notes"></a>
+<!-- Note: anchors above to catch permalinks to old explanations -->
+
+We believe we have ironed out how to cleanly support the read-only root
+on modern macOS. New installs will do this automatically, and you can
+also re-run a new installer to convert your existing setup.
+
+This section previously detailed the situation, options, and trade-offs,
+but it now only outlines what the installer does. You don't need to know
+this to run the installer, but it may help if you run into trouble:
+
+- create a new APFS volume for your Nix store
+- update `/etc/synthetic.conf` to direct macOS to create a "synthetic"
+ empty root directory to mount your volume
+- specify mount options for the volume in `/etc/fstab`
+- if you have FileVault enabled
+ - generate an encryption password
+ - put it in your system Keychain
+ - use it to encrypt the volume
+- create a system LaunchDaemon to mount this volume early enough in the
+ boot process to avoid problems loading or restoring any programs that
+ need access to your Nix store
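+
+For reference, the mount options the installer sets for the volume in
+`/etc/fstab` (via `vifs`) look something like the following entry; the
+exact line on your system may differ:
+
+    LABEL=Nix\040Store /nix apfs rw,nobrowse
+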
# Installing a pinned Nix version from a URL
diff --git a/doc/manual/src/installation/installing-docker.md b/doc/manual/src/installation/installing-docker.md
new file mode 100644
index 000000000..3d2255b7a
--- /dev/null
+++ b/doc/manual/src/installation/installing-docker.md
@@ -0,0 +1,59 @@
+# Using Nix within Docker
+
+To run the latest stable release of Nix with Docker run the following command:
+
+```console
+$ docker run -ti nixos/nix
+Unable to find image 'nixos/nix:latest' locally
+latest: Pulling from nixos/nix
+5843afab3874: Pull complete
+b52bf13f109c: Pull complete
+1e2415612aa3: Pull complete
+Digest: sha256:27f6e7f60227e959ee7ece361f75d4844a40e1cc6878b6868fe30140420031ff
+Status: Downloaded newer image for nixos/nix:latest
+35ca4ada6e96:/# nix --version
+nix (Nix) 2.3.12
+35ca4ada6e96:/# exit
+```
+
+# What is included in Nix's Docker image?
+
+The official Docker image is created using `pkgs.dockerTools.buildLayeredImage`
+(and not with a `Dockerfile`, as is usual for Docker images). You can still
+base your custom Docker image on it as you would with any other Docker
+image.
+
+The Docker image is not based on any other image and includes only a minimal
+set of runtime dependencies that are required to use Nix:
+
+ - pkgs.nix
+ - pkgs.bashInteractive
+ - pkgs.coreutils-full
+ - pkgs.gnutar
+ - pkgs.gzip
+ - pkgs.gnugrep
+ - pkgs.which
+ - pkgs.curl
+ - pkgs.less
+ - pkgs.wget
+ - pkgs.man
+ - pkgs.cacert.out
+ - pkgs.findutils
+
+# Docker image with the latest development version of Nix
+
+To get the latest image that was built by [Hydra](https://hydra.nixos.org) run
+the following command:
+
+```console
+$ curl -L https://hydra.nixos.org/job/nix/master/dockerImage.x86_64-linux/latest/download/1 | docker load
+$ docker run -ti nix:2.5pre20211105
+```
+
+You can also build a Docker image from source yourself:
+
+```console
+$ nix build ./\#hydraJobs.dockerImage.x86_64-linux
+$ docker load -i ./result
+$ docker run -ti nix:2.5pre20211105
+```
diff --git a/doc/manual/src/installation/installing-source.md b/doc/manual/src/installation/installing-source.md
index e52d38a03..09b4e4887 100644
--- a/doc/manual/src/installation/installing-source.md
+++ b/doc/manual/src/installation/installing-source.md
@@ -1,4 +1,4 @@
# Installing Nix from Source
-If no binary package is available, you can download and compile a source
-distribution.
+If no binary package is available or if you want to hack on Nix, you
+can build Nix from its Git repository.
diff --git a/doc/manual/src/installation/obtaining-source.md b/doc/manual/src/installation/obtaining-source.md
index 0a906e390..da05d243d 100644
--- a/doc/manual/src/installation/obtaining-source.md
+++ b/doc/manual/src/installation/obtaining-source.md
@@ -1,14 +1,9 @@
-# Obtaining a Source Distribution
+# Obtaining the Source
-The source tarball of the most recent stable release can be downloaded
-from the [Nix homepage](http://nixos.org/nix/download.html). You can
-also grab the [most recent development
-release](http://hydra.nixos.org/job/nix/master/release/latest-finished#tabs-constituents).
-
-Alternatively, the most recent sources of Nix can be obtained from its
-[Git repository](https://github.com/NixOS/nix). For example, the
-following command will check out the latest revision into a directory
-called `nix`:
+The most recent sources of Nix can be obtained from its [Git
+repository](https://github.com/NixOS/nix). For example, the following
+command will check out the latest revision into a directory called
+`nix`:
```console
$ git clone https://github.com/NixOS/nix
diff --git a/doc/manual/src/installation/prerequisites-source.md b/doc/manual/src/installation/prerequisites-source.md
index 6825af707..0323a4f55 100644
--- a/doc/manual/src/installation/prerequisites-source.md
+++ b/doc/manual/src/installation/prerequisites-source.md
@@ -2,9 +2,8 @@
- GNU Autoconf (<https://www.gnu.org/software/autoconf/>) and the
autoconf-archive macro collection
- (<https://www.gnu.org/software/autoconf-archive/>). These are only
- needed to run the bootstrap script, and are not necessary if your
- source distribution came with a pre-built `./configure` script.
+ (<https://www.gnu.org/software/autoconf-archive/>). These are
+ needed to run the bootstrap script.
- GNU Make.
@@ -26,15 +25,6 @@
available for download from the official repository
<https://github.com/google/brotli>.
- - The bzip2 compressor program and the `libbz2` library. Thus you must
- have bzip2 installed, including development headers and libraries.
- If your distribution does not provide these, you can obtain bzip2
- from
- <https://sourceware.org/bzip2/>.
-
- - `liblzma`, which is provided by XZ Utils. If your distribution does
- not provide this, you can get it from <https://tukaani.org/xz/>.
-
- cURL and its library. If your distribution does not provide it, you
can get it from <https://curl.haxx.se/>.
@@ -61,8 +51,7 @@
you need version 2.5.35, which is available on
[SourceForge](http://lex.sourceforge.net/). Slightly older versions
may also work, but ancient versions like the ubiquitous 2.5.4a
- won't. Note that these are only required if you modify the parser or
- when you are building from the Git repository.
+ won't.
- The `libseccomp` library is used to provide syscall filtering on Linux. This
is an optional dependency and can be disabled by passing a
diff --git a/doc/manual/src/introduction.md b/doc/manual/src/introduction.md
index d68445c95..d87487a07 100644
--- a/doc/manual/src/introduction.md
+++ b/doc/manual/src/introduction.md
@@ -76,7 +76,7 @@ there after an upgrade. This means that you can _roll back_ to the
old version:
```console
-$ nix-env --upgrade some-packages
+$ nix-env --upgrade -A nixpkgs.some-package
$ nix-env --rollback
```
@@ -122,12 +122,12 @@ Nix expressions generally describe how to build a package from
source, so an installation action like
```console
-$ nix-env --install firefox
+$ nix-env --install -A nixpkgs.firefox
```
_could_ cause quite a bit of build activity, as not only Firefox but
also all its dependencies (all the way up to the C library and the
-compiler) would have to built, at least if they are not already in the
+compiler) would have to be built, at least if they are not already in the
Nix store. This is a _source deployment model_. For most users,
building from source is not very pleasant as it takes far too long.
However, Nix can automatically skip building from source and instead
diff --git a/doc/manual/src/package-management/basic-package-mgmt.md b/doc/manual/src/package-management/basic-package-mgmt.md
index 9702a29eb..50c6d3c2d 100644
--- a/doc/manual/src/package-management/basic-package-mgmt.md
+++ b/doc/manual/src/package-management/basic-package-mgmt.md
@@ -24,7 +24,7 @@ collection; you could write your own Nix expressions based on Nixpkgs,
or completely new ones.)
You can manually download the latest version of Nixpkgs from
-<http://nixos.org/nixpkgs/download.html>. However, it’s much more
+<https://github.com/NixOS/nixpkgs>. However, it’s much more
convenient to use the Nixpkgs [*channel*](channels.md), since it makes
it easy to stay up to date with new versions of Nixpkgs. Nixpkgs is
automatically added to your list of “subscribed” channels when you
@@ -47,41 +47,45 @@ $ nix-channel --update
You can view the set of available packages in Nixpkgs:
```console
-$ nix-env -qa
-aterm-2.2
-bash-3.0
-binutils-2.15
-bison-1.875d
-blackdown-1.4.2
-bzip2-1.0.2
+$ nix-env -qaP
+nixpkgs.aterm aterm-2.2
+nixpkgs.bash bash-3.0
+nixpkgs.binutils binutils-2.15
+nixpkgs.bison bison-1.875d
+nixpkgs.blackdown blackdown-1.4.2
+nixpkgs.bzip2 bzip2-1.0.2
```
-The flag `-q` specifies a query operation, and `-a` means that you want
+The flag `-q` specifies a query operation, `-a` means that you want
to show the “available” (i.e., installable) packages, as opposed to the
-installed packages. If you downloaded Nixpkgs yourself, or if you
-checked it out from GitHub, then you need to pass the path to your
-Nixpkgs tree using the `-f` flag:
+installed packages, and `-P` prints the attribute paths that can be used
+to unambiguously select a package for installation (listed in the first column).
+If you downloaded Nixpkgs yourself, or if you checked it out from GitHub,
+then you need to pass the path to your Nixpkgs tree using the `-f` flag:
```console
-$ nix-env -qaf /path/to/nixpkgs
+$ nix-env -qaPf /path/to/nixpkgs
+aterm aterm-2.2
+bash bash-3.0
+…
```
where */path/to/nixpkgs* is where you’ve unpacked or checked out
Nixpkgs.
-You can select specific packages by name:
+You can filter the packages by name:
```console
-$ nix-env -qa firefox
-firefox-34.0.5
-firefox-with-plugins-34.0.5
+$ nix-env -qaP firefox
+nixpkgs.firefox-esr firefox-91.3.0esr
+nixpkgs.firefox firefox-94.0.1
```
and using regular expressions:
```console
-$ nix-env -qa 'firefox.*'
+$ nix-env -qaP 'firefox.*'
```
It is also possible to see the *status* of available packages, i.e.,
@@ -89,11 +93,11 @@ whether they are installed into the user environment and/or present in
the system:
```console
-$ nix-env -qas
+$ nix-env -qaPs
--PS bash-3.0
---S binutils-2.15
-IPS bison-1.875d
+-PS nixpkgs.bash bash-3.0
+--S nixpkgs.binutils binutils-2.15
+IPS nixpkgs.bison bison-1.875d
```
@@ -106,13 +110,13 @@ which is Nix’s mechanism for doing binary deployment. It just means that
Nix knows that it can fetch a pre-built package from somewhere
(typically a network server) instead of building it locally.
-You can install a package using `nix-env -i`. For instance,
+You can install a package using `nix-env -iA`. For instance,
```console
-$ nix-env -i subversion
+$ nix-env -iA nixpkgs.subversion
```
-will install the package called `subversion` (which is, of course, the
+will install the package called `subversion` from the `nixpkgs` channel (which is, of course, the
[Subversion version management system](http://subversion.tigris.org/)).
> **Note**
@@ -122,7 +126,7 @@ will install the package called `subversion` (which is, of course, the
> binary cache <https://cache.nixos.org>; it contains binaries for most
> packages in Nixpkgs. Only if no binary is available in the binary
> cache, Nix will build the package from source. So if `nix-env
-> -i subversion` results in Nix building stuff from source, then either
+> -iA nixpkgs.subversion` results in Nix building stuff from source, then either
> the package is not built for your platform by the Nixpkgs build
> servers, or your version of Nixpkgs is too old or too new. For
> instance, if you have a very recent checkout of Nixpkgs, then the
@@ -133,7 +137,10 @@ will install the package called `subversion` (which is, of course, the
> using a Git checkout of the Nixpkgs tree), you will get binaries for
> most packages.
-Naturally, packages can also be uninstalled:
+Naturally, packages can also be uninstalled. Unlike when installing, you will
+need to use the derivation name (though the version part can be omitted)
+instead of the attribute path, as `nix-env` does not record which attribute
+was used to install it:
```console
$ nix-env -e subversion
@@ -143,7 +150,7 @@ Upgrading to a new version is just as easy. If you have a new release of
Nix Packages, you can do:
```console
-$ nix-env -u subversion
+$ nix-env -uA nixpkgs.subversion
```
This will *only* upgrade Subversion if there is a “newer” version in the
diff --git a/doc/manual/src/package-management/binary-cache-substituter.md b/doc/manual/src/package-management/binary-cache-substituter.md
index bdc5038fc..ef738794b 100644
--- a/doc/manual/src/package-management/binary-cache-substituter.md
+++ b/doc/manual/src/package-management/binary-cache-substituter.md
@@ -9,7 +9,7 @@ The daemon that handles binary cache requests via HTTP, `nix-serve`, is
not part of the Nix distribution, but you can install it from Nixpkgs:
```console
-$ nix-env -i nix-serve
+$ nix-env -iA nixpkgs.nix-serve
```
You can then start the server, listening for HTTP connections on
@@ -35,7 +35,7 @@ On the client side, you can tell Nix to use your binary cache using
`--option extra-binary-caches`, e.g.:
```console
-$ nix-env -i firefox --option extra-binary-caches http://avalon:8080/
+$ nix-env -iA nixpkgs.firefox --option extra-binary-caches http://avalon:8080/
```
The option `extra-binary-caches` tells Nix to use this binary cache in
diff --git a/doc/manual/src/package-management/garbage-collection.md b/doc/manual/src/package-management/garbage-collection.md
index fecb30fd6..29a3b3101 100644
--- a/doc/manual/src/package-management/garbage-collection.md
+++ b/doc/manual/src/package-management/garbage-collection.md
@@ -44,7 +44,7 @@ collector as follows:
$ nix-store --gc
```
-The behaviour of the gargage collector is affected by the
+The behaviour of the garbage collector is affected by the
`keep-derivations` (default: true) and `keep-outputs` (default: false)
options in the Nix configuration file. The defaults will ensure that all
derivations that are build-time dependencies of garbage collector roots
diff --git a/doc/manual/src/package-management/profiles.md b/doc/manual/src/package-management/profiles.md
index fbbfb7320..d1a2580d4 100644
--- a/doc/manual/src/package-management/profiles.md
+++ b/doc/manual/src/package-management/profiles.md
@@ -39,7 +39,7 @@ just Subversion 1.1.2 (arrows in the figure indicate symlinks). This
would be what we would obtain if we had done
```console
-$ nix-env -i subversion
+$ nix-env -iA nixpkgs.subversion
```
on a set of Nix expressions that contained Subversion 1.1.2.
@@ -54,7 +54,7 @@ environment is generated based on the current one. For instance,
generation 43 was created from generation 42 when we did
```console
-$ nix-env -i subversion firefox
+$ nix-env -iA nixpkgs.subversion nixpkgs.firefox
```
on a set of Nix expressions that contained Firefox and a new version of
@@ -127,7 +127,7 @@ All `nix-env` operations work on the profile pointed to by
(abbreviation `-p`):
```console
-$ nix-env -p /nix/var/nix/profiles/other-profile -i subversion
+$ nix-env -p /nix/var/nix/profiles/other-profile -iA nixpkgs.subversion
```
This will *not* change the `~/.nix-profile` symlink.
diff --git a/doc/manual/src/package-management/ssh-substituter.md b/doc/manual/src/package-management/ssh-substituter.md
index 6e5e258bc..c59933f61 100644
--- a/doc/manual/src/package-management/ssh-substituter.md
+++ b/doc/manual/src/package-management/ssh-substituter.md
@@ -6,7 +6,7 @@ automatically fetching any store paths in Firefox’s closure if they are
available on the server `avalon`:
```console
-$ nix-env -i firefox --substituters ssh://alice@avalon
+$ nix-env -iA nixpkgs.firefox --substituters ssh://alice@avalon
```
This works similarly to the binary cache substituter that Nix usually
diff --git a/doc/manual/src/quick-start.md b/doc/manual/src/quick-start.md
index 71205923b..b54e73500 100644
--- a/doc/manual/src/quick-start.md
+++ b/doc/manual/src/quick-start.md
@@ -19,19 +19,19 @@ to subsequent chapters.
channel:
```console
- $ nix-env -qa
- docbook-xml-4.3
- docbook-xml-4.5
- firefox-33.0.2
- hello-2.9
- libxslt-1.1.28
+ $ nix-env -qaP
+ nixpkgs.docbook_xml_dtd_43 docbook-xml-4.3
+ nixpkgs.docbook_xml_dtd_45 docbook-xml-4.5
+ nixpkgs.firefox firefox-33.0.2
+ nixpkgs.hello hello-2.9
+ nixpkgs.libxslt libxslt-1.1.28
```
1. Install some packages from the channel:
```console
- $ nix-env -i hello
+ $ nix-env -iA nixpkgs.hello
```
This should download pre-built packages; it should not build them
diff --git a/doc/manual/src/release-notes/rl-2.4.md b/doc/manual/src/release-notes/rl-2.4.md
index f7ab9f6ad..70b715053 100644
--- a/doc/manual/src/release-notes/rl-2.4.md
+++ b/doc/manual/src/release-notes/rl-2.4.md
@@ -1,8 +1,539 @@
-# Release 2.4 (202X-XX-XX)
-
- - It is now an error to modify the `plugin-files` setting via a
- command-line flag that appears after the first non-flag argument
- to any command, including a subcommand to `nix`. For example,
- `nix-instantiate default.nix --plugin-files ""` must now become
- `nix-instantiate --plugin-files "" default.nix`.
- - Plugins that add new `nix` subcommands are now actually respected.
+# Release 2.4 (2021-11-01)
+
+This is the first release in more than two years and is the result of
+more than 2800 commits from 195 contributors since release 2.3.
+
+## Highlights
+
+* Nix's **error messages** have been improved a lot. For instance,
+ evaluation errors now point out the location of the error:
+
+ ```
+ $ nix build
+ error: undefined variable 'bzip3'
+
+ at /nix/store/449lv242z0zsgwv95a8124xi11sp419f-source/flake.nix:88:13:
+
+ 87| [ curl
+ 88| bzip3 xz brotli editline
+ | ^
+ 89| openssl sqlite
+ ```
+
+* The **`nix` command** has seen a lot of work and is now almost at
+ feature parity with the old command-line interface (the `nix-*`
+ commands). It aims to be [more modern, consistent and pleasant to
+ use](../contributing/cli-guideline.md) than the old CLI. It is still
+ marked as experimental but its interface should not change much
+ anymore in future releases.
+
+* **Flakes** are a new format to package Nix-based projects in a more
+ discoverable, composable, consistent and reproducible way. A flake
+ is just a repository or tarball containing a file named `flake.nix`
+ that specifies dependencies on other flakes and returns any Nix
+ assets such as packages, Nixpkgs overlays, NixOS modules or CI
+ tests. The new `nix` CLI is primarily based around flakes; for
+ example, a command like `nix run nixpkgs#hello` runs the `hello`
+ application from the `nixpkgs` flake.
+
+ Flakes are currently marked as experimental. For an introduction,
+ see [this blog
+ post](https://www.tweag.io/blog/2020-05-25-flakes/). For detailed
+ information about flake syntax and semantics, see the [`nix flake`
+ manual page](../command-ref/new-cli/nix3-flake.md).
+
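+ As an illustrative sketch (not taken from the release notes), a minimal
+ `flake.nix` looks something like this:
+
+ ```nix
+ {
+   description = "A minimal flake";
+
+   inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-21.05";
+
+   outputs = { self, nixpkgs }: {
+     packages.x86_64-linux.hello =
+       nixpkgs.legacyPackages.x86_64-linux.hello;
+   };
+ }
+ ```
+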
+* Nix's store can now be **content-addressed**, meaning that the hash
+ component of a store path is the hash of the path's
+ contents. Previously Nix could only build **input-addressed** store
+ paths, where the hash is computed from the derivation dependency
+ graph. Content-addressing allows deduplication, early cutoff in
+ build systems, and unprivileged closure copying. This is still [an
+ experimental
+ feature](https://discourse.nixos.org/t/content-addressed-nix-call-for-testers/12881).
+
+* The Nix manual has been converted into Markdown, making it easier to
+ contribute. In addition, every `nix` subcommand now has a manual
+ page, documenting every option.
+
+* A new setting allows **experimental features** to be enabled
+ selectively. This allows us to merge unstable features into Nix more
+ quickly and do more frequent releases.
+
+## Other features
+
+* There are many new `nix` subcommands:
+
+ - `nix develop` is intended to replace `nix-shell`. It has a number
+ of new features:
+
+ * It automatically sets the output environment variables (such as
+ `$out`) to writable locations (such as `./outputs/out`).
+
+ * It can store the environment in a profile. This is useful for
+ offline work.
+
+ * It can run specific phases directly. For instance, `nix develop
+ --build` runs `buildPhase`.
+
+ * It allows dependencies in the Nix store to be "redirected" to
+ arbitrary directories using the `--redirect` flag. This is
+ useful if you want to hack on a package *and* some of its
+ dependencies at the same time.
+
+ - `nix print-dev-env` prints the environment variables and bash
+ functions defined by a derivation. This is useful for users of
+ other shells than bash (especially with `--json`).
+
+ - `nix shell` was previously named `nix run` and is intended to
+ replace `nix-shell -p`, but without the `stdenv` overhead. It
+ simply starts a shell where some packages have been added to
+ `$PATH`.
+
+ - `nix run` (not to be confused with the old subcommand that has
+ been renamed to `nix shell`) runs an "app", a flake output that
+ specifies a command to run, or an eponymous program from a
+ package. For example, `nix run nixpkgs#hello` runs the `hello`
+ program from the `hello` package in `nixpkgs`.
+
+ - `nix flake` is the container for flake-related operations, such as
+ creating a new flake, querying the contents of a flake or updating
+ flake lock files.
+
+ - `nix registry` allows you to query and update the flake registry,
+ which maps identifiers such as `nixpkgs` to concrete flake URLs.
+
+ - `nix profile` is intended to replace `nix-env`. Its main advantage
+ is that it keeps track of the provenance of installed packages
+ (e.g. exactly which flake version a package came from). It also
+ has some helpful subcommands:
+
+ * `nix profile history` shows what packages were added, upgraded
+ or removed between each version of a profile.
+
+ * `nix profile diff-closures` shows the changes between the
+ closures of each version of a profile. This allows you to
+ discover the addition or removal of dependencies or size
+ changes.
+
+ **Warning**: after a profile has been updated using `nix profile`,
+ it is no longer usable with `nix-env`.
+
+ - `nix store diff-closures` shows the differences between the
+ closures of two store paths in terms of the versions and sizes of
+ dependencies in the closures.
+
+ - `nix store make-content-addressable` rewrites an arbitrary closure
+ to make it content-addressed. Such paths can be copied into other
+ stores without requiring signatures.
+
+ - `nix bundle` uses the [`nix-bundle`
+ program](https://github.com/matthewbauer/nix-bundle) to convert a
+ closure into a self-extracting executable.
+
+ - Various other replacements for the old CLI, e.g. `nix store gc`,
+ `nix store delete`, `nix store repair`, `nix nar dump-path`, `nix
+ store prefetch-file`, `nix store prefetch-tarball`, `nix key` and
+ `nix daemon`.
+
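+ As a short illustrative session (the `./result-*` symlinks are
+ hypothetical build outputs):
+
+ ```console
+ $ nix develop                    # enter a development shell for the current flake
+ $ nix profile install nixpkgs#hello
+ $ nix profile history
+ $ nix store diff-closures ./result-old ./result-new
+ ```
+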
+* Nix now has an **evaluation cache** for flake outputs. For example,
+ a second invocation of the command `nix run nixpkgs#firefox` will
+ not need to evaluate the `firefox` attribute because it's already in
+ the evaluation cache. This is made possible by the hermetic
+ evaluation model of flakes.
+
+* The new `--offline` flag disables substituters and causes all
+ locally cached tarballs and repositories to be considered
+ up-to-date.
+
+* The new `--refresh` flag causes all locally cached tarballs and
+ repositories to be considered out-of-date.
+
+* Many `nix` subcommands now have a `--json` option to produce
+ machine-readable output.
+
+* `nix repl` has a new `:doc` command to show documentation about
+ builtin functions (e.g. `:doc builtins.map`).
+
+* Binary cache stores now have an option `index-debug-info` to create
+ an index of DWARF debuginfo files for use by
+ [`dwarffs`](https://github.com/edolstra/dwarffs).
+
+* To support flakes, Nix now has an extensible mechanism for fetching
+ source trees. Currently it has the following backends:
+
+ * Git repositories
+
+ * Mercurial repositories
+
+ * GitHub and GitLab repositories (an optimisation for faster
+ fetching than Git)
+
+ * Tarballs
+
+ * Arbitrary directories
+
+ The fetcher infrastructure is exposed via flake input specifications
+ and via the `fetchTree` built-in.
+
+* **Language changes**: the only new language feature is that you can
+ now have antiquotations in paths, e.g. `./${foo}` instead of `./. +
+ foo`.
+
+* **New built-in functions**:
+
+ - `builtins.fetchTree` allows fetching a source tree using any
+ backends supported by the fetcher infrastructure. It subsumes the
+ functionality of existing built-ins like `fetchGit`,
+ `fetchMercurial` and `fetchTarball`.
+
+ - `builtins.getFlake` fetches a flake and returns its output
+ attributes. This function should not be used inside flakes! Use
+ flake inputs instead.
+
+ - `builtins.floor` and `builtins.ceil` round a floating-point number
+ down and up, respectively.
+
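+ For instance, in `nix repl` (an illustrative session):
+
+ ```
+ nix-repl> builtins.floor 3.7
+ 3
+
+ nix-repl> builtins.ceil 3.2
+ 4
+ ```
+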
+* Experimental support for recursive Nix. This means that Nix
+ derivations can now call Nix to build other derivations. This is not
+ in a stable state yet and not well
+ [documented](https://github.com/NixOS/nix/commit/c4d7c76b641d82b2696fef73ce0ac160043c18da).
+
+* The new experimental feature `no-url-literals` disables URL
+ literals. This helps to implement [RFC
+ 45](https://github.com/NixOS/rfcs/pull/45).
+
+* Nix now uses `libarchive` to decompress and unpack tarballs and zip
+ files, so `tar` is no longer required.
+
+* The priority of substituters can now be overridden using the
+ `priority` substituter setting (e.g. `--substituters
+ 'http://cache.nixos.org?priority=100 daemon?priority=10'`).
+
+* `nix edit` now supports non-derivation attributes, e.g. `nix edit
+ .#nixosConfigurations.bla`.
+
+* The `nix` command now provides command line completion for `bash`,
+ `zsh` and `fish`. Since the support for getting completions is built
+ into `nix`, it's easy to add support for other shells.
+
+* The new `--log-format` flag selects what Nix's output looks like. It
+ defaults to a terse progress indicator. There is a new
+ `internal-json` output format for use by other programs.
+
+* `nix eval` has a new `--apply` flag that applies a function to the
+ evaluation result.
+
+* `nix eval` has a new `--write-to` flag that allows it to write a
+ nested attribute set of string leaves to a corresponding directory
+ tree.
+
+* Memory improvements: many operations that add paths to the store or
+ copy paths between stores now run in constant memory.
+
+* Many `nix` commands now support the flag `--derivation` to operate
+ on a `.drv` file itself instead of its outputs.
+
+* There is a new store called `dummy://` that does not support
+ building or adding paths. This is useful if you want to use the Nix
+ evaluator but don't have a Nix store.
+
+* The `ssh-ng://` store now allows substituting paths on the remote,
+ as `ssh://` already did.
+
+* When auto-calling a function with an ellipsis, all arguments are now
+ passed.
+
+* New `nix-shell` features:
+
+ - It preserves the `PS1` environment variable if
+ `NIX_SHELL_PRESERVE_PROMPT` is set.
+
+ - With `-p`, it passes any `--arg`s as Nixpkgs arguments.
+
+ - Support for structured attributes.
+
+* `nix-prefetch-url` has a new `--executable` flag.
+
+* On `x86_64` systems, [`x86_64` microarchitecture
+ levels](https://lwn.net/Articles/844831/) are mapped to additional
+ system types (e.g. `x86_64-v1-linux`).
+
+* The new `--eval-store` flag allows you to use a different store for
+ evaluation than for building or storing the build result. This is
+ primarily useful when you want to query whether something exists in
+ a read-only store, such as a binary cache:
+
+ ```
+ # nix path-info --json --store https://cache.nixos.org \
+ --eval-store auto nixpkgs#hello
+ ```
+
+ (Here `auto` indicates the local store.)
+
+* The Nix daemon has a new low-latency mechanism for copying
+ closures. This is useful when building on remote stores such as
+ `ssh-ng://`.
+
+* Plugins can now register `nix` subcommands.
+
+## Incompatible changes
+
+* The `nix` command is now marked as an experimental feature. This
+ means that you need to add
+
+ ```
+ experimental-features = nix-command
+ ```
+
+ to your `nix.conf` if you want to use it, or pass
+ `--extra-experimental-features nix-command` on the command line.
+
+* The `nix` command no longer has a syntax for referring to packages
+ in a channel. This means that the following no longer works:
+
+ ```console
+ nix build nixpkgs.hello # Nix 2.3
+ ```
+
+ Instead, you can either use the `#` syntax to select a package from
+ a flake, e.g.
+
+ ```console
+ nix build nixpkgs#hello
+ ```
+
+ Or, if you want to use the `nixpkgs` channel in the `NIX_PATH`
+ environment variable:
+
+ ```console
+ nix build -f '<nixpkgs>' hello
+ ```
+
+* The old `nix run` has been renamed to `nix shell`, while there is a
+ new `nix run` that runs a default command. So instead of
+
+ ```console
+ nix run nixpkgs.hello -c hello # Nix 2.3
+ ```
+
+ you should use
+
+ ```console
+ nix shell nixpkgs#hello -c hello
+ ```
+
+ or just
+
+ ```console
+ nix run nixpkgs#hello
+ ```
+
+ if the command you want to run has the same name as the package.
+
+* It is now an error to modify the `plugin-files` setting via a
+ command-line flag that appears after the first non-flag argument to
+ any command, including a subcommand to `nix`. For example,
+ `nix-instantiate default.nix --plugin-files ""` must now become
+ `nix-instantiate --plugin-files "" default.nix`.
+
+* We no longer release source tarballs. If you want to build from
+ source, please build from the tags in the Git repository.
+
+## Contributors
+
+This release has contributions from
+Adam Höse,
+Albert Safin,
+Alex Kovar,
+Alex Zero,
+Alexander Bantyev,
+Alexandre Esteves,
+Alyssa Ross,
+Anatole Lucet,
+Anders Kaseorg,
+Andreas Rammhold,
+Antoine Eiche,
+Antoine Martin,
+Arnout Engelen,
+Arthur Gautier,
+aszlig,
+Ben Burdette,
+Benjamin Hipple,
+Bernardo Meurer,
+Björn Gohla,
+Bjørn Forsman,
+Bob van der Linden,
+Brian Leung,
+Brian McKenna,
+Brian Wignall,
+Bruce Toll,
+Bryan Richter,
+Calle Rosenquist,
+Calvin Loncaric,
+Carlo Nucera,
+Carlos D'Agostino,
+Chaz Schlarp,
+Christian Höppner,
+Christian Kampka,
+Chua Hou,
+Chuck,
+Cole Helbling,
+Daiderd Jordan,
+Dan Callahan,
+Dani,
+Daniel Fitzpatrick,
+Danila Fedorin,
+Daniël de Kok,
+Danny Bautista,
+DavHau,
+David McFarland,
+Dima,
+Domen Kožar,
+Dominik Schrempf,
+Dominique Martinet,
+dramforever,
+Dustin DeWeese,
+edef,
+Eelco Dolstra,
+Emilio Karakey,
+Emily,
+Eric Culp,
+Ersin Akinci,
+Fabian Möller,
+Farid Zakaria,
+Federico Pellegrin,
+Finn Behrens,
+Florian Franzen,
+Félix Baylac-Jacqué,
+Gabriel Gonzalez,
+Geoff Reedy,
+Georges Dubus,
+Graham Christensen,
+Greg Hale,
+Greg Price,
+Gregor Kleen,
+Gregory Hale,
+Griffin Smith,
+Guillaume Bouchard,
+Harald van Dijk,
+illustris,
+Ivan Zvonimir Horvat,
+Jade,
+Jake Waksbaum,
+jakobrs,
+James Ottaway,
+Jan Tojnar,
+Janne Heß,
+Jaroslavas Pocepko,
+Jarrett Keifer,
+Jeremy Schlatter,
+Joachim Breitner,
+Joe Hermaszewski,
+Joe Pea,
+John Ericson,
+Jonathan Ringer,
+Josef Kemetmüller,
+Joseph Lucas,
+Jude Taylor,
+Julian Stecklina,
+Julien Tanguy,
+Jörg Thalheim,
+Kai Wohlfahrt,
+keke,
+Keshav Kini,
+Kevin Quick,
+Kevin Stock,
+Kjetil Orbekk,
+Krzysztof Gogolewski,
+kvtb,
+Lars Mühmel,
+Leonhard Markert,
+Lily Ballard,
+Linus Heckemann,
+Lorenzo Manacorda,
+Lucas Desgouilles,
+Lucas Franceschino,
+Lucas Hoffmann,
+Luke Granger-Brown,
+Madeline Haraj,
+Marwan Aljubeh,
+Mat Marini,
+Mateusz Piotrowski,
+Matthew Bauer,
+Matthew Kenigsberg,
+Mauricio Scheffer,
+Maximilian Bosch,
+Michael Adler,
+Michael Bishop,
+Michael Fellinger,
+Michael Forney,
+Michael Reilly,
+mlatus,
+Mykola Orliuk,
+Nathan van Doorn,
+Naïm Favier,
+ng0,
+Nick Van den Broeck,
+Nicolas Stig124 Formichella,
+Niels Egberts,
+Niklas Hambüchen,
+Nikola Knezevic,
+oxalica,
+p01arst0rm,
+Pamplemousse,
+Patrick Hilhorst,
+Paul Opiyo,
+Pavol Rusnak,
+Peter Kolloch,
+Philipp Bartsch,
+Philipp Middendorf,
+Piotr Szubiakowski,
+Profpatsch,
+Puck Meerburg,
+Ricardo M. Correia,
+Rickard Nilsson,
+Robert Hensing,
+Robin Gloster,
+Rodrigo,
+Rok Garbas,
+Ronnie Ebrin,
+Rovanion Luckey,
+Ryan Burns,
+Ryan Mulligan,
+Ryne Everett,
+Sam Doshi,
+Sam Lidder,
+Samir Talwar,
+Samuel Dionne-Riel,
+Sebastian Ullrich,
+Sergei Trofimovich,
+Sevan Janiyan,
+Shao Cheng,
+Shea Levy,
+Silvan Mosberger,
+Stefan Frijters,
+Stefan Jaax,
+sternenseemann,
+Steven Shaw,
+Stéphan Kochen,
+SuperSandro2000,
+Suraj Barkale,
+Taeer Bar-Yam,
+Thomas Churchman,
+Théophane Hufschmitt,
+Timothy DeHerrera,
+Timothy Klim,
+Tobias Möst,
+Tobias Pflug,
+Tom Bereknyei,
+Travis A. Everett,
+Ujjwal Jain,
+Vladimír Čunát,
+Wil Taylor,
+Will Dietz,
+Yaroslav Bolyukin,
+Yestin L. Harrison,
+YI,
+Yorick van Pelt,
+Yuriy Taraday and
+zimbatm.
diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md
new file mode 100644
index 000000000..26c7d2cce
--- /dev/null
+++ b/doc/manual/src/release-notes/rl-next.md
@@ -0,0 +1,7 @@
+# Release 2.5 (2021-XX-XX)
+
+* Binary cache stores now have a setting `compression-level`.
+
+* `nix develop` now has a flag `--unpack` to run `unpackPhase`.
+
+* Lists can now be compared lexicographically using the `<` operator.
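+
+  As an illustration, since the first elements are equal and `2 < 3`:
+
+  ```console
+  $ nix eval --expr '[ 1 2 ] < [ 1 3 ]'
+  true
+  ```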
diff --git a/doc/manual/highlight.pack.js b/doc/manual/theme/highlight.js
index fba8b4a5a..fba8b4a5a 100644
--- a/doc/manual/highlight.pack.js
+++ b/doc/manual/theme/highlight.js
diff --git a/docker.nix b/docker.nix
new file mode 100644
index 000000000..2a13c23fb
--- /dev/null
+++ b/docker.nix
@@ -0,0 +1,251 @@
+{ pkgs ? import <nixpkgs> { }
+, lib ? pkgs.lib
+, name ? "nix"
+, tag ? "latest"
+, channelName ? "nixpkgs"
+, channelURL ? "https://nixos.org/channels/nixpkgs-unstable"
+}:
+let
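+  # The minimal set of packages installed into the image's default profile.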
+ defaultPkgs = with pkgs; [
+ nix
+ bashInteractive
+ coreutils-full
+ gnutar
+ gzip
+ gnugrep
+ which
+ curl
+ less
+ wget
+ man
+ cacert.out
+ findutils
+ ];
+
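+  # Static user database for the image's /etc/passwd: root plus 32 Nix
+  # build users (uid 30001-30032, group nixbld).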
+ users = {
+
+ root = {
+ uid = 0;
+ shell = "/bin/bash";
+ home = "/root";
+ gid = 0;
+ };
+
+ } // lib.listToAttrs (
+ map
+ (
+ n: {
+ name = "nixbld${toString n}";
+ value = {
+ uid = 30000 + n;
+ gid = 30000;
+ groups = [ "nixbld" ];
+ description = "Nix build user ${toString n}";
+ };
+ }
+ )
+ (lib.lists.range 1 32)
+ );
+
+ groups = {
+ root.gid = 0;
+ nixbld.gid = 30000;
+ };
+
+ userToPasswd = (
+ k:
+ { uid
+ , gid ? 65534
+ , home ? "/var/empty"
+ , description ? ""
+ , shell ? "/bin/false"
+ , groups ? [ ]
+ }: "${k}:x:${toString uid}:${toString gid}:${description}:${home}:${shell}"
+ );
+ passwdContents = (
+ lib.concatStringsSep "\n"
+ (lib.attrValues (lib.mapAttrs userToPasswd users))
+ );
+
+ userToShadow = k: { ... }: "${k}:!:1::::::";
+ shadowContents = (
+ lib.concatStringsSep "\n"
+ (lib.attrValues (lib.mapAttrs userToShadow users))
+ );
+
+ # Map groups to members
+ # {
+ # group = [ "user1" "user2" ];
+ # }
+ groupMemberMap = (
+ let
+ # Create a flat list of user/group mappings
+ mappings = (
+ builtins.foldl'
+ (
+ acc: user:
+ let
+ groups = users.${user}.groups or [ ];
+ in
+ acc ++ map
+ (group: {
+ inherit user group;
+ })
+ groups
+ )
+ [ ]
+ (lib.attrNames users)
+ );
+ in
+ (
+ builtins.foldl'
+ (
+ acc: v: acc // {
+ ${v.group} = acc.${v.group} or [ ] ++ [ v.user ];
+ }
+ )
+ { }
+ mappings)
+ );
+
+ groupToGroup = k: { gid }:
+ let
+ members = groupMemberMap.${k} or [ ];
+ in
+ "${k}:x:${toString gid}:${lib.concatStringsSep "," members}";
+ groupContents = (
+ lib.concatStringsSep "\n"
+ (lib.attrValues (lib.mapAttrs groupToGroup groups))
+ );
+
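+  # Nix settings baked into the image's /etc/nix/nix.conf.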
+ nixConf = {
+ sandbox = "false";
+ build-users-group = "nixbld";
+ trusted-public-keys = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";
+ };
+ nixConfContents = (lib.concatStringsSep "\n" (lib.mapAttrsFlatten (n: v: "${n} = ${v}") nixConf)) + "\n";
+
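+  # Assembles the image's root filesystem: /etc files, the default root
+  # profile, a nixpkgs channel, and convenience symlinks.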
+ baseSystem =
+ let
+ nixpkgs = pkgs.path;
+ channel = pkgs.runCommand "channel-nixos" { } ''
+ mkdir $out
+ ln -s ${nixpkgs} $out/nixpkgs
+ echo "[]" > $out/manifest.nix
+ '';
+ rootEnv = pkgs.buildPackages.buildEnv {
+ name = "root-profile-env";
+ paths = defaultPkgs;
+ };
+ profile = pkgs.buildPackages.runCommand "user-environment" { } ''
+ mkdir $out
+ cp -a ${rootEnv}/* $out/
+
+ cat > $out/manifest.nix <<EOF
+ [
+ ${lib.concatStringsSep "\n" (builtins.map (drv: let
+ outputs = drv.outputsToInstall or [ "out" ];
+ in ''
+ {
+ ${lib.concatStringsSep "\n" (builtins.map (output: ''
+ ${output} = { outPath = "${lib.getOutput output drv}"; };
+ '') outputs)}
+ outputs = [ ${lib.concatStringsSep " " (builtins.map (x: "\"${x}\"") outputs)} ];
+ name = "${drv.name}";
+ outPath = "${drv}";
+ system = "${drv.system}";
+ type = "derivation";
+ meta = { };
+ }
+ '') defaultPkgs)}
+ ]
+ EOF
+ '';
+ in
+ pkgs.runCommand "base-system"
+ {
+ inherit passwdContents groupContents shadowContents nixConfContents;
+ passAsFile = [
+ "passwdContents"
+ "groupContents"
+ "shadowContents"
+ "nixConfContents"
+ ];
+ allowSubstitutes = false;
+ preferLocalBuild = true;
+ } ''
+ env
+ set -x
+ mkdir -p $out/etc
+
+ cat $passwdContentsPath > $out/etc/passwd
+ echo "" >> $out/etc/passwd
+
+ cat $groupContentsPath > $out/etc/group
+ echo "" >> $out/etc/group
+
+ cat $shadowContentsPath > $out/etc/shadow
+ echo "" >> $out/etc/shadow
+
+ mkdir -p $out/usr
+ ln -s /nix/var/nix/profiles/share $out/usr/
+
+ mkdir -p $out/nix/var/nix/gcroots
+
+ mkdir $out/tmp
+
+ mkdir -p $out/etc/nix
+ cat $nixConfContentsPath > $out/etc/nix/nix.conf
+
+ mkdir -p $out/root
+ mkdir -p $out/nix/var/nix/profiles/per-user/root
+
+ ln -s ${profile} $out/nix/var/nix/profiles/default-1-link
+ ln -s $out/nix/var/nix/profiles/default-1-link $out/nix/var/nix/profiles/default
+ ln -s /nix/var/nix/profiles/default $out/root/.nix-profile
+
+ ln -s ${channel} $out/nix/var/nix/profiles/per-user/root/channels-1-link
+ ln -s $out/nix/var/nix/profiles/per-user/root/channels-1-link $out/nix/var/nix/profiles/per-user/root/channels
+
+ mkdir -p $out/root/.nix-defexpr
+ ln -s $out/nix/var/nix/profiles/per-user/root/channels $out/root/.nix-defexpr/channels
+ echo "${channelURL} ${channelName}" > $out/root/.nix-channels
+
+ mkdir -p $out/bin $out/usr/bin
+ ln -s ${pkgs.coreutils}/bin/env $out/usr/bin/env
+ ln -s ${pkgs.bashInteractive}/bin/bash $out/bin/sh
+ '';
+
+in
+pkgs.dockerTools.buildLayeredImageWithNixDb {
+
+ inherit name tag;
+
+ contents = [ baseSystem ];
+
+ extraCommands = ''
+ rm -rf nix-support
+ ln -s /nix/var/nix/profiles nix/var/nix/gcroots/profiles
+ '';
+
+ config = {
+ Cmd = [ "/root/.nix-profile/bin/bash" ];
+ Env = [
+ "USER=root"
+ "PATH=${lib.concatStringsSep ":" [
+ "/root/.nix-profile/bin"
+ "/nix/var/nix/profiles/default/bin"
+ "/nix/var/nix/profiles/default/sbin"
+ ]}"
+ "MANPATH=${lib.concatStringsSep ":" [
+ "/root/.nix-profile/share/man"
+ "/nix/var/nix/profiles/default/share/man"
+ ]}"
+ "SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
+ "GIT_SSL_CAINFO=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
+ "NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"
+ "NIX_PATH=/nix/var/nix/profiles/per-user/root/channels:/root/.nix-defexpr/channels"
+ ];
+ };
+
+}
diff --git a/flake.lock b/flake.lock
index 06c507e7d..861af1c54 100644
--- a/flake.lock
+++ b/flake.lock
@@ -3,32 +3,31 @@
"lowdown-src": {
"flake": false,
"locked": {
- "lastModified": 1617481909,
- "narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=",
+ "lastModified": 1633514407,
+ "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
"owner": "kristapsdz",
"repo": "lowdown",
- "rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d",
+ "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
"type": "github"
},
"original": {
"owner": "kristapsdz",
- "ref": "VERSION_0_8_4",
"repo": "lowdown",
"type": "github"
}
},
"nixpkgs": {
"locked": {
- "lastModified": 1614309161,
- "narHash": "sha256-93kRxDPyEW9QIpxU71kCaV1r+hgOgP6/aVgC7vvO8IU=",
+ "lastModified": 1632864508,
+ "narHash": "sha256-d127FIvGR41XbVRDPVvozUPQ/uRHbHwvfyKHwEt5xFM=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "0e499fde7af3c28d63e9b13636716b86c3162b93",
+ "rev": "82891b5e2c2359d7e58d08849e4c89511ab94234",
"type": "github"
},
"original": {
"id": "nixpkgs",
- "ref": "nixos-20.09-small",
+ "ref": "nixos-21.05-small",
"type": "indirect"
}
},
diff --git a/flake.nix b/flake.nix
index ebaafb049..ff152ebd6 100644
--- a/flake.nix
+++ b/flake.nix
@@ -1,8 +1,8 @@
{
description = "The purely functional package manager";
- inputs.nixpkgs.url = "nixpkgs/nixos-20.09-small";
- inputs.lowdown-src = { url = "github:kristapsdz/lowdown/VERSION_0_8_4"; flake = false; };
+ inputs.nixpkgs.url = "nixpkgs/nixos-21.05-small";
+ inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; };
outputs = { self, nixpkgs, lowdown-src }:
@@ -18,7 +18,9 @@
linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ];
linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
- systems = linuxSystems ++ [ "x86_64-darwin" ];
+ systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ];
+
+ crossSystems = [ "armv6l-linux" "armv7l-linux" ];
forAllSystems = f: nixpkgs.lib.genAttrs systems (system: f system);
@@ -59,6 +61,7 @@
configureFlags =
lib.optionals stdenv.isLinux [
+ "--with-boost=${boost}/lib"
"--with-sandbox-shell=${sh}/bin/busybox"
"LDFLAGS=-fuse-ld=gold"
];
@@ -68,7 +71,7 @@
[
buildPackages.bison
buildPackages.flex
- (lib.getBin buildPackages.lowdown)
+ (lib.getBin buildPackages.lowdown-nix)
buildPackages.mdbook
buildPackages.autoconf-archive
buildPackages.autoreconfHook
@@ -76,10 +79,10 @@
# Tests
buildPackages.git
- buildPackages.mercurial
+ buildPackages.mercurial # FIXME: remove? only needed for tests
buildPackages.jq
]
- ++ lib.optionals stdenv.isLinux [(pkgs.util-linuxMinimal or pkgs.utillinuxMinimal)];
+ ++ lib.optionals stdenv.hostPlatform.isLinux [(buildPackages.util-linuxMinimal or buildPackages.utillinuxMinimal)];
buildDeps =
[ curl
@@ -87,13 +90,12 @@
openssl sqlite
libarchive
boost
- nlohmann_json
- lowdown
- gmock
+ lowdown-nix
+ gtest
]
++ lib.optionals stdenv.isLinux [libseccomp]
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
- ++ lib.optional stdenv.isx86_64 libcpuid;
+ ++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid;
awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin)
(aws-sdk-cpp.override {
@@ -102,7 +104,13 @@
});
propagatedDeps =
- [ (boehmgc.override { enableLargeConfig = true; })
+ [ ((boehmgc.override {
+ enableLargeConfig = true;
+ }).overrideAttrs(o: {
+ patches = (o.patches or []) ++ [
+ ./boehmgc-coroutine-sp-fallback.diff
+ ];
+ }))
];
perlDeps =
@@ -119,8 +127,7 @@
''
mkdir -p $out/nix-support
- # Converts /nix/store/50p3qk8kka9dl6wyq40vydq945k0j3kv-nix-2.4pre20201102_550e11f/bin/nix
- # To 50p3qk8kka9dl6wyq40vydq945k0j3kv/bin/nix
+ # Converts /nix/store/50p3qk8k...-nix-2.4pre20201102_550e11f/bin/nix to 50p3qk8k.../bin/nix.
tarballPath() {
# Remove the store prefix
local path=''${1#${builtins.storeDir}/}
@@ -133,10 +140,11 @@
substitute ${./scripts/install.in} $out/install \
${pkgs.lib.concatMapStrings
- (system:
- '' \
- --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
- --replace '@tarballPath_${system}@' $(tarballPath ${self.hydraJobs.binaryTarball.${system}}/*.tar.xz) \
+ (system: let
+ tarball = if builtins.elem system crossSystems then self.hydraJobs.binaryTarballCross.x86_64-linux.${system} else self.hydraJobs.binaryTarball.${system};
+ in '' \
+ --replace '@tarballHash_${system}@' $(nix --experimental-features nix-command hash-file --base16 --type sha256 ${tarball}/*.tar.xz) \
+ --replace '@tarballPath_${system}@' $(tarballPath ${tarball}/*.tar.xz) \
''
)
systems
@@ -145,13 +153,15 @@
echo "file installer $out/install" >> $out/nix-support/hydra-build-products
'';
- testNixVersions = pkgs: client: daemon: with commonDeps pkgs; pkgs.stdenv.mkDerivation {
+ testNixVersions = pkgs: client: daemon: with commonDeps pkgs; with pkgs.lib; pkgs.stdenv.mkDerivation {
NIX_DAEMON_PACKAGE = daemon;
NIX_CLIENT_PACKAGE = client;
- # Must keep this name short as OSX has a rather strict limit on the
- # socket path length, and this name appears in the path of the
- # nix-daemon socket used in the tests
- name = "nix-tests";
+ name =
+ "nix-tests"
+ + optionalString
+ (versionAtLeast daemon.version "2.4pre20211005" &&
+ versionAtLeast client.version "2.4pre20211005")
+ "-${client.version}-against-${daemon.version}";
inherit version;
src = self;
@@ -170,21 +180,92 @@
installPhase = ''
mkdir -p $out
'';
- installCheckPhase = "make installcheck";
+ installCheckPhase = "make installcheck -j$NIX_BUILD_CORES -l$NIX_BUILD_CORES";
};
+ binaryTarball = buildPackages: nix: pkgs: let
+ inherit (pkgs) cacert;
+ installerClosureInfo = buildPackages.closureInfo { rootPaths = [ nix cacert ]; };
+ in
+
+ buildPackages.runCommand "nix-binary-tarball-${version}"
+ { #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck;
+ meta.description = "Distribution-independent Nix bootstrap binaries for ${pkgs.system}";
+ }
+ ''
+ cp ${installerClosureInfo}/registration $TMPDIR/reginfo
+ cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
+ substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
+ --subst-var-by nix ${nix} \
+ --subst-var-by cacert ${cacert}
+
+ substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
+ --subst-var-by nix ${nix} \
+ --subst-var-by cacert ${cacert}
+ substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \
+ --subst-var-by nix ${nix} \
+ --subst-var-by cacert ${cacert}
+ substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \
+ --subst-var-by nix ${nix} \
+ --subst-var-by cacert ${cacert}
+
+ if type -p shellcheck; then
+ # SC1090: Don't worry about not being able to find
+ # $nix/etc/profile.d/nix.sh
+ shellcheck --exclude SC1090 $TMPDIR/install
+ shellcheck $TMPDIR/create-darwin-volume.sh
+ shellcheck $TMPDIR/install-darwin-multi-user.sh
+ shellcheck $TMPDIR/install-systemd-multi-user.sh
+
+ # SC1091: Don't panic about not being able to source
+ # /etc/profile
+ # SC2002: Ignore the "useless cat" warning when loading
+ # .reginfo; the cat is a much cleaner
+ # implementation, even though it is "useless"
+ # SC2116: Allow ROOT_HOME=$(echo ~root) for resolving
+ # root's home directory
+ shellcheck --external-sources \
+ --exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user
+ fi
+
+ chmod +x $TMPDIR/install
+ chmod +x $TMPDIR/create-darwin-volume.sh
+ chmod +x $TMPDIR/install-darwin-multi-user.sh
+ chmod +x $TMPDIR/install-systemd-multi-user.sh
+ chmod +x $TMPDIR/install-multi-user
+ dir=nix-${version}-${pkgs.system}
+ fn=$out/$dir.tar.xz
+ mkdir -p $out/nix-support
+ echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
+ tar cvfJ $fn \
+ --owner=0 --group=0 --mode=u+rw,uga+r \
+ --absolute-names \
+ --hard-dereference \
+ --transform "s,$TMPDIR/install,$dir/install," \
+ --transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
+ --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
+ --transform "s,$NIX_STORE,$dir/store,S" \
+ $TMPDIR/install \
+ $TMPDIR/create-darwin-volume.sh \
+ $TMPDIR/install-darwin-multi-user.sh \
+ $TMPDIR/install-systemd-multi-user.sh \
+ $TMPDIR/install-multi-user \
+ $TMPDIR/reginfo \
+ $(cat ${installerClosureInfo}/store-paths)
+ '';
+
in {
# A Nixpkgs overlay that overrides the 'nix' and
# 'nix.perl-bindings' packages.
overlay = final: prev: {
- # An older version of Nix to test against when using the daemon.
- # Currently using `nixUnstable` as the stable one doesn't respect
- # `NIX_DAEMON_SOCKET_PATH` which is needed for the tests.
nixStable = prev.nix;
+ # Forward from the previous stage as we don’t want it to pick the lowdown override
+ nixUnstable = prev.nixUnstable;
+
nix = with final; with commonDeps pkgs; stdenv.mkDerivation {
name = "nix-${version}";
inherit version;
@@ -254,9 +335,9 @@
xz
pkgs.perl
boost
- nlohmann_json
]
- ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium;
+ ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
+ ++ lib.optional stdenv.isDarwin darwin.apple_sdk.frameworks.Security;
configureFlags = ''
--with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
@@ -270,24 +351,17 @@
};
- lowdown = with final; stdenv.mkDerivation rec {
- name = "lowdown-0.8.4";
-
- /*
- src = fetchurl {
- url = "https://kristaps.bsd.lv/lowdown/snapshots/${name}.tar.gz";
- hash = "sha512-U9WeGoInT9vrawwa57t6u9dEdRge4/P+0wLxmQyOL9nhzOEUU2FRz2Be9H0dCjYE7p2v3vCXIYk40M+jjULATw==";
- };
- */
+ lowdown-nix = with final; stdenv.mkDerivation rec {
+ name = "lowdown-0.9.0";
src = lowdown-src;
outputs = [ "out" "bin" "dev" ];
- nativeBuildInputs = [ which ];
+ nativeBuildInputs = [ buildPackages.which ];
- configurePhase =
- ''
+ configurePhase = ''
+ ${if (stdenv.isDarwin && stdenv.isAarch64) then "echo \"HAVE_SANDBOX_INIT=false\" > configure.local" else ""}
./configure \
PREFIX=${placeholder "dev"} \
BINDIR=${placeholder "bin"}/bin
@@ -303,93 +377,49 @@
buildStatic = nixpkgs.lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static);
+ buildCross = nixpkgs.lib.genAttrs crossSystems (crossSystem:
+ nixpkgs.lib.genAttrs ["x86_64-linux"] (system: self.packages.${system}."nix-${crossSystem}"));
+
# Perl bindings for various platforms.
perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings);
# Binary tarball for various platforms, containing a Nix store
# with the closure of 'nix' package, and the second half of
# the installation script.
- binaryTarball = nixpkgs.lib.genAttrs systems (system:
+ binaryTarball = nixpkgs.lib.genAttrs systems (system: binaryTarball nixpkgsFor.${system} nixpkgsFor.${system}.nix nixpkgsFor.${system});
+
+ binaryTarballCross = nixpkgs.lib.genAttrs ["x86_64-linux"] (system: builtins.listToAttrs (map (crossSystem: {
+ name = crossSystem;
+ value = let
+ nixpkgsCross = import nixpkgs {
+ inherit system crossSystem;
+ overlays = [ self.overlay ];
+ };
+ in binaryTarball nixpkgsFor.${system} self.packages.${system}."nix-${crossSystem}" nixpkgsCross;
+ }) crossSystems));
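+ # e.g. hydraJobs.binaryTarballCross.x86_64-linux.armv6l-linux is the
+ # armv6l tarball cross-built on an x86_64-linux machine.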
- with nixpkgsFor.${system};
+ # The first half of the installation script. This is uploaded
+ # to https://nixos.org/nix/install. It downloads the binary
+ # tarball for the user's system and calls the second half of the
+ # installation script.
+ installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" "armv6l-linux" "armv7l-linux" ];
+ installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" "armv6l-linux" "armv7l-linux"];
+ # docker image with Nix inside
+ dockerImage = nixpkgs.lib.genAttrs linux64BitSystems (system:
let
- installerClosureInfo = closureInfo { rootPaths = [ nix cacert ]; };
- in
-
- runCommand "nix-binary-tarball-${version}"
- { #nativeBuildInputs = lib.optional (system != "aarch64-linux") shellcheck;
- meta.description = "Distribution-independent Nix bootstrap binaries for ${system}";
+ pkgs = nixpkgsFor.${system};
+ image = import ./docker.nix { inherit pkgs; tag = version; };
+ in pkgs.runCommand "docker-image-tarball-${version}"
+ { meta.description = "Docker image with Nix for ${system}";
}
''
- cp ${installerClosureInfo}/registration $TMPDIR/reginfo
- cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
- substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
- --subst-var-by nix ${nix} \
- --subst-var-by cacert ${cacert}
-
- substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
- --subst-var-by nix ${nix} \
- --subst-var-by cacert ${cacert}
- substitute ${./scripts/install-systemd-multi-user.sh} $TMPDIR/install-systemd-multi-user.sh \
- --subst-var-by nix ${nix} \
- --subst-var-by cacert ${cacert}
- substitute ${./scripts/install-multi-user.sh} $TMPDIR/install-multi-user \
- --subst-var-by nix ${nix} \
- --subst-var-by cacert ${cacert}
-
- if type -p shellcheck; then
- # SC1090: Don't worry about not being able to find
- # $nix/etc/profile.d/nix.sh
- shellcheck --exclude SC1090 $TMPDIR/install
- shellcheck $TMPDIR/create-darwin-volume.sh
- shellcheck $TMPDIR/install-darwin-multi-user.sh
- shellcheck $TMPDIR/install-systemd-multi-user.sh
-
- # SC1091: Don't panic about not being able to source
- # /etc/profile
- # SC2002: Ignore "useless cat" "error", when loading
- # .reginfo, as the cat is a much cleaner
- # implementation, even though it is "useless"
- # SC2116: Allow ROOT_HOME=$(echo ~root) for resolving
- # root's home directory
- shellcheck --external-sources \
- --exclude SC1091,SC2002,SC2116 $TMPDIR/install-multi-user
- fi
-
- chmod +x $TMPDIR/install
- chmod +x $TMPDIR/create-darwin-volume.sh
- chmod +x $TMPDIR/install-darwin-multi-user.sh
- chmod +x $TMPDIR/install-systemd-multi-user.sh
- chmod +x $TMPDIR/install-multi-user
- dir=nix-${version}-${system}
- fn=$out/$dir.tar.xz
mkdir -p $out/nix-support
- echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
- tar cvfJ $fn \
- --owner=0 --group=0 --mode=u+rw,uga+r \
- --absolute-names \
- --hard-dereference \
- --transform "s,$TMPDIR/install,$dir/install," \
- --transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
- --transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
- --transform "s,$NIX_STORE,$dir/store,S" \
- $TMPDIR/install \
- $TMPDIR/create-darwin-volume.sh \
- $TMPDIR/install-darwin-multi-user.sh \
- $TMPDIR/install-systemd-multi-user.sh \
- $TMPDIR/install-multi-user \
- $TMPDIR/reginfo \
- $(cat ${installerClosureInfo}/store-paths)
+ image=$out/image.tar.gz
+ ln -s ${image} $image
+ echo "file binary-dist $image" >> $out/nix-support/hydra-build-products
'');
- # The first half of the installation script. This is uploaded
- # to https://nixos.org/nix/install. It downloads the binary
- # tarball for the user's system and calls the second half of the
- # installation script.
- installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ];
- installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" ];
-
# Line coverage analysis.
coverage =
with nixpkgsFor.x86_64-linux;
@@ -430,6 +460,12 @@
inherit (self) overlay;
};
+ tests.nssPreload = (import ./tests/nss-preload.nix rec {
+ system = "x86_64-linux";
+ inherit nixpkgs;
+ inherit (self) overlay;
+ });
+
tests.githubFlakes = (import ./tests/github-flakes.nix rec {
system = "x86_64-linux";
inherit nixpkgs;
@@ -468,25 +504,33 @@
'';
*/
- };
-
- checks = forAllSystems (system: {
- binaryTarball = self.hydraJobs.binaryTarball.${system};
- perlBindings = self.hydraJobs.perlBindings.${system};
- installTests =
+ installTests = forAllSystems (system:
let pkgs = nixpkgsFor.${system}; in
pkgs.runCommand "install-tests" {
againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix;
- againstCurrentUnstable = testNixVersions pkgs pkgs.nix pkgs.nixUnstable;
+ againstCurrentUnstable =
+ # FIXME: temporarily disable this on macOS because of #3605.
+ if system == "x86_64-linux"
+ then testNixVersions pkgs pkgs.nix pkgs.nixUnstable
+ else null;
# Disabled because the latest stable version doesn't handle
# `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work
# againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable;
- } "touch $out";
- });
+ } "touch $out");
+
+ };
+
+ checks = forAllSystems (system: {
+ binaryTarball = self.hydraJobs.binaryTarball.${system};
+ perlBindings = self.hydraJobs.perlBindings.${system};
+ installTests = self.hydraJobs.installTests.${system};
+ } // (if system == "x86_64-linux" then {
+ dockerImage = self.hydraJobs.dockerImage.${system};
+ } else {}));
packages = forAllSystems (system: {
inherit (nixpkgsFor.${system}) nix;
- } // nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) {
+ } // (nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) {
nix-static = let
nixpkgs = nixpkgsFor.${system}.pkgsStatic;
in with commonDeps nixpkgs; nixpkgs.stdenv.mkDerivation {
@@ -524,8 +568,49 @@
stripAllList = ["bin"];
strictDeps = true;
+
+ hardeningDisable = [ "pie" ];
+ };
+ } // builtins.listToAttrs (map (crossSystem: {
+ name = "nix-${crossSystem}";
+ value = let
+ nixpkgsCross = import nixpkgs {
+ inherit system crossSystem;
+ overlays = [ self.overlay ];
+ };
+ in with commonDeps nixpkgsCross; nixpkgsCross.stdenv.mkDerivation {
+ name = "nix-${version}";
+
+ src = self;
+
+ VERSION_SUFFIX = versionSuffix;
+
+ outputs = [ "out" "dev" "doc" ];
+
+ nativeBuildInputs = nativeBuildDeps;
+ buildInputs = buildDeps ++ propagatedDeps;
+
+ configureFlags = [ "--sysconfdir=/etc" "--disable-doc-gen" ];
+
+ enableParallelBuilding = true;
+
+ makeFlags = "profiledir=$(out)/etc/profile.d";
+
+ doCheck = true;
+
+ installFlags = "sysconfdir=$(out)/etc";
+
+ postInstall = ''
+ mkdir -p $doc/nix-support
+ echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
+ mkdir -p $out/nix-support
+ echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products
+ '';
+
+ doInstallCheck = true;
+ installCheckFlags = "sysconfdir=$(out)/etc";
};
- });
+ }) crossSystems)));
defaultPackage = forAllSystems (system: self.packages.${system}.nix);
diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl
index 6f3882a12..18ab33424 100755
--- a/maintainers/upload-release.pl
+++ b/maintainers/upload-release.pl
@@ -19,6 +19,8 @@ my $nixpkgsDir = "/home/eelco/Dev/nixpkgs-pristine";
my $TMPDIR = $ENV{'TMPDIR'} // "/tmp";
+my $isLatest = ($ENV{'IS_LATEST'} // "") eq "1";
+
# FIXME: cut&paste from nixos-channel-scripts.
sub fetch {
my ($url, $type) = @_;
@@ -35,16 +37,18 @@ sub fetch {
my $evalUrl = "https://hydra.nixos.org/eval/$evalId";
my $evalInfo = decode_json(fetch($evalUrl, 'application/json'));
#print Dumper($evalInfo);
+my $flakeUrl = $evalInfo->{flake} or die;
+my $flakeInfo = decode_json(`nix flake metadata --json "$flakeUrl"` or die);
+my $nixRev = $flakeInfo->{revision} or die;
-my $nixRev = $evalInfo->{jobsetevalinputs}->{nix}->{revision} or die;
-
-my $tarballInfo = decode_json(fetch("$evalUrl/job/tarball", 'application/json'));
+my $buildInfo = decode_json(fetch("$evalUrl/job/build.x86_64-linux", 'application/json'));
+#print Dumper($buildInfo);
-my $releaseName = $tarballInfo->{releasename};
+my $releaseName = $buildInfo->{nixname};
$releaseName =~ /nix-(.*)$/ or die;
my $version = $1;
-print STDERR "Nix revision is $nixRev, version is $version\n";
+print STDERR "Flake URL is $flakeUrl, Nix revision is $nixRev, version is $version\n";
my $releaseDir = "nix/$releaseName";
@@ -83,12 +87,12 @@ sub downloadFile {
if (!-e $tmpFile) {
print STDERR "downloading $srcFile to $tmpFile...\n";
- system("NIX_REMOTE=https://cache.nixos.org/ nix cat-store '$srcFile' > '$tmpFile'") == 0
+ system("NIX_REMOTE=https://cache.nixos.org/ nix store cat '$srcFile' > '$tmpFile'") == 0
or die "unable to fetch $srcFile\n";
}
my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash} or die;
- my $sha256_actual = `nix hash-file --base16 --type sha256 '$tmpFile'`;
+ my $sha256_actual = `nix hash file --base16 --type sha256 '$tmpFile'`;
chomp $sha256_actual;
if ($sha256_expected ne $sha256_actual) {
print STDERR "file $tmpFile is corrupt, got $sha256_actual, expected $sha256_expected\n";
@@ -104,12 +108,13 @@ sub downloadFile {
return $sha256_expected;
}
-downloadFile("tarball", "2"); # .tar.bz2
-my $tarballHash = downloadFile("tarball", "3"); # .tar.xz
downloadFile("binaryTarball.i686-linux", "1");
downloadFile("binaryTarball.x86_64-linux", "1");
downloadFile("binaryTarball.aarch64-linux", "1");
downloadFile("binaryTarball.x86_64-darwin", "1");
+downloadFile("binaryTarball.aarch64-darwin", "1");
+downloadFile("binaryTarballCross.x86_64-linux.armv6l-linux", "1");
+downloadFile("binaryTarballCross.x86_64-linux.armv7l-linux", "1");
downloadFile("installerScript", "1");
for my $fn (glob "$tmpDir/*") {
@@ -131,53 +136,38 @@ for my $fn (glob "$tmpDir/*") {
}
}
-exit if $version =~ /pre/;
-
-# Update Nixpkgs in a very hacky way.
-system("cd $nixpkgsDir && git pull") == 0 or die;
-my $oldName = `nix-instantiate --eval $nixpkgsDir -A nix.name`; chomp $oldName;
-my $oldHash = `nix-instantiate --eval $nixpkgsDir -A nix.src.outputHash`; chomp $oldHash;
-print STDERR "old stable version in Nixpkgs = $oldName / $oldHash\n";
-
-my $fn = "$nixpkgsDir/pkgs/tools/package-management/nix/default.nix";
-my $oldFile = read_file($fn);
-$oldFile =~ s/$oldName/"$releaseName"/g;
-$oldFile =~ s/$oldHash/"$tarballHash"/g;
-write_file($fn, $oldFile);
+# Update nix-fallback-paths.nix.
+if ($isLatest) {
+ system("cd $nixpkgsDir && git pull") == 0 or die;
-$oldName =~ s/nix-//g;
-$oldName =~ s/"//g;
-
-sub getStorePath {
- my ($jobName) = @_;
- my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json'));
- for my $product (values %{$buildInfo->{buildproducts}}) {
- next unless $product->{type} eq "nix-build";
- next if $product->{path} =~ /[a-z]+$/;
- return $product->{path};
+ sub getStorePath {
+ my ($jobName) = @_;
+ my $buildInfo = decode_json(fetch("$evalUrl/job/$jobName", 'application/json'));
+ return $buildInfo->{buildoutputs}->{out}->{path} or die "cannot get store path for '$jobName'";
}
- die;
-}
-write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
- "{\n" .
- " x86_64-linux = \"" . getStorePath("build.x86_64-linux") . "\";\n" .
- " i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
- " aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" .
- " x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
- "}\n");
+ write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
+ "{\n" .
+ " x86_64-linux = \"" . getStorePath("build.x86_64-linux") . "\";\n" .
+ " i686-linux = \"" . getStorePath("build.i686-linux") . "\";\n" .
+ " aarch64-linux = \"" . getStorePath("build.aarch64-linux") . "\";\n" .
+ " x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
+ " aarch64-darwin = \"" . getStorePath("build.aarch64-darwin") . "\";\n" .
+ "}\n");
-system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die;
+ system("cd $nixpkgsDir && git commit -a -m 'nix-fallback-paths.nix: Update to $version'") == 0 or die;
+}
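+# For reference, the nix-fallback-paths.nix written above looks like
+# (hypothetical store paths, other platforms elided):
+# {
+# x86_64-linux = "/nix/store/aaaa...-nix-2.4";
+# ...
+# aarch64-darwin = "/nix/store/bbbb...-nix-2.4";
+# }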
# Update the "latest" symlink.
$channelsBucket->add_key(
"nix-latest/install", "",
{ "x-amz-website-redirect-location" => "https://releases.nixos.org/$releaseDir/install" })
- or die $channelsBucket->err . ": " . $channelsBucket->errstr;
+ or die $channelsBucket->err . ": " . $channelsBucket->errstr
+ if $isLatest;
# Tag the release in Git.
chdir("/home/eelco/Dev/nix-pristine") or die;
system("git remote update origin") == 0 or die;
system("git tag --force --sign $version $nixRev -m 'Tagging release $version'") == 0 or die;
system("git push --tags") == 0 or die;
-system("git push --force-with-lease origin $nixRev:refs/heads/latest-release") == 0 or die;
+system("git push --force-with-lease origin $nixRev:refs/heads/latest-release") == 0 or die if $isLatest;
diff --git a/misc/fish/completion.fish b/misc/fish/completion.fish
new file mode 100644
index 000000000..bedbefaf8
--- /dev/null
+++ b/misc/fish/completion.fish
@@ -0,0 +1,37 @@
+function _nix_complete
+ # Get the current command up to the cursor.
+ # - Behaves correctly even with pipes and when nested in commands like env.
+ # - TODO: Returns the command verbatim (does not interpolate variables).
+ # That might not be optimal for arguments like -f.
+ set -l nix_args (commandline --current-process --tokenize --cut-at-cursor)
+ # --cut-at-cursor with --tokenize removes the current token so we need to add it separately.
+ # https://github.com/fish-shell/fish-shell/issues/7375
+ # Can be an empty string.
+ set -l current_token (commandline --current-token --cut-at-cursor)
+
+ # Nix wants the index of the argv item to complete, but the $nix_args variable
+ # also contains the program name (argv[0]), so we would need to subtract 1.
+ # Since the variable is also missing the current token, the two cancel out.
+ set -l nix_arg_to_complete (count $nix_args)
+
+ env NIX_GET_COMPLETIONS=$nix_arg_to_complete $nix_args $current_token
+end
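+
+# For illustration, a hypothetical session (command and description made up):
+# $ NIX_GET_COMPLETIONS=1 nix bui
+# no-filenames
+# build<TAB>Build a derivation or fetch a store path
+# The first line is the filename behaviour; later lines are
+# completion/description pairs, which the helpers below rely on.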
+
+function _nix_accepts_files
+ set -l response (_nix_complete)
+ # First line is either filenames or no-filenames.
+ test $response[1] = 'filenames'
+end
+
+function _nix
+ set -l response (_nix_complete)
+ # Skip the first line since it is handled by _nix_accepts_files.
+ # Tail lines each contain a command followed by a tab character and, optionally, a description.
+ # This is also the format fish expects.
+ string collect -- $response[2..-1]
+end
+
+# Disable file path completion if paths do not belong in the current context.
+complete --command nix --condition 'not _nix_accepts_files' --no-files
+
+complete --command nix --arguments '(_nix)'
diff --git a/misc/fish/local.mk b/misc/fish/local.mk
new file mode 100644
index 000000000..ece899fc3
--- /dev/null
+++ b/misc/fish/local.mk
@@ -0,0 +1 @@
+$(eval $(call install-file-as, $(d)/completion.fish, $(datarootdir)/fish/vendor_completions.d/nix.fish, 0644))
diff --git a/misc/launchd/local.mk b/misc/launchd/local.mk
index 0ba722efb..a39188fe6 100644
--- a/misc/launchd/local.mk
+++ b/misc/launchd/local.mk
@@ -1,4 +1,4 @@
-ifeq ($(OS), Darwin)
+ifdef HOST_DARWIN
$(eval $(call install-data-in, $(d)/org.nixos.nix-daemon.plist, $(prefix)/Library/LaunchDaemons))
diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in
index c334639e2..f1b439840 100644
--- a/misc/launchd/org.nixos.nix-daemon.plist.in
+++ b/misc/launchd/org.nixos.nix-daemon.plist.in
@@ -19,7 +19,7 @@
<array>
<string>/bin/sh</string>
<string>-c</string>
- <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon &amp;&amp; /nix/var/nix/profiles/default/bin/nix-daemon</string>
+ <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon &amp;&amp; exec /nix/var/nix/profiles/default/bin/nix-daemon</string>
</array>
<key>StandardErrorPath</key>
<string>/var/log/nix-daemon.log</string>
diff --git a/misc/systemd/local.mk b/misc/systemd/local.mk
index 785db52a4..1fa037485 100644
--- a/misc/systemd/local.mk
+++ b/misc/systemd/local.mk
@@ -1,4 +1,4 @@
-ifeq ($(OS), Linux)
+ifdef HOST_LINUX
$(foreach n, nix-daemon.socket nix-daemon.service, $(eval $(call install-file-in, $(d)/$(n), $(prefix)/lib/systemd/system, 0644)))
diff --git a/misc/upstart/local.mk b/misc/upstart/local.mk
index 5071676dc..2fbfb29b9 100644
--- a/misc/upstart/local.mk
+++ b/misc/upstart/local.mk
@@ -1,4 +1,4 @@
-ifeq ($(OS), Linux)
+ifdef HOST_LINUX
$(foreach n, nix-daemon.conf, $(eval $(call install-file-in, $(d)/$(n), $(sysconfdir)/init, 0644)))
diff --git a/misc/zsh/completion.zsh b/misc/zsh/completion.zsh
index d4df6447e..a902e37dc 100644
--- a/misc/zsh/completion.zsh
+++ b/misc/zsh/completion.zsh
@@ -1,3 +1,5 @@
+#compdef nix
+
function _nix() {
local ifs_bk="$IFS"
local input=("${(Q)words[@]}")
@@ -18,4 +20,4 @@ function _nix() {
_describe 'nix' suggestions
}
-compdef _nix nix
+_nix "$@"
diff --git a/misc/zsh/local.mk b/misc/zsh/local.mk
new file mode 100644
index 000000000..418fb1377
--- /dev/null
+++ b/misc/zsh/local.mk
@@ -0,0 +1 @@
+$(eval $(call install-file-as, $(d)/completion.zsh, $(datarootdir)/zsh/site-functions/_nix, 0644))
diff --git a/mk/lib.mk b/mk/lib.mk
index 975102531..92f0983d5 100644
--- a/mk/lib.mk
+++ b/mk/lib.mk
@@ -10,8 +10,25 @@ bin-scripts :=
noinst-scripts :=
man-pages :=
install-tests :=
-OS = $(shell uname -s)
+ifdef HOST_OS
+ HOST_KERNEL = $(firstword $(subst -, ,$(HOST_OS)))
+ ifeq ($(HOST_KERNEL), cygwin)
+ HOST_CYGWIN = 1
+ endif
+ ifeq ($(patsubst darwin%,,$(HOST_KERNEL)),)
+ HOST_DARWIN = 1
+ endif
+ ifeq ($(patsubst freebsd%,,$(HOST_KERNEL)),)
+ HOST_FREEBSD = 1
+ endif
+ ifeq ($(HOST_KERNEL), linux)
+ HOST_LINUX = 1
+ endif
+ ifeq ($(patsubst solaris%,,$(HOST_KERNEL)),)
+ HOST_SOLARIS = 1
+ endif
+endif
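+
+# For example, with autoconf-style $host_os values (hypothetical inputs):
+# HOST_OS=linux-gnu -> HOST_KERNEL=linux -> HOST_LINUX=1
+# HOST_OS=darwin20.6.0 -> HOST_KERNEL=darwin20.6.0 -> HOST_DARWIN=1
+# HOST_OS=cygwin -> HOST_KERNEL=cygwin -> HOST_CYGWIN=1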
# Hack to define a literal space.
space :=
@@ -50,16 +67,16 @@ endif
BUILD_SHARED_LIBS ?= 1
ifeq ($(BUILD_SHARED_LIBS), 1)
- ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
+ ifdef HOST_CYGWIN
GLOBAL_CFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE
GLOBAL_CXXFLAGS += -U__STRICT_ANSI__ -D_GNU_SOURCE
else
GLOBAL_CFLAGS += -fPIC
GLOBAL_CXXFLAGS += -fPIC
endif
- ifneq ($(OS), Darwin)
- ifneq ($(OS), SunOS)
- ifneq ($(OS), FreeBSD)
+ ifndef HOST_DARWIN
+ ifndef HOST_SOLARIS
+ ifndef HOST_FREEBSD
GLOBAL_LDFLAGS += -Wl,--no-copy-dt-needed-entries
endif
endif
diff --git a/mk/libraries.mk b/mk/libraries.mk
index 7c0e4f100..ffd7b5610 100644
--- a/mk/libraries.mk
+++ b/mk/libraries.mk
@@ -1,9 +1,9 @@
libs-list :=
-ifeq ($(OS), Darwin)
+ifdef HOST_DARWIN
SO_EXT = dylib
else
- ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
+ ifdef HOST_CYGWIN
SO_EXT = dll
else
SO_EXT = so
@@ -59,7 +59,7 @@ define build-library
$(1)_OBJS := $$(addprefix $(buildprefix), $$(addsuffix .o, $$(basename $$(_srcs))))
_libs := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_PATH))
- ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
+ ifdef HOST_CYGWIN
$(1)_INSTALL_DIR ?= $$(bindir)
else
$(1)_INSTALL_DIR ?= $$(libdir)
@@ -73,18 +73,18 @@ define build-library
ifeq ($(BUILD_SHARED_LIBS), 1)
ifdef $(1)_ALLOW_UNDEFINED
- ifeq ($(OS), Darwin)
+ ifdef HOST_DARWIN
$(1)_LDFLAGS += -undefined suppress -flat_namespace
endif
else
- ifneq ($(OS), Darwin)
- ifneq (CYGWIN,$(findstring CYGWIN,$(OS)))
+ ifndef HOST_DARWIN
+ ifndef HOST_CYGWIN
$(1)_LDFLAGS += -Wl,-z,defs
endif
endif
endif
- ifneq ($(OS), Darwin)
+ ifndef HOST_DARWIN
$(1)_LDFLAGS += -Wl,-soname=$$($(1)_NAME).$(SO_EXT)
endif
@@ -93,7 +93,7 @@ define build-library
$$($(1)_PATH): $$($(1)_OBJS) $$(_libs) | $$(_d)/
$$(trace-ld) $(CXX) -o $$(abspath $$@) -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) $$($(1)_LDFLAGS_UNINSTALLED)
- ifneq ($(OS), Darwin)
+ ifndef HOST_DARWIN
$(1)_LDFLAGS_USE += -Wl,-rpath,$$(abspath $$(_d))
endif
$(1)_LDFLAGS_USE += -L$$(_d) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME)))
@@ -108,7 +108,7 @@ define build-library
$$(trace-ld) $(CXX) -o $$@ -shared $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$($(1)_LDFLAGS_PROPAGATED) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED))
$(1)_LDFLAGS_USE_INSTALLED += -L$$(DESTDIR)$$($(1)_INSTALL_DIR) -l$$(patsubst lib%,%,$$(strip $$($(1)_NAME)))
- ifneq ($(OS), Darwin)
+ ifndef HOST_DARWIN
ifeq ($(SET_RPATH_TO_LIBS), 1)
$(1)_LDFLAGS_USE_INSTALLED += -Wl,-rpath,$$($(1)_INSTALL_DIR)
else
@@ -125,8 +125,8 @@ define build-library
$(1)_PATH := $$(_d)/$$($(1)_NAME).a
$$($(1)_PATH): $$($(1)_OBJS) | $$(_d)/
- $(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$?
- $(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o
+ $$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$?
+ $$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o
$(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS)
diff --git a/mk/tests.mk b/mk/tests.mk
index 21bdc5748..a2e30a378 100644
--- a/mk/tests.mk
+++ b/mk/tests.mk
@@ -13,3 +13,7 @@ define run-install-test
endef
.PHONY: check installcheck
+
+print-top-help += \
+ echo " check: Run unit tests"; \
+ echo " installcheck: Run functional tests";
diff --git a/nix-rust/local.mk b/nix-rust/local.mk
index 50db4783c..538244594 100644
--- a/nix-rust/local.mk
+++ b/nix-rust/local.mk
@@ -8,10 +8,15 @@ endif
libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT)
libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT)
-libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust -ldl
-libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust -ldl
+libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust
+libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust
-ifeq ($(OS), Darwin)
+ifdef HOST_LINUX
+libnixrust_LDFLAGS_USE += -ldl
+libnixrust_LDFLAGS_USE_INSTALLED += -ldl
+endif
+
+ifdef HOST_DARWIN
libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup"
else
libnixrust_LDFLAGS_USE += -Wl,-rpath,$(abspath $(d)/target/$(RUST_DIR))
@@ -26,7 +31,7 @@ $(libnixrust_PATH): $(call rwildcard, $(d)/src, *.rs) $(d)/Cargo.toml
$(libnixrust_INSTALL_PATH): $(libnixrust_PATH)
$(target-gen) cp $^ $@
-ifeq ($(OS), Darwin)
+ifdef HOST_DARWIN
install_name_tool -id $@ $@
endif
@@ -35,7 +40,7 @@ clean: clean-rust
clean-rust:
$(suppress) rm -rfv nix-rust/target
-ifneq ($(OS), Darwin)
+ifndef HOST_DARWIN
check: rust-tests
rust-tests:
diff --git a/perl/Makefile b/perl/Makefile
index 259ed7dc3..708f86882 100644
--- a/perl/Makefile
+++ b/perl/Makefile
@@ -1,6 +1,6 @@
makefiles = local.mk
-GLOBAL_CXXFLAGS += -g -Wall -std=c++17
+GLOBAL_CXXFLAGS += -g -Wall -std=c++17 -I ../src
-include Makefile.config
diff --git a/perl/Makefile.config.in b/perl/Makefile.config.in
index eccfbd9f6..d856de3ad 100644
--- a/perl/Makefile.config.in
+++ b/perl/Makefile.config.in
@@ -1,3 +1,4 @@
+HOST_OS = @host_os@
CC = @CC@
CFLAGS = @CFLAGS@
CXX = @CXX@
diff --git a/perl/configure.ac b/perl/configure.ac
index 85183c005..eb65ac17b 100644
--- a/perl/configure.ac
+++ b/perl/configure.ac
@@ -7,6 +7,8 @@ CXXFLAGS=
AC_PROG_CC
AC_PROG_CXX
+AC_CANONICAL_HOST
+
# Use 64-bit file system calls so that we can support files > 2 GiB.
AC_SYS_LARGEFILE
diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in
index f7c6f2484..508a15e15 100644
--- a/perl/lib/Nix/Config.pm.in
+++ b/perl/lib/Nix/Config.pm.in
@@ -1,6 +1,7 @@
package Nix::Config;
use MIME::Base64;
+use Nix::Store;
$version = "@PACKAGE_VERSION@";
diff --git a/perl/lib/Nix/Store.pm b/perl/lib/Nix/Store.pm
index 179f1dc90..3e4bbee0a 100644
--- a/perl/lib/Nix/Store.pm
+++ b/perl/lib/Nix/Store.pm
@@ -22,6 +22,7 @@ our @EXPORT = qw(
derivationFromPath
addTempRoot
getBinDir getStoreDir
+ queryRawRealisation
);
our $VERSION = '0.15';
diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs
index ad9042a2a..edbf12f7c 100644
--- a/perl/lib/Nix/Store.xs
+++ b/perl/lib/Nix/Store.xs
@@ -15,6 +15,7 @@
#include "crypto.hh"
#include <sodium.h>
+#include <nlohmann/json.hpp>
using namespace nix;
@@ -120,6 +121,18 @@ SV * queryPathInfo(char * path, int base32)
croak("%s", e.what());
}
+SV * queryRawRealisation(char * outputId)
+ PPCODE:
+ try {
+ auto realisation = store()->queryRealisation(DrvOutput::parse(outputId));
+ if (realisation)
+ XPUSHs(sv_2mortal(newSVpv(realisation->toJSON().dump().c_str(), 0)));
+ else
+ XPUSHs(sv_2mortal(newSVpv("", 0)));
+ } catch (Error & e) {
+ croak("%s", e.what());
+ }
+
SV * queryPathFromHashPart(char * hashPart)
PPCODE:
diff --git a/perl/local.mk b/perl/local.mk
index b13d4c0d6..0eae651d8 100644
--- a/perl/local.mk
+++ b/perl/local.mk
@@ -28,7 +28,7 @@ Store_CXXFLAGS = \
Store_LDFLAGS := $(SODIUM_LIBS) $(NIX_LIBS)
-ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
+ifdef HOST_CYGWIN
archlib = $(shell perl -E 'use Config; print $$Config{archlib};')
libperl = $(shell perl -E 'use Config; print $$Config{libperl};')
Store_LDFLAGS += $(shell find ${archlib} -name ${libperl})
diff --git a/scripts/create-darwin-volume.sh b/scripts/create-darwin-volume.sh
index 32fa577a8..334b75045 100755
--- a/scripts/create-darwin-volume.sh
+++ b/scripts/create-darwin-volume.sh
@@ -1,33 +1,262 @@
-#!/bin/sh
-set -e
+#!/usr/bin/env bash
+set -eu
+set -o pipefail
-root_disk() {
- diskutil info -plist /
-}
+# I'm a little agnostic on the choices, but supporting a wide
+# slate of uses for now, including:
+# - import-only: `. create-darwin-volume.sh no-main[ ...]`
+# - legacy: `./create-darwin-volume.sh` or `. create-darwin-volume.sh`
+# (both will run main())
+# - external alt-routine: `./create-darwin-volume.sh no-main func[ ...]`
+if [ "${1-}" = "no-main" ]; then
+ shift
+ readonly _CREATE_VOLUME_NO_MAIN=1
+else
+ readonly _CREATE_VOLUME_NO_MAIN=0
+ # declare some things we expect to inherit from install-multi-user
+ # I don't love this (because it's a bit of a kludge).
+ #
+ # CAUTION: (Dec 19 2020)
+ # This is a stopgap. It doesn't cover the full slate of
+ # identifiers we inherit--just those necessary to:
+ # - avoid breaking direct invocations of this script (here/now)
+ # - avoid hard-to-reverse structural changes before the call to
+ # remove single-user support is verified
+ #
+ # In the near-mid term, I (personally) think we should:
+ # - decide to deprecate the direct call and add a notice
+ # - fold all of this into install-darwin-multi-user.sh
+ # - intentionally remove the old direct-invocation form (kill the
+ # routine, replace this script w/ deprecation notice and a note
+ # on the remove-after date)
+ #
+ readonly NIX_ROOT="${NIX_ROOT:-/nix}"
+
+ _sudo() {
+ shift # throw away the 'explanation'
+ /usr/bin/sudo "$@"
+ }
+ failure() {
+ if [ "$*" = "" ]; then
+ cat
+ else
+ echo "$@"
+ fi
+ exit 1
+ }
+ task() {
+ echo "$@"
+ }
+fi
-# i.e., "disk1"
+# usually "disk1"
root_disk_identifier() {
- diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
+ # For performance (~10ms vs 280ms) I'm parsing 'diskX' from stat output
+ # (~diskXsY)--but I'm retaining the more-semantic approach since
+ # it documents intent better.
+ # /usr/sbin/diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
+ #
+ local special_device
+ special_device="$(/usr/bin/stat -f "%Sd" /)"
+ echo "${special_device%s[0-9]*}"
+}
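+# For illustration: if / lives on disk1s5, stat prints "disk1s5" and the
+# suffix strip above yields "disk1" (hypothetical device names).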
+
+# make it easy to play w/ 'Case-sensitive APFS'
+readonly NIX_VOLUME_FS="${NIX_VOLUME_FS:-APFS}"
+readonly NIX_VOLUME_LABEL="${NIX_VOLUME_LABEL:-Nix Store}"
+ # Strongly assuming we'll make a volume on the device that / is on,
+ # but you can override NIX_VOLUME_USE_DISK to create it on some other device
+readonly NIX_VOLUME_USE_DISK="${NIX_VOLUME_USE_DISK:-$(root_disk_identifier)}"
+NIX_VOLUME_USE_SPECIAL="${NIX_VOLUME_USE_SPECIAL:-}"
+NIX_VOLUME_USE_UUID="${NIX_VOLUME_USE_UUID:-}"
+readonly NIX_VOLUME_MOUNTD_DEST="${NIX_VOLUME_MOUNTD_DEST:-/Library/LaunchDaemons/org.nixos.darwin-store.plist}"
+
+if /usr/bin/fdesetup isactive >/dev/null; then
+ test_filevault_in_use() { return 0; }
+ # no readonly; we may modify it if the user refuses encryption in cure_volume
+ NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-1}"
+else
+ test_filevault_in_use() { return 1; }
+ NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-0}"
+fi
+
+should_encrypt_volume() {
+ test_filevault_in_use && (( NIX_VOLUME_DO_ENCRYPT == 1 ))
+}
+
+substep() {
+ printf " %s\n" "" "- $1" "" "${@:2}"
+}
+
+
+volumes_labeled() {
+ local label="$1"
+ xsltproc --novalid --stringparam label "$label" - <(/usr/sbin/ioreg -ra -c "AppleAPFSVolume") <<'EOF'
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+ <xsl:output method="text"/>
+ <xsl:template match="/">
+ <xsl:apply-templates select="/plist/array/dict/key[text()='IORegistryEntryName']/following-sibling::*[1][text()=$label]/.."/>
+ </xsl:template>
+ <xsl:template match="dict">
+ <xsl:apply-templates match="string" select="key[text()='BSD Name']/following-sibling::*[1]"/>
+ <xsl:text>=</xsl:text>
+ <xsl:apply-templates match="string" select="key[text()='UUID']/following-sibling::*[1]"/>
+ <xsl:text>&#xA;</xsl:text>
+ </xsl:template>
+</xsl:stylesheet>
+EOF
+ # I cut label out of the extracted values, but here it is for reference:
+ # <xsl:apply-templates match="string" select="key[text()='IORegistryEntryName']/following-sibling::*[1]"/>
+ # <xsl:text>=</xsl:text>
+}
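+# Example output of volumes_labeled "Nix Store", one line per volume
+# (hypothetical special device and UUID):
+# disk1s7=767F7960-4026-4B3D-A6C2-6D57422D0A2F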
+
+right_disk() {
+ local volume_special="$1" # (i.e., disk1s7)
+ [[ "$volume_special" == "$NIX_VOLUME_USE_DISK"s* ]]
+}
+
+right_volume() {
+ local volume_special="$1" # (i.e., disk1s7)
+ # if set, it must match; otherwise ensure it's on the right disk
+ if [ -z "$NIX_VOLUME_USE_SPECIAL" ]; then
+ if right_disk "$volume_special"; then
+ NIX_VOLUME_USE_SPECIAL="$volume_special" # latch on
+ return 0
+ else
+ return 1
+ fi
+ else
+ [ "$volume_special" = "$NIX_VOLUME_USE_SPECIAL" ]
+ fi
+}
+
+right_uuid() {
+ local volume_uuid="$1"
+ # if set, it must match; otherwise allow
+ if [ -z "$NIX_VOLUME_USE_UUID" ]; then
+ NIX_VOLUME_USE_UUID="$volume_uuid" # latch on
+ return 0
+ else
+ [ "$volume_uuid" = "$NIX_VOLUME_USE_UUID" ]
+ fi
+}
+
+cure_volumes() {
+ local found volume special uuid
+ # loop just in case they have more than one volume
+ # (nothing stops you from doing this)
+ for volume in $(volumes_labeled "$NIX_VOLUME_LABEL"); do
+ # CAUTION: this could (maybe) be a more normal read
+ # loop like:
+ # while IFS== read -r special uuid; do
+ # # ...
+ # done <<<"$(volumes_labeled "$NIX_VOLUME_LABEL")"
+ #
+ # I used a for loop to skirt a problem with the obvious
+ # pattern: it replaces stdin, causing the user prompts
+ # inside (which also use read and access stdin) to be skipped
+ #
+ # If there's an existing encrypted volume we can't find
+ # in keychain, the user never gets prompted to delete
+ # the volume, and the install fails.
+ #
+ # If you change this, a human needs to test a very
+ # specific scenario: you already have an encrypted
+ # Nix Store volume, and have deleted its credential
+ # from keychain. Ensure the script asks you if it can
+ # delete the volume, and then prompts for your sudo
+ # password to confirm.
+ #
+ # shellcheck disable=SC1097
+ IFS== read -r special uuid <<< "$volume"
+ # take the first one that's on the right disk
+ if [ -z "${found:-}" ]; then
+ if right_volume "$special" && right_uuid "$uuid"; then
+ cure_volume "$special" "$uuid"
+ found="${special} (${uuid})"
+ else
+ warning <<EOF
+Ignoring ${special} (${uuid}) because I am looking for:
+disk=${NIX_VOLUME_USE_DISK} special=${NIX_VOLUME_USE_SPECIAL:-${NIX_VOLUME_USE_DISK}sX} uuid=${NIX_VOLUME_USE_UUID:-any}
+EOF
+ # TODO: give chance to delete if ! headless?
+ fi
+ else
+ warning <<EOF
+Ignoring ${special} (${uuid}), already found target: $found
+EOF
+ # TODO: reminder? I feel like I want one idiom
+ # that can both warn now and remind later.
+ # TODO: if ! headless, chance to delete?
+ fi
+ done
+ if [ -z "${found:-}" ]; then
+ readonly NIX_VOLUME_USE_SPECIAL NIX_VOLUME_USE_UUID
+ fi
}
-find_nix_volume() {
- diskutil apfs list -plist "$1" | xmllint --xpath "(/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='Name']/following-sibling::string[starts-with(translate(text(),'N','n'),'nix')]/text())[1]" - 2>/dev/null || true
+volume_encrypted() {
+ local volume_special="$1" # (i.e., disk1s7)
+ # Trying to match the first line of output; known first lines:
+ # No cryptographic users for <special>
+ # Cryptographic user for <special> (1 found)
+ # Cryptographic users for <special> (2 found)
+ /usr/sbin/diskutil apfs listCryptoUsers -plist "$volume_special" | /usr/bin/grep -q APFSCryptoUserUUID
}
test_fstab() {
- grep -q "/nix apfs rw" /etc/fstab 2>/dev/null
+ /usr/bin/grep -q "$NIX_ROOT apfs rw" /etc/fstab 2>/dev/null
}
-test_nix_symlink() {
- [ -L "/nix" ] || grep -q "^nix." /etc/synthetic.conf 2>/dev/null
+test_nix_root_is_symlink() {
+ [ -L "$NIX_ROOT" ]
}
-test_synthetic_conf() {
- grep -q "^nix$" /etc/synthetic.conf 2>/dev/null
+test_synthetic_conf_either(){
+ /usr/bin/grep -qE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf 2>/dev/null
+}
+
+test_synthetic_conf_mountable() {
+ /usr/bin/grep -q "^${NIX_ROOT:1}$" /etc/synthetic.conf 2>/dev/null
+}
+
+test_synthetic_conf_symlinked() {
+ /usr/bin/grep -qE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf 2>/dev/null
+}
+
+test_nix_volume_mountd_installed() {
+ test -e "$NIX_VOLUME_MOUNTD_DEST"
+}
+
+# Does the keychain hold a password for this volume's UUID?
+test_keychain_by_uuid() {
+ local volume_uuid="$1"
+ # Note: doesn't need sudo just to check; doesn't output pw
+ security find-generic-password -s "$volume_uuid" &>/dev/null
+}
+
+get_volume_pass() {
+ local volume_uuid="$1"
+ _sudo \
+ "to confirm keychain has a password that unlocks this volume" \
+ security find-generic-password -s "$volume_uuid" -w
+}
+
+verify_volume_pass() {
+ local volume_special="$1" # (i.e., disk1s7)
+ local volume_uuid="$2"
+ /usr/sbin/diskutil apfs unlockVolume "$volume_special" -verify -stdinpassphrase -user "$volume_uuid"
+}
+
+volume_pass_works() {
+ local volume_special="$1" # (i.e., disk1s7)
+ local volume_uuid="$2"
+ get_volume_pass "$volume_uuid" | verify_volume_pass "$volume_special" "$volume_uuid"
}
# Create the paths defined in synthetic.conf, saving us a reboot.
-create_synthetic_objects(){
+create_synthetic_objects() {
# Big Sur takes away the -B flag we were using and replaces it
# with a -t flag that appears to do the same thing (but they
# don't behave exactly the same way in terms of return values).
@@ -41,129 +270,578 @@ create_synthetic_objects(){
}
test_nix() {
- test -d "/nix"
-}
-
-test_t2_chip_present(){
- # Use xartutil to see if system has a t2 chip.
- #
- # This isn't well-documented on its own; until it is,
- # let's keep track of knowledge/assumptions.
- #
- # Warnings:
- # - Don't search "xart" if porn will cause you trouble :)
- # - Other xartutil flags do dangerous things. Don't run them
- # naively. If you must, search "xartutil" first.
- #
- # Assumptions:
- # - the "xART session seeds recovery utility"
- # appears to interact with xartstorageremoted
- # - `sudo xartutil --list` lists xART sessions
- # and their seeds and exits 0 if successful. If
- # not, it exits 1 and prints an error such as:
- # xartutil: ERROR: No supported link to the SEP present
- # - xART sessions/seeds are present when a T2 chip is
- # (and not, otherwise)
- # - the presence of a T2 chip means a newly-created
- # volume on the primary drive will be
- # encrypted at rest
- # - all together: `sudo xartutil --list`
- # should exit 0 if a new Nix Store volume will
- # be encrypted at rest, and exit 1 if not.
- sudo xartutil --list >/dev/null 2>/dev/null
-}
-
-test_filevault_in_use() {
- fdesetup isactive >/dev/null
-}
-
-# use after error msg for conditions we don't understand
-suggest_report_error(){
- # ex "error: something sad happened :(" >&2
- echo " please report this @ https://github.com/nixos/nix/issues" >&2
-}
-
-main() {
- (
- echo ""
- echo " ------------------------------------------------------------------ "
- echo " | This installer will create a volume for the nix store and |"
- echo " | configure it to mount at /nix. Follow these steps to uninstall. |"
- echo " ------------------------------------------------------------------ "
- echo ""
- echo " 1. Remove the entry from fstab using 'sudo vifs'"
- echo " 2. Destroy the data volume using 'diskutil apfs deleteVolume'"
- echo " 3. Remove the 'nix' line from /etc/synthetic.conf or the file"
- echo ""
- ) >&2
-
- if test_nix_symlink; then
- echo "error: /nix is a symlink, please remove it and make sure it's not in synthetic.conf (in which case a reboot is required)" >&2
- echo " /nix -> $(readlink "/nix")" >&2
- exit 2
- fi
-
- if ! test_synthetic_conf; then
- echo "Configuring /etc/synthetic.conf..." >&2
- echo nix | sudo tee -a /etc/synthetic.conf
- if ! test_synthetic_conf; then
- echo "error: failed to configure synthetic.conf;" >&2
- suggest_report_error
- exit 1
+ test -d "$NIX_ROOT"
+}
+
+test_voldaemon() {
+ test -f "$NIX_VOLUME_MOUNTD_DEST"
+}
+
+generate_mount_command() {
+ local cmd_type="$1" # encrypted|unencrypted
+ local volume_uuid mountpoint cmd=()
+ printf -v volume_uuid "%q" "$2"
+ printf -v mountpoint "%q" "$NIX_ROOT"
+
+ case "$cmd_type" in
+ encrypted)
+ cmd=(/bin/sh -c "/usr/bin/security find-generic-password -s '$volume_uuid' -w | /usr/sbin/diskutil apfs unlockVolume '$volume_uuid' -mountpoint '$mountpoint' -stdinpassphrase");;
+ unencrypted)
+ cmd=(/usr/sbin/diskutil mount -mountPoint "$mountpoint" "$volume_uuid");;
+ *)
+ failure "Invalid first arg $cmd_type to generate_mount_command";;
+ esac
+
+ printf " <string>%s</string>\n" "${cmd[@]}"
+}
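+# For reference, the unencrypted branch above expands to plist entries
+# like the following (hypothetical UUID):
+# <string>/usr/sbin/diskutil</string>
+# <string>mount</string>
+# <string>-mountPoint</string>
+# <string>/nix</string>
+# <string>767F7960-4026-4B3D-A6C2-6D57422D0A2F</string>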
+
+generate_mount_daemon() {
+ local cmd_type="$1" # encrypted|unencrypted
+ local volume_uuid="$2"
+ cat <<EOF
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>RunAtLoad</key>
+ <true/>
+ <key>Label</key>
+ <string>org.nixos.darwin-store</string>
+ <key>ProgramArguments</key>
+ <array>
+$(generate_mount_command "$cmd_type" "$volume_uuid")
+ </array>
+</dict>
+</plist>
+EOF
+}
+
+_eat_bootout_err() {
+ /usr/bin/grep -v "Boot-out failed: 36: Operation now in progress"
+}
+
+# TODO: remove with --uninstall?
+uninstall_launch_daemon_directions() {
+ local daemon_label="$1" # i.e., org.nixos.blah-blah
+ local daemon_plist="$2" # abspath
+ substep "Uninstall LaunchDaemon $daemon_label" \
+ " sudo launchctl bootout system/$daemon_label" \
+ " sudo rm $daemon_plist"
+}
+
+uninstall_launch_daemon_prompt() {
+ local daemon_label="$1" # i.e., org.nixos.blah-blah
+ local daemon_plist="$2" # abspath
+ local reason_for_daemon="$3"
+ cat <<EOF
+
+The installer adds a LaunchDaemon to $reason_for_daemon: $daemon_label
+EOF
+ if ui_confirm "Can I remove it?"; then
+ _sudo "to terminate the daemon" \
+ launchctl bootout "system/$daemon_label" 2> >(_eat_bootout_err >&2) || true
+ # this can "fail" with a message like:
+ # Boot-out failed: 36: Operation now in progress
+ _sudo "to remove the daemon definition" rm "$daemon_plist"
+ fi
+}
+
+nix_volume_mountd_uninstall_directions() {
+ uninstall_launch_daemon_directions "org.nixos.darwin-store" \
+ "$NIX_VOLUME_MOUNTD_DEST"
+}
+
+nix_volume_mountd_uninstall_prompt() {
+ uninstall_launch_daemon_prompt "org.nixos.darwin-store" \
+ "$NIX_VOLUME_MOUNTD_DEST" \
+ "mount your Nix volume"
+}
+
+# TODO: move nix_daemon to install-darwin-multi-user if/when uninstall_launch_daemon_prompt moves up to install-multi-user
+nix_daemon_uninstall_prompt() {
+ uninstall_launch_daemon_prompt "org.nixos.nix-daemon" \
+ "$NIX_DAEMON_DEST" \
+ "run the nix-daemon"
+}
+
+# TODO: remove with --uninstall?
+nix_daemon_uninstall_directions() {
+ uninstall_launch_daemon_directions "org.nixos.nix-daemon" \
+ "$NIX_DAEMON_DEST"
+}
+
+
+# TODO: remove with --uninstall?
+synthetic_conf_uninstall_directions() {
+ # :1 to strip leading slash
+ substep "Remove ${NIX_ROOT:1} from /etc/synthetic.conf" \
+ " If nix is the only entry: sudo rm /etc/synthetic.conf" \
+ " Otherwise: sudo /usr/bin/sed -i '' -e '/^${NIX_ROOT:1}$/d' /etc/synthetic.conf"
+}
+
+synthetic_conf_uninstall_prompt() {
+ cat <<EOF
+
+During install, I add '${NIX_ROOT:1}' to /etc/synthetic.conf, which instructs
+macOS to create an empty root directory for mounting the Nix volume.
+EOF
+ # make the edit to a copy
+ /usr/bin/grep -vE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf > "$SCRATCH/synthetic.conf.edit"
+
+ if test_synthetic_conf_symlinked; then
+ warning <<EOF
+
+/etc/synthetic.conf already contains a line instructing your system
+to make '${NIX_ROOT}' a symlink:
+ $(/usr/bin/grep -nE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf)
+
+This may mean your system has/had a non-standard Nix install.
+
+The volume-creation process in this installer is *not* compatible
+with a symlinked store, so I'll have to remove this instruction to
+continue.
+
+If you want/need to keep this instruction, answer 'n' to abort.
+
+EOF
+ fi
+
+ # ask to rm if this left the file empty aside from comments, else edit
+ if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/synthetic.conf.edit") &>/dev/null; then
+ if confirm_rm "/etc/synthetic.conf"; then
+ if test_nix_root_is_symlink; then
+ failure >&2 <<EOF
+I removed /etc/synthetic.conf, but $NIX_ROOT is already a symlink
+(-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
+Once you've rebooted, run the installer again.
+EOF
+ fi
+ return 0
+ fi
+ else
+ if confirm_edit "$SCRATCH/synthetic.conf.edit" "/etc/synthetic.conf"; then
+ if test_nix_root_is_symlink; then
+ failure >&2 <<EOF
+I edited Nix out of /etc/synthetic.conf, but $NIX_ROOT is already a symlink
+(-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
+Once you've rebooted, run the installer again.
+EOF
+ fi
+ return 0
fi
fi
+ # fallback instructions
+ echo "Manually remove nix from /etc/synthetic.conf"
+ return 1
+}
- if ! test_nix; then
- echo "Creating mountpoint for /nix..." >&2
- create_synthetic_objects # the ones we defined in synthetic.conf
- if ! test_nix; then
- sudo mkdir -p /nix 2>/dev/null || true
+add_nix_vol_fstab_line() {
+ local uuid="$1"
+ # shellcheck disable=SC1003,SC2026
+ local escaped_mountpoint="${NIX_ROOT/ /'\\\'040}"
+ shift
+ EDITOR="/usr/bin/ex" _sudo "to add nix to fstab" "$@" <<EOF
+:a
+UUID=$uuid $escaped_mountpoint apfs rw,noauto,nobrowse,suid,owners
+.
+:x
+EOF
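+ # (The here-doc above is an ex script: ':a' appends the UUID line, '.'
+ # ends the inserted text, and ':x' writes and exits. "$@" is expected
+ # to be an fstab editor such as vifs, which hands the file to $EDITOR.)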
+ # TODO: preserving my notes on suid,owners above until resolved
+ # There *may* be some issue regarding volume ownership, see nix#3156
+ #
+ # It seems like the cheapest fix is adding "suid,owners" to fstab, but:
+ # - We don't have much info on this condition yet
+ # - I'm not certain if these cause other problems?
+ # - There's a "chown" component some people claim to need to fix this
+ # that I don't understand yet
+ # (Note however that I've had to add a chown step to handle
+ # single->multi-user reinstalls, which may cover this)
+ #
+ # I'm not sure if it's safe to approach this way?
+ #
+ # I think the most proper way to test for it is:
+ # diskutil info -plist "$NIX_VOLUME_LABEL" | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1][name()='true']" -; echo $?
+ #
+ # There's also `sudo /usr/sbin/vsdbutil -c /path` (which is much faster, but is also
+ # deprecated and needs minor parsing).
+ #
+ # If no one finds a problem with doing so, I think the simplest approach
+ # is to just eagerly set this. I found a few imperative approaches:
+ # (diskutil enableOwnership, ~100ms), a cheap one (/usr/sbin/vsdbutil -a, ~40-50ms),
+ # a very cheap one (append the internal format to /var/db/volinfo.database).
+ #
+ # But vsdbutil's deprecation notice suggests using fstab, so I want to
+ # give that a whirl first.
+ #
+ # TODO: when this is workable, poke infinisil about reproducing the issue
+ # and confirming this fix?
+}
+
+delete_nix_vol_fstab_line() {
+ # TODO: I'm scaffolding this to handle the new nix volumes
+ # but it might be nice to generalize a smidge further to
+ # go ahead and set up a pattern for curing "old" things
+ # we no longer do?
+ EDITOR="/usr/bin/patch" _sudo "to cut nix from fstab" "$@" < <(/usr/bin/diff /etc/fstab <(/usr/bin/grep -v "$NIX_ROOT apfs rw" /etc/fstab))
+ # leaving some parts out of the grep; people may fiddle with this a little?
+}
+
+# TODO: hope to remove with --uninstall
+fstab_uninstall_directions() {
+ substep "Remove ${NIX_ROOT} from /etc/fstab" \
+ " If nix is the only entry: sudo rm /etc/fstab" \
+ " Otherwise, run 'sudo /usr/sbin/vifs' to remove the nix line"
+}
+
+fstab_uninstall_prompt() {
+ cat <<EOF
+During install, I add '${NIX_ROOT}' to /etc/fstab so that macOS knows what
+mount options to use for the Nix volume.
+EOF
+ cp /etc/fstab "$SCRATCH/fstab.edit"
+ # technically doesn't need the _sudo path, but throwing away the
+ # output is probably better than mostly-duplicating the code...
+ delete_nix_vol_fstab_line patch "$SCRATCH/fstab.edit" &>/dev/null
+
+ # if the test edit made via patch, minus comment lines, is empty (:)
+ if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/fstab.edit") &>/dev/null; then
+ # this edit would leave it empty; propose deleting it
+ if confirm_rm "/etc/fstab"; then
+ return 0
+ else
+ echo "Remove nix from /etc/fstab (or remove the file)"
fi
- if ! test_nix; then
- echo "error: failed to bootstrap /nix; if a reboot doesn't help," >&2
- suggest_report_error
- exit 1
+ else
+ echo "I might be able to help you make this edit. Here's the diff:"
+ if ! _diff "/etc/fstab" "$SCRATCH/fstab.edit" && ui_confirm "Does the change above look right?"; then
+ delete_nix_vol_fstab_line /usr/sbin/vifs
+ else
+ echo "Remove nix from /etc/fstab (or remove the file)"
fi
fi
+}
- disk="$(root_disk_identifier)"
- volume=$(find_nix_volume "$disk")
- if [ -z "$volume" ]; then
- echo "Creating a Nix Store volume..." >&2
-
- if test_filevault_in_use; then
- # TODO: Not sure if it's in-scope now, but `diskutil apfs list`
- # shows both filevault and encrypted at rest status, and it
- # may be the more semantic way to test for this? It'll show
- # `FileVault: No (Encrypted at rest)`
- # `FileVault: No`
- # `FileVault: Yes (Unlocked)`
- # and so on.
- if test_t2_chip_present; then
- echo "warning: boot volume is FileVault-encrypted, but the Nix store volume" >&2
- echo " is only encrypted at rest." >&2
- echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2
+remove_volume() {
+ local volume_special="$1" # (i.e., disk1s7)
+ _sudo "to unmount the Nix volume" \
+ /usr/sbin/diskutil unmount force "$volume_special" || true # might not be mounted
+ _sudo "to delete the Nix volume" \
+ /usr/sbin/diskutil apfs deleteVolume "$volume_special"
+}
+
+# aspiration: robust enough to both fix problems
+# *and* update older darwin volumes
+cure_volume() {
+ local volume_special="$1" # (i.e., disk1s7)
+ local volume_uuid="$2"
+ header "Found existing Nix volume"
+ row " special" "$volume_special"
+ row " uuid" "$volume_uuid"
+
+ if volume_encrypted "$volume_special"; then
+ row "encrypted" "yes"
+ if volume_pass_works "$volume_special" "$volume_uuid"; then
+ NIX_VOLUME_DO_ENCRYPT=0
+ ok "Found a working decryption password in keychain :)"
+ echo ""
+ else
+ # - this is a volume we made, and
+ #   - the user encrypted it on their own
+ #   - something deleted the credential
+ # - this is an old or BYO volume and the pw
+ #   just isn't somewhere we can find it.
+ #
+ # We're going to explain why we're freaking out
+ # and prompt them to either delete the volume
+ # (requiring a sudo auth), or abort to fix
+ warning <<EOF
+
+This volume is encrypted, but I don't see a password to decrypt it.
+The quick fix is to let me delete this volume and make you a new one.
+If that's okay, enter your (sudo) password to continue. If not, you
+can ensure the decryption password is in your system keychain with a
+"Where" (service) field set to this volume's UUID:
+ $volume_uuid
+EOF
+ if password_confirm "delete this volume"; then
+ remove_volume "$volume_special"
else
- echo "error: refusing to create Nix store volume because the boot volume is" >&2
- echo " FileVault encrypted, but encryption-at-rest is not available." >&2
- echo " Manually create a volume for the store and re-run this script." >&2
- echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2
- exit 1
+ # TODO: this is a good design case for a warn-and
+ # remind idiom...
+ failure <<EOF
+Your Nix volume is encrypted, but I couldn't find its password. Either:
+- Delete or rename the volume out of the way
+- Ensure its decryption password is in the system keychain with a
+ "Where" (service) field set to this volume's UUID:
+ $volume_uuid
+EOF
+ fi
+ fi
+ elif test_filevault_in_use; then
+ row "encrypted" "no"
+ warning <<EOF
+FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted.
+EOF
+ # if we're interactive, give them a chance to
+ # encrypt the volume. If not, /shrug
+ if ! headless && (( NIX_VOLUME_DO_ENCRYPT == 1 )); then
+ if ui_confirm "Should I encrypt it and add the decryption key to your keychain?"; then
+ encrypt_volume "$volume_uuid" "$NIX_VOLUME_LABEL"
+ NIX_VOLUME_DO_ENCRYPT=0
+ else
+ NIX_VOLUME_DO_ENCRYPT=0
+ reminder "FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted."
fi
fi
-
- sudo diskutil apfs addVolume "$disk" APFS 'Nix Store' -mountpoint /nix
- volume="Nix Store"
else
- echo "Using existing '$volume' volume" >&2
+ row "encrypted" "no"
+ fi
+}
+
+remove_volume_artifacts() {
+ if test_synthetic_conf_either; then
+ # NIX_ROOT is in synthetic.conf
+ if synthetic_conf_uninstall_prompt; then
+ # TODO: moot until we tackle uninstall, but when we're
+ # actually uninstalling, we should issue:
+ # reminder "macOS will clean up the empty mount-point directory at $NIX_ROOT on reboot."
+ :
+ fi
+ fi
+ if test_fstab; then
+ fstab_uninstall_prompt
+ fi
+
+ if test_nix_volume_mountd_installed; then
+ nix_volume_mountd_uninstall_prompt
+ fi
+}
+
+setup_synthetic_conf() {
+ if test_nix_root_is_symlink; then
+ if ! test_synthetic_conf_symlinked; then
+ failure >&2 <<EOF
+error: $NIX_ROOT is a symlink (-> $(readlink "$NIX_ROOT")).
+Please remove it. If nix is in /etc/synthetic.conf, remove it and reboot.
+EOF
+ fi
+ fi
+ if ! test_synthetic_conf_mountable; then
+ task "Configuring /etc/synthetic.conf to make a mount-point at $NIX_ROOT" >&2
+ # technically /etc/synthetic.d/nix is supported in Big Sur+
+ # but handling both takes even more code...
+ _sudo "to add Nix to /etc/synthetic.conf" \
+ /usr/bin/ex /etc/synthetic.conf <<EOF
+:a
+${NIX_ROOT:1}
+.
+:x
+EOF
+ if ! test_synthetic_conf_mountable; then
+ failure "error: failed to configure synthetic.conf" >&2
+ fi
+ create_synthetic_objects
+ if ! test_nix; then
+ failure >&2 <<EOF
+error: failed to bootstrap $NIX_ROOT
+If you enabled FileVault after booting, this is likely a known issue
+with macOS that you'll have to reboot to fix. If you didn't enable FV,
+though, please open an issue describing how the system that you see
+this error on was set up.
+EOF
+ fi
fi
+}
+setup_fstab() {
+ local volume_uuid="$1"
+ # fstab used to be responsible for mounting the volume. Now the last
+ # step adds a LaunchDaemon responsible for mounting. This is technically
+ # redundant for mounting, but diskutil appears to pick up mount options
+ # from fstab (and diskutil's support for specifying them directly is not
+ # consistent across versions/subcommands).
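+ #
+ # For reference, a sketch of the entry this writes (assuming
+ # add_nix_vol_fstab_line keeps its current shape):
+ #   UUID=<volume-uuid> /nix apfs rw,noauto,nobrowse,suid,owners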
if ! test_fstab; then
- echo "Configuring /etc/fstab..." >&2
- label=$(echo "$volume" | sed 's/ /\\040/g')
- # shellcheck disable=SC2209
- printf "\$a\nLABEL=%s /nix apfs rw,nobrowse\n.\nwq\n" "$label" | EDITOR=ed sudo vifs
+ task "Configuring /etc/fstab to specify volume mount options" >&2
+ add_nix_vol_fstab_line "$volume_uuid" /usr/sbin/vifs
fi
}
-main "$@"
+encrypt_volume() {
+ local volume_uuid="$1"
+ local volume_label="$2"
+ local password
+ # Note: mount/unmount are late additions to support the right order
+ # of operations for creating the volume and then baking its uuid into
+ # other artifacts; not as well-trod wrt to potential errors, race
+ # conditions, etc.
+
+ /usr/sbin/diskutil mount "$volume_label"
+
+ password="$(/usr/bin/xxd -l 32 -p -c 256 /dev/random)"
+ _sudo "to add your Nix volume's password to Keychain" \
+ /usr/bin/security -i <<EOF
+add-generic-password -a "$volume_label" -s "$volume_uuid" -l "$volume_label encryption password" -D "Encrypted volume password" -j "Added automatically by the Nix installer for use by $NIX_VOLUME_MOUNTD_DEST" -w "$password" -T /System/Library/CoreServices/APFSUserAgent -T /System/Library/CoreServices/CSUserAgent -T /usr/bin/security "/Library/Keychains/System.keychain"
+EOF
+ builtin printf "%s" "$password" | _sudo "to encrypt your Nix volume" \
+ /usr/sbin/diskutil apfs encryptVolume "$volume_label" -user disk -stdinpassphrase
+
+ /usr/sbin/diskutil unmount force "$volume_label"
+}
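+# For context, the LaunchDaemon generated by generate_mount_daemon later
+# retrieves this password along these lines (a sketch, not the verbatim
+# daemon command):
+#   /usr/bin/security find-generic-password -s "$volume_uuid" -w \
+#     | /usr/sbin/diskutil apfs unlockVolume "$volume_special" -stdinpassphrase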
+
+create_volume() {
+ # Notes:
+ # 1) using `-nomount` instead of `-mountpoint "$NIX_ROOT"` to get
+ # its UUID and set mount opts in fstab before first mount
+ #
+ # 2) system is in some sense less secure than user keychain... (it's
+ # possible to read the password for decrypting the keychain) but
+ # the user keychain appears to be available too late. As far as I
+ # can tell, the file with this password (/var/db/SystemKey) is
+ # inside the FileVault envelope. If that isn't true, it may make
+ # sense to store the password inside the envelope?
+ #
+ # 3) At some point it would be ideal to have a small binary to serve
+ # as the daemon itself, and for it to replace /usr/bin/security here.
+ #
+ # 4) *UserAgent exemptions should let the system seamlessly supply the
+ # password if noauto is removed from fstab entry. This is intentional;
+ # the user will hopefully look for help if the volume stops mounting,
+ # rather than failing over into subtle race-condition problems.
+ #
+ # 5) If we ever get users griping about not having space to do
+ # anything useful with Nix, it is possible to specify
+ # `-reserve 10g` or something, which will fail w/o that much
+ #
+ # 6) getting special w/ awk may be fragile, but doing it to:
+ # - save time over running slow diskutil commands
+ # - skirt risk we grab wrong volume if multiple match
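+ #
+ # e.g., on success diskutil prints a line like (an assumption based
+ # on current diskutil phrasing):
+ #   Created new APFS Volume disk1s7
+ # so awk's $5 is the new volume's special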
+ _sudo "to create a new APFS volume '$NIX_VOLUME_LABEL' on $NIX_VOLUME_USE_DISK" \
+ /usr/sbin/diskutil apfs addVolume "$NIX_VOLUME_USE_DISK" "$NIX_VOLUME_FS" "$NIX_VOLUME_LABEL" -nomount | /usr/bin/awk '/Created new APFS Volume/ {print $5}'
+}
+
+volume_uuid_from_special() {
+ local volume_special="$1" # (i.e., disk1s7)
+ # For reasons I won't pretend to fathom, this returns 253 when it works
+ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -k "$volume_special" || true
+}
+
+# the mount sometimes shows up immediately, and AFAIK it shows up
+# within about 1s. diskutil info on an unmounted path
+# fails in around 50-100ms and a match takes about
+# 250-300ms. I suspect the wait is usually ~250-750ms
+await_volume() {
+ # caution: this could, in theory, get stuck
+ until /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null; do
+ :
+ done
+}
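+# A bounded variant of the loop above, if the hang risk ever bites (a
+# sketch; the ~60s cap and 0.1s granularity are arbitrary assumptions):
+#   for _ in $(seq 1 600); do
+#       /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null && return 0
+#       sleep 0.1
+#   done
+#   failure "timed out waiting for $NIX_ROOT volume to mount"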
+
+setup_volume() {
+ local use_special use_uuid profile_packages
+ task "Creating a Nix volume" >&2
+
+ use_special="${NIX_VOLUME_USE_SPECIAL:-$(create_volume)}"
+
+ _sudo "to ensure the Nix volume is not mounted" \
+ /usr/sbin/diskutil unmount force "$use_special" || true # might not be mounted
+
+ use_uuid=${NIX_VOLUME_USE_UUID:-$(volume_uuid_from_special "$use_special")}
+
+ setup_fstab "$use_uuid"
+
+ if should_encrypt_volume; then
+ encrypt_volume "$use_uuid" "$NIX_VOLUME_LABEL"
+ setup_volume_daemon "encrypted" "$use_uuid"
+ # TODO: might be able to save ~60ms by caching or setting
+ # this somewhere rather than re-checking here.
+ elif volume_encrypted "$use_special"; then
+ setup_volume_daemon "encrypted" "$use_uuid"
+ else
+ setup_volume_daemon "unencrypted" "$use_uuid"
+ fi
+
+ await_volume
+
+ if [ "$(/usr/sbin/diskutil info -plist "$NIX_ROOT" | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1]" -)" = "<false/>" ]; then
+ _sudo "to set enableOwnership (enabling users to own files)" \
+ /usr/sbin/diskutil enableOwnership "$NIX_ROOT"
+ fi
+
+ # TODO: below is a vague kludge for now; I just don't know
+ # what if any safe action there is to take here. Also, the
+ # reminder isn't very helpful.
+ # I'm less sure where this belongs, but it also wants the volume mounted, pre-install
+ if type -p nix-env; then
+ profile_packages="$(nix-env --query --installed)"
+ # TODO: can probably do below faster w/ read
+ # intentionally unquoted string to eat whitespace in wc output
+ # shellcheck disable=SC2046,SC2059
+ if ! [ $(printf "$profile_packages" | /usr/bin/wc -l) = "0" ]; then
+ reminder <<EOF
+Nix now supports only multi-user installs on Darwin/macOS, and your user's
+Nix profile has some packages in it. These packages may obscure those in the
+default profile, including the Nix this installer will add. You should
+review these packages:
+$profile_packages
+EOF
+ fi
+ fi
+
+}
+
+setup_volume_daemon() {
+ local cmd_type="$1" # encrypted|unencrypted
+ local volume_uuid="$2"
+ if ! test_voldaemon; then
+ task "Configuring LaunchDaemon to mount '$NIX_VOLUME_LABEL'" >&2
+ _sudo "to install the Nix volume mounter" /usr/bin/ex "$NIX_VOLUME_MOUNTD_DEST" <<EOF
+:a
+$(generate_mount_daemon "$cmd_type" "$volume_uuid")
+.
+:x
+EOF
+
+ # TODO: should probably alert the user if this is disabled?
+ _sudo "to launch the Nix volume mounter" \
+ launchctl bootstrap system "$NIX_VOLUME_MOUNTD_DEST" || true
+ # TODO: confirm whether kickstart is necessary?
+ # I feel a little superstitious, but it can guard
+ # against multiple problems (doesn't start, old
+ # version still running for some reason...)
+ _sudo "to launch the Nix volume mounter" \
+ launchctl kickstart -k system/org.nixos.darwin-store
+ fi
+}
+
+setup_darwin_volume() {
+ setup_synthetic_conf
+ setup_volume
+}
+
+if [ "$_CREATE_VOLUME_NO_MAIN" = 1 ]; then
+ if [ -n "$*" ]; then
+ "$@" # expose functions in case we want multiple routines?
+ fi
+else
+ # no reason to pay for bash to process this
+ main() {
+ {
+ echo ""
+ echo " ------------------------------------------------------------------ "
+ echo " | This installer will create a volume for the nix store and |"
+ echo " | configure it to mount at $NIX_ROOT. Follow these steps to uninstall. |"
+ echo " ------------------------------------------------------------------ "
+ echo ""
+ echo " 1. Remove the entry from fstab using 'sudo /usr/sbin/vifs'"
+ echo " 2. Run 'sudo launchctl bootout system/org.nixos.darwin-store'"
+ echo " 3. Remove $NIX_VOLUME_MOUNTD_DEST"
+ echo " 4. Destroy the data volume using '/usr/sbin/diskutil apfs deleteVolume'"
+ echo " 5. Remove the 'nix' line from /etc/synthetic.conf (or the file)"
+ echo ""
+ } >&2
+
+ setup_darwin_volume
+ }
+
+ main "$@"
+fi
diff --git a/scripts/install-darwin-multi-user.sh b/scripts/install-darwin-multi-user.sh
index f6575ae2f..96eba8310 100644
--- a/scripts/install-darwin-multi-user.sh
+++ b/scripts/install-darwin-multi-user.sh
@@ -3,59 +3,110 @@
set -eu
set -o pipefail
-readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
+readonly NIX_DAEMON_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
+# create by default; set 0 to DIY, use a symlink, etc.
+readonly NIX_VOLUME_CREATE=${NIX_VOLUME_CREATE:-1} # now default
NIX_FIRST_BUILD_UID="301"
NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
+# caution: may update times on / if run as root rather than a normal user
+read_only_root() {
+ # this touch command ~should~ always produce an error
+ # as of this change I confirmed /usr/bin/touch emits:
+ # "touch: /: Operation not permitted" Monterey
+ # "touch: /: Read-only file system" Catalina+ and Big Sur
+ # "touch: /: Permission denied" Mojave
+ # (matching only the message suffix, not the prefix, for compat w/
+ # coreutils touch in case using an explicit path causes problems;
+ # its prefix differs)
+ case "$(/usr/bin/touch / 2>&1)" in
+ *"Read-only file system") # Catalina, Big Sur
+ return 0
+ ;;
+ *"Operation not permitted") # Monterey
+ return 0
+ ;;
+ *)
+ return 1
+ ;;
+ esac
+
+ # Avoiding the slow semantic way to get this information (~330ms vs ~8ms)
+ # unless using touch causes problems. Just in case, that approach is:
+ # diskutil info -plist / | <find the Writable or WritableVolume keys>, i.e.
+ # diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -
+}
+
+if read_only_root && [ "$NIX_VOLUME_CREATE" = 1 ]; then
+ should_create_volume() { return 0; }
+else
+ should_create_volume() { return 1; }
+fi
+
+# shellcheck source=./create-darwin-volume.sh
+. "$EXTRACTED_NIX_PATH/create-darwin-volume.sh" "no-main"
+
dsclattr() {
/usr/bin/dscl . -read "$1" \
- | awk "/$2/ { print \$2 }"
+ | /usr/bin/awk "/$2/ { print \$2 }"
+}
+
+test_nix_daemon_installed() {
+ test -e "$NIX_DAEMON_DEST"
}
-poly_validate_assumptions() {
- if [ "$(uname -s)" != "Darwin" ]; then
- failure "This script is for use with macOS!"
+poly_cure_artifacts() {
+ if should_create_volume; then
+ task "Fixing any leftover Nix volume state"
+ cat <<EOF
+Before I try to install, I'll check for any existing Nix volume config
+and ask for your permission to remove it (so that the installer can
+start fresh). I'll also ask for permission to fix any issues I spot.
+EOF
+ cure_volumes
+ remove_volume_artifacts
fi
}
poly_service_installed_check() {
- [ -e "$PLIST_DEST" ]
+ if should_create_volume; then
+ test_nix_daemon_installed || test_nix_volume_mountd_installed
+ else
+ test_nix_daemon_installed
+ fi
}
poly_service_uninstall_directions() {
- cat <<EOF
-$1. Delete $PLIST_DEST
-
- sudo launchctl unload $PLIST_DEST
- sudo rm $PLIST_DEST
-
-EOF
+ echo "$1. Remove macOS-specific components:"
+ if should_create_volume && test_nix_volume_mountd_installed; then
+ nix_volume_mountd_uninstall_directions
+ fi
+ if test_nix_daemon_installed; then
+ nix_daemon_uninstall_directions
+ fi
}
poly_service_setup_note() {
- cat <<EOF
- - load and start a LaunchDaemon (at $PLIST_DEST) for nix-daemon
-
-EOF
+ if should_create_volume; then
+ echo " - create a Nix volume and a LaunchDaemon to mount it"
+ fi
+ echo " - create a LaunchDaemon (at $NIX_DAEMON_DEST) for nix-daemon"
+ echo ""
}
-poly_extra_try_me_commands(){
- :
-}
-poly_extra_setup_instructions(){
- :
+poly_extra_try_me_commands() {
+ :
}
poly_configure_nix_daemon_service() {
+ task "Setting up the nix-daemon LaunchDaemon"
_sudo "to set up the nix-daemon as a LaunchDaemon" \
- cp -f "/nix/var/nix/profiles/default$PLIST_DEST" "$PLIST_DEST"
+ /bin/cp -f "/nix/var/nix/profiles/default$NIX_DAEMON_DEST" "$NIX_DAEMON_DEST"
_sudo "to load the LaunchDaemon plist for nix-daemon" \
launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist
_sudo "to start the nix-daemon" \
- launchctl start org.nixos.nix-daemon
-
+ launchctl kickstart -k system/org.nixos.nix-daemon
}
poly_group_exists() {
@@ -96,6 +147,8 @@ poly_user_home_get() {
}
poly_user_home_set() {
+ # This can trigger a permission prompt now:
+ # "Terminal" would like to administer your computer. Administration can include modifying passwords, networking, and system settings.
_sudo "in order to give $1 a safe home directory" \
/usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2"
}
@@ -121,7 +174,7 @@ poly_user_shell_set() {
poly_user_in_group_check() {
username=$1
group=$2
- dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1
+ /usr/sbin/dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1
}
poly_user_in_group_set() {
@@ -151,3 +204,21 @@ poly_create_build_user() {
/usr/bin/dscl . create "/Users/$username" \
UniqueID "${uid}"
}
+
+poly_prepare_to_install() {
+ if should_create_volume; then
+ header "Preparing a Nix volume"
+ # intentional indent below to match task indent
+ cat <<EOF
+ Nix traditionally stores its data in the root directory $NIX_ROOT, but
+ macOS now (starting in 10.15 Catalina) has a read-only root directory.
+ To support Nix, I will create a volume and configure macOS to mount it
+ at $NIX_ROOT.
+EOF
+ setup_darwin_volume
+ fi
+
+ if [ "$(diskutil info -plist /nix | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1]" -)" = "<false/>" ]; then
+ failure "This script needs a /nix volume with global permissions! This may require running sudo diskutil enableOwnership /nix."
+ fi
+}
diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh
index 66825f9de..0dba36f51 100644
--- a/scripts/install-multi-user.sh
+++ b/scripts/install-multi-user.sh
@@ -33,7 +33,7 @@ NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
readonly NIX_ROOT="/nix"
readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-}
-readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv" "/etc/bash.bashrc" "/etc/zsh/zshenv")
+readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc" "/etc/bash.bashrc" "/etc/zsh/zshrc")
readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix"
readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
@@ -43,7 +43,7 @@ readonly NIX_INSTALLED_CACERT="@cacert@"
#readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2"
readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
-readonly ROOT_HOME=$(echo ~root)
+readonly ROOT_HOME=~root
if [ -t 0 ]; then
readonly IS_HEADLESS='no'
@@ -59,14 +59,19 @@ headless() {
fi
}
-contactme() {
- echo "We'd love to help if you need it."
+contact_us() {
+ echo "You can open an issue at https://github.com/nixos/nix/issues"
echo ""
- echo "If you can, open an issue at https://github.com/nixos/nix/issues"
+ echo "Or feel free to contact the team:"
+ echo " - Matrix: #nix:nixos.org"
+ echo " - IRC: in #nixos on irc.libera.chat"
+ echo " - twitter: @nixos_org"
+ echo " - forum: https://discourse.nixos.org"
+}
+get_help() {
+ echo "We'd love to help if you need it."
echo ""
- echo "Or feel free to contact the team,"
- echo " - on IRC #nixos on irc.freenode.net"
- echo " - on twitter @nixos_org"
+ contact_us
}
uninstall_directions() {
@@ -102,7 +107,6 @@ $step. Delete the files Nix added to your system:
and that is it.
EOF
-
}
nix_user_for_core() {
@@ -170,7 +174,7 @@ failure() {
header "oh no!"
_textout "$RED" "$@"
echo ""
- _textout "$RED" "$(contactme)"
+ _textout "$RED" "$(get_help)"
trap finish_cleanup EXIT
exit 1
}
@@ -201,6 +205,95 @@ ui_confirm() {
return 1
}
+printf -v _UNCHANGED_GRP_FMT "%b" $'\033[2m%='"$ESC" # "dim"
+# bold+invert+red and bold+invert+green just for the +/- below
+# red/green foreground for rest of the line
+printf -v _OLD_LINE_FMT "%b" $'\033[1;7;31m-'"$ESC ${RED}%L${ESC}"
+printf -v _NEW_LINE_FMT "%b" $'\033[1;7;32m+'"$ESC ${GREEN}%L${ESC}"
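+# (%= and %L are diff's group/line format escapes: %= expands to the
+# unchanged group and %L to the input line; see diff(1))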
+
+_diff() {
+ # simple colorized diff compatible w/ pre-`--color` versions
+ diff --unchanged-group-format="$_UNCHANGED_GRP_FMT" --old-line-format="$_OLD_LINE_FMT" --new-line-format="$_NEW_LINE_FMT" --unchanged-line-format=" %L" "$@"
+}
+
+confirm_rm() {
+ local path="$1"
+ if ui_confirm "Can I remove $path?"; then
+ _sudo "to remove $path" rm "$path"
+ fi
+}
+
+confirm_edit() {
+ local path="$1"
+ local edit_path="$2"
+ cat <<EOF
+
+Nix isn't the only thing in $path,
+but I think I know how to edit it out.
+Here's the diff:
+EOF
+
+ # could technically test the diff, but caller should do it
+ _diff "$path" "$edit_path"
+ if ui_confirm "Does the change above look right?"; then
+ _sudo "remove nix from $path" cp "$edit_path" "$path"
+ fi
+}
+
+_SERIOUS_BUSINESS="${RED}%s:${ESC} "
+password_confirm() {
+ local do_something_consequential="$1"
+ if ui_confirm "Can I $do_something_consequential?"; then
+ # shellcheck disable=SC2059
+ sudo -kv --prompt="$(printf "${_SERIOUS_BUSINESS}" "Enter your password to $do_something_consequential")"
+ else
+ return 1
+ fi
+}
+
+# Support accumulating reminders over the course of a run and showing
+# them at the end. An example where this helps: the installer changes
+# something, but it won't work without a reboot. If you tell the user
+# when you do it, they may miss it in the stream. The value of the
+# setting isn't enough to decide whether to message because you only
+# need to message if you *changed* it.
+
+# reminders stored in array delimited by empty entry; if ! headless,
+# user is asked to confirm after each delimiter.
+_reminders=()
+((_remind_num=1))
+
+remind() {
+ # (( arithmetic expression ))
+ if (( _remind_num > 1 )); then
+ header "Reminders"
+ for line in "${_reminders[@]}"; do
+ echo "$line"
+ if ! headless && [ "${#line}" = 0 ]; then
+ if read -r -p "Press enter/return to acknowledge."; then
+ printf $'\033[A\33[2K\r'
+ fi
+ fi
+ done
+ fi
+}
+
+reminder() {
+ printf -v label "${BLUE}[ %d ]${ESC}" "$_remind_num"
+ _reminders+=("$label")
+ if [[ "$*" = "" ]]; then
+ while read -r line; do
+ _reminders+=("$line")
+ done
+ else
+ # this expands each arg to an array entry (and each entry will
+ # ultimately be a separate line in the output)
+ _reminders+=("$@")
+ fi
+ _reminders+=("")
+ ((_remind_num++))
+}
+
__sudo() {
local expl="$1"
local cmd="$2"
@@ -221,18 +314,18 @@ _sudo() {
local expl="$1"
shift
if ! headless; then
- __sudo "$expl" "$*"
+ __sudo "$expl" "$*" >&2
fi
sudo "$@"
}
-readonly SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX)
-function finish_cleanup {
+readonly SCRATCH=$(mktemp -d "${TMPDIR:-/tmp/}tmp.XXXXXXXXXX")
+finish_cleanup() {
rm -rf "$SCRATCH"
}
-function finish_fail {
+finish_fail() {
finish_cleanup
failure <<EOF
@@ -244,45 +337,46 @@ EOF
}
trap finish_fail EXIT
-channel_update_failed=0
-function finish_success {
- finish_cleanup
-
+finish_success() {
ok "Alright! We're done!"
- if [ "x$channel_update_failed" = x1 ]; then
- echo ""
- echo "But fetching the nixpkgs channel failed. (Are you offline?)"
- echo "To try again later, run \"sudo -i nix-channel --update nixpkgs\"."
- fi
cat <<EOF
-
-Before Nix will work in your existing shells, you'll need to close
-them and open them again. Other than that, you should be ready to go.
-
Try it! Open a new terminal, and type:
$(poly_extra_try_me_commands)
$ nix-shell -p nix-info --run "nix-info -m"
-$(poly_extra_setup_instructions)
-Thank you for using this installer. If you have any feedback, don't
-hesitate:
-$(contactme)
-EOF
+Thank you for using this installer. If you have any feedback or need
+help, don't hesitate:
+$(contact_us)
+EOF
+ remind
+ finish_cleanup
}
+finish_uninstall_success() {
+ ok "Alright! Nix should be removed!"
-validate_starting_assumptions() {
- poly_validate_assumptions
+ cat <<EOF
+If you spot anything this uninstaller missed or have feedback,
+don't hesitate:
- if [ $EUID -eq 0 ]; then
- failure <<EOF
-Please do not run this script with root privileges. We will call sudo
-when we need to.
+$(contact_us)
EOF
- fi
+ remind
+ finish_cleanup
+}
+
+remove_nix_artifacts() {
+ failure "Not implemented yet"
+}
+
+cure_artifacts() {
+ poly_cure_artifacts
+ # remove_nix_artifacts (LATER)
+}
+validate_starting_assumptions() {
if type nix-env 2> /dev/null >&2; then
warning <<EOF
Nix already appears to be installed. This installer may run into issues.
@@ -293,19 +387,28 @@ EOF
fi
for profile_target in "${PROFILE_TARGETS[@]}"; do
+ # TODO: I think it would be good to accumulate a list of all
+ # of the copies so that people don't hit this 2 or 3x in
+ # a row for different files.
if [ -e "$profile_target$PROFILE_BACKUP_SUFFIX" ]; then
+ # this backup process first released in Nix 2.1
failure <<EOF
-When this script runs, it backs up the current $profile_target to
-$profile_target$PROFILE_BACKUP_SUFFIX. This backup file already exists, though.
+I back up shell profile/rc scripts before I add Nix to them.
+I need to back up $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX,
+but the latter already exists.
-Please follow these instructions to clean up the old backup file:
+Here's how to clean up the old backup file:
-1. Copy $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX to another place, just
-in case.
+1. Back up (copy) $profile_target and $profile_target$PROFILE_BACKUP_SUFFIX
+ to another location, just in case.
-2. Take care to make sure that $profile_target$PROFILE_BACKUP_SUFFIX doesn't look like
-it has anything nix-related in it. If it does, something is probably
-quite wrong. Please open an issue or get in touch immediately.
+2. Ensure $profile_target$PROFILE_BACKUP_SUFFIX does not have anything
+ Nix-related in it. If it does, something is probably quite
+ wrong. Please open an issue or get in touch immediately.
+
+3. Once you confirm $profile_target is backed up and
+ $profile_target$PROFILE_BACKUP_SUFFIX doesn't mention Nix, run:
+ mv $profile_target$PROFILE_BACKUP_SUFFIX $profile_target
EOF
fi
done
@@ -444,18 +547,46 @@ create_build_users() {
create_directories() {
# FIXME: remove all of this because it duplicates LocalStore::LocalStore().
-
+ task "Setting up the basic directory structure"
+ if [ -d "$NIX_ROOT" ]; then
+ # if /nix already exists, take ownership
+ #
+ # Caution: notes below are macOS-y
+ # This is a bit of a goldilocks zone for taking ownership
+ # if there are already files on the volume; the volume is
+ # now mounted, but we haven't added a bunch of new files
+
+ # this is probably a bit slow; I've been seeing 3.3-4s even
+ # when promptly installed over a fresh single-user install.
+ # Mentioning it in case anyone's aware of a shortcut.
+ # `|| true`: .Trashes errors w/o full disk perm
+
+ # rumor per #4488 that macOS 11.2 may not have
+ # sbin on path, and that's where chown is, but
+ # since this bit is cross-platform:
+ # - first try with `command -vp` to try and find
+ # chown in the usual places
+ # - fall back on `command -v` which would find
+ # any chown on path
+ # if we don't find one, the command is already
+ # hiding behind || true, and the general state
+ # should be one the user can repair once they
+ # figure out where chown is...
+ local get_chr_own="$(command -vp chown)"
+ if [[ -z "$get_chr_own" ]]; then
+ get_chr_own="$(command -v chown)"
+ fi
+ _sudo "to take root ownership of existing Nix store files" \
+ "$get_chr_own" -R "root:$NIX_BUILD_GROUP_NAME" "$NIX_ROOT" || true
+ fi
_sudo "to make the basic directory structure of Nix (part 1)" \
- mkdir -pv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user
+ install -dv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user
_sudo "to make the basic directory structure of Nix (part 2)" \
- mkdir -pv -m 1775 /nix/store
-
- _sudo "to make the basic directory structure of Nix (part 3)" \
- chgrp "$NIX_BUILD_GROUP_NAME" /nix/store
+ install -dv -g "$NIX_BUILD_GROUP_NAME" -m 1775 /nix/store
_sudo "to place the default nix daemon configuration (part 1)" \
- mkdir -pv -m 0555 /etc/nix
+ install -dv -m 0555 /etc/nix
}
place_channel_configuration() {
@@ -475,9 +606,9 @@ This installation tool will set up your computer with the Nix package
manager. This will happen in a few stages:
1. Make sure your computer doesn't already have Nix. If it does, I
- will show you instructions on how to clean up your old one.
+ will show you instructions on how to clean up your old install.
-2. Show you what we are going to install and where. Then we will ask
+2. Show you what I am going to install and where. Then I will ask
if you are ready to continue.
3. Create the system users and groups that the Nix daemon uses to run
@@ -492,14 +623,14 @@ manager. This will happen in a few stages:
EOF
- if ui_confirm "Would you like to see a more detailed list of what we will do?"; then
+ if ui_confirm "Would you like to see a more detailed list of what I will do?"; then
cat <<EOF
-We will:
+I will:
- make sure your computer doesn't already have Nix files
(if it does, I will tell you how to clean them up.)
- - create local users (see the list above for the users we'll make)
+ - create local users (see the list above for the users I'll make)
- create a local group ($NIX_BUILD_GROUP_NAME)
- install Nix in to $NIX_ROOT
- create a configuration file in /etc/nix
@@ -534,7 +665,7 @@ run in a headless fashion, like this:
$ curl -L https://nixos.org/nix/install | sh
-or maybe in a CI pipeline. Because of that, we're going to skip the
+or maybe in a CI pipeline. Because of that, I'm going to skip the
verbose output in the interest of brevity.
If you would like to
@@ -548,7 +679,7 @@ EOF
fi
cat <<EOF
-This script is going to call sudo a lot. Every time we do, it'll
+This script is going to call sudo a lot. Every time I do, it'll
output exactly what it'll do, and why.
Just like this:
@@ -560,25 +691,29 @@ EOF
cat <<EOF
This might look scary, but everything can be undone by running just a
-few commands. We used to ask you to confirm each time sudo ran, but it
+few commands. I used to ask you to confirm each time sudo ran, but it
was too many times. Instead, I'll just ask you this one time:
EOF
- if ui_confirm "Can we use sudo?"; then
+ if ui_confirm "Can I use sudo?"; then
ok "Yay! Thanks! Let's get going!"
else
failure <<EOF
-That is okay, but we can't install.
+That is okay, but I can't install.
EOF
fi
}
install_from_extracted_nix() {
+ task "Installing Nix"
(
cd "$EXTRACTED_NIX_PATH"
_sudo "to copy the basic Nix files to the new store at $NIX_ROOT/store" \
- rsync -rlpt --chmod=-w ./store/* "$NIX_ROOT/store/"
+ cp -RLp ./store/* "$NIX_ROOT/store/"
+
+ _sudo "to make the new store non-writable at $NIX_ROOT/store" \
+ chmod -R ugo-w "$NIX_ROOT/store/"
if [ -d "$NIX_INSTALLED_NIX" ]; then
echo " Alright! We have our first nix at $NIX_INSTALLED_NIX"
@@ -589,9 +724,8 @@ $NIX_INSTALLED_NIX.
EOF
fi
- cat ./.reginfo \
- | _sudo "to load data for the first time in to the Nix Database" \
- "$NIX_INSTALLED_NIX/bin/nix-store" --load-db
+ _sudo "to load data for the first time in to the Nix Database" \
+ "$NIX_INSTALLED_NIX/bin/nix-store" --load-db < ./.reginfo
echo " Just finished getting the nix database ready."
)
@@ -610,6 +744,7 @@ EOF
}
configure_shell_profile() {
+ task "Setting up shell profiles: ${PROFILE_TARGETS[*]}"
for profile_target in "${PROFILE_TARGETS[@]}"; do
if [ -e "$profile_target" ]; then
_sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \
@@ -629,14 +764,27 @@ configure_shell_profile() {
tee -a "$profile_target"
fi
done
+ # TODO: should we suggest '. $PROFILE_NIX_FILE'? It would get them on
+ # their way less disruptively, but a counter-argument is that they won't
+ # immediately notice if something didn't get set up right?
+ reminder "Nix won't work in active shell sessions until you restart them."
+}
+
+cert_in_store() {
+ # in a subshell
+ # - change into the cert-file dir
+ # - get the physical pwd
+ # and test if this path is in the Nix store
+ [[ "$(cd -- "$(dirname "$NIX_SSL_CERT_FILE")" && exec pwd -P)" == "$NIX_ROOT/store/"* ]]
}
setup_default_profile() {
- _sudo "to installing a bootstrapping Nix in to the default Profile" \
+ task "Setting up the default profile"
+ _sudo "to install a bootstrapping Nix in to the default profile" \
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX"
- if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ]; then
- _sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \
+ if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ] || cert_in_store; then
+ _sudo "to install a bootstrapping SSL certificate just for Nix in to the default profile" \
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
fi
@@ -645,9 +793,13 @@ setup_default_profile() {
# Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
# otherwise it will be lost in environments where sudo doesn't pass
# all the environment variables by default.
- _sudo "to update the default channel in the default profile" \
- HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \
- || channel_update_failed=1
+ if ! _sudo "to update the default channel in the default profile" \
+ HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs; then
+ reminder <<EOF
+I had trouble fetching the nixpkgs channel (are you offline?)
+To try again later, run: sudo -i nix-channel --update nixpkgs
+EOF
+ fi
fi
}
@@ -662,6 +814,17 @@ EOF
}
main() {
+ # TODO: I've moved this out of validate_starting_assumptions so we
+ # can fail faster in this case. Sourcing install-darwin... now runs
+ # `touch /` to detect Read-only root, but it could update times on
+ # pre-Catalina macOS if run as root user.
+ if [ "$EUID" -eq 0 ]; then
+ failure <<EOF
+Please do not run this script with root privileges. I will call sudo
+when I need to.
+EOF
+ fi
+
if [ "$(uname -s)" = "Darwin" ]; then
# shellcheck source=./install-darwin-multi-user.sh
. "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"
@@ -675,17 +838,24 @@ main() {
welcome_to_nix
chat_about_sudo
+ cure_artifacts
+ # TODO: there's a tension between cure and validate. I moved the
+ # the sudo/root check out of validate to the head of this func.
+ # Cure is *intended* to subsume the validate-and-abort approach,
+ # so it may eventually obsolete it.
validate_starting_assumptions
setup_report
if ! ui_confirm "Ready to continue?"; then
ok "Alright, no changes have been made :)"
- contactme
+ get_help
trap finish_cleanup EXIT
exit 1
fi
+ poly_prepare_to_install
+
create_build_group
create_build_users
create_directories
@@ -695,6 +865,7 @@ main() {
configure_shell_profile
set +eu
+ # shellcheck disable=SC1091
. /etc/profile
set -eu
@@ -706,5 +877,20 @@ main() {
trap finish_success EXIT
}
+# set an empty initial arg for bare invocations in case we need to
+# disambiguate someone directly invoking this later.
+if [ "${#@}" = 0 ]; then
+ set ""
+fi
-main
+# ACTION for override
+case "${1-}" in
+ # uninstall)
+ # shift
+ # uninstall "$@";;
+ # install == same as the no-arg condition for now (but, explicit)
+ ""|install)
+ main;;
+ *) # holding space for future options (like uninstall + install?)
+ failure "install-multi-user: invalid argument";;
+esac
diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh
index 0ee7ce5af..b5e2fea83 100644
--- a/scripts/install-nix-from-closure.sh
+++ b/scripts/install-nix-from-closure.sh
@@ -26,18 +26,9 @@ fi
# macOS support for 10.12.6 or higher
if [ "$(uname -s)" = "Darwin" ]; then
- IFS='.' read macos_major macos_minor macos_patch << EOF
+ IFS='.' read -r macos_major macos_minor macos_patch << EOF
$(sw_vers -productVersion)
EOF
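+    # e.g., "11.2.3" splits into macos_major=11, macos_minor=2, macos_patch=3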
- # TODO: this is a temporary speed-bump to keep people from naively installing Nix
- # on macOS Big Sur (11.0+, 10.16+) until nixpkgs updates are ready for them.
- # *Ideally* this is gone before next Nix release. If you're intentionally working on
- # Nix + Big Sur, just comment out this block and be on your way :)
- if [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 15 ]; }; then
- echo "$0: nixpkgs isn't quite ready to support macOS $(sw_vers -productVersion) yet"
- exit 1
- fi
-
if [ "$macos_major" -lt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -lt 12 ]; } || { [ "$macos_minor" -eq 12 ] && [ "$macos_patch" -lt 6 ]; }; then
# patch may not be present; command substitution for simplicity
echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher"
@@ -46,21 +37,40 @@ EOF
fi
# Determine if we could use the multi-user installer or not
-if [ "$(uname -s)" = "Darwin" ]; then
- echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
-elif [ "$(uname -s)" = "Linux" ]; then
+if [ "$(uname -s)" = "Linux" ]; then
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
fi
-INSTALL_MODE=no-daemon
-CREATE_DARWIN_VOLUME=0
+case "$(uname -s)" in
+ "Darwin")
+ INSTALL_MODE=daemon;;
+ *)
+ INSTALL_MODE=no-daemon;;
+esac
+
+# space-separated string
+ACTIONS=
+
# handle the command line flags
while [ $# -gt 0 ]; do
case $1 in
--daemon)
- INSTALL_MODE=daemon;;
+ INSTALL_MODE=daemon
+ ACTIONS="${ACTIONS}install "
+ ;;
--no-daemon)
- INSTALL_MODE=no-daemon;;
+ if [ "$(uname -s)" = "Darwin" ]; then
+            printf '\e[1;31mError: --no-daemon installs are no longer supported on Darwin/macOS!\e[0m\n' >&2
+ exit 1
+ fi
+ INSTALL_MODE=no-daemon
+ # intentional tail space
+ ACTIONS="${ACTIONS}install "
+ ;;
+ # --uninstall)
+ # # intentional tail space
+ # ACTIONS="${ACTIONS}uninstall "
+ # ;;
--no-channel-add)
export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
--daemon-user-count)
@@ -69,13 +79,18 @@ while [ $# -gt 0 ]; do
--no-modify-profile)
NIX_INSTALLER_NO_MODIFY_PROFILE=1;;
--darwin-use-unencrypted-nix-store-volume)
- CREATE_DARWIN_VOLUME=1;;
+ {
+ echo "Warning: the flag --darwin-use-unencrypted-nix-store-volume"
+ echo " is no longer needed and will be removed in the future."
+ echo ""
+ } >&2;;
--nix-extra-conf-file)
- export NIX_EXTRA_CONF="$(cat $2)"
+ # shellcheck disable=SC2155
+ export NIX_EXTRA_CONF="$(cat "$2")"
shift;;
*)
- (
- echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--darwin-use-unencrypted-nix-store-volume] [--nix-extra-conf-file FILE]"
+ {
+ echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--nix-extra-conf-file FILE]"
echo "Choose installation method."
echo ""
@@ -91,55 +106,25 @@ while [ $# -gt 0 ]; do
echo ""
echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default."
echo ""
- echo " --no-modify-profile: Skip channel installation. When not provided nixpkgs-unstable"
- echo " is installed by default."
+ echo " --no-modify-profile: Don't modify the user profile to automatically load nix."
echo ""
echo " --daemon-user-count: Number of build users to create. Defaults to 32."
echo ""
- echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix.conf"
+ echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix/nix.conf"
echo ""
if [ -n "${INVOKED_FROM_INSTALL_IN:-}" ]; then
echo " --tarball-url-prefix URL: Base URL to download the Nix tarball from."
fi
- ) >&2
-
- # darwin and Catalina+
- if [ "$(uname -s)" = "Darwin" ] && { [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 14 ]; }; }; then
- (
- echo " --darwin-use-unencrypted-nix-store-volume: Create an APFS volume for the Nix"
- echo " store and mount it at /nix. This is the recommended way to create"
- echo " /nix with a read-only / on macOS >=10.15."
- echo " See: https://nixos.org/nix/manual/#sect-macos-installation"
- echo ""
- ) >&2
- fi
+ } >&2
+
exit;;
esac
shift
done
-if [ "$(uname -s)" = "Darwin" ]; then
- if [ "$CREATE_DARWIN_VOLUME" = 1 ]; then
- printf '\e[1;31mCreating volume and mountpoint /nix.\e[0m\n'
- "$self/create-darwin-volume.sh"
- fi
-
- writable="$(diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -)"
- if ! [ -e $dest ] && [ "$writable" = "false" ]; then
- (
- echo ""
- echo "Installing on macOS >=10.15 requires relocating the store to an apfs volume."
- echo "Use sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume or run the preparation steps manually."
- echo "See https://nixos.org/nix/manual/#sect-macos-installation"
- echo ""
- ) >&2
- exit 1
- fi
-fi
-
if [ "$INSTALL_MODE" = "daemon" ]; then
printf '\e[1;31mSwitching to the Multi-user Installer\e[0m\n'
- exec "$self/install-multi-user"
+ exec "$self/install-multi-user" $ACTIONS # let ACTIONS split
exit 0
fi
@@ -149,7 +134,7 @@ fi
echo "performing a single-user installation of Nix..." >&2
-if ! [ -e $dest ]; then
+if ! [ -e "$dest" ]; then
cmd="mkdir -m 0755 $dest && chown $USER $dest"
echo "directory $dest does not exist; creating it by running '$cmd' using sudo" >&2
if ! sudo sh -c "$cmd"; then
@@ -158,12 +143,12 @@ if ! [ -e $dest ]; then
fi
fi
-if ! [ -w $dest ]; then
+if ! [ -w "$dest" ]; then
echo "$0: directory $dest exists, but is not writable by you. This could indicate that another user has already performed a single-user installation of Nix on this system. If you wish to enable multi-user support see https://nixos.org/nix/manual/#ssec-multi-user. If you wish to continue with a single-user install for $USER please run 'chown -R $USER $dest' as root." >&2
exit 1
fi
-mkdir -p $dest/store
+mkdir -p "$dest/store"
printf "copying Nix to %s..." "${dest}/store" >&2
# Insert a newline if no progress is shown.
@@ -194,6 +179,7 @@ if ! "$nix/bin/nix-store" --load-db < "$self/.reginfo"; then
exit 1
fi
+# shellcheck source=./nix-profile.sh.in
. "$nix/etc/profile.d/nix.sh"
if ! "$nix/bin/nix-env" -i "$nix"; then
@@ -203,17 +189,17 @@ fi
# Install an SSL certificate bundle.
if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then
- $nix/bin/nix-env -i "$cacert"
+ "$nix/bin/nix-env" -i "$cacert"
export NIX_SSL_CERT_FILE="$HOME/.nix-profile/etc/ssl/certs/ca-bundle.crt"
fi
# Subscribe the user to the Nixpkgs channel and fetch it.
if [ -z "$NIX_INSTALLER_NO_CHANNEL_ADD" ]; then
- if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then
- $nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable
+ if ! "$nix/bin/nix-channel" --list | grep -q "^nixpkgs "; then
+ "$nix/bin/nix-channel" --add https://nixos.org/channels/nixpkgs-unstable
fi
if [ -z "$_NIX_INSTALLER_TEST" ]; then
- if ! $nix/bin/nix-channel --update nixpkgs; then
+ if ! "$nix/bin/nix-channel" --update nixpkgs; then
echo "Fetching the nixpkgs channel failed. (Are you offline?)"
echo "To try again later, run \"nix-channel --update nixpkgs\"."
fi
@@ -229,7 +215,7 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
if [ -w "$fn" ]; then
if ! grep -q "$p" "$fn"; then
echo "modifying $fn..." >&2
- echo -e "\nif [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn"
+ printf '\nif [ -e %s ]; then . %s; fi # added by Nix installer\n' "$p" "$p" >> "$fn"
fi
added=1
break
@@ -240,7 +226,7 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
if [ -w "$fn" ]; then
if ! grep -q "$p" "$fn"; then
echo "modifying $fn..." >&2
- echo -e "\nif [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn"
+ printf '\nif [ -e %s ]; then . %s; fi # added by Nix installer\n' "$p" "$p" >> "$fn"
fi
added=1
break
diff --git a/scripts/install-systemd-multi-user.sh b/scripts/install-systemd-multi-user.sh
index fda5ef600..f4a2dfc5d 100755
--- a/scripts/install-systemd-multi-user.sh
+++ b/scripts/install-systemd-multi-user.sh
@@ -15,7 +15,7 @@ readonly SERVICE_OVERRIDE=${SERVICE_DEST}.d/override.conf
create_systemd_override() {
header "Configuring proxy for the nix-daemon service"
- _sudo "create directory for systemd unit override" mkdir -p "$(dirname $SERVICE_OVERRIDE)"
+ _sudo "create directory for systemd unit override" mkdir -p "$(dirname "$SERVICE_OVERRIDE")"
cat <<EOF | _sudo "create systemd unit override" tee "$SERVICE_OVERRIDE"
[Service]
$1
@@ -41,10 +41,8 @@ handle_network_proxy() {
fi
}
-poly_validate_assumptions() {
- if [ "$(uname -s)" != "Linux" ]; then
- failure "This script is for use with Linux!"
- fi
+poly_cure_artifacts() {
+ :
}
poly_service_installed_check() {
@@ -72,7 +70,7 @@ poly_service_setup_note() {
EOF
}
-poly_extra_try_me_commands(){
+poly_extra_try_me_commands() {
if [ -e /run/systemd/system ]; then
:
else
@@ -81,19 +79,10 @@ poly_extra_try_me_commands(){
EOF
fi
}
-poly_extra_setup_instructions(){
- if [ -e /run/systemd/system ]; then
- :
- else
- cat <<EOF
-Additionally, you may want to add nix-daemon to your init-system.
-
-EOF
- fi
-}
poly_configure_nix_daemon_service() {
if [ -e /run/systemd/system ]; then
+ task "Setting up the nix-daemon systemd service"
_sudo "to set up the nix-daemon service" \
systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC"
@@ -110,6 +99,8 @@ poly_configure_nix_daemon_service() {
_sudo "to start the nix-daemon.service" \
systemctl restart nix-daemon.service
+ else
+ reminder "I don't support your init system yet; you may want to add nix-daemon manually."
fi
}
@@ -207,3 +198,7 @@ poly_create_build_user() {
--password "!" \
"$username"
}
+
+poly_prepare_to_install() {
+ :
+}
diff --git a/scripts/install.in b/scripts/install.in
index 7d25f7bd7..38d1fb36f 100755
--- a/scripts/install.in
+++ b/scripts/install.in
@@ -40,21 +40,25 @@ case "$(uname -s).$(uname -m)" in
path=@tarballPath_aarch64-linux@
system=aarch64-linux
;;
+    Linux.armv6l)
+ hash=@tarballHash_armv6l-linux@
+ path=@tarballPath_armv6l-linux@
+ system=armv6l-linux
+ ;;
+    Linux.armv7l)
+ hash=@tarballHash_armv7l-linux@
+ path=@tarballPath_armv7l-linux@
+ system=armv7l-linux
+ ;;
Darwin.x86_64)
hash=@tarballHash_x86_64-darwin@
path=@tarballPath_x86_64-darwin@
system=x86_64-darwin
;;
Darwin.arm64|Darwin.aarch64)
- # check for Rosetta 2 support
- if ! [ -f /Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist ]; then
- oops "Rosetta 2 is not installed on this ARM64 macOS machine. Run softwareupdate --install-rosetta then restart installation"
- fi
-
- hash=@binaryTarball_x86_64-darwin@
- path=@tarballPath_x86_64-darwin@
- # eventually maybe: aarch64-darwin
- system=x86_64-darwin
+ hash=@tarballHash_aarch64-darwin@
+ path=@tarballPath_aarch64-darwin@
+ system=aarch64-darwin
;;
*) oops "sorry, there is no binary distribution of Nix for your platform";;
esac
@@ -72,14 +76,21 @@ fi
tarball=$tmpDir/nix-@nixVersion@-$system.tar.xz
-require_util curl "download the binary tarball"
require_util tar "unpack the binary tarball"
if [ "$(uname -s)" != "Darwin" ]; then
require_util xz "unpack the binary tarball"
fi
+if command -v curl > /dev/null 2>&1; then
+ fetch() { curl -L "$1" -o "$2"; }
+elif command -v wget > /dev/null 2>&1; then
+ fetch() { wget "$1" -O "$2"; }
+else
+ oops "you don't have wget or curl installed, which I need to download the binary tarball"
+fi
+
echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..."
-curl -L "$url" -o "$tarball" || oops "failed to download '$url'"
+fetch "$url" "$tarball" || oops "failed to download '$url'"
if command -v sha256sum > /dev/null 2>&1; then
hash2="$(sha256sum -b "$tarball" | cut -c1-64)"
diff --git a/scripts/local.mk b/scripts/local.mk
index 2a0055852..b8477178e 100644
--- a/scripts/local.mk
+++ b/scripts/local.mk
@@ -1,7 +1,5 @@
nix_noinst_scripts := \
- $(d)/nix-http-export.cgi \
- $(d)/nix-profile.sh \
- $(d)/nix-reduce-build
+ $(d)/nix-profile.sh
noinst-scripts += $(nix_noinst_scripts)
diff --git a/scripts/nix-http-export.cgi.in b/scripts/nix-http-export.cgi.in
deleted file mode 100755
index 19a505af1..000000000
--- a/scripts/nix-http-export.cgi.in
+++ /dev/null
@@ -1,51 +0,0 @@
-#! /bin/sh
-
-export HOME=/tmp
-export NIX_REMOTE=daemon
-
-TMP_DIR="${TMP_DIR:-/tmp/nix-export}"
-
-@coreutils@/mkdir -p "$TMP_DIR" || true
-@coreutils@/chmod a+r "$TMP_DIR"
-
-needed_path="?$QUERY_STRING"
-needed_path="${needed_path#*[?&]needed_path=}"
-needed_path="${needed_path%%&*}"
-#needed_path="$(echo $needed_path | ./unhttp)"
-needed_path="${needed_path//%2B/+}"
-needed_path="${needed_path//%3D/=}"
-
-echo needed_path: "$needed_path" >&2
-
-NIX_STORE="${NIX_STORE_DIR:-/nix/store}"
-
-echo NIX_STORE: "${NIX_STORE}" >&2
-
-full_path="${NIX_STORE}"/"$needed_path"
-
-if [ "$needed_path" != "${needed_path%.drv}" ]; then
- echo "Status: 403 You should create the derivation file yourself"
- echo "Content-Type: text/plain"
- echo
- echo "Refusing to disclose derivation contents"
- exit
-fi
-
-if @bindir@/nix-store --check-validity "$full_path"; then
- if ! [ -e nix-export/"$needed_path".nar.gz ]; then
- @bindir@/nix-store --export "$full_path" | @gzip@ > "$TMP_DIR"/"$needed_path".nar.gz
- @coreutils@/ln -fs "$TMP_DIR"/"$needed_path".nar.gz nix-export/"$needed_path".nar.gz
- fi;
- echo "Status: 301 Moved"
- echo "Location: nix-export/"$needed_path".nar.gz"
- echo
-else
- echo "Status: 404 No such path found"
- echo "Content-Type: text/plain"
- echo
- echo "Path not found:"
- echo "$needed_path"
- echo "checked:"
- echo "$full_path"
-fi
-
diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in
index 500a98992..0a47571ac 100644
--- a/scripts/nix-profile-daemon.sh.in
+++ b/scripts/nix-profile-daemon.sh.in
@@ -5,7 +5,7 @@ __ETC_PROFILE_NIX_SOURCED=1
export NIX_PROFILES="@localstatedir@/nix/profiles/default $HOME/.nix-profile"
# Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work.
-if [ ! -z "${NIX_SSL_CERT_FILE:-}" ]; then
+if [ -n "${NIX_SSL_CERT_FILE:-}" ]; then
: # Allow users to override the NIX_SSL_CERT_FILE
elif [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch
export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
@@ -18,14 +18,14 @@ elif [ -e /etc/pki/tls/certs/ca-bundle.crt ]; then # Fedora, CentOS
else
# Fall back to what is in the nix profiles, favouring whatever is defined last.
check_nix_profiles() {
- if [ "$ZSH_VERSION" ]; then
+ if [ -n "$ZSH_VERSION" ]; then
# Zsh by default doesn't split words in unquoted parameter expansion.
# Set local_options for these options to be reverted at the end of the function
# and shwordsplit to force splitting words in $NIX_PROFILES below.
setopt local_options shwordsplit
fi
for i in $NIX_PROFILES; do
- if [ -e $i/etc/ssl/certs/ca-bundle.crt ]; then
+ if [ -e "$i/etc/ssl/certs/ca-bundle.crt" ]; then
export NIX_SSL_CERT_FILE=$i/etc/ssl/certs/ca-bundle.crt
fi
done
diff --git a/scripts/nix-reduce-build.in b/scripts/nix-reduce-build.in
deleted file mode 100755
index 50beb9d10..000000000
--- a/scripts/nix-reduce-build.in
+++ /dev/null
@@ -1,171 +0,0 @@
-#! @bash@
-
-WORKING_DIRECTORY=$(mktemp -d "${TMPDIR:-/tmp}"/nix-reduce-build-XXXXXX);
-cd "$WORKING_DIRECTORY";
-
-if test -z "$1" || test "a--help" = "a$1" ; then
- echo 'nix-reduce-build (paths or Nix expressions) -- (package sources)' >&2
- echo As in: >&2
- echo nix-reduce-build /etc/nixos/nixos -- ssh://user@somewhere.nowhere.example.org >&2
- echo nix-reduce-build /etc/nixos/nixos -- \\
- echo " " \''http://somewhere.nowhere.example.org/nix/nix-http-export.cgi?needed_path='\' >&2
- echo " store path name will be added into the end of the URL" >&2
- echo nix-reduce-build /etc/nixos/nixos -- file://home/user/nar/ >&2
- echo " that should be a directory where gzipped 'nix-store --export' ">&2
- echo " files are located (they should have .nar.gz extension)" >&2
- echo " Or all together: " >&2
- echo -e nix-reduce-build /expr.nix /e2.nix -- \\\\\\\n\
- " ssh://a@b.example.com http://n.example.com/get-nar?q= file://nar/" >&2
- echo " Also supports best-effort local builds of failing expression set:" >&2
- echo "nix-reduce-build /e.nix -- nix-daemon:// nix-self://" >&2
- echo " nix-daemon:// builds using daemon"
- echo " nix-self:// builds directly using nix-store from current installation" >&2
- echo " nix-daemon-fixed:// and nix-self-fixed:// do the same, but only for" >&2;
- echo "derivations with specified output hash (sha256, sha1 or md5)." >&2
- echo " nix-daemon-substitute:// and nix-self-substitute:// try to substitute" >&2;
- echo "maximum amount of paths" >&2;
- echo " nix-daemon-build:// and nix-self-build:// try to build (not substitute)" >&2;
- echo "maximum amount of paths" >&2;
- echo " If no package sources are specified, required paths are listed." >&2;
- exit;
-fi;
-
-while ! test "$1" = "--" || test "$1" = "" ; do
- echo "$1" >> initial; >&2
- shift;
-done
-shift;
-echo Will work on $(cat initial | wc -l) targets. >&2
-
-while read ; do
- case "$REPLY" in
- ${NIX_STORE_DIR:-/nix/store}/*)
- echo "$REPLY" >> paths; >&2
- ;;
- *)
- (
- IFS=: ;
- nix-instantiate $REPLY >> paths;
- );
- ;;
- esac;
-done < initial;
-echo Proceeding $(cat paths | wc -l) paths. >&2
-
-while read; do
- case "$REPLY" in
- *.drv)
- echo "$REPLY" >> derivers; >&2
- ;;
- *)
- nix-store --query --deriver "$REPLY" >>derivers;
- ;;
- esac;
-done < paths;
-echo Found $(cat derivers | wc -l) derivers. >&2
-
-cat derivers | xargs nix-store --query -R > derivers-closure;
-echo Proceeding at most $(cat derivers-closure | wc -l) derivers. >&2
-
-cat derivers-closure | egrep '[.]drv$' | xargs nix-store --query --outputs > wanted-paths;
-cat derivers-closure | egrep -v '[.]drv$' >> wanted-paths;
-echo Prepared $(cat wanted-paths | wc -l) paths to get. >&2
-
-cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
-echo We need $(cat needed-paths | wc -l) paths. >&2
-
-egrep '[.]drv$' derivers-closure > critical-derivers;
-
-if test -z "$1" ; then
- cat needed-paths;
-fi;
-
-refresh_critical_derivers() {
- echo "Finding needed derivers..." >&2;
- cat critical-derivers | while read; do
- if ! (nix-store --query --outputs "$REPLY" | xargs nix-store --check-validity &> /dev/null;); then
- echo "$REPLY";
- fi;
- done > new-critical-derivers;
- mv new-critical-derivers critical-derivers;
- echo The needed paths are realized by $(cat critical-derivers | wc -l) derivers. >&2
-}
-
-build_here() {
- cat critical-derivers | while read; do
- echo "Realising $REPLY using nix-daemon" >&2
- @bindir@/nix-store -r "${REPLY}"
- done;
-}
-
-try_to_substitute(){
- cat needed-paths | while read ; do
- echo "Building $REPLY using nix-daemon" >&2
- @bindir@/nix-store -r "${NIX_STORE_DIR:-/nix/store}/${REPLY##*/}"
- done;
-}
-
-for i in "$@"; do
- sshHost="${i#ssh://}";
- httpHost="${i#http://}";
- httpsHost="${i#https://}";
- filePath="${i#file:/}";
- if [ "$i" != "$sshHost" ]; then
- cat needed-paths | while read; do
- echo "Getting $REPLY and its closure over ssh" >&2
- nix-copy-closure --from "$sshHost" --gzip "$REPLY" </dev/null || true;
- done;
- elif [ "$i" != "$httpHost" ] || [ "$i" != "$httpsHost" ]; then
- cat needed-paths | while read; do
- echo "Getting $REPLY over http/https" >&2
- curl ${BAD_CERTIFICATE:+-k} -L "$i${REPLY##*/}" | gunzip | nix-store --import;
- done;
- elif [ "$i" != "$filePath" ] ; then
- cat needed-paths | while read; do
- echo "Installing $REPLY from file" >&2
- gunzip < "$filePath/${REPLY##*/}".nar.gz | nix-store --import;
- done;
- elif [ "$i" = "nix-daemon://" ] ; then
- NIX_REMOTE=daemon try_to_substitute;
- refresh_critical_derivers;
- NIX_REMOTE=daemon build_here;
- elif [ "$i" = "nix-self://" ] ; then
- NIX_REMOTE= try_to_substitute;
- refresh_critical_derivers;
- NIX_REMOTE= build_here;
- elif [ "$i" = "nix-daemon-fixed://" ] ; then
- refresh_critical_derivers;
-
- cat critical-derivers | while read; do
- if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
- echo "Realising $REPLY using nix-daemon" >&2
- NIX_REMOTE=daemon @bindir@/nix-store -r "${REPLY}"
- fi;
- done;
- elif [ "$i" = "nix-self-fixed://" ] ; then
- refresh_critical_derivers;
-
- cat critical-derivers | while read; do
- if egrep '"(md5|sha1|sha256)"' "$REPLY" &>/dev/null; then
- echo "Realising $REPLY using direct Nix build" >&2
- NIX_REMOTE= @bindir@/nix-store -r "${REPLY}"
- fi;
- done;
- elif [ "$i" = "nix-daemon-substitute://" ] ; then
- NIX_REMOTE=daemon try_to_substitute;
- elif [ "$i" = "nix-self-substitute://" ] ; then
- NIX_REMOTE= try_to_substitute;
- elif [ "$i" = "nix-daemon-build://" ] ; then
- refresh_critical_derivers;
- NIX_REMOTE=daemon build_here;
- elif [ "$i" = "nix-self-build://" ] ; then
- refresh_critical_derivers;
- NIX_REMOTE= build_here;
- fi;
- mv needed-paths wanted-paths;
- cat wanted-paths | xargs nix-store --check-validity --print-invalid > needed-paths;
- echo We still need $(cat needed-paths | wc -l) paths. >&2
-done;
-
-cd /
-rm -r "$WORKING_DIRECTORY"
diff --git a/scripts/prepare-installer-for-github-actions b/scripts/prepare-installer-for-github-actions
index 92d930384..4b994a753 100755
--- a/scripts/prepare-installer-for-github-actions
+++ b/scripts/prepare-installer-for-github-actions
@@ -3,7 +3,7 @@
set -e
script=$(nix-build -A outputs.hydraJobs.installerScriptForGHA --no-out-link)
-installerHash=$(echo $script | cut -b12-43 -)
+installerHash=$(echo "$script" | cut -b12-43 -)
installerURL=https://$CACHIX_NAME.cachix.org/serve/$installerHash/install
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc
index 57f2cd32d..9d541b45d 100644
--- a/src/build-remote/build-remote.cc
+++ b/src/build-remote/build-remote.cc
@@ -18,6 +18,7 @@
#include "derivations.hh"
#include "local-store.hh"
#include "legacy.hh"
+#include "experimental-features.hh"
using namespace nix;
using std::cin;
@@ -130,11 +131,14 @@ static int main_build_remote(int argc, char * * argv)
for (auto & m : machines) {
debug("considering building on remote machine '%s'", m.storeUri);
- if (m.enabled && std::find(m.systemTypes.begin(),
- m.systemTypes.end(),
- neededSystem) != m.systemTypes.end() &&
+ if (m.enabled
+ && (neededSystem == "builtin"
+ || std::find(m.systemTypes.begin(),
+ m.systemTypes.end(),
+ neededSystem) != m.systemTypes.end()) &&
m.allSupported(requiredFeatures) &&
- m.mandatoryMet(requiredFeatures)) {
+ m.mandatoryMet(requiredFeatures))
+ {
rightType = true;
AutoCloseFD free;
uint64_t load = 0;
@@ -270,14 +274,23 @@ connected:
{
Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri));
- copyPaths(store, ref<Store>(sshStore), store->parseStorePathSet(inputs), NoRepair, NoCheckSigs, substitute);
+ copyPaths(*store, *sshStore, store->parseStorePathSet(inputs), NoRepair, NoCheckSigs, substitute);
}
uploadLock = -1;
auto drv = store->readDerivation(*drvPath);
auto outputHashes = staticOutputHashes(*store, drv);
- drv.inputSrcs = store->parseStorePathSet(inputs);
+
+ // Hijack the input paths of the derivation to include all the paths
+ // that come from the `inputDrvs` set.
+ // We don’t do that for the derivations whose `inputDrvs` is empty
+ // because
+ // 1. It’s not needed
+ // 2. Changing the `inputSrcs` set changes the associated output ids,
+ // which breaks CA derivations
+ if (!drv.inputDrvs.empty())
+ drv.inputSrcs = store->parseStorePathSet(inputs);
auto result = sshStore->buildDerivation(*drvPath, drv);
@@ -286,7 +299,7 @@ connected:
std::set<Realisation> missingRealisations;
StorePathSet missingPaths;
- if (settings.isExperimentalFeatureEnabled("ca-derivations") && !derivationHasKnownOutputPaths(drv.type())) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) && !derivationHasKnownOutputPaths(drv.type())) {
for (auto & outputName : wantedOutputs) {
auto thisOutputHash = outputHashes.at(outputName);
auto thisOutputId = DrvOutput{ thisOutputHash, outputName };
@@ -312,13 +325,13 @@ connected:
if (auto localStore = store.dynamic_pointer_cast<LocalStore>())
for (auto & path : missingPaths)
localStore->locksHeld.insert(store->printStorePath(path)); /* FIXME: ugly */
- copyPaths(ref<Store>(sshStore), store, missingPaths, NoRepair, NoCheckSigs, NoSubstitute);
+ copyPaths(*sshStore, *store, missingPaths, NoRepair, NoCheckSigs, NoSubstitute);
}
// XXX: Should be done as part of `copyPaths`
for (auto & realisation : missingRealisations) {
// Should hold, because if the feature isn't enabled the set
// of missing realisations should be empty
- settings.requireExperimentalFeature("ca-derivations");
+ settings.requireExperimentalFeature(Xp::CaDerivations);
store->registerDrvOutput(realisation);
}
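
The realisation copy-back above uses the same lookup pattern as the rest of this patch: a content-addressed output is resolved through its `DrvOutput` id instead of a statically known path. A condensed sketch, reusing `outputHashes` from `staticOutputHashes(*store, drv)`:

    // Sketch: resolve one CA output; an absent realisation means the
    // output has not been built yet.
    DrvOutput id{outputHashes.at(outputName), outputName};
    if (auto realisation = store->queryRealisation(id))
        missingPaths.insert(realisation->outPath);
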
diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc
index 2c62bfa7f..4c5d985aa 100644
--- a/src/libcmd/command.cc
+++ b/src/libcmd/command.cc
@@ -95,8 +95,21 @@ EvalCommand::~EvalCommand()
evalState->printStats();
}
+ref<Store> EvalCommand::getEvalStore()
+{
+ if (!evalStore)
+ evalStore = evalStoreUrl ? openStore(*evalStoreUrl) : getStore();
+ return ref<Store>(evalStore);
+}
+
+ref<EvalState> EvalCommand::getEvalState()
+{
+ if (!evalState)
+ evalState = std::make_shared<EvalState>(searchPath, getEvalStore(), getStore());
+ return ref<EvalState>(evalState);
+}
-RealisedPathsCommand::RealisedPathsCommand(bool recursive)
+BuiltPathsCommand::BuiltPathsCommand(bool recursive)
: recursive(recursive)
{
if (recursive)
@@ -123,44 +136,53 @@ RealisedPathsCommand::RealisedPathsCommand(bool recursive)
});
}
-void RealisedPathsCommand::run(ref<Store> store)
+void BuiltPathsCommand::run(ref<Store> store)
{
- std::vector<RealisedPath> paths;
+ BuiltPaths paths;
if (all) {
if (installables.size())
throw UsageError("'--all' does not expect arguments");
// XXX: Only uses opaque paths, ignores all the realisations
for (auto & p : store->queryAllValidPaths())
- paths.push_back(p);
+ paths.push_back(BuiltPath::Opaque{p});
} else {
- auto pathSet = toRealisedPaths(store, realiseMode, operateOn, installables);
+ paths = toBuiltPaths(getEvalStore(), store, realiseMode, operateOn, installables);
if (recursive) {
- auto roots = std::move(pathSet);
- pathSet = {};
- RealisedPath::closure(*store, roots, pathSet);
+ // XXX: This only computes the store path closure, ignoring
+ // intermediate realisations
+ StorePathSet pathsRoots, pathsClosure;
+ for (auto & root : paths) {
+ auto rootFromThis = root.outPaths();
+ pathsRoots.insert(rootFromThis.begin(), rootFromThis.end());
+ }
+ store->computeFSClosure(pathsRoots, pathsClosure);
+ for (auto & path : pathsClosure)
+ paths.push_back(BuiltPath::Opaque{path});
}
- for (auto & path : pathSet)
- paths.push_back(path);
}
run(store, std::move(paths));
}
StorePathsCommand::StorePathsCommand(bool recursive)
- : RealisedPathsCommand(recursive)
+ : BuiltPathsCommand(recursive)
{
}
-void StorePathsCommand::run(ref<Store> store, std::vector<RealisedPath> paths)
+void StorePathsCommand::run(ref<Store> store, BuiltPaths && paths)
{
- StorePaths storePaths;
- for (auto & p : paths)
- storePaths.push_back(p.path());
+ StorePathSet storePaths;
+ for (auto & builtPath : paths)
+ for (auto & p : builtPath.outPaths())
+ storePaths.insert(p);
+
+ auto sorted = store->topoSortPaths(storePaths);
+ std::reverse(sorted.begin(), sorted.end());
- run(store, std::move(storePaths));
+ run(store, std::move(sorted));
}
-void StorePathCommand::run(ref<Store> store, std::vector<StorePath> storePaths)
+void StorePathCommand::run(ref<Store> store, std::vector<StorePath> && storePaths)
{
if (storePaths.size() != 1)
throw UsageError("this command requires exactly one store path");
@@ -204,7 +226,7 @@ void MixProfile::updateProfile(const StorePath & storePath)
profile2, storePath));
}
-void MixProfile::updateProfile(const DerivedPathsWithHints & buildables)
+void MixProfile::updateProfile(const BuiltPaths & buildables)
{
if (!profile) return;
@@ -212,22 +234,19 @@ void MixProfile::updateProfile(const DerivedPathsWithHints & buildables)
for (auto & buildable : buildables) {
std::visit(overloaded {
- [&](DerivedPathWithHints::Opaque bo) {
+ [&](const BuiltPath::Opaque & bo) {
result.push_back(bo.path);
},
- [&](DerivedPathWithHints::Built bfd) {
+ [&](const BuiltPath::Built & bfd) {
for (auto & output : bfd.outputs) {
- /* Output path should be known because we just tried to
- build it. */
- assert(output.second);
- result.push_back(*output.second);
+ result.push_back(output.second);
}
},
}, buildable.raw());
}
if (result.size() != 1)
- throw Error("'--profile' requires that the arguments produce a single store path, but there are %d", result.size());
+ throw UsageError("'--profile' requires that the arguments produce a single store path, but there are %d", result.size());
updateProfile(result[0]);
}
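
One behavioural change above is easy to miss: `StorePathsCommand::run` now sorts the collected paths topologically and reverses the result, so subcommands receive dependencies before their referrers. A sketch of the ordering, assuming `topoSortPaths` returns referrers first (the order the garbage collector deletes in):

    // Sketch: dependencies-first ordering for downstream commands.
    auto sorted = store->topoSortPaths(storePaths);   // referrers first
    std::reverse(sorted.begin(), sorted.end());       // now references first
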
diff --git a/src/libcmd/command.hh b/src/libcmd/command.hh
index 8566e3a6d..0d847d255 100644
--- a/src/libcmd/command.hh
+++ b/src/libcmd/command.hh
@@ -47,13 +47,18 @@ struct EvalCommand : virtual StoreCommand, MixEvalArgs
{
bool startReplOnEvalErrors = false;
+ EvalCommand();
+
+ ~EvalCommand();
+
+ ref<Store> getEvalStore();
+
ref<EvalState> getEvalState();
- EvalCommand();
+private:
+ std::shared_ptr<Store> evalStore;
std::shared_ptr<EvalState> evalState;
-
- ~EvalCommand();
};
struct MixFlakeOptions : virtual Args, EvalCommand
@@ -105,6 +110,8 @@ enum class Realise {
exists. */
Derivation,
/* Evaluate in dry-run mode. Postcondition: nothing. */
+ // FIXME: currently unused, but could be revived if we can
+ // evaluate derivations in-memory.
Nothing
};
@@ -147,7 +154,7 @@ private:
};
/* A command that operates on zero or more store paths. */
-struct RealisedPathsCommand : public InstallablesCommand
+struct BuiltPathsCommand : public InstallablesCommand
{
private:
@@ -160,26 +167,26 @@ protected:
public:
- RealisedPathsCommand(bool recursive = false);
+ BuiltPathsCommand(bool recursive = false);
using StoreCommand::run;
- virtual void run(ref<Store> store, std::vector<RealisedPath> paths) = 0;
+ virtual void run(ref<Store> store, BuiltPaths && paths) = 0;
void run(ref<Store> store) override;
bool useDefaultInstallables() override { return !all; }
};
-struct StorePathsCommand : public RealisedPathsCommand
+struct StorePathsCommand : public BuiltPathsCommand
{
StorePathsCommand(bool recursive = false);
- using RealisedPathsCommand::run;
+ using BuiltPathsCommand::run;
- virtual void run(ref<Store> store, std::vector<StorePath> storePaths) = 0;
+ virtual void run(ref<Store> store, std::vector<StorePath> && storePaths) = 0;
- void run(ref<Store> store, std::vector<RealisedPath> paths) override;
+ void run(ref<Store> store, BuiltPaths && paths) override;
};
/* A command that operates on exactly one store path. */
@@ -189,7 +196,7 @@ struct StorePathCommand : public StorePathsCommand
virtual void run(ref<Store> store, const StorePath & storePath) = 0;
- void run(ref<Store> store, std::vector<StorePath> storePaths) override;
+ void run(ref<Store> store, std::vector<StorePath> && storePaths) override;
};
/* A helper class for registering commands globally. */
@@ -220,26 +227,37 @@ static RegisterCommand registerCommand2(std::vector<std::string> && name)
return RegisterCommand(std::move(name), [](){ return make_ref<T>(); });
}
-DerivedPathsWithHints build(ref<Store> store, Realise mode,
- std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode = bmNormal);
+BuiltPaths build(
+ ref<Store> evalStore,
+ ref<Store> store, Realise mode,
+ const std::vector<std::shared_ptr<Installable>> & installables,
+ BuildMode bMode = bmNormal);
-std::set<StorePath> toStorePaths(ref<Store> store,
- Realise mode, OperateOn operateOn,
- std::vector<std::shared_ptr<Installable>> installables);
+std::set<StorePath> toStorePaths(
+ ref<Store> evalStore,
+ ref<Store> store,
+ Realise mode,
+ OperateOn operateOn,
+ const std::vector<std::shared_ptr<Installable>> & installables);
-StorePath toStorePath(ref<Store> store,
- Realise mode, OperateOn operateOn,
+StorePath toStorePath(
+ ref<Store> evalStore,
+ ref<Store> store,
+ Realise mode,
+ OperateOn operateOn,
std::shared_ptr<Installable> installable);
-std::set<StorePath> toDerivations(ref<Store> store,
- std::vector<std::shared_ptr<Installable>> installables,
+std::set<StorePath> toDerivations(
+ ref<Store> store,
+ const std::vector<std::shared_ptr<Installable>> & installables,
bool useDeriver = false);
-std::set<RealisedPath> toRealisedPaths(
+BuiltPaths toBuiltPaths(
+ ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
- std::vector<std::shared_ptr<Installable>> installables);
+ const std::vector<std::shared_ptr<Installable>> & installables);
/* Helper function to generate args that invoke $EDITOR on
filename:lineno. */
@@ -256,7 +274,7 @@ struct MixProfile : virtual StoreCommand
/* If 'profile' is set, make it point at the store path produced
by 'buildables'. */
- void updateProfile(const DerivedPathsWithHints & buildables);
+ void updateProfile(const BuiltPaths & buildables);
};
struct MixDefaultProfile : MixProfile
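
To see the renamed hierarchy from a consumer's side, here is a hypothetical subcommand; the class and command name are invented for illustration and do not appear in the patch:

    struct CmdExamplePaths : StorePathsCommand
    {
        std::string description() override
        { return "print the store paths of the given installables"; }

        void run(ref<Store> store, std::vector<StorePath> && storePaths) override
        {
            // Paths arrive dependencies-first via BuiltPathsCommand and
            // the topological sort in StorePathsCommand::run().
            for (auto & path : storePaths)
                logger->cout(store->printStorePath(path));
        }
    };

    static auto rCmdExamplePaths = registerCommand<CmdExamplePaths>("example-paths");
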
diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc
index 7f7a89b37..5758b52ad 100644
--- a/src/libcmd/installables.cc
+++ b/src/libcmd/installables.cc
@@ -58,9 +58,13 @@ MixFlakeOptions::MixFlakeOptions()
addFlag({
.longName = "no-registries",
- .description = "Don't allow lookups in the flake registries.",
+ .description =
+ "Don't allow lookups in the flake registries. This option is deprecated; use `--no-use-registries`.",
.category = category,
- .handler = {&lockFlags.useRegistries, false}
+ .handler = {[&]() {
+ lockFlags.useRegistries = false;
+ warn("'--no-registries' is deprecated; use '--no-use-registries'");
+ }}
});
addFlag({
@@ -171,14 +175,50 @@ Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes()
void SourceExprCommand::completeInstallable(std::string_view prefix)
{
- if (file) return; // FIXME
+ if (file) {
+ evalSettings.pureEval = false;
+ auto state = getEvalState();
+ Expr *e = state->parseExprFromFile(
+ resolveExprPath(state->checkSourcePath(lookupFileArg(*state, *file)))
+ );
+
+ Value root;
+ state->eval(e, root);
+
+ auto autoArgs = getAutoArgs(*state);
- completeFlakeRefWithFragment(
- getEvalState(),
- lockFlags,
- getDefaultFlakeAttrPathPrefixes(),
- getDefaultFlakeAttrPaths(),
- prefix);
+ std::string prefix_ = std::string(prefix);
+ auto sep = prefix_.rfind('.');
+ std::string searchWord;
+ if (sep != std::string::npos) {
+ searchWord = prefix_.substr(sep, std::string::npos);
+ prefix_ = prefix_.substr(0, sep);
+ } else {
+ searchWord = prefix_;
+ prefix_ = "";
+ }
+
+ Value &v1(*findAlongAttrPath(*state, prefix_, *autoArgs, root).first);
+ state->forceValue(v1);
+ Value v2;
+ state->autoCallFunction(*autoArgs, v1, v2);
+
+ if (v2.type() == nAttrs) {
+ for (auto & i : *v2.attrs) {
+ std::string name = i.name;
+ if (name.find(searchWord) == 0) {
+ completions->add(i.name);
+ }
+ }
+ }
+ } else {
+ completeFlakeRefWithFragment(
+ getEvalState(),
+ lockFlags,
+ getDefaultFlakeAttrPathPrefixes(),
+ getDefaultFlakeAttrPaths(),
+ prefix);
+ }
}
void completeFlakeRefWithFragment(
@@ -249,7 +289,6 @@ void completeFlakeRefWithFragment(
completeFlakeRef(evalState->store, prefix);
}
-
void completeFlakeRef(ref<Store> store, std::string_view prefix)
{
if (prefix == "")
@@ -273,9 +312,9 @@ void completeFlakeRef(ref<Store> store, std::string_view prefix)
}
}
-DerivedPathWithHints Installable::toDerivedPathWithHints()
+DerivedPath Installable::toDerivedPath()
{
- auto buildables = toDerivedPathsWithHints();
+ auto buildables = toDerivedPaths();
if (buildables.size() != 1)
throw Error("installable '%s' evaluates to %d derivations, where only one is expected", what(), buildables.size());
return std::move(buildables[0]);
@@ -309,22 +348,19 @@ struct InstallableStorePath : Installable
std::string what() override { return store->printStorePath(storePath); }
- DerivedPathsWithHints toDerivedPathsWithHints() override
+ DerivedPaths toDerivedPaths() override
{
if (storePath.isDerivation()) {
- std::map<std::string, std::optional<StorePath>> outputs;
auto drv = store->readDerivation(storePath);
- for (auto & [name, output] : drv.outputsAndOptPaths(*store))
- outputs.emplace(name, output.second);
return {
- DerivedPathWithHints::Built {
+ DerivedPath::Built {
.drvPath = storePath,
- .outputs = std::move(outputs)
+ .outputs = drv.outputNames(),
}
};
} else {
return {
- DerivedPathWithHints::Opaque {
+ DerivedPath::Opaque {
.path = storePath,
}
};
@@ -337,22 +373,24 @@ struct InstallableStorePath : Installable
}
};
-DerivedPathsWithHints InstallableValue::toDerivedPathsWithHints()
+DerivedPaths InstallableValue::toDerivedPaths()
{
- DerivedPathsWithHints res;
+ DerivedPaths res;
- std::map<StorePath, std::map<std::string, std::optional<StorePath>>> drvsToOutputs;
+ std::map<StorePath, std::set<std::string>> drvsToOutputs;
+ RealisedPath::Set drvsToCopy;
// Group by derivation, helps with .all in particular
for (auto & drv : toDerivations()) {
auto outputName = drv.outputName;
if (outputName == "")
throw Error("derivation '%s' lacks an 'outputName' attribute", state->store->printStorePath(drv.drvPath));
- drvsToOutputs[drv.drvPath].insert_or_assign(outputName, drv.outPath);
+ drvsToOutputs[drv.drvPath].insert(outputName);
+ drvsToCopy.insert(drv.drvPath);
}
for (auto & i : drvsToOutputs)
- res.push_back(DerivedPathWithHints::Built { i.first, i.second });
+ res.push_back(DerivedPath::Built { i.first, i.second });
return res;
}
@@ -564,10 +602,10 @@ InstallableFlake::getCursors(EvalState & state)
std::shared_ptr<flake::LockedFlake> InstallableFlake::getLockedFlake() const
{
+ flake::LockFlags lockFlagsApplyConfig = lockFlags;
+ lockFlagsApplyConfig.applyNixConfig = true;
if (!_lockedFlake) {
- _lockedFlake = std::make_shared<flake::LockedFlake>(lockFlake(*state, flakeRef, lockFlags));
- _lockedFlake->flake.config.apply();
- // FIXME: send new config to the daemon.
+ _lockedFlake = std::make_shared<flake::LockedFlake>(lockFlake(*state, flakeRef, lockFlagsApplyConfig));
}
return _lockedFlake;
}
@@ -616,6 +654,17 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
for (auto & s : ss) {
std::exception_ptr ex;
+ if (s.find('/') != std::string::npos) {
+ try {
+ result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
+ continue;
+ } catch (BadStorePath &) {
+ } catch (...) {
+ if (!ex)
+ ex = std::current_exception();
+ }
+ }
+
try {
auto [flakeRef, fragment] = parseFlakeRefWithFragment(s, absPath("."));
result.push_back(std::make_shared<InstallableFlake>(
@@ -630,25 +679,7 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
ex = std::current_exception();
}
- if (s.find('/') != std::string::npos) {
- try {
- result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
- continue;
- } catch (BadStorePath &) {
- } catch (...) {
- if (!ex)
- ex = std::current_exception();
- }
- }
-
std::rethrow_exception(ex);
-
- /*
- throw Error(
- pathExists(s)
- ? "path '%s' is not a flake or a store path"
- : "don't know how to handle argument '%s'", s);
- */
}
}
@@ -663,107 +694,121 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
return installables.front();
}
-DerivedPathsWithHints build(ref<Store> store, Realise mode,
- std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode)
+BuiltPaths getBuiltPaths(ref<Store> evalStore, ref<Store> store, const DerivedPaths & hopefullyBuiltPaths)
+{
+ BuiltPaths res;
+ for (const auto & b : hopefullyBuiltPaths)
+ std::visit(
+ overloaded{
+ [&](const DerivedPath::Opaque & bo) {
+ res.push_back(BuiltPath::Opaque{bo.path});
+ },
+ [&](const DerivedPath::Built & bfd) {
+ OutputPathMap outputs;
+ auto drv = evalStore->readDerivation(bfd.drvPath);
+ auto outputHashes = staticOutputHashes(*evalStore, drv); // FIXME: expensive
+ auto drvOutputs = drv.outputsAndOptPaths(*store);
+ for (auto & output : bfd.outputs) {
+ if (!outputHashes.count(output))
+ throw Error(
+ "the derivation '%s' doesn't have an output named '%s'",
+ store->printStorePath(bfd.drvPath), output);
+ if (settings.isExperimentalFeatureEnabled(
+ Xp::CaDerivations)) {
+ auto outputId =
+ DrvOutput{outputHashes.at(output), output};
+ auto realisation =
+ store->queryRealisation(outputId);
+ if (!realisation)
+ throw Error(
+ "cannot operate on an output of unbuilt "
+ "content-addressed derivation '%s'",
+ outputId.to_string());
+ outputs.insert_or_assign(
+ output, realisation->outPath);
+ } else {
+ // If ca-derivations isn't enabled, assume that
+ // the output path is statically known.
+ assert(drvOutputs.count(output));
+ assert(drvOutputs.at(output).second);
+ outputs.insert_or_assign(
+ output, *drvOutputs.at(output).second);
+ }
+ }
+ res.push_back(BuiltPath::Built{bfd.drvPath, outputs});
+ },
+ },
+ b.raw());
+
+ return res;
+}
+
+BuiltPaths build(
+ ref<Store> evalStore,
+ ref<Store> store,
+ Realise mode,
+ const std::vector<std::shared_ptr<Installable>> & installables,
+ BuildMode bMode)
{
if (mode == Realise::Nothing)
settings.readOnlyMode = true;
- DerivedPathsWithHints buildables;
-
std::vector<DerivedPath> pathsToBuild;
for (auto & i : installables) {
- for (auto & b : i->toDerivedPathsWithHints()) {
- std::visit(overloaded {
- [&](DerivedPathWithHints::Opaque bo) {
- pathsToBuild.push_back(bo);
- },
- [&](DerivedPathWithHints::Built bfd) {
- StringSet outputNames;
- for (auto & output : bfd.outputs)
- outputNames.insert(output.first);
- pathsToBuild.push_back(
- DerivedPath::Built{bfd.drvPath, outputNames});
- },
- }, b.raw());
- buildables.push_back(std::move(b));
- }
+ auto b = i->toDerivedPaths();
+ pathsToBuild.insert(pathsToBuild.end(), b.begin(), b.end());
}
- if (mode == Realise::Nothing)
+ if (mode == Realise::Nothing || mode == Realise::Derivation)
printMissing(store, pathsToBuild, lvlError);
else if (mode == Realise::Outputs)
- store->buildPaths(pathsToBuild, bMode);
+ store->buildPaths(pathsToBuild, bMode, evalStore);
- return buildables;
+ return getBuiltPaths(evalStore, store, pathsToBuild);
}
-std::set<RealisedPath> toRealisedPaths(
+BuiltPaths toBuiltPaths(
+ ref<Store> evalStore,
ref<Store> store,
Realise mode,
OperateOn operateOn,
- std::vector<std::shared_ptr<Installable>> installables)
+ const std::vector<std::shared_ptr<Installable>> & installables)
{
- std::set<RealisedPath> res;
- if (operateOn == OperateOn::Output) {
- for (auto & b : build(store, mode, installables))
- std::visit(overloaded {
- [&](DerivedPathWithHints::Opaque bo) {
- res.insert(bo.path);
- },
- [&](DerivedPathWithHints::Built bfd) {
- auto drv = store->readDerivation(bfd.drvPath);
- auto outputHashes = staticOutputHashes(*store, drv);
- for (auto & output : bfd.outputs) {
- if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
- if (!outputHashes.count(output.first))
- throw Error(
- "the derivation '%s' doesn't have an output named '%s'",
- store->printStorePath(bfd.drvPath),
- output.first);
- auto outputId = DrvOutput{outputHashes.at(output.first), output.first};
- auto realisation = store->queryRealisation(outputId);
- if (!realisation)
- throw Error("cannot operate on an output of unbuilt content-addresed derivation '%s'", outputId.to_string());
- res.insert(RealisedPath{*realisation});
- }
- else {
- // If ca-derivations isn't enabled, behave as if
- // all the paths are opaque to keep the default
- // behavior
- assert(output.second);
- res.insert(*output.second);
- }
- }
- },
- }, b.raw());
- } else {
+ if (operateOn == OperateOn::Output)
+ return build(evalStore, store, mode, installables);
+ else {
if (mode == Realise::Nothing)
settings.readOnlyMode = true;
- auto drvPaths = toDerivations(store, installables, true);
- res.insert(drvPaths.begin(), drvPaths.end());
+ BuiltPaths res;
+ for (auto & drvPath : toDerivations(store, installables, true))
+ res.push_back(BuiltPath::Opaque{drvPath});
+ return res;
}
-
- return res;
}
-StorePathSet toStorePaths(ref<Store> store,
+StorePathSet toStorePaths(
+ ref<Store> evalStore,
+ ref<Store> store,
Realise mode, OperateOn operateOn,
- std::vector<std::shared_ptr<Installable>> installables)
+ const std::vector<std::shared_ptr<Installable>> & installables)
{
StorePathSet outPaths;
- for (auto & path : toRealisedPaths(store, mode, operateOn, installables))
- outPaths.insert(path.path());
+ for (auto & path : toBuiltPaths(evalStore, store, mode, operateOn, installables)) {
+ auto thisOutPaths = path.outPaths();
+ outPaths.insert(thisOutPaths.begin(), thisOutPaths.end());
+ }
return outPaths;
}
-StorePath toStorePath(ref<Store> store,
+StorePath toStorePath(
+ ref<Store> evalStore,
+ ref<Store> store,
Realise mode, OperateOn operateOn,
std::shared_ptr<Installable> installable)
{
- auto paths = toStorePaths(store, mode, operateOn, {installable});
+ auto paths = toStorePaths(evalStore, store, mode, operateOn, {installable});
if (paths.size() != 1)
throw Error("argument '%s' should evaluate to one store path", installable->what());
@@ -771,15 +816,17 @@ StorePath toStorePath(ref<Store> store,
return *paths.begin();
}
-StorePathSet toDerivations(ref<Store> store,
- std::vector<std::shared_ptr<Installable>> installables, bool useDeriver)
+StorePathSet toDerivations(
+ ref<Store> store,
+ const std::vector<std::shared_ptr<Installable>> & installables,
+ bool useDeriver)
{
StorePathSet drvPaths;
- for (auto & i : installables)
- for (auto & b : i->toDerivedPathsWithHints())
+ for (const auto & i : installables)
+ for (const auto & b : i->toDerivedPaths())
std::visit(overloaded {
- [&](DerivedPathWithHints::Opaque bo) {
+ [&](const DerivedPath::Opaque & bo) {
if (!useDeriver)
throw Error("argument '%s' did not evaluate to a derivation", i->what());
auto derivers = store->queryValidDerivers(bo.path);
@@ -788,7 +835,7 @@ StorePathSet toDerivations(ref<Store> store,
// FIXME: use all derivers?
drvPaths.insert(*derivers.begin());
},
- [&](DerivedPathWithHints::Built bfd) {
+ [&](const DerivedPath::Built & bfd) {
drvPaths.insert(bfd.drvPath);
},
}, b.raw());
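
The `DerivedPathWithHints` → `DerivedPath`/`BuiltPath` split above separates a build request from a build result. A sketch of the two shapes, with the concrete values invented for illustration:

    // Request: "build these named outputs of this .drv"; no output
    // paths are known yet, which is the point for CA derivations.
    DerivedPath::Built request{
        .drvPath = drvPath,
        .outputs = {"out", "dev"},
    };

    // Result: the same outputs with concrete store paths attached, as
    // assembled by getBuiltPaths() above.
    BuiltPath::Built result{drvPath, {{"out", outPath}}};
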
diff --git a/src/libcmd/installables.hh b/src/libcmd/installables.hh
index 403403c07..79931ad3e 100644
--- a/src/libcmd/installables.hh
+++ b/src/libcmd/installables.hh
@@ -23,17 +23,23 @@ struct App
// FIXME: add args, sandbox settings, metadata, ...
};
+struct UnresolvedApp
+{
+ App unresolved;
+ App resolve(ref<Store> evalStore, ref<Store> store);
+};
+
struct Installable
{
virtual ~Installable() { }
virtual std::string what() = 0;
- virtual DerivedPathsWithHints toDerivedPathsWithHints() = 0;
+ virtual DerivedPaths toDerivedPaths() = 0;
- DerivedPathWithHints toDerivedPathWithHints();
+ DerivedPath toDerivedPath();
- App toApp(EvalState & state);
+ UnresolvedApp toApp(EvalState & state);
virtual std::pair<Value *, Pos> toValue(EvalState & state)
{
@@ -74,7 +80,7 @@ struct InstallableValue : Installable
virtual std::vector<DerivationInfo> toDerivations() = 0;
- DerivedPathsWithHints toDerivedPathsWithHints() override;
+ DerivedPaths toDerivedPaths() override;
};
struct InstallableFlake : InstallableValue
diff --git a/src/libcmd/local.mk b/src/libcmd/local.mk
index df904612b..1ec258a54 100644
--- a/src/libcmd/local.mk
+++ b/src/libcmd/local.mk
@@ -8,8 +8,9 @@ libcmd_SOURCES := $(wildcard $(d)/*.cc)
libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers -I src/nix
-libcmd_LDFLAGS = $(EDITLINE_LIBS) -llowdown
+# libcmd_LDFLAGS = $(EDITLINE_LIBS) -llowdown
+libcmd_LDFLAGS += -llowdown -pthread
libcmd_LIBS = libstore libutil libexpr libmain libfetchers libnix
-$(eval $(call install-file-in, $(d)/nix-cmd.pc, $(prefix)/lib/pkgconfig, 0644))
+$(eval $(call install-file-in, $(d)/nix-cmd.pc, $(libdir)/pkgconfig, 0644))
diff --git a/src/libcmd/markdown.cc b/src/libcmd/markdown.cc
index d25113d93..29bb4d31e 100644
--- a/src/libcmd/markdown.cc
+++ b/src/libcmd/markdown.cc
@@ -12,7 +12,7 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
struct lowdown_opts opts {
.type = LOWDOWN_TERM,
.maxdepth = 20,
- .cols = std::min(getWindowSize().second, (unsigned short) 80),
+ .cols = std::max(getWindowSize().second, (unsigned short) 80),
.hmargin = 0,
.vmargin = 0,
.feat = LOWDOWN_COMMONMARK | LOWDOWN_FENCED | LOWDOWN_DEFLIST | LOWDOWN_TABLES,
@@ -25,7 +25,7 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
Finally freeDoc([&]() { lowdown_doc_free(doc); });
size_t maxn = 0;
- auto node = lowdown_doc_parse(doc, &maxn, markdown.data(), markdown.size());
+ auto node = lowdown_doc_parse(doc, &maxn, markdown.data(), markdown.size(), nullptr);
if (!node)
throw Error("cannot parse Markdown document");
Finally freeNode([&]() { lowdown_node_free(node); });
@@ -40,11 +40,11 @@ std::string renderMarkdownToTerminal(std::string_view markdown)
throw Error("cannot allocate Markdown output buffer");
Finally freeBuffer([&]() { lowdown_buf_free(buf); });
- int rndr_res = lowdown_term_rndr(buf, nullptr, renderer, node);
+ int rndr_res = lowdown_term_rndr(buf, renderer, node);
if (!rndr_res)
throw Error("allocation error while rendering Markdown");
- return std::string(buf->data, buf->size);
+ return filterANSIEscapes(std::string(buf->data, buf->size), !shouldANSI());
}
}
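
Two small behaviour changes above are worth spelling out: the `std::min` → `std::max` switch clamps rendering to at least 80 columns, and the new return value strips colour codes when output should not be colourised. A sketch of the combined effect, using only the helpers already visible in the hunk:

    // Never render narrower than 80 columns, even in a tiny terminal.
    unsigned short cols = std::max(getWindowSize().second, (unsigned short) 80);

    // Drop ANSI escape sequences unless stdout wants them.
    return filterANSIEscapes(std::string(buf->data, buf->size), !shouldANSI());
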
diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc
index bfc131d27..6faa9f9fa 100644
--- a/src/libcmd/repl.cc
+++ b/src/libcmd/repl.cc
@@ -68,6 +68,7 @@ struct NixRepl
StorePath getDerivationPath(Value & v);
bool processLine(string line);
void loadFile(const Path & path);
+ void loadFlake(const std::string & flakeRef);
void initEnv();
void reloadFiles();
void addAttrsToScope(Value & attrs);
@@ -104,6 +105,25 @@ NixRepl::~NixRepl()
write_history(historyFile.c_str());
}
+string runNix(Path program, const Strings & args,
+ const std::optional<std::string> & input = {})
+{
+ auto subprocessEnv = getEnv();
+ subprocessEnv["NIX_CONFIG"] = globalConfig.toKeyValue();
+
+ auto res = runProgram(RunOptions {
+ .program = settings.nixBinDir+ "/" + program,
+ .args = args,
+ .environment = subprocessEnv,
+ .input = input,
+ });
+
+ if (!statusOk(res.first))
+ throw ExecError(res.first, fmt("program '%1%' %2%", program, statusToString(res.first)));
+
+ return res.second;
+}
+
static NixRepl * curRepl; // ugly
static char * completionCallback(char * s, int *match) {
@@ -180,15 +200,14 @@ namespace {
void NixRepl::mainLoop(const std::vector<std::string> & files)
{
string error = ANSI_RED "error:" ANSI_NORMAL " ";
- std::cout << "Welcome to Nix version " << nixVersion << ". Type :? for help." << std::endl << std::endl;
+ notice("Welcome to Nix " + nixVersion + ". Type :? for help.\n");
if (!files.empty()) {
for (auto & i : files)
loadedFiles.push_back(i);
- reloadFiles();
- if (!loadedFiles.empty()) std::cout << std::endl;
- }
+ reloadFiles();
+ if (!loadedFiles.empty()) notice("");
// Allow nix-repl specific settings in .inputrc
rl_readline_name = "nix-repl";
@@ -378,6 +397,8 @@ bool NixRepl::processLine(string line)
{
if (line == "") return true;
+ _isInterrupted = false;
+
string command, arg;
if (line[0] == ':') {
@@ -397,9 +418,10 @@ bool NixRepl::processLine(string line)
<< " <x> = <expr> Bind expression to variable\n"
<< " :a <expr> Add attributes from resulting set to scope\n"
<< " :b <expr> Build derivation\n"
- << " :e <expr> Open the derivation in $EDITOR\n"
+ << " :e <expr> Open package or function in $EDITOR\n"
<< " :i <expr> Build derivation, then install result into current profile\n"
<< " :l <path> Load Nix expression and add it to scope\n"
+ << " :lf <ref> Load Nix flake and add it to scope\n"
<< " :p <expr> Evaluate and print expression recursively\n"
<< " :q Exit nix-repl\n"
<< " :r Reload all files\n"
@@ -420,6 +442,10 @@ bool NixRepl::processLine(string line)
loadFile(arg);
}
+ else if (command == ":lf" || command == ":load-flake") {
+ loadFlake(arg);
+ }
+
else if (command == ":r" || command == ":reload") {
state->resetFileCache();
reloadFiles();
@@ -439,14 +465,17 @@ bool NixRepl::processLine(string line)
pos = v.lambda.fun->pos;
} else {
// assume it's a derivation
- pos = findDerivationFilename(*state, v, arg);
+ pos = findPackageFilename(*state, v, arg);
}
// Open in EDITOR
auto args = editorFor(pos);
auto editor = args.front();
args.pop_front();
- runProgram(editor, true, args);
+
+ // runProgram redirects stdout to a StringSink,
+ // using runProgram2 to allow editors to display their UI
+ runProgram2(RunOptions { .program = editor, .searchPath = true, .args = args });
// Reload right after exiting the editor
state->resetFileCache();
@@ -456,16 +485,17 @@ bool NixRepl::processLine(string line)
else if (command == ":t") {
Value v;
evalString(arg, v);
- std::cout << showType(v) << std::endl;
+ logger->cout(showType(v));
+ }
- } else if (command == ":u") {
+ else if (command == ":u") {
Value v, f, result;
evalString(arg, v);
evalString("drv: (import <nixpkgs> {}).runCommand \"shell\" { buildInputs = [ drv ]; } \"\"", f);
state->callFunction(f, v, result, Pos());
StorePath drvPath = getDerivationPath(result);
- runProgram(settings.nixBinDir + "/nix-shell", true, {state->store->printStorePath(drvPath)});
+ runNix("nix-shell", {state->store->printStorePath(drvPath)});
}
else if (command == ":b" || command == ":i" || command == ":s") {
@@ -475,21 +505,15 @@ bool NixRepl::processLine(string line)
Path drvPathRaw = state->store->printStorePath(drvPath);
if (command == ":b") {
- /* We could do the build in this process using buildPaths(),
- but doing it in a child makes it easier to recover from
- problems / SIGINT. */
- try {
- runProgram(settings.nixBinDir + "/nix", true, {"build", "--no-link", drvPathRaw});
- auto drv = state->store->readDerivation(drvPath);
- std::cout << std::endl << "this derivation produced the following outputs:" << std::endl;
- for (auto & i : drv.outputsAndOptPaths(*state->store))
- std::cout << fmt(" %s -> %s\n", i.first, state->store->printStorePath(*i.second.second));
- } catch (ExecError &) {
- }
+ state->store->buildPaths({DerivedPath::Built{drvPath}});
+ auto drv = state->store->readDerivation(drvPath);
+ logger->cout("\nThis derivation produced the following outputs:");
+ for (auto & [outputName, outputPath] : state->store->queryDerivationOutputMap(drvPath))
+ logger->cout(" %s -> %s", outputName, state->store->printStorePath(outputPath));
} else if (command == ":i") {
- runProgram(settings.nixBinDir + "/nix-env", true, {"-i", drvPathRaw});
+ runNix("nix-env", {"-i", drvPathRaw});
} else {
- runProgram(settings.nixBinDir + "/nix-shell", true, {drvPathRaw});
+ runNix("nix-shell", {drvPathRaw});
}
}
@@ -518,9 +542,9 @@ bool NixRepl::processLine(string line)
+ concatStringsSep(" ", args) + "\n\n";
}
- markdown += trim(stripIndentation(doc->doc));
+ markdown += stripIndentation(doc->doc);
- std::cout << renderMarkdownToTerminal(markdown);
+ logger->cout(trim(renderMarkdownToTerminal(markdown)));
} else
throw Error("value does not have documentation");
}
@@ -561,6 +585,25 @@ void NixRepl::loadFile(const Path & path)
addAttrsToScope(v2);
}
+void NixRepl::loadFlake(const std::string & flakeRefS)
+{
+ auto flakeRef = parseFlakeRef(flakeRefS, absPath("."), true);
+ if (evalSettings.pureEval && !flakeRef.input.isImmutable())
+ throw Error("cannot use ':load-flake' on mutable flake reference '%s' (use --impure to override)", flakeRefS);
+
+ Value v;
+
+ flake::callFlake(*state,
+ flake::lockFlake(*state, flakeRef,
+ flake::LockFlags {
+ .updateLockFile = false,
+ .useRegistries = !evalSettings.pureEval,
+ .allowMutable = !evalSettings.pureEval,
+ }),
+ v);
+ addAttrsToScope(v);
+}
+
void NixRepl::initEnv()
{
@@ -584,9 +627,9 @@ void NixRepl::reloadFiles()
bool first = true;
for (auto & i : old) {
- if (!first) std::cout << std::endl;
+ if (!first) notice("");
first = false;
- std::cout << format("Loading '%1%'...") % i << std::endl;
+ notice("Loading '%1%'...", i);
loadFile(i);
}
}
@@ -596,8 +639,8 @@ void NixRepl::addAttrsToScope(Value & attrs)
{
state->forceAttrs(attrs);
for (auto & i : *attrs.attrs)
- addVarToScope(i.name, i.value);
- std::cout << format("Added %1% variables.") % attrs.attrs->size() << std::endl;
+ addVarToScope(i.name, *i.value);
+ notice("Added %1% variables.", attrs.attrs->size());
}
@@ -605,8 +648,9 @@ void NixRepl::addVarToScope(const Symbol & name, Value * v)
{
if (displ >= envSize)
throw Error("environment full; cannot add more variables");
- staticEnv->vars[name] = displ;
- env->values[displ++] = v;
+ staticEnv->vars.emplace_back(name, displ);
+ staticEnv->sort();
+ env->values[displ++] = &v;
varNames.insert((string) name);
}
@@ -665,7 +709,7 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m
break;
case nString:
- str << ANSI_YELLOW;
+ str << ANSI_WARNING;
printStringValue(str, v.string.s);
str << ANSI_NORMAL;
break;
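
The new `runNix()` helper exists so that child `nix-*` processes see the repl's effective settings: `NIX_CONFIG` is the environment-variable channel for injecting configuration, and `globalConfig.toKeyValue()` serialises the current in-process settings into it. A condensed sketch of the round trip, reusing names from the hunks above:

    // Sketch: run nix-env with the repl's current configuration.
    auto env = getEnv();                               // inherit environment
    env["NIX_CONFIG"] = globalConfig.toKeyValue();     // "key = value" lines

    auto res = runProgram(RunOptions {
        .program = settings.nixBinDir + "/nix-env",
        .args = {"-i", drvPathRaw},
        .environment = env,
    });
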
diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc
index 9dd557205..c50c6d92b 100644
--- a/src/libexpr/attr-path.cc
+++ b/src/libexpr/attr-path.cc
@@ -19,7 +19,7 @@ static Strings parseAttrPath(std::string_view s)
++i;
while (1) {
if (i == s.end())
- throw Error("missing closing quote in selection path '%1%'", s);
+ throw ParseError("missing closing quote in selection path '%1%'", s);
if (*i == '"') break;
cur.push_back(*i++);
}
@@ -100,7 +100,7 @@ std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attr
}
-Pos findDerivationFilename(EvalState & state, Value & v, std::string what)
+Pos findPackageFilename(EvalState & state, Value & v, std::string what)
{
Value * v2;
try {
@@ -116,14 +116,14 @@ Pos findDerivationFilename(EvalState & state, Value & v, std::string what)
auto colon = pos.rfind(':');
if (colon == std::string::npos)
- throw Error("cannot parse meta.position attribute '%s'", pos);
+ throw ParseError("cannot parse meta.position attribute '%s'", pos);
std::string filename(pos, 0, colon);
unsigned int lineno;
try {
lineno = std::stoi(std::string(pos, colon + 1));
} catch (std::invalid_argument & e) {
- throw Error("cannot parse line number '%s'", pos);
+ throw ParseError("cannot parse line number '%s'", pos);
}
Symbol file = state.symbols.create(filename);
diff --git a/src/libexpr/attr-path.hh b/src/libexpr/attr-path.hh
index d9d74ab2d..2ee3ea089 100644
--- a/src/libexpr/attr-path.hh
+++ b/src/libexpr/attr-path.hh
@@ -14,7 +14,7 @@ std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attr
Bindings & autoArgs, Value & vIn);
/* Heuristic to find the filename and lineno of a Nix value. */
-Pos findDerivationFilename(EvalState & state, Value & v, std::string what);
+Pos findPackageFilename(EvalState & state, Value & v, std::string what);
std::vector<Symbol> parseAttrPath(EvalState & state, std::string_view s);
diff --git a/src/libexpr/attr-set.hh b/src/libexpr/attr-set.hh
index 1da8d91df..7d6ffc9f3 100644
--- a/src/libexpr/attr-set.hh
+++ b/src/libexpr/attr-set.hh
@@ -17,8 +17,8 @@ struct Attr
{
Symbol name;
Value * value;
- Pos * pos;
- Attr(Symbol name, Value * value, Pos * pos = &noPos)
+ ptr<Pos> pos;
+ Attr(Symbol name, Value * value, ptr<Pos> pos = ptr(&noPos))
: name(name), value(value), pos(pos) { };
Attr() : pos(&noPos) { };
bool operator < (const Attr & a) const
@@ -35,13 +35,13 @@ class Bindings
{
public:
typedef uint32_t size_t;
- Pos *pos;
+ ptr<Pos> pos;
private:
size_t size_, capacity_;
Attr attrs[0];
- Bindings(size_t capacity) : size_(0), capacity_(capacity) { }
+ Bindings(size_t capacity) : pos(&noPos), size_(0), capacity_(capacity) { }
Bindings(const Bindings & bindings) = delete;
public:
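
The `Pos *` → `ptr<Pos>` migration in this header trades "may be null" for "always points somewhere, possibly at the `noPos` sentinel". Assuming `ptr<T>` is Nix's non-nullable pointer wrapper (it rejects construction from null), the call-site pattern becomes:

    ptr<Pos> pos(&noPos);     // always dereferenceable
    if (*pos != noPos)        // "unset" is an explicit sentinel,
        use(*pos);            // not a null pointer (use() is a stand-in)
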
diff --git a/src/libexpr/common-eval-args.cc b/src/libexpr/common-eval-args.cc
index aa14bf79b..fb0932c00 100644
--- a/src/libexpr/common-eval-args.cc
+++ b/src/libexpr/common-eval-args.cc
@@ -61,6 +61,14 @@ MixEvalArgs::MixEvalArgs()
fetchers::overrideRegistry(from.input, to.input, extraAttrs);
}}
});
+
+ addFlag({
+ .longName = "eval-store",
+ .description = "The Nix store to use for evaluations.",
+ .category = category,
+ .labels = {"store-url"},
+ .handler = {&evalStoreUrl},
+ });
}
Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
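
The new `--eval-store` flag is what feeds `EvalCommand::getEvalStore()` shown earlier in this patch; a one-line sketch of the consumption, condensed from that hunk:

    // An explicit URL opens a separate store for evaluation-time
    // artifacts (e.g. .drv files); otherwise the build store serves both.
    ref<Store> evalStore = evalStoreUrl ? openStore(*evalStoreUrl) : getStore();
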
diff --git a/src/libexpr/common-eval-args.hh b/src/libexpr/common-eval-args.hh
index be7fda783..0e113fff1 100644
--- a/src/libexpr/common-eval-args.hh
+++ b/src/libexpr/common-eval-args.hh
@@ -16,8 +16,9 @@ struct MixEvalArgs : virtual Args
Strings searchPath;
-private:
+ std::optional<std::string> evalStoreUrl;
+private:
std::map<std::string, std::string> autoArgs;
};
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 11a61da26..a20123f34 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -65,7 +65,11 @@ static char * dupStringWithLen(const char * s, size_t size)
RootValue allocRootValue(Value * v)
{
+#if HAVE_BOEHMGC
return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v);
+#else
+ return std::make_shared<Value *>(v);
+#endif
}
@@ -234,22 +238,34 @@ static void * oomHandler(size_t requested)
}
class BoehmGCStackAllocator : public StackAllocator {
- boost::coroutines2::protected_fixedsize_stack stack {
- // We allocate 8 MB, the default max stack size on NixOS.
- // A smaller stack might be quicker to allocate but reduces the stack
- // depth available for source filter expressions etc.
- std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))
+ boost::coroutines2::protected_fixedsize_stack stack {
+ // We allocate 8 MB, the default max stack size on NixOS.
+ // A smaller stack might be quicker to allocate but reduces the stack
+ // depth available for source filter expressions etc.
+ std::max(boost::context::stack_traits::default_size(), static_cast<std::size_t>(8 * 1024 * 1024))
};
+ // This is specific to boost::coroutines2::protected_fixedsize_stack.
+ // The stack protection page is included in sctx.size, so we have to
+ // subtract one page size from the stack size.
+ std::size_t pfss_usable_stack_size(boost::context::stack_context &sctx) {
+ return sctx.size - boost::context::stack_traits::page_size();
+ }
+
public:
boost::context::stack_context allocate() override {
auto sctx = stack.allocate();
- GC_add_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp);
+
+ // Stacks generally start at a high address and grow to lower addresses.
+ // Architectures that do the opposite are rare; in fact so rare that
+ // Boost.Coroutine2 does not implement it.
+ // So we subtract the stack size.
+ GC_add_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
return sctx;
}
void deallocate(boost::context::stack_context sctx) override {
- GC_remove_roots(static_cast<char *>(sctx.sp) - sctx.size, sctx.sp);
+ GC_remove_roots(static_cast<char *>(sctx.sp) - pfss_usable_stack_size(sctx), sctx.sp);
stack.deallocate(sctx);
}
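
The allocator change above registers each coroutine stack with the Boehm GC. Since stacks grow downward and `protected_fixedsize_stack` counts its guard page in `sctx.size`, the scanned interval has to subtract both; a sketch of the arithmetic, using the names from the hunk:

    // sctx.sp is the top of the stack; the guard page is unusable and
    // must not be handed to the collector.
    char * top    = static_cast<char *>(sctx.sp);
    size_t usable = sctx.size - boost::context::stack_traits::page_size();
    GC_add_roots(top - usable, top);   // scan [top - usable, top)
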
@@ -363,7 +379,10 @@ static Strings parseNixPath(const string & s)
}
-EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
+EvalState::EvalState(
+ const Strings & _searchPath,
+ ref<Store> store,
+ std::shared_ptr<Store> buildStore)
: sWith(symbols.create("<with>"))
, sOutPath(symbols.create("outPath"))
, sDrvPath(symbols.create("drvPath"))
@@ -396,6 +415,7 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
, sEpsilon(symbols.create(""))
, repair(NoRepair)
, store(store)
+ , buildStore(buildStore ? buildStore : store)
, regexCache(makeRegexCache())
, baseEnv(allocEnv(128))
, staticBaseEnv(new StaticEnv(false, 0))
@@ -426,12 +446,12 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store)
StorePathSet closure;
store->computeFSClosure(store->toStorePath(r.second).first, closure);
for (auto & path : closure)
- allowedPaths->insert(store->printStorePath(path));
+ allowPath(path);
} catch (InvalidPath &) {
- allowedPaths->insert(r.second);
+ allowPath(r.second);
}
} else
- allowedPaths->insert(r.second);
+ allowPath(r.second);
}
}
@@ -446,6 +466,35 @@ EvalState::~EvalState()
}
+void EvalState::requireExperimentalFeatureOnEvaluation(
+ const ExperimentalFeature & feature,
+ const std::string_view fName,
+ const Pos & pos)
+{
+ if (!settings.isExperimentalFeatureEnabled(feature)) {
+ throw EvalError({
+ .msg = hintfmt(
+ "Cannot call '%2%' because experimental Nix feature '%1%' is disabled. You can enable it via '--extra-experimental-features %1%'.",
+ feature,
+ fName
+ ),
+ .errPos = pos
+ });
+ }
+}
+
+void EvalState::allowPath(const Path & path)
+{
+ if (allowedPaths)
+ allowedPaths->insert(path);
+}
+
+void EvalState::allowPath(const StorePath & storePath)
+{
+ if (allowedPaths)
+ allowedPaths->insert(store->toRealPath(storePath));
+}
+
Path EvalState::checkSourcePath(const Path & path_)
{
if (!allowedPaths) return path_;
@@ -472,7 +521,7 @@ Path EvalState::checkSourcePath(const Path & path_)
}
if (!found)
- throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", abspath);
+ throw RestrictedPathError("access to absolute path '%1%' is forbidden in restricted mode", abspath);
/* Resolve symlinks. */
debug(format("checking access to '%s'") % abspath);
@@ -485,7 +534,7 @@ Path EvalState::checkSourcePath(const Path & path_)
}
}
- throw RestrictedPathError("access to path '%1%' is forbidden in restricted mode", path);
+ throw RestrictedPathError("access to canonical path '%1%' is forbidden in restricted mode", path);
}
@@ -535,14 +584,20 @@ Value * EvalState::addConstant(const string & name, Value & v)
{
Value * v2 = allocValue();
*v2 = v;
- staticBaseEnv->vars[symbols.create(name)] = baseEnvDispl;
- baseEnv.values[baseEnvDispl++] = v2;
- string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
- baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v2));
+ addConstant(name, v2);
return v2;
}
+void EvalState::addConstant(const string & name, Value * v)
+{
+ staticBaseEnv.vars.emplace_back(symbols.create(name), baseEnvDispl);
+ baseEnv.values[baseEnvDispl++] = v;
+ string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name;
+ baseEnv.values[0]->attrs->push_back(Attr(symbols.create(name2), v));
+}
+
+
Value * EvalState::addPrimOp(const string & name,
size_t arity, PrimOpFun primOp)
{
@@ -561,7 +616,7 @@ Value * EvalState::addPrimOp(const string & name,
Value * v = allocValue();
v->mkPrimOp(new PrimOp { .fun = primOp, .arity = arity, .name = sym });
- staticBaseEnv->vars[symbols.create(name)] = baseEnvDispl;
+ staticBaseEnv->vars.emplace_back(symbols.create(name), baseEnvDispl);
baseEnv.values[baseEnvDispl++] = v;
baseEnv.values[0]->attrs->push_back(Attr(sym, v));
return v;
@@ -587,7 +642,7 @@ Value * EvalState::addPrimOp(PrimOp && primOp)
Value * v = allocValue();
v->mkPrimOp(new PrimOp(std::move(primOp)));
- staticBaseEnv->vars[envName] = baseEnvDispl;
+ staticBaseEnv->vars.emplace_back(envName, baseEnvDispl);
baseEnv.values[baseEnvDispl++] = v;
baseEnv.values[0]->attrs->push_back(Attr(primOp.name, v));
return v;
@@ -861,7 +916,7 @@ void mkPath(Value & v, const char * s)
inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
{
- for (size_t l = var.level; l; --l, env = env->up) ;
+ for (auto l = var.level; l; --l, env = env->up) ;
if (!var.fromWith) return env->values[var.displ];
@@ -875,7 +930,7 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
}
Bindings::iterator j = env->values[0]->attrs->find(var.name);
if (j != env->values[0]->attrs->end()) {
- if (countCalls && j->pos) attrSelects[*j->pos]++;
+ if (countCalls) attrSelects[*j->pos]++;
return j->value;
}
if (!env->prevWith) {
@@ -886,18 +941,10 @@ inline Value * EvalState::lookupVar(Env * env, const ExprVar & var, bool noEval)
}
-std::atomic<uint64_t> nrValuesFreed{0};
-
-void finalizeValue(void * obj, void * data)
-{
- nrValuesFreed++;
-}
-
Value * EvalState::allocValue()
{
nrValues++;
auto v = (Value *) allocBytes(sizeof(Value));
- //GC_register_finalizer_no_order(v, finalizeValue, nullptr, nullptr, nullptr);
return v;
}
@@ -949,9 +996,9 @@ void EvalState::mkThunk_(Value & v, Expr * expr)
}
-void EvalState::mkPos(Value & v, Pos * pos)
+void EvalState::mkPos(Value & v, ptr<Pos> pos)
{
- if (pos && pos->file.set()) {
+ if (pos->file.set()) {
mkAttrs(v, 3);
mkString(*allocAttr(v, sFile), pos->file);
mkInt(*allocAttr(v, sLine), pos->line);
@@ -974,39 +1021,37 @@ Value * Expr::maybeThunk(EvalState & state, Env & env)
}
-unsigned long nrAvoided = 0;
-
Value * ExprVar::maybeThunk(EvalState & state, Env & env)
{
Value * v = state.lookupVar(&env, *this, true);
/* The value might not be initialised in the environment yet.
In that case, ignore it. */
- if (v) { nrAvoided++; return v; }
+ if (v) { state.nrAvoided++; return v; }
return Expr::maybeThunk(state, env);
}
Value * ExprString::maybeThunk(EvalState & state, Env & env)
{
- nrAvoided++;
+ state.nrAvoided++;
return &v;
}
Value * ExprInt::maybeThunk(EvalState & state, Env & env)
{
- nrAvoided++;
+ state.nrAvoided++;
return &v;
}
Value * ExprFloat::maybeThunk(EvalState & state, Env & env)
{
- nrAvoided++;
+ state.nrAvoided++;
return &v;
}
Value * ExprPath::maybeThunk(EvalState & state, Env & env)
{
- nrAvoided++;
+ state.nrAvoided++;
return &v;
}
@@ -1021,45 +1066,56 @@ void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial)
return;
}
- Path path2 = resolveExprPath(path);
- if ((i = fileEvalCache.find(path2)) != fileEvalCache.end()) {
+ Path resolvedPath = resolveExprPath(path);
+ if ((i = fileEvalCache.find(resolvedPath)) != fileEvalCache.end()) {
v = i->second;
return;
}
- printTalkative("evaluating file '%1%'", path2);
+ printTalkative("evaluating file '%1%'", resolvedPath);
Expr * e = nullptr;
- auto j = fileParseCache.find(path2);
+ auto j = fileParseCache.find(resolvedPath);
if (j != fileParseCache.end())
e = j->second;
if (!e)
- e = parseExprFromFile(checkSourcePath(path2));
+ e = parseExprFromFile(checkSourcePath(resolvedPath));
+
+ cacheFile(path, resolvedPath, e, v, mustBeTrivial);
+}
+
+
+void EvalState::resetFileCache()
+{
+ fileEvalCache.clear();
+ fileParseCache.clear();
+}
- fileParseCache[path2] = e;
+
+void EvalState::cacheFile(
+ const Path & path,
+ const Path & resolvedPath,
+ Expr * e,
+ Value & v,
+ bool mustBeTrivial)
+{
+ fileParseCache[resolvedPath] = e;
try {
// Enforce that 'flake.nix' is a direct attrset, not a
// computation.
if (mustBeTrivial &&
!(dynamic_cast<ExprAttrs *>(e)))
- throw Error("file '%s' must be an attribute set", path);
+ throw EvalError("file '%s' must be an attribute set", path);
eval(e, v);
} catch (Error & e) {
- addErrorTrace(e, "while evaluating the file '%1%':", path2);
+ addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath);
throw;
}
- fileEvalCache[path2] = v;
- if (path != path2) fileEvalCache[path] = v;
-}
-
-
-void EvalState::resetFileCache()
-{
- fileEvalCache.clear();
- fileParseCache.clear();
+ fileEvalCache[resolvedPath] = v;
+ if (path != resolvedPath) fileEvalCache[path] = v;
}
@@ -1144,7 +1200,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
/* The recursive attributes are evaluated in the new
environment, while the inherited attributes are evaluated
in the original environment. */
- size_t displ = 0;
+ Displacement displ = 0;
for (auto & i : attrs) {
Value * vAttr;
if (hasOverrides && !i.second.inherited) {
@@ -1153,7 +1209,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
} else
vAttr = i.second.e->maybeThunk(state, i.second.inherited ? env : env2);
env2.values[displ++] = vAttr;
- v.attrs->push_back(Attr(i.first, vAttr, &i.second.pos));
+ v.attrs->push_back(Attr(i.first, vAttr, ptr(&i.second.pos)));
}
/* If the rec contains an attribute called `__overrides', then
@@ -1185,7 +1241,7 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
else
for (auto & i : attrs)
- v.attrs->push_back(Attr(i.first, i.second.e->maybeThunk(state, env), &i.second.pos));
+ v.attrs->push_back(Attr(i.first, i.second.e->maybeThunk(state, env), ptr(&i.second.pos)));
/* Dynamic attrs apply *after* rec and __overrides. */
for (auto & i : dynamicAttrs) {
@@ -1203,11 +1259,11 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
i.valueExpr->setName(nameSym);
/* Keep sorted order so find can catch duplicates */
- v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), &i.pos));
+ v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), ptr(&i.pos)));
v.attrs->sort(); // FIXME: inefficient
}
- v.attrs->pos = &pos;
+ v.attrs->pos = ptr(&pos);
}
@@ -1221,7 +1277,7 @@ void ExprLet::eval(EvalState & state, Env & env, Value & v)
/* The recursive attributes are evaluated in the new environment,
while the inherited attributes are evaluated in the original
environment. */
- size_t displ = 0;
+ Displacement displ = 0;
for (auto & i : attrs->attrs)
env2.values[displ++] = i.second.e->maybeThunk(state, i.second.inherited ? env : env2);
@@ -1262,12 +1318,10 @@ static string showAttrPath(EvalState & state, Env & env, const AttrPath & attrPa
}
-unsigned long nrLookups = 0;
-
void ExprSelect::eval(EvalState & state, Env & env, Value & v)
{
Value vTmp;
- Pos * pos2 = 0;
+ ptr<Pos> pos2(&noPos);
Value * vAttrs = &vTmp;
e->eval(state, env, vTmp);
@@ -1275,7 +1329,7 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
try {
for (auto & i : attrPath) {
- nrLookups++;
+ state.nrLookups++;
Bindings::iterator j;
Symbol name = getName(i, state, env);
if (def) {
@@ -1293,13 +1347,13 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
}
vAttrs = j->value;
pos2 = j->pos;
- if (state.countCalls && pos2) state.attrSelects[*pos2]++;
+ if (state.countCalls) state.attrSelects[*pos2]++;
}
- state.forceValue(*vAttrs, ( pos2 != NULL ? *pos2 : this->pos ) );
+ state.forceValue(*vAttrs, (*pos2 != noPos ? *pos2 : this->pos ) );
} catch (Error & e) {
- if (pos2 && pos2->file != state.sDerivationNix)
+ if (*pos2 != noPos && pos2->file != state.sDerivationNix)
addErrorTrace(e, *pos2, "while evaluating the attribute '%1%'",
showAttrPath(state, env, attrPath));
throw;
@@ -1340,160 +1394,183 @@ void ExprLambda::eval(EvalState & state, Env & env, Value & v)
}
-void ExprApp::eval(EvalState & state, Env & env, Value & v)
+void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value & vRes, const Pos & pos)
{
- /* FIXME: vFun prevents GCC from doing tail call optimisation. */
- Value vFun;
- e1->eval(state, env, vFun);
- state.callFunction(vFun, *(e2->maybeThunk(state, env)), v, pos);
-}
+ auto trace = evalSettings.traceFunctionCalls ? std::make_unique<FunctionCallTrace>(pos) : nullptr;
+ forceValue(fun, pos);
-void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos)
-{
- /* Figure out the number of arguments still needed. */
- size_t argsDone = 0;
- Value * primOp = &fun;
- while (primOp->isPrimOpApp()) {
- argsDone++;
- primOp = primOp->primOpApp.left;
- }
- assert(primOp->isPrimOp());
- auto arity = primOp->primOp->arity;
- auto argsLeft = arity - argsDone;
-
- if (argsLeft == 1) {
- /* We have all the arguments, so call the primop. */
-
- /* Put all the arguments in an array. */
- Value * vArgs[arity];
- auto n = arity - 1;
- vArgs[n--] = &arg;
- for (Value * arg = &fun; arg->isPrimOpApp(); arg = arg->primOpApp.left)
- vArgs[n--] = arg->primOpApp.right;
-
- /* And call the primop. */
- nrPrimOpCalls++;
- if (countCalls) primOpCalls[primOp->primOp->name]++;
- primOp->primOp->fun(*this, pos, vArgs, v);
- } else {
- Value * fun2 = allocValue();
- *fun2 = fun;
- v.mkPrimOpApp(fun2, &arg);
- }
-}
+ Value vCur(fun);
+ auto makeAppChain = [&]()
+ {
+ vRes = vCur;
+ for (size_t i = 0; i < nrArgs; ++i) {
+ auto fun2 = allocValue();
+ *fun2 = vRes;
+ vRes.mkPrimOpApp(fun2, args[i]);
+ }
+ };
-void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & pos)
-{
- auto trace = evalSettings.traceFunctionCalls ? std::make_unique<FunctionCallTrace>(pos) : nullptr;
+ Attr * functor;
- forceValue(fun, pos);
+ while (nrArgs > 0) {
- if (fun.isPrimOp() || fun.isPrimOpApp()) {
- callPrimOp(fun, arg, v, pos);
- return;
- }
+ if (vCur.isLambda()) {
- if (fun.type() == nAttrs) {
- auto found = fun.attrs->find(sFunctor);
- if (found != fun.attrs->end()) {
- /* fun may be allocated on the stack of the calling function,
- * but for functors we may keep a reference, so heap-allocate
- * a copy and use that instead.
- */
- auto & fun2 = *allocValue();
- fun2 = fun;
- /* !!! Should we use the attr pos here? */
- Value v2;
- callFunction(*found->value, fun2, v2, pos);
- return callFunction(v2, arg, v, pos);
- }
- }
+ ExprLambda & lambda(*vCur.lambda.fun);
- if (!fun.isLambda()) {
- throwTypeError(
- pos,
- "attempt to call something which is not a function but %1%",
- showType(fun).c_str(),
- fakeEnv(1), 0);
- }
+ auto size =
+ (lambda.arg.empty() ? 0 : 1) +
+ (lambda.hasFormals() ? lambda.formals->formals.size() : 0);
+ Env & env2(allocEnv(size));
+ env2.up = vCur.lambda.env;
- ExprLambda & lambda(*fun.lambda.fun);
+ Displacement displ = 0;
- auto size =
- (lambda.arg.empty() ? 0 : 1) +
- (lambda.matchAttrs ? lambda.formals->formals.size() : 0);
- Env & env2(allocEnv(size));
- env2.up = fun.lambda.env;
+ if (!lambda.hasFormals())
+ env2.values[displ++] = args[0];
+ else {
+ forceAttrs(*args[0], pos);
- size_t displ = 0;
+ if (!lambda.arg.empty())
+ env2.values[displ++] = args[0];
- if (!lambda.matchAttrs){
- env2.values[displ++] = &arg;
- }
- else {
- forceAttrs(arg, pos);
-
- if (!lambda.arg.empty())
- env2.values[displ++] = &arg;
-
- /* For each formal argument, get the actual argument. If
- there is no matching actual argument but the formal
- argument has a default, use the default. */
- size_t attrsUsed = 0;
- for (auto & i : lambda.formals->formals) {
- Bindings::iterator j = arg.attrs->find(i.name);
- if (j == arg.attrs->end()) {
- if (!i.def)
- throwTypeError(
- pos,
- "%1% called without required argument '%2%'",
- lambda,
- i.name,
- *fun.lambda.env, &lambda);
- env2.values[displ++] = i.def->maybeThunk(*this, env2);
+ /* For each formal argument, get the actual argument. If
+ there is no matching actual argument but the formal
+ argument has a default, use the default. */
+ size_t attrsUsed = 0;
+ for (auto & i : lambda.formals->formals) {
+ auto j = args[0]->attrs->get(i.name);
+ if (!j) {
+ if (!i.def) throwTypeError(pos, "%1% called without required argument '%2%'",
+ lambda, i.name, *vCur.lambda.env, &lambda);
+ env2.values[displ++] = i.def->maybeThunk(*this, env2);
+ } else {
+ attrsUsed++;
+ env2.values[displ++] = j->value;
+ }
+ }
+
+ /* Check that each actual argument is listed as a formal
+ argument (unless the attribute match specifies a `...'). */
+ if (!lambda.formals->ellipsis && attrsUsed != args[0]->attrs->size()) {
+ /* Nope, so show the first unexpected argument to the
+ user. */
+ for (auto & i : *args[0]->attrs)
+ if (lambda.formals->argNames.find(i.name) == lambda.formals->argNames.end())
+ throwTypeError(pos, "%1% called with unexpected argument '%2%'", lambda, i.name);
+ abort(); // can't happen
+ }
+ }
+
+ nrFunctionCalls++;
+ if (countCalls) incrFunctionCall(&lambda);
+
+ /* Evaluate the body. */
+ try {
+ lambda.body->eval(*this, env2, vCur);
+ } catch (Error & e) {
+ if (loggerSettings.showTrace.get()) {
+ addErrorTrace(e, lambda.pos, "while evaluating %s",
+ (lambda.name.set()
+ ? "'" + (string) lambda.name + "'"
+ : "anonymous lambda"));
+ addErrorTrace(e, pos, "from call site%s", "");
+ }
+ throw;
+ }
+
+ nrArgs--;
+ args += 1;
+ }
+
+ else if (vCur.isPrimOp()) {
+
+ size_t argsLeft = vCur.primOp->arity;
+
+ if (nrArgs < argsLeft) {
+ /* We don't have enough arguments, so create a tPrimOpApp chain. */
+ makeAppChain();
+ return;
} else {
- attrsUsed++;
- env2.values[displ++] = j->value;
+ /* We have all the arguments, so call the primop. */
+ nrPrimOpCalls++;
+ if (countCalls) primOpCalls[vCur.primOp->name]++;
+ vCur.primOp->fun(*this, pos, args, vCur);
+
+ nrArgs -= argsLeft;
+ args += argsLeft;
}
}
+ else if (vCur.isPrimOpApp()) {
+ /* Figure out the number of arguments still needed. */
+ size_t argsDone = 0;
+ Value * primOp = &vCur;
+ while (primOp->isPrimOpApp()) {
+ argsDone++;
+ primOp = primOp->primOpApp.left;
+ }
+ assert(primOp->isPrimOp());
+ auto arity = primOp->primOp->arity;
+ auto argsLeft = arity - argsDone;
+
+ if (nrArgs < argsLeft) {
+ /* We still don't have enough arguments, so extend the tPrimOpApp chain. */
+ makeAppChain();
+ return;
+ } else {
+ /* We have all the arguments, so call the primop with
+ the previous and new arguments. */
+
+ Value * vArgs[arity];
+ auto n = argsDone;
+ for (Value * arg = &vCur; arg->isPrimOpApp(); arg = arg->primOpApp.left)
+ vArgs[--n] = arg->primOpApp.right;
+
+ for (size_t i = 0; i < argsLeft; ++i)
+ vArgs[argsDone + i] = args[i];
+
+ nrPrimOpCalls++;
+ if (countCalls) primOpCalls[primOp->primOp->name]++;
+ primOp->primOp->fun(*this, pos, vArgs, vCur);
+
+ nrArgs -= argsLeft;
+ args += argsLeft;
+ }
+ }
- /* Check that each actual argument is listed as a formal
- argument (unless the attribute match specifies a `...'). */
- if (!lambda.formals->ellipsis && attrsUsed != arg.attrs->size()) {
- /* Nope, so show the first unexpected argument to the
- user. */
- for (auto & i : *arg.attrs)
- if (lambda.formals->argNames.find(i.name) == lambda.formals->argNames.end())
- throwTypeError(pos,
- "%1% called with unexpected argument '%2%'",
- lambda,
- i.name,
- *fun.lambda.env, &lambda);
- abort(); // can't happen
+ else if (vCur.type() == nAttrs && (functor = vCur.attrs->get(sFunctor))) {
+ /* 'vCur' may be allocated on the stack of the calling
+ function, but for functors we may keep a reference, so
+ heap-allocate a copy and use that instead. */
+ Value * args2[] = {allocValue(), args[0]};
+ *args2[0] = vCur;
+ /* !!! Should we use the attr pos here? */
+ callFunction(*functor->value, 2, args2, vCur, pos);
+ nrArgs--;
+ args++;
}
+
+ else
+ throwTypeError(pos, "attempt to call something which is not a function but %1%", vCur);
}
- nrFunctionCalls++;
- if (countCalls) incrFunctionCall(&lambda);
+ vRes = vCur;
+}
- /* Evaluate the body. This is conditional on showTrace, because
- catching exceptions makes this function not tail-recursive. */
- if (loggerSettings.showTrace.get())
- try {
- lambda.body->eval(*this, env2, v);
- } catch (Error & e) {
- addErrorTrace(e, lambda.pos, "while evaluating %s",
- (lambda.name.set()
- ? "'" + (string) lambda.name + "'"
- : "anonymous lambda"));
- addErrorTrace(e, pos, "from call site%s", "");
- throw;
- }
- else
- fun.lambda.fun->body->eval(*this, env2, v);
+
+void ExprCall::eval(EvalState & state, Env & env, Value & v)
+{
+ Value vFun;
+ fun->eval(state, env, vFun);
+
+ Value * vArgs[args.size()];
+ for (size_t i = 0; i < args.size(); ++i)
+ vArgs[i] = args[i]->maybeThunk(state, env);
+
+ state.callFunction(vFun, args.size(), vArgs, v, pos);
}
@@ -1519,7 +1596,7 @@ void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
}
}
- if (!fun.isLambda() || !fun.lambda.fun->matchAttrs) {
+ if (!fun.isLambda() || !fun.lambda.fun->hasFormals()) {
res = fun;
return;
}
@@ -1722,7 +1799,6 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
and none of the strings are allowed to have contexts. */
if (first) {
firstType = vTmp.type();
- first = false;
}
if (firstType == nInt) {
@@ -1744,7 +1820,12 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
} else
throwEvalError(pos, "cannot add %1% to a float", showType(vTmp), env, this);
} else
- s << state.coerceToString(pos, vTmp, context, false, firstType == nString);
+ /* Skip canonicalization of the first path: it can only be
+ non-canonical in the first place if it comes from a ./${foo}-style
+ interpolated path. */
+ s << state.coerceToString(pos, vTmp, context, false, firstType == nString, !first);
+
+ first = false;
}
if (firstType == nInt)
@@ -1763,7 +1844,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v)
void ExprPos::eval(EvalState & state, Env & env, Value & v)
{
- state.mkPos(v, &pos);
+ state.mkPos(v, ptr(&pos));
}
@@ -1935,7 +2016,7 @@ std::optional<string> EvalState::tryAttrsToString(const Pos & pos, Value & v,
}
string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
- bool coerceMore, bool copyToStore)
+ bool coerceMore, bool copyToStore, bool canonicalizePath)
{
forceValue(v, pos);
@@ -1947,7 +2028,7 @@ string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
}
if (v.type() == nPath) {
- Path path(canonPath(v.path));
+ Path path(canonicalizePath ? canonPath(v.path) : v.path);
return copyToStore ? copyPathToStore(context, path) : path;
}
@@ -2010,6 +2091,7 @@ string EvalState::copyPathToStore(PathSet & context, const Path & path)
? store->computeStorePathForPath(std::string(baseNameOf(path)), checkSourcePath(path)).first
: store->addToStore(std::string(baseNameOf(path)), checkSourcePath(path), FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, repair);
dstPath = store->printStorePath(p);
+ allowPath(p);
srcToStore.insert_or_assign(path, std::move(p));
printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, dstPath);
}
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
index 91e43ddfe..485c2df83 100644
--- a/src/libexpr/eval.hh
+++ b/src/libexpr/eval.hh
@@ -5,6 +5,7 @@
#include "nixexpr.hh"
#include "symbol-table.hh"
#include "config.hh"
+#include "experimental-features.hh"
#include <map>
#include <optional>
@@ -98,8 +99,14 @@ public:
Value vEmptySet;
+ /* Store used to materialise .drv files. */
const ref<Store> store;
+ /* Store used to build stuff. */
+ const ref<Store> buildStore;
+
+ RootValue vCallFlake = nullptr;
+ RootValue vImportedDrvToDerivation = nullptr;
private:
SrcToStore srcToStore;
@@ -132,13 +139,31 @@ private:
public:
- EvalState(const Strings & _searchPath, ref<Store> store);
+ EvalState(
+ const Strings & _searchPath,
+ ref<Store> store,
+ std::shared_ptr<Store> buildStore = nullptr);
~EvalState();
+ void requireExperimentalFeatureOnEvaluation(
+ const ExperimentalFeature &,
+ const std::string_view fName,
+ const Pos & pos
+ );
+
void addToSearchPath(const string & s);
SearchPath getSearchPath() { return searchPath; }
+ /* Allow access to a path. */
+ void allowPath(const Path & path);
+
+ /* Allow access to a store path. Note that this gets remapped to
+ the real store path if `store` is a chroot store. */
+ void allowPath(const StorePath & storePath);
+
+ /* Check whether access to a path is allowed and throw an error if
+ not. Otherwise return the canonicalised path. */
Path checkSourcePath(const Path & path);
void checkURI(const std::string & uri);
@@ -167,6 +192,14 @@ public:
trivial (i.e. doesn't require arbitrary computation). */
void evalFile(const Path & path, Value & v, bool mustBeTrivial = false);
+ /* Like `evalFile`, but with an already parsed expression. */
+ void cacheFile(
+ const Path & path,
+ const Path & resolvedPath,
+ Expr * e,
+ Value & v,
+ bool mustBeTrivial = false);
+
void resetFileCache();
/* Look up a file in the search path. */
@@ -221,7 +254,8 @@ public:
booleans and lists to a string. If `copyToStore' is set,
referenced paths are copied to the Nix store as a side effect. */
string coerceToString(const Pos & pos, Value & v, PathSet & context,
- bool coerceMore = false, bool copyToStore = true);
+ bool coerceMore = false, bool copyToStore = true,
+ bool canonicalizePath = true);
string copyPathToStore(PathSet & context, const Path & path);
@@ -247,6 +281,8 @@ private:
Value * addConstant(const string & name, Value & v);
+ void addConstant(const string & name, Value * v);
+
Value * addPrimOp(const string & name,
size_t arity, PrimOpFun primOp);
@@ -286,8 +322,14 @@ public:
bool isFunctor(Value & fun);
- void callFunction(Value & fun, Value & arg, Value & v, const Pos & pos);
- void callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos);
+ // FIXME: use std::span
+ void callFunction(Value & fun, size_t nrArgs, Value * * args, Value & vRes, const Pos & pos);
+
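+ /* Convenience overload: call 'fun' with a single argument. */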
+ void callFunction(Value & fun, Value & arg, Value & vRes, const Pos & pos)
+ {
+ Value * args[] = {&arg};
+ callFunction(fun, 1, args, vRes, pos);
+ }
/* Automatically call a function for which each argument has a
default value or has a binding in the `args' map. */
@@ -305,7 +347,7 @@ public:
void mkList(Value & v, size_t length);
void mkAttrs(Value & v, size_t capacity);
void mkThunk_(Value & v, Expr * expr);
- void mkPos(Value & v, Pos * pos);
+ void mkPos(Value & v, ptr<Pos> pos);
void concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos);
@@ -320,8 +362,10 @@ private:
unsigned long nrValuesInEnvs = 0;
unsigned long nrValues = 0;
unsigned long nrListElems = 0;
+ unsigned long nrLookups = 0;
unsigned long nrAttrsets = 0;
unsigned long nrAttrsInAttrsets = 0;
+ unsigned long nrAvoided = 0;
unsigned long nrOpUpdates = 0;
unsigned long nrOpUpdateValuesCopied = 0;
unsigned long nrListConcats = 0;
@@ -343,6 +387,11 @@ private:
friend struct ExprOpUpdate;
friend struct ExprOpConcatLists;
+ friend struct ExprVar;
+ friend struct ExprString;
+ friend struct ExprInt;
+ friend struct ExprFloat;
+ friend struct ExprPath;
friend struct ExprSelect;
friend void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v);
friend void prim_match(EvalState & state, const Pos & pos, Value * * args, Value & v);
diff --git a/src/libexpr/flake/config.cc b/src/libexpr/flake/config.cc
index 63566131e..c03f4106c 100644
--- a/src/libexpr/flake/config.cc
+++ b/src/libexpr/flake/config.cc
@@ -1,4 +1,5 @@
#include "flake.hh"
+#include "globals.hh"
#include <nlohmann/json.hpp>
@@ -22,12 +23,14 @@ static TrustedList readTrustedList()
static void writeTrustedList(const TrustedList & trustedList)
{
- writeFile(trustedListPath(), nlohmann::json(trustedList).dump());
+ auto path = trustedListPath();
+ createDirs(dirOf(path));
+ writeFile(path, nlohmann::json(trustedList).dump());
}
void ConfigFile::apply()
{
- std::set<std::string> whitelist{"bash-prompt", "bash-prompt-suffix"};
+ std::set<std::string> whitelist{"bash-prompt", "bash-prompt-suffix", "flake-registry"};
for (auto & [name, value] : settings) {
@@ -50,21 +53,19 @@ void ConfigFile::apply()
auto trustedList = readTrustedList();
bool trusted = false;
-
- if (auto saved = get(get(trustedList, name).value_or(std::map<std::string, bool>()), valueS)) {
+ if (nix::settings.acceptFlakeConfig) {
+ trusted = true;
+ } else if (auto saved = get(get(trustedList, name).value_or(std::map<std::string, bool>()), valueS)) {
trusted = *saved;
+ warn("Using saved setting for '%s = %s' from ~/.local/share/nix/trusted-settings.json.", name,valueS);
} else {
// FIXME: filter ANSI escapes, newlines, \r, etc.
- if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) != 'y') {
- if (std::tolower(logger->ask("do you want to permanently mark this value as untrusted (y/N)?").value_or('n')) == 'y') {
- trustedList[name][valueS] = false;
- writeTrustedList(trustedList);
- }
- } else {
- if (std::tolower(logger->ask("do you want to permanently mark this value as trusted (y/N)?").value_or('n')) == 'y') {
- trustedList[name][valueS] = trusted = true;
- writeTrustedList(trustedList);
- }
+ if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) == 'y') {
+ trusted = true;
+ }
+ if (std::tolower(logger->ask(fmt("do you want to permanently mark this value as %s (y/N)?", trusted ? "trusted": "untrusted" )).value_or('n')) == 'y') {
+ trustedList[name][valueS] = trusted;
+ writeTrustedList(trustedList);
}
}
diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc
index 2e94490d4..f5be67d67 100644
--- a/src/libexpr/flake/flake.cc
+++ b/src/libexpr/flake/flake.cc
@@ -1,4 +1,5 @@
#include "flake.hh"
+#include "eval.hh"
#include "lockfile.hh"
#include "primops.hh"
#include "eval-inline.hh"
@@ -63,8 +64,7 @@ static std::tuple<fetchers::Tree, FlakeRef, FlakeRef> fetchOrSubstituteTree(
debug("got tree '%s' from '%s'",
state.store->printStorePath(tree.storePath), lockedRef);
- if (state.allowedPaths)
- state.allowedPaths->insert(tree.actualPath);
+ state.allowPath(tree.storePath);
assert(!originalRef.input.getNarHash() || tree.storePath == originalRef.input.computeStorePath(*state.store));
@@ -88,10 +88,12 @@ static void expectType(EvalState & state, ValueType type,
}
static std::map<FlakeId, FlakeInput> parseFlakeInputs(
- EvalState & state, Value * value, const Pos & pos);
+ EvalState & state, Value * value, const Pos & pos,
+ const std::optional<Path> & baseDir);
static FlakeInput parseFlakeInput(EvalState & state,
- const std::string & inputName, Value * value, const Pos & pos)
+ const std::string & inputName, Value * value, const Pos & pos,
+ const std::optional<Path> & baseDir)
{
expectType(state, nAttrs, *value, pos);
@@ -115,7 +117,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
expectType(state, nBool, *attr.value, *attr.pos);
input.isFlake = attr.value->boolean;
} else if (attr.name == sInputs) {
- input.overrides = parseFlakeInputs(state, attr.value, *attr.pos);
+ input.overrides = parseFlakeInputs(state, attr.value, *attr.pos, baseDir);
} else if (attr.name == sFollows) {
expectType(state, nString, *attr.value, *attr.pos);
input.follows = parseInputPath(attr.value->string.s);
@@ -153,7 +155,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
if (!attrs.empty())
throw Error("unexpected flake input attribute '%s', at %s", attrs.begin()->first, pos);
if (url)
- input.ref = parseFlakeRef(*url, {}, true);
+ input.ref = parseFlakeRef(*url, baseDir, true);
}
if (!input.follows && !input.ref)
@@ -163,7 +165,8 @@ static FlakeInput parseFlakeInput(EvalState & state,
}
static std::map<FlakeId, FlakeInput> parseFlakeInputs(
- EvalState & state, Value * value, const Pos & pos)
+ EvalState & state, Value * value, const Pos & pos,
+ const std::optional<Path> & baseDir)
{
std::map<FlakeId, FlakeInput> inputs;
@@ -174,7 +177,8 @@ static std::map<FlakeId, FlakeInput> parseFlakeInputs(
parseFlakeInput(state,
inputAttr.name,
inputAttr.value,
- *inputAttr.pos));
+ *inputAttr.pos,
+ baseDir));
}
return inputs;
@@ -190,7 +194,8 @@ static Flake getFlake(
state, originalRef, allowLookup, flakeCache);
// Guard against symlink attacks.
- auto flakeFile = canonPath(sourceInfo.actualPath + "/" + lockedRef.subdir + "/flake.nix");
+ auto flakeDir = canonPath(sourceInfo.actualPath + "/" + lockedRef.subdir);
+ auto flakeFile = canonPath(flakeDir + "/flake.nix");
if (!isInDir(flakeFile, sourceInfo.actualPath))
throw Error("'flake.nix' file of flake '%s' escapes from '%s'",
lockedRef, state.store->printStorePath(sourceInfo.storePath));
@@ -218,14 +223,14 @@ static Flake getFlake(
auto sInputs = state.symbols.create("inputs");
if (auto inputs = vInfo.attrs->get(sInputs))
- flake.inputs = parseFlakeInputs(state, inputs->value, *inputs->pos);
+ flake.inputs = parseFlakeInputs(state, inputs->value, *inputs->pos, flakeDir);
auto sOutputs = state.symbols.create("outputs");
if (auto outputs = vInfo.attrs->get(sOutputs)) {
expectType(state, nFunction, *outputs->value, *outputs->pos);
- if (outputs->value->isLambda() && outputs->value->lambda.fun->matchAttrs) {
+ if (outputs->value->isLambda() && outputs->value->lambda.fun->hasFormals()) {
for (auto & formal : outputs->value->lambda.fun->formals->formals) {
if (formal.name != state.sSelf)
flake.inputs.emplace(formal.name, FlakeInput {
@@ -292,11 +297,18 @@ LockedFlake lockFlake(
const FlakeRef & topRef,
const LockFlags & lockFlags)
{
- settings.requireExperimentalFeature("flakes");
+ settings.requireExperimentalFeature(Xp::Flakes);
FlakeCache flakeCache;
- auto flake = getFlake(state, topRef, lockFlags.useRegistries, flakeCache);
+ auto useRegistries = lockFlags.useRegistries.value_or(settings.useRegistries);
+
+ auto flake = getFlake(state, topRef, useRegistries, flakeCache);
+
+ if (lockFlags.applyNixConfig) {
+ flake.config.apply();
+ state.store->setOptions();
+ }
try {
@@ -317,25 +329,38 @@ LockedFlake lockFlake(
std::vector<FlakeRef> parents;
+ struct LockParent {
+ /* The path to this parent. */
+ InputPath path;
+
+ /* Whether we are currently inside a top-level lockfile
+ (inputs absolute) or subordinate lockfile (inputs
+ relative). */
+ bool absolute;
+ };
+
std::function<void(
const FlakeInputs & flakeInputs,
std::shared_ptr<Node> node,
const InputPath & inputPathPrefix,
- std::shared_ptr<const Node> oldNode)>
+ std::shared_ptr<const Node> oldNode,
+ const LockParent & parent,
+ const Path & parentPath)>
computeLocks;
computeLocks = [&](
const FlakeInputs & flakeInputs,
std::shared_ptr<Node> node,
const InputPath & inputPathPrefix,
- std::shared_ptr<const Node> oldNode)
+ std::shared_ptr<const Node> oldNode,
+ const LockParent & parent,
+ const Path & parentPath)
{
debug("computing lock file node '%s'", printInputPath(inputPathPrefix));
/* Get the overrides (i.e. attributes of the form
'inputs.nixops.inputs.nixpkgs.url = ...'). */
- // FIXME: check this
- for (auto & [id, input] : flake.inputs) {
+ for (auto & [id, input] : flakeInputs) {
for (auto & [idOverride, inputOverride] : input.overrides) {
auto inputPath(inputPathPrefix);
inputPath.push_back(id);
@@ -359,22 +384,31 @@ LockedFlake lockFlake(
ancestors? */
auto i = overrides.find(inputPath);
bool hasOverride = i != overrides.end();
- if (hasOverride) overridesUsed.insert(inputPath);
+ if (hasOverride) {
+ overridesUsed.insert(inputPath);
+ // Respect the “flakeness” of the input even if we
+ // override it
+ i->second.isFlake = input2.isFlake;
+ }
auto & input = hasOverride ? i->second : input2;
/* Resolve 'follows' later (since it may refer to an input
path we haven't processed yet). */
if (input.follows) {
InputPath target;
- if (hasOverride || input.absolute)
- /* 'follows' from an override is relative to the
- root of the graph. */
+
+ if (parent.absolute && !hasOverride) {
target = *input.follows;
- else {
- /* Otherwise, it's relative to the current flake. */
- target = inputPathPrefix;
+ } else {
+ if (hasOverride) {
+ target = inputPathPrefix;
+ target.pop_back();
+ } else
+ target = parent.path;
+
for (auto & i : *input.follows) target.push_back(i);
}
+
debug("input '%s' follows '%s'", inputPathS, printInputPath(target));
node->inputs.insert_or_assign(id, target);
continue;
@@ -412,22 +446,18 @@ LockedFlake lockFlake(
update it. */
auto lb = lockFlags.inputUpdates.lower_bound(inputPath);
- auto hasChildUpdate =
+ auto mustRefetch =
lb != lockFlags.inputUpdates.end()
&& lb->size() > inputPath.size()
&& std::equal(inputPath.begin(), inputPath.end(), lb->begin());
- if (hasChildUpdate) {
- auto inputFlake = getFlake(
- state, oldLock->lockedRef, false, flakeCache);
- computeLocks(inputFlake.inputs, childNode, inputPath, oldLock);
- } else {
+ FlakeInputs fakeInputs;
+
+ if (!mustRefetch) {
/* No need to fetch this flake, we can be
lazy. However there may be new overrides on the
inputs of this flake, so we need to check
those. */
- FlakeInputs fakeInputs;
-
for (auto & i : oldLock->inputs) {
if (auto lockedNode = std::get_if<0>(&i.second)) {
fakeInputs.emplace(i.first, FlakeInput {
@@ -435,16 +465,28 @@ LockedFlake lockFlake(
.isFlake = (*lockedNode)->isFlake,
});
} else if (auto follows = std::get_if<1>(&i.second)) {
+ auto o = input.overrides.find(i.first);
+ // If the override disappeared, we have to refetch the flake,
+ // since some of the inputs may not be present in the lockfile.
+ if (o == input.overrides.end()) {
+ mustRefetch = true;
+ // There's no point populating the rest of the fake inputs,
+ // since we'll refetch the flake anyways.
+ break;
+ }
fakeInputs.emplace(i.first, FlakeInput {
.follows = *follows,
- .absolute = true
});
}
}
-
- computeLocks(fakeInputs, childNode, inputPath, oldLock);
}
+ computeLocks(
+ mustRefetch
+ ? getFlake(state, oldLock->lockedRef, false, flakeCache).inputs
+ : fakeInputs,
+ childNode, inputPath, oldLock, parent, parentPath);
+
} else {
/* We need to create a new lock file entry. So fetch
this input. */
@@ -454,7 +496,15 @@ LockedFlake lockFlake(
throw Error("cannot update flake input '%s' in pure mode", inputPathS);
if (input.isFlake) {
- auto inputFlake = getFlake(state, *input.ref, lockFlags.useRegistries, flakeCache);
+ Path localPath = parentPath;
+ FlakeRef localRef = *input.ref;
+
+ // If this input is a path, recurse it down.
+ // This allows us to resolve path inputs relative to the current flake.
+ if (localRef.input.getType() == "path")
+ localPath = absPath(*input.ref->input.getSourcePath(), parentPath);
+
+ auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache);
/* Note: in case of an --override-input, we use
the *original* ref (input2.ref) for the
@@ -475,6 +525,13 @@ LockedFlake lockFlake(
parents.push_back(*input.ref);
Finally cleanup([&]() { parents.pop_back(); });
+ // Follows paths from existing inputs in the top-level lockfile are absolute,
+ // whereas paths in subordinate lockfiles are relative to those lockfiles.
+ LockParent newParent {
+ .path = inputPath,
+ .absolute = oldLock ? true : false
+ };
+
/* Recursively process the inputs of this
flake. Also, unless we already have this flake
in the top-level lock file, use this flake's
@@ -484,12 +541,13 @@ LockedFlake lockFlake(
oldLock
? std::dynamic_pointer_cast<const Node>(oldLock)
: LockFile::read(
- inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root);
+ inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root,
+ newParent, localPath);
}
else {
auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
- state, *input.ref, lockFlags.useRegistries, flakeCache);
+ state, *input.ref, useRegistries, flakeCache);
node->inputs.insert_or_assign(id,
std::make_shared<LockedNode>(lockedRef, *input.ref, false));
}
@@ -502,9 +560,17 @@ LockedFlake lockFlake(
}
};
+ LockParent parent {
+ .path = {},
+ .absolute = true
+ };
+
+ // Bring in the current ref for relative path resolution if we have it
+ auto parentPath = canonPath(flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir);
+
computeLocks(
flake.inputs, newLockFile.root, {},
- lockFlags.recreateLockFile ? nullptr : oldLockFile.root);
+ lockFlags.recreateLockFile ? nullptr : oldLockFile.root, parent, parentPath);
for (auto & i : lockFlags.inputOverrides)
if (!overridesUsed.count(i.first))
@@ -554,8 +620,8 @@ LockedFlake lockFlake(
topRef.input.markChangedFile(
(topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock",
lockFlags.commitLockFile
- ? std::optional<std::string>(fmt("%s: %s\n\nFlake input changes:\n\n%s",
- relPath, lockFileExists ? "Update" : "Add", diff))
+ ? std::optional<std::string>(fmt("%s: %s\n\nFlake lock file changes:\n\n%s",
+ relPath, lockFileExists ? "Update" : "Add", filterANSIEscapes(diff, true)))
: std::nullopt);
/* Rewriting the lockfile changed the top-level
@@ -563,7 +629,7 @@ LockedFlake lockFlake(
also just clear the 'rev' field... */
auto prevLockedRef = flake.lockedRef;
FlakeCache dummyCache;
- flake = getFlake(state, topRef, lockFlags.useRegistries, dummyCache);
+ flake = getFlake(state, topRef, useRegistries, dummyCache);
if (lockFlags.commitLockFile &&
flake.lockedRef.input.getRev() &&
@@ -580,8 +646,10 @@ LockedFlake lockFlake(
}
} else
throw Error("cannot write modified lock file of flake '%s' (use '--no-write-lock-file' to ignore)", topRef);
- } else
+ } else {
warn("not writing modified lock file of flake '%s':\n%s", topRef, chomp(diff));
+ flake.forceDirty = true;
+ }
}
return LockedFlake { .flake = std::move(flake), .lockFile = std::move(newLockFile) };
@@ -604,26 +672,32 @@ void callFlake(EvalState & state,
mkString(*vLocks, lockedFlake.lockFile.to_string());
- emitTreeAttrs(state, *lockedFlake.flake.sourceInfo, lockedFlake.flake.lockedRef.input, *vRootSrc);
+ emitTreeAttrs(
+ state,
+ *lockedFlake.flake.sourceInfo,
+ lockedFlake.flake.lockedRef.input,
+ *vRootSrc,
+ false,
+ lockedFlake.flake.forceDirty);
mkString(*vRootSubdir, lockedFlake.flake.lockedRef.subdir);
- static RootValue vCallFlake = nullptr;
-
- if (!vCallFlake) {
- vCallFlake = allocRootValue(state.allocValue());
+ if (!state.vCallFlake) {
+ state.vCallFlake = allocRootValue(state.allocValue());
state.eval(state.parseExprFromString(
#include "call-flake.nix.gen.hh"
- , "/"), **vCallFlake);
+ , "/"), **state.vCallFlake);
}
- state.callFunction(**vCallFlake, *vLocks, *vTmp1, noPos);
+ state.callFunction(**state.vCallFlake, *vLocks, *vTmp1, noPos);
state.callFunction(*vTmp1, *vRootSrc, *vTmp2, noPos);
state.callFunction(*vTmp2, *vRootSubdir, vRes, noPos);
}
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
+ state.requireExperimentalFeatureOnEvaluation(Xp::Flakes, "builtins.getFlake", pos);
+
auto flakeRefS = state.forceStringNoCtx(*args[0], pos);
auto flakeRef = parseFlakeRef(flakeRefS, {}, true);
if (evalSettings.pureEval && !flakeRef.input.isImmutable())
@@ -633,13 +707,13 @@ static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Va
lockFlake(state, flakeRef,
LockFlags {
.updateLockFile = false,
- .useRegistries = !evalSettings.pureEval,
+ .useRegistries = !evalSettings.pureEval && settings.useRegistries,
.allowMutable = !evalSettings.pureEval,
}),
v);
}
-static RegisterPrimOp r2("__getFlake", 1, prim_getFlake, "flakes");
+static RegisterPrimOp r2("__getFlake", 1, prim_getFlake);
}
@@ -649,8 +723,9 @@ Fingerprint LockedFlake::getFingerprint() const
// and we haven't changed it, then it's sufficient to use
// flake.sourceInfo.storePath for the fingerprint.
return hashString(htSHA256,
- fmt("%s;%d;%d;%s",
+ fmt("%s;%s;%d;%d;%s",
flake.sourceInfo->storePath.to_string(),
+ flake.lockedRef.subdir,
flake.lockedRef.input.getRevCount().value_or(0),
flake.lockedRef.input.getLastModified().value_or(0),
lockFile));
diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh
index d17d5e183..524b18af1 100644
--- a/src/libexpr/flake/flake.hh
+++ b/src/libexpr/flake/flake.hh
@@ -43,7 +43,6 @@ struct FlakeInput
std::optional<FlakeRef> ref;
bool isFlake = true; // true = process flake to get outputs, false = (fetched) static source path
std::optional<InputPath> follows;
- bool absolute = false; // whether 'follows' is relative to the flake root
FlakeInputs overrides;
};
@@ -59,9 +58,10 @@ struct ConfigFile
/* The contents of a flake.nix file. */
struct Flake
{
- FlakeRef originalRef; // the original flake specification (by the user)
- FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake
- FlakeRef lockedRef; // the specific local store result of invoking the fetcher
+ FlakeRef originalRef; // the original flake specification (by the user)
+ FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake
+ FlakeRef lockedRef; // the specific local store result of invoking the fetcher
+ bool forceDirty = false; // pretend that 'lockedRef' is dirty
std::optional<std::string> description;
std::shared_ptr<const fetchers::Tree> sourceInfo;
FlakeInputs inputs;
@@ -102,7 +102,11 @@ struct LockFlags
/* Whether to use the registries to lookup indirect flake
references like 'nixpkgs'. */
- bool useRegistries = true;
+ std::optional<bool> useRegistries = std::nullopt;
+
+ /* Whether to apply flake's nixConfig attribute to the configuration */
+
+ bool applyNixConfig = false;
/* Whether mutable flake references (i.e. those without a Git
revision or similar) without a corresponding lock are
@@ -137,6 +141,8 @@ void emitTreeAttrs(
EvalState & state,
const fetchers::Tree & tree,
const fetchers::Input & input,
- Value & v, bool emptyRevFallback = false);
+ Value & v,
+ bool emptyRevFallback = false,
+ bool forceDirty = false);
}
diff --git a/src/libexpr/flake/flakeref.cc b/src/libexpr/flake/flakeref.cc
index 833e8a776..29128d789 100644
--- a/src/libexpr/flake/flakeref.cc
+++ b/src/libexpr/flake/flakeref.cc
@@ -172,8 +172,12 @@ std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
auto parsedURL = parseURL(url);
std::string fragment;
std::swap(fragment, parsedURL.fragment);
+
+ auto input = Input::fromURL(parsedURL);
+ input.parent = baseDir;
+
return std::make_pair(
- FlakeRef(Input::fromURL(parsedURL), get(parsedURL.query, "dir").value_or("")),
+ FlakeRef(std::move(input), get(parsedURL.query, "dir").value_or("")),
fragment);
}
}
diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc
index 6089d1363..fda340789 100644
--- a/src/libexpr/flake/lockfile.cc
+++ b/src/libexpr/flake/lockfile.cc
@@ -2,6 +2,8 @@
#include "store-api.hh"
#include "url-parts.hh"
+#include <iomanip>
+
#include <nlohmann/json.hpp>
namespace nix::flake {
@@ -268,10 +270,20 @@ std::map<InputPath, Node::Edge> LockFile::getAllInputs() const
return res;
}
+static std::string describe(const FlakeRef & flakeRef)
+{
+ auto s = fmt("'%s'", flakeRef.to_string());
+
+ if (auto lastModified = flakeRef.input.getLastModified())
+ s += fmt(" (%s)", std::put_time(std::gmtime(&*lastModified), "%Y-%m-%d"));
+
+ return s;
+}
+
std::ostream & operator <<(std::ostream & stream, const Node::Edge & edge)
{
if (auto node = std::get_if<0>(&edge))
- stream << "'" << (*node)->lockedRef << "'";
+ stream << describe((*node)->lockedRef);
else if (auto follows = std::get_if<1>(&edge))
stream << fmt("follows '%s'", printInputPath(*follows));
return stream;
@@ -299,14 +311,15 @@ std::string LockFile::diff(const LockFile & oldLocks, const LockFile & newLocks)
while (i != oldFlat.end() || j != newFlat.end()) {
if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) {
- res += fmt("* Added '%s': %s\n", printInputPath(j->first), j->second);
+ res += fmt("• " ANSI_GREEN "Added input '%s':" ANSI_NORMAL "\n %s\n",
+ printInputPath(j->first), j->second);
++j;
} else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) {
- res += fmt("* Removed '%s'\n", printInputPath(i->first));
+ res += fmt("• " ANSI_RED "Removed input '%s'" ANSI_NORMAL "\n", printInputPath(i->first));
++i;
} else {
if (!equals(i->second, j->second)) {
- res += fmt("* Updated '%s': %s -> %s\n",
+ res += fmt("• " ANSI_BOLD "Updated input '%s':" ANSI_NORMAL "\n %s\n → %s\n",
printInputPath(i->first),
i->second,
j->second);
diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l
index 7298419d9..c18877e29 100644
--- a/src/libexpr/lexer.l
+++ b/src/libexpr/lexer.l
@@ -9,6 +9,9 @@
%s DEFAULT
%x STRING
%x IND_STRING
+%x INPATH
+%x INPATH_SLASH
+%x PATH_START
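+
+/* PATH_START handles path literals that begin with an interpolation
+   (e.g. ./${foo}); INPATH and INPATH_SLASH scan the remaining
+   segments, with INPATH_SLASH recording that the last segment ended
+   in '/' so that a trailing slash can be rejected. */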
%{
@@ -25,6 +28,8 @@ using namespace nix;
namespace nix {
+// backup to recover from yyless(0)
+YYLTYPE prev_yylloc;
static void initLoc(YYLTYPE * loc)
{
@@ -35,14 +40,18 @@ static void initLoc(YYLTYPE * loc)
static void adjustLoc(YYLTYPE * loc, const char * s, size_t len)
{
+ prev_yylloc = *loc;
+
loc->first_line = loc->last_line;
loc->first_column = loc->last_column;
- while (len--) {
+ for (size_t i = 0; i < len; i++) {
switch (*s++) {
case '\r':
- if (*s == '\n') /* cr/lf */
+ if (*s == '\n') { /* cr/lf */
+ i++;
s++;
+ }
/* fall through */
case '\n':
++loc->last_line;
@@ -55,6 +64,7 @@ static void adjustLoc(YYLTYPE * loc, const char * s, size_t len)
}
+// FIXME: optimize
static Expr * unescapeStr(SymbolTable & symbols, const char * s, size_t length)
{
string t;
@@ -95,9 +105,12 @@ ANY .|\n
ID [a-zA-Z\_][a-zA-Z0-9\_\'\-]*
INT [0-9]+
FLOAT (([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)?
-PATH [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+\/?
-HPATH \~(\/[a-zA-Z0-9\.\_\-\+]+)+\/?
-SPATH \<[a-zA-Z0-9\.\_\-\+]+(\/[a-zA-Z0-9\.\_\-\+]+)*\>
+PATH_CHAR [a-zA-Z0-9\.\_\-\+]
+PATH {PATH_CHAR}*(\/{PATH_CHAR}+)+\/?
+PATH_SEG {PATH_CHAR}*\/
+HPATH \~(\/{PATH_CHAR}+)+\/?
+HPATH_START \~\/
+SPATH \<{PATH_CHAR}+(\/{PATH_CHAR}+)*\>
URI [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+
@@ -198,17 +211,75 @@ or { return OR_KW; }
return IND_STR;
}
+{PATH_SEG}\$\{ |
+{HPATH_START}\$\{ {
+ PUSH_STATE(PATH_START);
+ yyless(0);
+ *yylloc = prev_yylloc;
+}
+
+<PATH_START>{PATH_SEG} {
+ POP_STATE();
+ PUSH_STATE(INPATH_SLASH);
+ yylval->path = strdup(yytext);
+ return PATH;
+}
+
+<PATH_START>{HPATH_START} {
+ POP_STATE();
+ PUSH_STATE(INPATH_SLASH);
+ yylval->path = strdup(yytext);
+ return HPATH;
+}
+
+{PATH} {
+ if (yytext[yyleng-1] == '/')
+ PUSH_STATE(INPATH_SLASH);
+ else
+ PUSH_STATE(INPATH);
+ yylval->path = strdup(yytext);
+ return PATH;
+}
+{HPATH} {
+ if (yytext[yyleng-1] == '/')
+ PUSH_STATE(INPATH_SLASH);
+ else
+ PUSH_STATE(INPATH);
+ yylval->path = strdup(yytext);
+ return HPATH;
+}
+
+<INPATH,INPATH_SLASH>\$\{ {
+ POP_STATE();
+ PUSH_STATE(INPATH);
+ PUSH_STATE(DEFAULT);
+ return DOLLAR_CURLY;
+}
+<INPATH,INPATH_SLASH>{PATH}|{PATH_SEG}|{PATH_CHAR}+ {
+ POP_STATE();
+ if (yytext[yyleng-1] == '/')
+ PUSH_STATE(INPATH_SLASH);
+ else
+ PUSH_STATE(INPATH);
+ yylval->e = new ExprString(data->symbols.create(string(yytext)));
+ return STR;
+}
+<INPATH>{ANY} |
+<INPATH><<EOF>> {
+ /* if we encounter a non-path character we inform the parser that the path has
+ ended with a PATH_END token and re-parse this character in the default
+ context (it may be ')', ';', or something of that sort) */
+ POP_STATE();
+ yyless(0);
+ *yylloc = prev_yylloc;
+ return PATH_END;
+}
+
+<INPATH_SLASH>{ANY} |
+<INPATH_SLASH><<EOF>> {
+ throw ParseError("path has a trailing slash");
+}
-{PATH} { if (yytext[yyleng-1] == '/')
- throw ParseError("path '%s' has a trailing slash", yytext);
- yylval->path = strdup(yytext);
- return PATH;
- }
-{HPATH} { if (yytext[yyleng-1] == '/')
- throw ParseError("path '%s' has a trailing slash", yytext);
- yylval->path = strdup(yytext);
- return HPATH;
- }
{SPATH} { yylval->path = strdup(yytext); return SPATH; }
{URI} { yylval->uri = strdup(yytext); return URI; }
diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk
index 26c53d301..016631647 100644
--- a/src/libexpr/local.mk
+++ b/src/libexpr/local.mk
@@ -15,8 +15,8 @@ libexpr_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/lib
libexpr_LIBS = libutil libstore libfetchers
-libexpr_LDFLAGS = -lboost_context
-ifneq ($(OS), FreeBSD)
+libexpr_LDFLAGS += -lboost_context -pthread
+ifdef HOST_LINUX
libexpr_LDFLAGS += -ldl
endif
@@ -35,7 +35,7 @@ $(d)/lexer-tab.cc $(d)/lexer-tab.hh: $(d)/lexer.l
clean-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh
-$(eval $(call install-file-in, $(d)/nix-expr.pc, $(prefix)/lib/pkgconfig, 0644))
+$(eval $(call install-file-in, $(d)/nix-expr.pc, $(libdir)/pkgconfig, 0644))
$(foreach i, $(wildcard src/libexpr/flake/*.hh), \
$(eval $(call install-file-in, $(i), $(includedir)/nix/flake, 0644)))
diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc
index 3e42789a2..696b149e3 100644
--- a/src/libexpr/nixexpr.cc
+++ b/src/libexpr/nixexpr.cc
@@ -123,7 +123,7 @@ void ExprList::show(std::ostream & str) const
void ExprLambda::show(std::ostream & str) const
{
str << "(";
- if (matchAttrs) {
+ if (hasFormals()) {
str << "{ ";
bool first = true;
for (auto & i : formals->formals) {
@@ -142,6 +142,16 @@ void ExprLambda::show(std::ostream & str) const
str << ": " << *body << ")";
}
+void ExprCall::show(std::ostream & str) const
+{
+ str << '(' << *fun;
+ for (auto e : args) {
+ str << ' ';
+ str << *e;
+ }
+ str << ')';
+}
+
void ExprLet::show(std::ostream & str) const
{
str << "(let ";
@@ -273,13 +283,13 @@ void ExprVar::bindVars(const std::shared_ptr<const StaticEnv> &env)
/* Check whether the variable appears in the environment. If so,
set its level and displacement. */
const StaticEnv * curEnv;
- unsigned int level;
+ Level level;
int withLevel = -1;
for (curEnv = env.get(), level = 0; curEnv; curEnv = curEnv->up, level++) {
if (curEnv->isWith) {
if (withLevel == -1) withLevel = level;
} else {
- StaticEnv::Vars::const_iterator i = curEnv->vars.find(name);
+ auto i = curEnv->find(name);
if (i != curEnv->vars.end()) {
fromWith = false;
this->level = level;
@@ -332,12 +342,13 @@ void ExprAttrs::bindVars(const std::shared_ptr<const StaticEnv> &env)
staticenv = env;
if (recursive) {
- auto newEnv = std::shared_ptr<StaticEnv>(new StaticEnv(false, env.get()));
+ auto newEnv = std::shared_ptr<StaticEnv>(new StaticEnv(false, env.get(), attrs.size()));
- unsigned int displ = 0;
- for (auto & i : attrs) {
- newEnv->vars[i.first] = i.second.displ = displ++;
- }
+ Displacement displ = 0;
+ for (auto & i : attrs)
+ newEnv->vars.emplace_back(i.first, i.second.displ = displ++);
+
+ // No need to sort newEnv since attrs is in sorted order.
for (auto & i : attrs)
i.second.e->bindVars(i.second.inherited ? env : newEnv);
@@ -372,15 +383,21 @@ void ExprLambda::bindVars(const std::shared_ptr<const StaticEnv> &env)
if (debuggerHook)
staticenv = env;
- auto newEnv = std::shared_ptr<StaticEnv>(new StaticEnv(false, env.get()));
+ auto newEnv = std::shared_ptr<StaticEnv>(
+ new StaticEnv(
+ false, env.get(),
+ (hasFormals() ? formals->formals.size() : 0) +
+ (arg.empty() ? 0 : 1)));
- unsigned int displ = 0;
+ Displacement displ = 0;
- if (!arg.empty()) newEnv->vars[arg] = displ++;
+ if (!arg.empty()) newEnv->vars.emplace_back(arg, displ++);
- if (matchAttrs) {
+ if (hasFormals()) {
for (auto & i : formals->formals)
- newEnv->vars[i.name] = displ++;
+ newEnv->vars.emplace_back(i.name, displ++);
+
+ newEnv->sort();
for (auto & i : formals->formals)
if (i.def) i.def->bindVars(newEnv);
@@ -389,16 +406,28 @@ void ExprLambda::bindVars(const std::shared_ptr<const StaticEnv> &env)
body->bindVars(newEnv);
}
-void ExprLet::bindVars(const std::shared_ptr<const StaticEnv> &env)
+void ExprCall::bindVars(const std::shared_ptr<const StaticEnv> &env)
{
if (debuggerHook)
staticenv = env;
- auto newEnv = std::shared_ptr<StaticEnv>(new StaticEnv(false, env.get()));
+ fun->bindVars(env);
+ for (auto e : args)
+ e->bindVars(env);
+}
+
+void ExprLet::bindVars(const std::shared_ptr<const StaticEnv> &env)
+{
+ if (debuggerHook)
+ staticenv = env;
+
+ auto newEnv = std::shared_ptr<StaticEnv>(new StaticEnv(false, env.get(), attrs->attrs.size()));
- unsigned int displ = 0;
+ Displacement displ = 0;
for (auto & i : attrs->attrs)
- newEnv->vars[i.first] = i.second.displ = displ++;
+ newEnv->vars.emplace_back(i.first, i.second.displ = displ++);
+
+ // No need to sort newEnv since attrs->attrs is in sorted order.
for (auto & i : attrs->attrs)
i.second.e->bindVars(i.second.inherited ? env : newEnv);
@@ -415,7 +444,7 @@ void ExprWith::bindVars(const std::shared_ptr<const StaticEnv> &env)
level so that `lookupVar' can look up variables in the previous
`with' if this one doesn't contain the desired attribute. */
const StaticEnv * curEnv;
- unsigned int level;
+ Level level;
prevWith = 0;
for (curEnv = env.get(), level = 1; curEnv; curEnv = curEnv->up, level++)
if (curEnv->isWith) {
@@ -503,5 +532,4 @@ size_t SymbolTable::totalSize() const
return n;
}
-
}
diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh
index a78ea6215..825933fa1 100644
--- a/src/libexpr/nixexpr.hh
+++ b/src/libexpr/nixexpr.hh
@@ -4,8 +4,6 @@
#include "symbol-table.hh"
#include "error.hh"
-#include <map>
-
namespace nix {
@@ -138,6 +136,9 @@ struct ExprPath : Expr
Value * maybeThunk(EvalState & state, Env & env);
};
+typedef uint32_t Level;
+typedef uint32_t Displacement;
+
struct ExprVar : Expr
{
Pos pos;
@@ -153,8 +154,8 @@ struct ExprVar : Expr
value is obtained by getting the attribute named `name' from
the set stored in the environment that is `level' levels up
from the current one.*/
- unsigned int level;
- unsigned int displ;
+ Level level;
+ Displacement displ;
ExprVar(const Symbol & name) : name(name) { };
ExprVar(const Pos & pos, const Symbol & name) : pos(pos), name(name) { };
@@ -188,7 +189,7 @@ struct ExprAttrs : Expr
bool inherited;
Expr * e;
Pos pos;
- unsigned int displ; // displacement
+ Displacement displ; // displacement
AttrDef(Expr * e, const Pos & pos, bool inherited=false)
: inherited(inherited), e(e), pos(pos) { };
AttrDef() { };
@@ -236,11 +237,10 @@ struct ExprLambda : Expr
Pos pos;
Symbol name;
Symbol arg;
- bool matchAttrs;
Formals * formals;
Expr * body;
- ExprLambda(const Pos & pos, const Symbol & arg, bool matchAttrs, Formals * formals, Expr * body)
- : pos(pos), arg(arg), matchAttrs(matchAttrs), formals(formals), body(body)
+ ExprLambda(const Pos & pos, const Symbol & arg, Formals * formals, Expr * body)
+ : pos(pos), arg(arg), formals(formals), body(body)
{
if (!arg.empty() && formals && formals->argNames.find(arg) != formals->argNames.end())
throw ParseError({
@@ -250,6 +250,18 @@ struct ExprLambda : Expr
};
void setName(Symbol & name);
string showNamePos() const;
+ inline bool hasFormals() const { return formals != nullptr; }
+ COMMON_METHODS
+};
+
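+/* A function application 'f x y', stored as a single call node with
+   a flat argument vector rather than nested binary applications. */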
+struct ExprCall : Expr
+{
+ Expr * fun;
+ std::vector<Expr *> args;
+ Pos pos;
+ ExprCall(const Pos & pos, Expr * fun, std::vector<Expr *> && args)
+ : fun(fun), args(args), pos(pos)
+ { }
COMMON_METHODS
};
@@ -311,7 +323,6 @@ struct ExprOpNot : Expr
void eval(EvalState & state, Env & env, Value & v); \
};
-MakeBinOp(ExprApp, "")
MakeBinOp(ExprOpEq, "==")
MakeBinOp(ExprOpNEq, "!=")
MakeBinOp(ExprOpAnd, "&&")
@@ -345,9 +356,28 @@ struct StaticEnv
{
bool isWith;
const StaticEnv * up;
- typedef std::map<Symbol, unsigned int> Vars;
+
+ // Note: these must be in sorted order.
+ typedef std::vector<std::pair<Symbol, Displacement>> Vars;
Vars vars;
- StaticEnv(bool isWith, const StaticEnv * up) : isWith(isWith), up(up) { };
+
+ StaticEnv(bool isWith, const StaticEnv * up, size_t expectedSize = 0) : isWith(isWith), up(up) {
+ vars.reserve(expectedSize);
+ };
+
+ void sort()
+ {
+ std::sort(vars.begin(), vars.end(),
+ [](const Vars::value_type & a, const Vars::value_type & b) { return a.first < b.first; });
+ }
+
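+ /* Binary search over the sorted 'vars' vector; callers must have
+ called sort() after the last insertion. */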
+ Vars::const_iterator find(const Symbol & name) const
+ {
+ Vars::value_type key(name, 0);
+ auto i = std::lower_bound(vars.begin(), vars.end(), key);
+ if (i != vars.end() && i->first == name) return i;
+ return vars.end();
+ }
};
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
index d1e898677..58af0df7d 100644
--- a/src/libexpr/parser.y
+++ b/src/libexpr/parser.y
@@ -33,11 +33,9 @@ namespace nix {
Symbol file;
FileOrigin origin;
std::optional<ErrorInfo> error;
- Symbol sLetBody;
ParseData(EvalState & state)
: state(state)
, symbols(state.symbols)
- , sLetBody(symbols.create("<let-body>"))
{ };
};
@@ -126,14 +124,14 @@ static void addAttr(ExprAttrs * attrs, AttrPath & attrPath,
auto j2 = jAttrs->attrs.find(ad.first);
if (j2 != jAttrs->attrs.end()) // Attr already defined in iAttrs, error.
dupAttr(ad.first, j2->second.pos, ad.second.pos);
- jAttrs->attrs[ad.first] = ad.second;
+ jAttrs->attrs.emplace(ad.first, ad.second);
}
} else {
dupAttr(attrPath, pos, j->second.pos);
}
} else {
// This attr path is not defined. Let's create it.
- attrs->attrs[i->symbol] = ExprAttrs::AttrDef(e, pos);
+ attrs->attrs.emplace(i->symbol, ExprAttrs::AttrDef(e, pos));
e->setName(i->symbol);
}
} else {
@@ -283,20 +281,20 @@ void yyerror(YYLTYPE * loc, yyscan_t scanner, ParseData * data, const char * err
}
%type <e> start expr expr_function expr_if expr_op
-%type <e> expr_app expr_select expr_simple
+%type <e> expr_select expr_simple expr_app
%type <list> expr_list
%type <attrs> binds
%type <formals> formals
%type <formal> formal
%type <attrNames> attrs attrpath
%type <string_parts> string_parts_interpolated ind_string_parts
-%type <e> string_parts string_attr
+%type <e> path_start string_parts string_attr
%type <id> attr
%token <id> ID ATTRPATH
%token <e> STR IND_STR
%token <n> INT
%token <nf> FLOAT
-%token <path> PATH HPATH SPATH
+%token <path> PATH HPATH SPATH PATH_END
%token <uri> URI
%token IF THEN ELSE ASSERT WITH LET IN REC INHERIT EQ NEQ AND OR IMPL OR_KW
%token DOLLAR_CURLY /* == ${ */
@@ -324,13 +322,13 @@ expr: expr_function;
expr_function
: ID ':' expr_function
- { $$ = new ExprLambda(CUR_POS, data->symbols.create($1), false, 0, $3); }
+ { $$ = new ExprLambda(CUR_POS, data->symbols.create($1), 0, $3); }
| '{' formals '}' ':' expr_function
- { $$ = new ExprLambda(CUR_POS, data->symbols.create(""), true, $2, $5); }
+ { $$ = new ExprLambda(CUR_POS, data->symbols.create(""), $2, $5); }
| '{' formals '}' '@' ID ':' expr_function
- { $$ = new ExprLambda(CUR_POS, data->symbols.create($5), true, $2, $7); }
+ { $$ = new ExprLambda(CUR_POS, data->symbols.create($5), $2, $7); }
| ID '@' '{' formals '}' ':' expr_function
- { $$ = new ExprLambda(CUR_POS, data->symbols.create($1), true, $4, $7); }
+ { $$ = new ExprLambda(CUR_POS, data->symbols.create($1), $4, $7); }
| ASSERT expr ';' expr_function
{ $$ = new ExprAssert(CUR_POS, $2, $4); }
| WITH expr ';' expr_function
@@ -353,13 +351,13 @@ expr_if
expr_op
: '!' expr_op %prec NOT { $$ = new ExprOpNot($2); }
- | '-' expr_op %prec NEGATE { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__sub")), new ExprInt(0)), $2); }
+ | '-' expr_op %prec NEGATE { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__sub")), {new ExprInt(0), $2}); }
| expr_op EQ expr_op { $$ = new ExprOpEq($1, $3); }
| expr_op NEQ expr_op { $$ = new ExprOpNEq($1, $3); }
- | expr_op '<' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $1), $3); }
- | expr_op LEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $3), $1)); }
- | expr_op '>' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $3), $1); }
- | expr_op GEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $1), $3)); }
+ | expr_op '<' expr_op { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__lessThan")), {$1, $3}); }
+ | expr_op LEQ expr_op { $$ = new ExprOpNot(new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__lessThan")), {$3, $1})); }
+ | expr_op '>' expr_op { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__lessThan")), {$3, $1}); }
+ | expr_op GEQ expr_op { $$ = new ExprOpNot(new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__lessThan")), {$1, $3})); }
| expr_op AND expr_op { $$ = new ExprOpAnd(CUR_POS, $1, $3); }
| expr_op OR expr_op { $$ = new ExprOpOr(CUR_POS, $1, $3); }
| expr_op IMPL expr_op { $$ = new ExprOpImpl(CUR_POS, $1, $3); }
@@ -367,17 +365,22 @@ expr_op
| expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, *$3); }
| expr_op '+' expr_op
{ $$ = new ExprConcatStrings(CUR_POS, false, new vector<Expr *>({$1, $3})); }
- | expr_op '-' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__sub")), $1), $3); }
- | expr_op '*' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__mul")), $1), $3); }
- | expr_op '/' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__div")), $1), $3); }
+ | expr_op '-' expr_op { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__sub")), {$1, $3}); }
+ | expr_op '*' expr_op { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__mul")), {$1, $3}); }
+ | expr_op '/' expr_op { $$ = new ExprCall(CUR_POS, new ExprVar(data->symbols.create("__div")), {$1, $3}); }
| expr_op CONCAT expr_op { $$ = new ExprOpConcatLists(CUR_POS, $1, $3); }
| expr_app
;
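+/* Applications 'f x y' are collected into a single ExprCall with a
+   flat argument list instead of a chain of binary ExprApp nodes. */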
expr_app
- : expr_app expr_select
- { $$ = new ExprApp(CUR_POS, $1, $2); }
- | expr_select { $$ = $1; }
+ : expr_app expr_select {
+ if (auto e2 = dynamic_cast<ExprCall *>($1)) {
+ e2->args.push_back($2);
+ $$ = $1;
+ } else
+ $$ = new ExprCall(CUR_POS, $1, {$2});
+ }
+ | expr_select
;
expr_select
@@ -388,7 +391,7 @@ expr_select
| /* Backwards compatibility: because Nixpkgs has a rarely used
function named ‘or’, allow stuff like ‘map or [...]’. */
expr_simple OR_KW
- { $$ = new ExprApp(CUR_POS, $1, new ExprVar(CUR_POS, data->symbols.create("or"))); }
+ { $$ = new ExprCall(CUR_POS, $1, {new ExprVar(CUR_POS, data->symbols.create("or"))}); }
| expr_simple { $$ = $1; }
;
@@ -405,17 +408,20 @@ expr_simple
| IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE {
$$ = stripIndentation(CUR_POS, data->symbols, *$2);
}
- | PATH { $$ = new ExprPath(absPath($1, data->basePath)); }
- | HPATH { $$ = new ExprPath(getHome() + string{$1 + 1}); }
+ | path_start PATH_END { $$ = $1; }
+ | path_start string_parts_interpolated PATH_END {
+ $2->insert($2->begin(), $1);
+ $$ = new ExprConcatStrings(CUR_POS, false, $2);
+ }
| SPATH {
string path($1 + 1, strlen($1) - 2);
- $$ = new ExprApp(CUR_POS,
- new ExprApp(new ExprVar(data->symbols.create("__findFile")),
- new ExprVar(data->symbols.create("__nixPath"))),
- new ExprString(data->symbols.create(path)));
+ $$ = new ExprCall(CUR_POS,
+ new ExprVar(data->symbols.create("__findFile")),
+ {new ExprVar(data->symbols.create("__nixPath")),
+ new ExprString(data->symbols.create(path))});
}
| URI {
- static bool noURLLiterals = settings.isExperimentalFeatureEnabled("no-url-literals");
+ static bool noURLLiterals = settings.isExperimentalFeatureEnabled(Xp::NoUrlLiterals);
if (noURLLiterals)
throw ParseError({
.msg = hintfmt("URL literals are disabled"),
@@ -452,6 +458,20 @@ string_parts_interpolated
}
;
+path_start
+ : PATH {
+ Path path(absPath($1, data->basePath));
+ /* add back in the trailing '/' to the first segment */
+ if ($1[strlen($1)-1] == '/' && strlen($1) > 1)
+ path += "/";
+ $$ = new ExprPath(path);
+ }
+ | HPATH {
+ Path path(getHome() + string($1 + 1));
+ $$ = new ExprPath(path);
+ }
+ ;
+
ind_string_parts
: ind_string_parts IND_STR { $$ = $1; $1->push_back($2); }
| ind_string_parts DOLLAR_CURLY expr '}' { $$ = $1; $1->push_back($3); }
@@ -466,7 +486,7 @@ binds
if ($$->attrs.find(i.symbol) != $$->attrs.end())
dupAttr(i.symbol, makeCurPos(@3, data), $$->attrs[i.symbol].pos);
Pos pos = makeCurPos(@3, data);
- $$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprVar(CUR_POS, i.symbol), pos, true);
+ $$->attrs.emplace(i.symbol, ExprAttrs::AttrDef(new ExprVar(CUR_POS, i.symbol), pos, true));
}
}
| binds INHERIT '(' expr ')' attrs ';'
@@ -475,7 +495,7 @@ binds
for (auto & i : *$6) {
if ($$->attrs.find(i.symbol) != $$->attrs.end())
dupAttr(i.symbol, makeCurPos(@6, data), $$->attrs[i.symbol].pos);
- $$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data));
+ $$->attrs.emplace(i.symbol, ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data)));
}
}
| { $$ = new ExprAttrs(makeCurPos(@0, data)); }
@@ -735,7 +755,7 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
res = { true, path };
else {
logWarning({
- .msg = hintfmt("warning: Nix search path entry '%1%' does not exist, ignoring", elem.second)
+ .msg = hintfmt("Nix search path entry '%1%' does not exist, ignoring", elem.second)
});
res = { false, "" };
}
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index 0400c8942..a9ee96bfa 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -21,6 +21,8 @@
#include <regex>
#include <dlfcn.h>
+#include <cmath>
+
namespace nix {
@@ -50,16 +52,13 @@ void EvalState::realiseContext(const PathSet & context)
if (drvs.empty()) return;
if (!evalSettings.enableImportFromDerivation)
- throw EvalError("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false",
+ throw Error(
+ "cannot build '%1%' during evaluation because the option 'allow-import-from-derivation' is disabled",
store->printStorePath(drvs.begin()->drvPath));
- /* For performance, prefetch all substitute info. */
- StorePathSet willBuild, willSubstitute, unknown;
- uint64_t downloadSize, narSize;
+ /* Build/substitute the context. */
std::vector<DerivedPath> buildReqs;
for (auto & d : drvs) buildReqs.emplace_back(DerivedPath { d });
- store->queryMissing(buildReqs, willBuild, willSubstitute, unknown, downloadSize, narSize);
-
store->buildPaths(buildReqs);
/* Add the output of this derivations to the allowed
@@ -71,7 +70,7 @@ void EvalState::realiseContext(const PathSet & context)
if (outputPaths.count(outputName) == 0)
throw Error("derivation '%s' does not have an output named '%s'",
store->printStorePath(drvPath), outputName);
- allowedPaths->insert(store->printStorePath(outputPaths.at(outputName)));
+ allowPath(outputPaths.at(outputName));
}
}
}
@@ -122,7 +121,7 @@ static void import(EvalState & state, const Pos & pos, Value & vPath, Value * vS
});
} catch (Error & e) {
e.addTrace(pos, "while importing '%s'", path);
- throw e;
+ throw;
}
Path realPath = state.checkSourcePath(state.toRealPath(path, context));
@@ -158,16 +157,15 @@ static void import(EvalState & state, const Pos & pos, Value & vPath, Value * vS
}
w.attrs->sort();
- static RootValue fun;
- if (!fun) {
- fun = allocRootValue(state.allocValue());
+ if (!state.vImportedDrvToDerivation) {
+ state.vImportedDrvToDerivation = allocRootValue(state.allocValue());
state.eval(state.parseExprFromString(
#include "imported-drv-to-derivation.nix.gen.hh"
- , "/"), **fun);
+ , "/"), **state.vImportedDrvToDerivation);
}
- state.forceFunction(**fun, pos);
- mkApp(v, **fun, w);
+ state.forceFunction(**state.vImportedDrvToDerivation, pos);
+ mkApp(v, **state.vImportedDrvToDerivation, w);
state.forceAttrs(v, pos);
}
@@ -186,14 +184,17 @@ static void import(EvalState & state, const Pos & pos, Value & vPath, Value * vS
Env * env = &state.allocEnv(vScope->attrs->size());
env->up = &state.baseEnv;
- auto staticEnv = std::shared_ptr<StaticEnv>(new StaticEnv(false, state.staticBaseEnv.get()));
+ auto staticEnv = std::shared_ptr<StaticEnv>(new StaticEnv(false, state.staticBaseEnv.get(), vScope->attrs->size()));
unsigned int displ = 0;
for (auto & attr : *vScope->attrs) {
- staticEnv->vars[attr.name] = displ;
+ staticEnv->vars.emplace_back(attr.name, displ);
env->values[displ++] = attr.value;
}
+ // No need to call staticEnv->sort(), because
+ // vScope->attrs is already sorted.
+
printTalkative("evaluating file '%1%'", realPath);
Expr * e = state.parseExprFromFile(resolveExprPath(realPath), staticEnv);
@@ -412,7 +413,7 @@ static RegisterPrimOp primop_isNull({
Return `true` if *e* evaluates to `null`, and `false` otherwise.
> **Warning**
- >
+ >
> This function is *deprecated*; just write `e == null` instead.
)",
.fun = prim_isNull,
@@ -516,7 +517,11 @@ static RegisterPrimOp primop_isPath({
struct CompareValues
{
- bool operator () (const Value * v1, const Value * v2) const
+ EvalState & state;
+
+ CompareValues(EvalState & state) : state(state) { };
+
+ bool operator () (Value * v1, Value * v2) const
{
if (v1->type() == nFloat && v2->type() == nInt)
return v1->fpoint < v2->integer;
@@ -533,6 +538,17 @@ struct CompareValues
return strcmp(v1->string.s, v2->string.s) < 0;
case nPath:
return strcmp(v1->path, v2->path) < 0;
+ case nList:
+ // Lexicographic comparison
+ for (size_t i = 0;; i++) {
+ if (i == v2->listSize()) {
+ return false;
+ } else if (i == v1->listSize()) {
+ return true;
+ } else if (!state.eqValues(*v1->listElems()[i], *v2->listElems()[i])) {
+ return (*this)(v1->listElems()[i], v2->listElems()[i]);
+ }
+ }
default:
throw EvalError("cannot compare %1% with %2%", showType(*v1), showType(*v2));
}
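Since `CompareValues` backs `builtins.lessThan` (and hence the `<` operator) as well as `builtins.sort` and `genericClosure` keys, the new `nList` case makes lists comparable lexicographically; a sketch of the resulting behaviour:

```nix
[ 1 2 ] < [ 1 3 ]      # => true  (first differing element decides)
[ 1 2 ] < [ 1 2 3 ]    # => true  (a proper prefix compares smaller)
builtins.sort (a: b: a < b) [ [ 2 ] [ 1 3 ] [ 1 2 ] ]
# => [ [ 1 2 ] [ 1 3 ] [ 2 ] ]
```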
@@ -620,7 +636,8 @@ static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * ar
ValueList res;
// `doneKeys' doesn't need to be a GC root, because its values are
// reachable from res.
- set<Value *, CompareValues> doneKeys;
+ auto cmp = CompareValues(state);
+ set<Value *, decltype(cmp)> doneKeys(cmp);
while (!workSet.empty()) {
Value * e = *(workSet.begin());
workSet.pop_front();
@@ -714,6 +731,44 @@ static RegisterPrimOp primop_addErrorContext(RegisterPrimOp::Info {
.fun = prim_addErrorContext,
});
+static void prim_ceil(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ auto value = state.forceFloat(*args[0], args[0]->determinePos(pos));
+ mkInt(v, ceil(value));
+}
+
+static RegisterPrimOp primop_ceil({
+ .name = "__ceil",
+ .args = {"double"},
+ .doc = R"(
+ Rounds an IEEE-754 double-precision floating-point number (*double*)
+ up to the smallest integer not less than it.
+
+ If the argument is neither an integer nor a float, an evaluation
+ error is thrown.
+ )",
+ .fun = prim_ceil,
+});
+
+static void prim_floor(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ auto value = state.forceFloat(*args[0], args[0]->determinePos(pos));
+ mkInt(v, floor(value));
+}
+
+static RegisterPrimOp primop_floor({
+ .name = "__floor",
+ .args = {"double"},
+ .doc = R"(
+ Rounds an IEEE-754 double-precision floating-point number (*double*)
+ down to the largest integer not greater than it.
+
+ If the argument is neither an integer nor a float, an evaluation
+ error is thrown.
+ )",
+ .fun = prim_floor,
+});
+
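A sketch of the behaviour of the two new primops; integers are accepted because `forceFloat` coerces them, and the result is always an integer (`mkInt`):

```nix
builtins.ceil 3.2     # => 4
builtins.floor 3.9    # => 3
builtins.ceil 4       # => 4  (an integer passes through unchanged)
builtins.ceil "4"     # evaluation error: a float was expected
```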
/* Try evaluating the argument. Success => {success=true; value=something;},
* else => {success=false; value=false;} */
static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Value & v)
@@ -949,8 +1004,9 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
}
if (i->name == state.sContentAddressed) {
- settings.requireExperimentalFeature("ca-derivations");
contentAddressed = state.forceBool(*i->value, pos);
+ if (contentAddressed)
+ settings.requireExperimentalFeature(Xp::CaDerivations);
}
/* The `args' attribute is special: it supplies the
@@ -972,7 +1028,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
if (i->name == state.sStructuredAttrs) continue;
auto placeholder(jsonObject->placeholder(key));
- printValueAsJSON(state, true, *i->value, placeholder, context);
+ printValueAsJSON(state, true, *i->value, pos, placeholder, context);
if (i->name == state.sBuilder)
drv.builder = state.forceString(*i->value, context, posDrvName);
@@ -1134,7 +1190,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
// hash per output.
auto hashModulo = hashDerivationModulo(*state.store, Derivation(drv), true);
std::visit(overloaded {
- [&](Hash h) {
+ [&](Hash & h) {
for (auto & i : outputs) {
auto outPath = state.store->makeOutputPath(i, h, drvName);
drv.env[i] = state.store->printStorePath(outPath);
@@ -1146,11 +1202,11 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
});
}
},
- [&](CaOutputHashes) {
+ [&](CaOutputHashes &) {
// Shouldn't happen as the toplevel derivation is not CA.
assert(false);
},
- [&](DeferredHash _) {
+ [&](DeferredHash &) {
for (auto & i : outputs) {
drv.outputs.insert_or_assign(i,
DerivationOutput {
@@ -1453,15 +1509,20 @@ static void prim_hashFile(EvalState & state, const Pos & pos, Value * * args, Va
string type = state.forceStringNoCtx(*args[0], pos);
std::optional<HashType> ht = parseHashType(type);
if (!ht)
- throw Error({
- .msg = hintfmt("unknown hash type '%1%'", type),
- .errPos = pos
- });
+ throw Error({
+ .msg = hintfmt("unknown hash type '%1%'", type),
+ .errPos = pos
+ });
- PathSet context; // discarded
- Path p = state.coerceToPath(pos, *args[1], context);
+ PathSet context;
+ Path path = state.coerceToPath(pos, *args[1], context);
+ try {
+ state.realiseContext(context);
+ } catch (InvalidPathError & e) {
+ throw EvalError("cannot read '%s' since path '%s' is not valid, at %s", path, e.path, pos);
+ }
- mkString(v, hashFile(*ht, state.checkSourcePath(p)).to_string(Base16, false), context);
+ mkString(v, hashFile(*ht, state.checkSourcePath(state.toRealPath(path, context))).to_string(Base16, false));
}
static RegisterPrimOp primop_hashFile({
@@ -1538,7 +1599,7 @@ static void prim_toXML(EvalState & state, const Pos & pos, Value * * args, Value
{
std::ostringstream out;
PathSet context;
- printValueAsXML(state, true, false, *args[0], out, context);
+ printValueAsXML(state, true, false, *args[0], out, context, pos);
mkString(v, out.str(), context);
}
@@ -1646,7 +1707,7 @@ static void prim_toJSON(EvalState & state, const Pos & pos, Value * * args, Valu
{
std::ostringstream out;
PathSet context;
- printValueAsJSON(state, true, *args[0], out, context);
+ printValueAsJSON(state, true, *args[0], pos, out, context);
mkString(v, out.str(), context);
}
@@ -1672,7 +1733,7 @@ static void prim_fromJSON(EvalState & state, const Pos & pos, Value * * args, Va
parseJSON(state, s, v);
} catch (JSONParseError &e) {
e.addTrace(pos, "while decoding a JSON string");
- throw e;
+ throw;
}
}
@@ -1802,50 +1863,79 @@ static RegisterPrimOp primop_toFile({
.fun = prim_toFile,
});
-static void addPath(EvalState & state, const Pos & pos, const string & name, const Path & path_,
- Value * filterFun, FileIngestionMethod method, const std::optional<Hash> expectedHash, Value & v)
+static void addPath(
+ EvalState & state,
+ const Pos & pos,
+ const string & name,
+ Path path,
+ Value * filterFun,
+ FileIngestionMethod method,
+ const std::optional<Hash> expectedHash,
+ Value & v,
+ const PathSet & context)
{
- const auto path = evalSettings.pureEval && expectedHash ?
- path_ :
- state.checkSourcePath(path_);
- PathFilter filter = filterFun ? ([&](const Path & path) {
- auto st = lstat(path);
+ try {
+ // FIXME: handle CA derivation outputs (where path needs to
+ // be rewritten to the actual output).
+ state.realiseContext(context);
- /* Call the filter function. The first argument is the path,
- the second is a string indicating the type of the file. */
- Value arg1;
- mkString(arg1, path);
+ StorePathSet refs;
- Value fun2;
- state.callFunction(*filterFun, arg1, fun2, noPos);
+ if (state.store->isInStore(path)) {
+ auto [storePath, subPath] = state.store->toStorePath(path);
+ // FIXME: we should scanForReferences on the path before adding it
+ refs = state.store->queryPathInfo(storePath)->references;
+ path = state.store->toRealPath(storePath) + subPath;
+ }
- Value arg2;
- mkString(arg2,
- S_ISREG(st.st_mode) ? "regular" :
- S_ISDIR(st.st_mode) ? "directory" :
- S_ISLNK(st.st_mode) ? "symlink" :
- "unknown" /* not supported, will fail! */);
+ path = evalSettings.pureEval && expectedHash
+ ? path
+ : state.checkSourcePath(path);
- Value res;
- state.callFunction(fun2, arg2, res, noPos);
+ PathFilter filter = filterFun ? ([&](const Path & path) {
+ auto st = lstat(path);
+
+ /* Call the filter function. The first argument is the path,
+ the second is a string indicating the type of the file. */
+ Value arg1;
+ mkString(arg1, path);
+
+ Value arg2;
+ mkString(arg2,
+ S_ISREG(st.st_mode) ? "regular" :
+ S_ISDIR(st.st_mode) ? "directory" :
+ S_ISLNK(st.st_mode) ? "symlink" :
+ "unknown" /* not supported, will fail! */);
+
+ Value * args []{&arg1, &arg2};
+ Value res;
+ state.callFunction(*filterFun, 2, args, res, pos);
+
+ return state.forceBool(res, pos);
+ }) : defaultPathFilter;
+
+ std::optional<StorePath> expectedStorePath;
+ if (expectedHash)
+ expectedStorePath = state.store->makeFixedOutputPath(method, *expectedHash, name);
- return state.forceBool(res, pos);
- }) : defaultPathFilter;
+ Path dstPath;
+ if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) {
+ dstPath = state.store->printStorePath(settings.readOnlyMode
+ ? state.store->computeStorePathForPath(name, path, method, htSHA256, filter).first
+ : state.store->addToStore(name, path, method, htSHA256, filter, state.repair, refs));
+ if (expectedHash && expectedStorePath != state.store->parseStorePath(dstPath))
+ throw Error("store path mismatch in (possibly filtered) path added from '%s'", path);
+ } else
+ dstPath = state.store->printStorePath(*expectedStorePath);
- std::optional<StorePath> expectedStorePath;
- if (expectedHash)
- expectedStorePath = state.store->makeFixedOutputPath(method, *expectedHash, name);
- Path dstPath;
- if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) {
- dstPath = state.store->printStorePath(settings.readOnlyMode
- ? state.store->computeStorePathForPath(name, path, method, htSHA256, filter).first
- : state.store->addToStore(name, path, method, htSHA256, filter, state.repair));
- if (expectedHash && expectedStorePath != state.store->parseStorePath(dstPath))
- throw Error("store path mismatch in (possibly filtered) path added from '%s'", path);
- } else
- dstPath = state.store->printStorePath(*expectedStorePath);
+ mkString(v, dstPath, {dstPath});
- mkString(v, dstPath, {dstPath});
+ state.allowPath(dstPath);
+
+ } catch (Error & e) {
+ e.addTrace(pos, "while adding path '%s'", path);
+ throw;
+ }
}
@@ -1853,11 +1943,6 @@ static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args
{
PathSet context;
Path path = state.coerceToPath(pos, *args[1], context);
- if (!context.empty())
- throw EvalError({
- .msg = hintfmt("string '%1%' cannot refer to other paths", path),
- .errPos = pos
- });
state.forceValue(*args[0], pos);
if (args[0]->type() != nFunction)
@@ -1868,13 +1953,26 @@ static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args
.errPos = pos
});
- addPath(state, pos, std::string(baseNameOf(path)), path, args[0], FileIngestionMethod::Recursive, std::nullopt, v);
+ addPath(state, pos, std::string(baseNameOf(path)), path, args[0], FileIngestionMethod::Recursive, std::nullopt, v, context);
}
static RegisterPrimOp primop_filterSource({
.name = "__filterSource",
.args = {"e1", "e2"},
.doc = R"(
+ > **Warning**
+ >
+ > `filterSource` should not be used to filter store paths. Since
+ > `filterSource` uses the name of the input directory while naming
+ > the output directory, doing so will produce a directory name in
+ > the form of `<hash2>-<hash>-<name>`, where `<hash>-<name>` is
+ > the name of the input directory. Since `<hash>` depends on the
+ > unfiltered directory, the name of the output directory will
+ > indirectly depend on files that are filtered out by the
+ > function. This will trigger a rebuild even when a filtered-out
+ > file is changed. Use `builtins.path` instead, which allows
+ > specifying the name of the output directory.
+
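A sketch of the recommended `builtins.path` alternative, using the attributes that `prim_path` parses below (`path`, `name`, `filter`):

```nix
builtins.path {
  path = ./source-dir;
  name = "source";                  # fixed output name, unlike filterSource
  filter = path: type:
    type != "symlink" && baseNameOf path != ".git";
}
```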
This function allows you to copy sources into the Nix store while
filtering certain files. For instance, suppose that you want to use
the directory `source-dir` as an input to a Nix expression, e.g.
@@ -1921,18 +2019,13 @@ static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value
Value * filterFun = nullptr;
auto method = FileIngestionMethod::Recursive;
std::optional<Hash> expectedHash;
+ PathSet context;
for (auto & attr : *args[0]->attrs) {
const string & n(attr.name);
- if (n == "path") {
- PathSet context;
+ if (n == "path")
path = state.coerceToPath(*attr.pos, *attr.value, context);
- if (!context.empty())
- throw EvalError({
- .msg = hintfmt("string '%1%' cannot refer to other paths", path),
- .errPos = *attr.pos
- });
- } else if (attr.name == state.sName)
+ else if (attr.name == state.sName)
name = state.forceStringNoCtx(*attr.value, *attr.pos);
else if (n == "filter") {
state.forceValue(*attr.value, pos);
@@ -1955,7 +2048,7 @@ static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value
if (name.empty())
name = baseNameOf(path);
- addPath(state, pos, name, path, filterFun, method, expectedHash, v);
+ addPath(state, pos, name, path, filterFun, method, expectedHash, v, context);
}
static RegisterPrimOp primop_path({
@@ -2069,7 +2162,7 @@ void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
pos
);
// !!! add to stack trace?
- if (state.countCalls && i->pos) state.attrSelects[*i->pos]++;
+ if (state.countCalls && *i->pos != noPos) state.attrSelects[*i->pos]++;
state.forceValue(*i->value, pos);
v = *i->value;
}
@@ -2320,7 +2413,7 @@ static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args
.errPos = pos
});
- if (!args[0]->lambda.fun->matchAttrs) {
+ if (!args[0]->lambda.fun->hasFormals()) {
state.mkAttrs(v, 0);
return;
}
@@ -2329,7 +2422,7 @@ static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args
for (auto & i : args[0]->lambda.fun->formals->formals) {
// !!! should optimise booleans (allocate only once)
Value * value = state.allocValue();
- v.attrs->push_back(Attr(i.name, value, &i.pos));
+ v.attrs->push_back(Attr(i.name, value, ptr(&i.pos)));
mkBool(*value, i.def);
}
v.attrs->sort();
@@ -2475,7 +2568,7 @@ static RegisterPrimOp primop_tail({
the argument isn’t a list or is an empty list.
> **Warning**
- >
+ >
> This function should generally be avoided since it's inefficient:
> unlike Haskell's `tail`, it takes O(n) time, so recursing over a
> list by repeatedly calling `tail` takes O(n^2) time.
@@ -2617,10 +2710,9 @@ static void prim_foldlStrict(EvalState & state, const Pos & pos, Value * * args,
Value * vCur = args[1];
for (unsigned int n = 0; n < args[2]->listSize(); ++n) {
- Value vTmp;
- state.callFunction(*args[0], *vCur, vTmp, pos);
+ Value * vs []{vCur, args[2]->listElems()[n]};
vCur = n == args[2]->listSize() - 1 ? &v : state.allocValue();
- state.callFunction(vTmp, *args[2]->listElems()[n], *vCur, pos);
+ state.callFunction(*args[0], 2, vs, *vCur, pos);
}
state.forceValue(v, pos);
} else {
@@ -2634,9 +2726,9 @@ static RegisterPrimOp primop_foldlStrict({
.args = {"op", "nul", "list"},
.doc = R"(
Reduce a list by applying a binary operator, from left to right,
- e.g. `foldl’ op nul [x0 x1 x2 ...] = op (op (op nul x0) x1) x2)
+ e.g. `foldl' op nul [x0 x1 x2 ...] = op (op (op nul x0) x1) x2
...`. The operator is applied strictly, i.e., its arguments are
- evaluated first. For example, `foldl’ (x: y: x + y) 0 [1 2 3]`
+ evaluated first. For example, `foldl' (x: y: x + y) 0 [1 2 3]`
evaluates to 6.
)",
.fun = prim_foldlStrict,
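For illustration, the fold as evaluated (the first example is the one from the doc string):

```nix
builtins.foldl' (acc: x: acc + x) 0 [ 1 2 3 ]          # => 6
builtins.foldl' (acc: x: acc ++ [ x ]) [ ] [ "a" "b" ] # => [ "a" "b" ]
```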
@@ -2741,17 +2833,16 @@ static void prim_sort(EvalState & state, const Pos & pos, Value * * args, Value
v.listElems()[n] = args[1]->listElems()[n];
}
-
auto comparator = [&](Value * a, Value * b) {
/* Optimization: if the comparator is lessThan, bypass
callFunction. */
if (args[0]->isPrimOp() && args[0]->primOp->fun == prim_lessThan)
- return CompareValues()(a, b);
+ return CompareValues(state)(a, b);
- Value vTmp1, vTmp2;
- state.callFunction(*args[0], *a, vTmp1, pos);
- state.callFunction(vTmp1, *b, vTmp2, pos);
- return state.forceBool(vTmp2, pos);
+ Value * vs[] = {a, b};
+ Value vBool;
+ state.callFunction(*args[0], 2, vs, vBool, pos);
+ return state.forceBool(vBool, pos);
};
/* FIXME: std::sort can segfault if the comparator is not a strict
@@ -2857,7 +2948,7 @@ static void prim_concatMap(EvalState & state, const Pos & pos, Value * * args, V
state.forceList(lists[n], lists[n].determinePos(args[0]->determinePos(pos)));
} catch (TypeError &e) {
e.addTrace(pos, hintfmt("while invoking '%s'", "concatMap"));
- throw e;
+ throw;
}
len += lists[n].listSize();
}
@@ -3028,7 +3119,7 @@ static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Va
{
state.forceValue(*args[0], pos);
state.forceValue(*args[1], pos);
- CompareValues comp;
+ CompareValues comp{state};
mkBool(v, comp(args[0], args[1]));
}
@@ -3069,7 +3160,7 @@ static RegisterPrimOp primop_toString({
- A path (e.g., `toString /foo/bar` yields `"/foo/bar"`).
- - A set containing `{ __toString = self: ...; }`.
+ - A set containing `{ __toString = self: ...; }` or `{ outPath = ...; }`.
- An integer.
@@ -3154,7 +3245,7 @@ static void prim_hashString(EvalState & state, const Pos & pos, Value * * args,
PathSet context; // discarded
string s = state.forceString(*args[1], context, pos);
- mkString(v, hashString(*ht, s).to_string(Base16, false), context);
+ mkString(v, hashString(*ht, s).to_string(Base16, false));
}
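The hash is now returned as a plain string; the input's string context was discarded anyway (see the `// discarded` comment above). For example:

```nix
builtins.hashString "sha256" "hello"
# => "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
```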
static RegisterPrimOp primop_hashString({
@@ -3561,15 +3652,13 @@ static RegisterPrimOp primop_splitVersion({
RegisterPrimOp::PrimOps * RegisterPrimOp::primOps;
-RegisterPrimOp::RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun,
- std::optional<std::string> requiredFeature)
+RegisterPrimOp::RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun)
{
if (!primOps) primOps = new PrimOps;
primOps->push_back({
.name = name,
.args = {},
.arity = arity,
- .requiredFeature = std::move(requiredFeature),
.fun = fun
});
}
@@ -3605,9 +3694,7 @@ void EvalState::createBaseEnv()
if (!evalSettings.pureEval) {
mkInt(v, time(0));
addConstant("__currentTime", v);
- }
- if (!evalSettings.pureEval) {
mkString(v, settings.thisSystem.get());
addConstant("__currentSystem", v);
}
@@ -3622,7 +3709,7 @@ void EvalState::createBaseEnv()
language feature gets added. It's not necessary to increase it
when primops get added, because you can just use `builtins ?
primOp' to check. */
- mkInt(v, 5);
+ mkInt(v, 6);
addConstant("__langVersion", v);
// Miscellaneous
@@ -3645,26 +3732,31 @@ void EvalState::createBaseEnv()
if (RegisterPrimOp::primOps)
for (auto & primOp : *RegisterPrimOp::primOps)
- if (!primOp.requiredFeature || settings.isExperimentalFeatureEnabled(*primOp.requiredFeature))
- addPrimOp({
- .fun = primOp.fun,
- .arity = std::max(primOp.args.size(), primOp.arity),
- .name = symbols.create(primOp.name),
- .args = std::move(primOp.args),
- .doc = primOp.doc,
- });
+ addPrimOp({
+ .fun = primOp.fun,
+ .arity = std::max(primOp.args.size(), primOp.arity),
+ .name = symbols.create(primOp.name),
+ .args = std::move(primOp.args),
+ .doc = primOp.doc,
+ });
/* Add a wrapper around the derivation primop that computes the
`drvPath' and `outPath' attributes lazily. */
sDerivationNix = symbols.create("//builtin/derivation.nix");
- eval(parse(
- #include "primops/derivation.nix.gen.hh"
- , foFile, sDerivationNix, "/", staticBaseEnv), v);
- addConstant("derivation", v);
+ auto vDerivation = allocValue();
+ addConstant("derivation", vDerivation);
/* Now that we've added all primops, sort the `builtins' set,
because attribute lookups expect it to be sorted. */
baseEnv.values[0]->attrs->sort();
+
+ staticBaseEnv.sort();
+
+ /* Note: we have to initialize the 'derivation' constant *after*
+ building baseEnv/staticBaseEnv because it uses 'builtins'. */
+ eval(parse(
+ #include "primops/derivation.nix.gen.hh"
+ , foFile, sDerivationNix, "/", staticBaseEnv), *vDerivation);
}
diff --git a/src/libexpr/primops.hh b/src/libexpr/primops.hh
index 9d42d6539..5b16e075f 100644
--- a/src/libexpr/primops.hh
+++ b/src/libexpr/primops.hh
@@ -15,7 +15,6 @@ struct RegisterPrimOp
std::vector<std::string> args;
size_t arity = 0;
const char * doc;
- std::optional<std::string> requiredFeature;
PrimOpFun fun;
};
@@ -28,8 +27,7 @@ struct RegisterPrimOp
RegisterPrimOp(
std::string name,
size_t arity,
- PrimOpFun fun,
- std::optional<std::string> requiredFeature = {});
+ PrimOpFun fun);
RegisterPrimOp(Info && info);
};
diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc
index 4830ebec3..c23480853 100644
--- a/src/libexpr/primops/fetchMercurial.cc
+++ b/src/libexpr/primops/fetchMercurial.cc
@@ -15,7 +15,7 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar
std::string name = "source";
PathSet context;
- state.forceValue(*args[0]);
+ state.forceValue(*args[0], pos);
if (args[0]->type() == nAttrs) {
@@ -62,6 +62,7 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar
fetchers::Attrs attrs;
attrs.insert_or_assign("type", "hg");
attrs.insert_or_assign("url", url.find("://") != std::string::npos ? url : "file://" + url);
+ attrs.insert_or_assign("name", name);
if (ref) attrs.insert_or_assign("ref", *ref);
if (rev) attrs.insert_or_assign("rev", rev->gitRev());
auto input = fetchers::Input::fromAttrs(std::move(attrs));
@@ -83,8 +84,7 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar
mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *revCount);
v.attrs->sort();
- if (state.allowedPaths)
- state.allowedPaths->insert(tree.actualPath);
+ state.allowPath(tree.storePath);
}
static RegisterPrimOp r_fetchMercurial("fetchMercurial", 1, prim_fetchMercurial);
diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc
index b8b99d4fa..079513873 100644
--- a/src/libexpr/primops/fetchTree.cc
+++ b/src/libexpr/primops/fetchTree.cc
@@ -7,6 +7,7 @@
#include <ctime>
#include <iomanip>
+#include <regex>
namespace nix {
@@ -15,7 +16,8 @@ void emitTreeAttrs(
const fetchers::Tree & tree,
const fetchers::Input & input,
Value & v,
- bool emptyRevFallback)
+ bool emptyRevFallback,
+ bool forceDirty)
{
assert(input.isImmutable());
@@ -32,24 +34,28 @@ void emitTreeAttrs(
mkString(*state.allocAttr(v, state.symbols.create("narHash")),
narHash->to_string(SRI, true));
- if (auto rev = input.getRev()) {
- mkString(*state.allocAttr(v, state.symbols.create("rev")), rev->gitRev());
- mkString(*state.allocAttr(v, state.symbols.create("shortRev")), rev->gitShortRev());
- } else if (emptyRevFallback) {
- // Backwards compat for `builtins.fetchGit`: dirty repos return an empty sha1 as rev
- auto emptyHash = Hash(htSHA1);
- mkString(*state.allocAttr(v, state.symbols.create("rev")), emptyHash.gitRev());
- mkString(*state.allocAttr(v, state.symbols.create("shortRev")), emptyHash.gitShortRev());
- }
-
if (input.getType() == "git")
mkBool(*state.allocAttr(v, state.symbols.create("submodules")),
fetchers::maybeGetBoolAttr(input.attrs, "submodules").value_or(false));
- if (auto revCount = input.getRevCount())
- mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *revCount);
- else if (emptyRevFallback)
- mkInt(*state.allocAttr(v, state.symbols.create("revCount")), 0);
+ if (!forceDirty) {
+
+ if (auto rev = input.getRev()) {
+ mkString(*state.allocAttr(v, state.symbols.create("rev")), rev->gitRev());
+ mkString(*state.allocAttr(v, state.symbols.create("shortRev")), rev->gitShortRev());
+ } else if (emptyRevFallback) {
+ // Backwards compat for `builtins.fetchGit`: dirty repos return an empty sha1 as rev
+ auto emptyHash = Hash(htSHA1);
+ mkString(*state.allocAttr(v, state.symbols.create("rev")), emptyHash.gitRev());
+ mkString(*state.allocAttr(v, state.symbols.create("shortRev")), emptyHash.gitShortRev());
+ }
+
+ if (auto revCount = input.getRevCount())
+ mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *revCount);
+ else if (emptyRevFallback)
+ mkInt(*state.allocAttr(v, state.symbols.create("revCount")), 0);
+
+ }
if (auto lastModified = input.getLastModified()) {
mkInt(*state.allocAttr(v, state.symbols.create("lastModified")), *lastModified);
@@ -60,47 +66,74 @@ void emitTreeAttrs(
v.attrs->sort();
}
-std::string fixURI(std::string uri, EvalState &state)
+std::string fixURI(std::string uri, EvalState & state, const std::string & defaultScheme = "file")
{
state.checkURI(uri);
- return uri.find("://") != std::string::npos ? uri : "file://" + uri;
+ return uri.find("://") != std::string::npos ? uri : defaultScheme + "://" + uri;
}
-void addURI(EvalState &state, fetchers::Attrs &attrs, Symbol name, std::string v)
+std::string fixURIForGit(std::string uri, EvalState & state)
{
- string n(name);
- attrs.emplace(name, n == "url" ? fixURI(v, state) : v);
+ /* Detect scp-style URIs (e.g. git@github.com:NixOS/nix) and fix
+ * them by rewriting the `:` to `/` and assuming an `ssh://` scheme. */
+ static std::regex scp_uri("([^/]*)@(.*):(.*)");
+ if (uri[0] != '/' && std::regex_match(uri, scp_uri))
+ return fixURI(std::regex_replace(uri, scp_uri, "$1@$2/$3"), state, "ssh");
+ else
+ return fixURI(uri, state);
}
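A sketch of what `fixURIForGit` enables at the language level:

```nix
builtins.fetchGit "git@github.com:NixOS/nix"
# is now treated as if it were written:
builtins.fetchGit "ssh://git@github.com/NixOS/nix"
```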
+struct FetchTreeParams {
+ bool emptyRevFallback = false;
+ bool allowNameArgument = false;
+};
+
static void fetchTree(
- EvalState &state,
- const Pos &pos,
- Value **args,
- Value &v,
- const std::optional<std::string> type,
- bool emptyRevFallback = false
+ EvalState & state,
+ const Pos & pos,
+ Value * * args,
+ Value & v,
+ std::optional<std::string> type,
+ const FetchTreeParams & params = FetchTreeParams{}
) {
fetchers::Input input;
PathSet context;
- state.forceValue(*args[0]);
+ state.forceValue(*args[0], pos);
if (args[0]->type() == nAttrs) {
state.forceAttrs(*args[0], pos);
fetchers::Attrs attrs;
+ if (auto aType = args[0]->attrs->get(state.sType)) {
+ if (type)
+ throw Error({
+ .msg = hintfmt("unexpected attribute 'type'"),
+ .errPos = pos
+ });
+ type = state.forceStringNoCtx(*aType->value, *aType->pos);
+ } else if (!type)
+ throw Error({
+ .msg = hintfmt("attribute 'type' is missing in call to 'fetchTree'"),
+ .errPos = pos
+ });
+
+ attrs.emplace("type", type.value());
+
for (auto & attr : *args[0]->attrs) {
- state.forceValue(*attr.value);
- if (attr.value->type() == nPath || attr.value->type() == nString)
- addURI(
- state,
- attrs,
- attr.name,
- state.coerceToString(*attr.pos, *attr.value, context, false, false)
- );
- else if (attr.value->type() == nString)
- addURI(state, attrs, attr.name, attr.value->string.s);
+ if (attr.name == state.sType) continue;
+ state.forceValue(*attr.value, *attr.pos);
+ if (attr.value->type() == nPath || attr.value->type() == nString) {
+ auto s = state.coerceToString(*attr.pos, *attr.value, context, false, false);
+ attrs.emplace(attr.name,
+ attr.name == "url"
+ ? type == "git"
+ ? fixURIForGit(s, state)
+ : fixURI(s, state)
+ : s);
+ }
else if (attr.value->type() == nBool)
attrs.emplace(attr.name, Explicit<bool>{attr.value->boolean});
else if (attr.value->type() == nInt)
@@ -110,26 +143,24 @@ static void fetchTree(
attr.name, showType(*attr.value));
}
- if (type)
- attrs.emplace("type", type.value());
-
- if (!attrs.count("type"))
- throw Error({
- .msg = hintfmt("attribute 'type' is missing in call to 'fetchTree'"),
- .errPos = pos
- });
+ if (!params.allowNameArgument)
+ if (auto nameIter = attrs.find("name"); nameIter != attrs.end())
+ throw Error({
+ .msg = hintfmt("attribute 'name' isn’t supported in call to 'fetchTree'"),
+ .errPos = pos
+ });
input = fetchers::Input::fromAttrs(std::move(attrs));
} else {
- auto url = fixURI(state.coerceToString(pos, *args[0], context, false, false), state);
+ auto url = state.coerceToString(pos, *args[0], context, false, false);
if (type == "git") {
fetchers::Attrs attrs;
attrs.emplace("type", "git");
- attrs.emplace("url", url);
+ attrs.emplace("url", fixURIForGit(url, state));
input = fetchers::Input::fromAttrs(std::move(attrs));
} else {
- input = fetchers::Input::fromURL(url);
+ input = fetchers::Input::fromURL(fixURI(url, state));
}
}
@@ -141,16 +172,15 @@ static void fetchTree(
auto [tree, input2] = input.fetch(state.store);
- if (state.allowedPaths)
- state.allowedPaths->insert(tree.actualPath);
+ state.allowPath(tree.storePath);
- emitTreeAttrs(state, tree, input2, v, emptyRevFallback);
+ emitTreeAttrs(state, tree, input2, v, params.emptyRevFallback, false);
}
static void prim_fetchTree(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
- settings.requireExperimentalFeature("flakes");
- fetchTree(state, pos, args, v, std::nullopt);
+ settings.requireExperimentalFeature(Xp::Flakes);
+ fetchTree(state, pos, args, v, std::nullopt, FetchTreeParams { .allowNameArgument = false });
}
// FIXME: document
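A sketch of the attrset form `fetchTree` accepts after this change (the `type` attribute now comes from the attrset itself and must not also be passed by the caller; `fetchTree` is still gated behind the `flakes` experimental feature):

```nix
builtins.fetchTree {
  type = "github";
  owner = "NixOS";
  repo = "nixpkgs";
}
```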
@@ -162,7 +192,7 @@ static void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
std::optional<std::string> url;
std::optional<Hash> expectedHash;
- state.forceValue(*args[0]);
+ state.forceValue(*args[0], pos);
if (args[0]->type() == nAttrs) {
@@ -206,20 +236,18 @@ static void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
? fetchers::downloadTarball(state.store, *url, name, (bool) expectedHash).first.storePath
: fetchers::downloadFile(state.store, *url, name, (bool) expectedHash).storePath;
- auto path = state.store->toRealPath(storePath);
-
if (expectedHash) {
auto hash = unpack
? state.store->queryPathInfo(storePath)->narHash
- : hashFile(htSHA256, path);
+ : hashFile(htSHA256, state.store->toRealPath(storePath));
if (hash != *expectedHash)
throw Error((unsigned int) 102, "hash mismatch in file downloaded from '%s':\n specified: %s\n got: %s",
*url, expectedHash->to_string(Base32, true), hash.to_string(Base32, true));
}
- if (state.allowedPaths)
- state.allowedPaths->insert(path);
+ state.allowPath(storePath);
+ auto path = state.store->printStorePath(storePath);
mkString(v, path, PathSet({path}));
}
@@ -262,13 +290,13 @@ static RegisterPrimOp primop_fetchTarball({
stdenv.mkDerivation { … }
```
- The fetched tarball is cached for a certain amount of time (1 hour
- by default) in `~/.cache/nix/tarballs/`. You can change the cache
- timeout either on the command line with `--option tarball-ttl number
- of seconds` or in the Nix configuration file with this option: `
- number of seconds to cache `.
+ The fetched tarball is cached for a certain amount of time (1
+ hour by default) in `~/.cache/nix/tarballs/`. You can change the
+ cache timeout either on the command line with `--tarball-ttl`
+ *number-of-seconds* or in the Nix configuration file by adding
+ the line `tarball-ttl = ` *number-of-seconds*.
- Note that when obtaining the hash with ` nix-prefetch-url ` the
+ Note that when obtaining the hash with `nix-prefetch-url` the
option `--unpack` is required.
This function can also verify the contents against a hash. In that
@@ -292,7 +320,7 @@ static RegisterPrimOp primop_fetchTarball({
static void prim_fetchGit(EvalState &state, const Pos &pos, Value **args, Value &v)
{
- fetchTree(state, pos, args, v, "git", true);
+ fetchTree(state, pos, args, v, "git", FetchTreeParams { .emptyRevFallback = true, .allowNameArgument = true });
}
static RegisterPrimOp primop_fetchGit({
@@ -368,7 +396,7 @@ static RegisterPrimOp primop_fetchGit({
```
> **Note**
- >
+ >
> It is nice to always specify the branch which a revision
> belongs to. Without the branch being specified, the fetcher
> might fail if the default branch changes. Additionally, it can
@@ -405,12 +433,12 @@ static RegisterPrimOp primop_fetchGit({
```
> **Note**
- >
+ >
> Nix will refetch the branch in accordance with
> the option `tarball-ttl`.
> **Note**
- >
+ >
> This behavior is disabled in *Pure evaluation mode*.
)",
.fun = prim_fetchGit,
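With `allowNameArgument = true` here (and `name` now accepted by the Git fetcher), the store path name can be overridden; a sketch:

```nix
builtins.fetchGit {
  url = "https://github.com/NixOS/nix";
  name = "nix-sources";   # yields /nix/store/<hash>-nix-sources
}
```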
diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc
index bfea24d40..4d642c720 100644
--- a/src/libexpr/value-to-json.cc
+++ b/src/libexpr/value-to-json.cc
@@ -10,11 +10,11 @@
namespace nix {
void printValueAsJSON(EvalState & state, bool strict,
- Value & v, JSONPlaceholder & out, PathSet & context)
+ Value & v, const Pos & pos, JSONPlaceholder & out, PathSet & context)
{
checkInterrupt();
- if (strict) state.forceValue(v);
+ if (strict) state.forceValue(v, pos);
switch (v.type()) {
@@ -40,7 +40,7 @@ void printValueAsJSON(EvalState & state, bool strict,
break;
case nAttrs: {
- auto maybeString = state.tryAttrsToString(noPos, v, context, false, false);
+ auto maybeString = state.tryAttrsToString(pos, v, context, false, false);
if (maybeString) {
out.write(*maybeString);
break;
@@ -54,10 +54,10 @@ void printValueAsJSON(EvalState & state, bool strict,
for (auto & j : names) {
Attr & a(*v.attrs->find(state.symbols.create(j)));
auto placeholder(obj.placeholder(j));
- printValueAsJSON(state, strict, *a.value, placeholder, context);
+ printValueAsJSON(state, strict, *a.value, *a.pos, placeholder, context);
}
} else
- printValueAsJSON(state, strict, *i->value, out, context);
+ printValueAsJSON(state, strict, *i->value, *i->pos, out, context);
break;
}
@@ -65,7 +65,7 @@ void printValueAsJSON(EvalState & state, bool strict,
auto list(out.list());
for (unsigned int n = 0; n < v.listSize(); ++n) {
auto placeholder(list.placeholder());
- printValueAsJSON(state, strict, *v.listElems()[n], placeholder, context);
+ printValueAsJSON(state, strict, *v.listElems()[n], pos, placeholder, context);
}
break;
}
@@ -79,18 +79,20 @@ void printValueAsJSON(EvalState & state, bool strict,
break;
case nThunk:
- throw TypeError("cannot convert %1% to JSON", showType(v));
-
case nFunction:
- throw TypeError("cannot convert %1% to JSON", showType(v));
+ auto e = TypeError({
+ .msg = hintfmt("cannot convert %1% to JSON", showType(v)),
+ .errPos = v.determinePos(pos)
+ });
+ throw e.addTrace(pos, hintfmt("while converting the value to JSON"));
}
}
void printValueAsJSON(EvalState & state, bool strict,
- Value & v, std::ostream & str, PathSet & context)
+ Value & v, const Pos & pos, std::ostream & str, PathSet & context)
{
JSONPlaceholder out(str);
- printValueAsJSON(state, strict, v, out, context);
+ printValueAsJSON(state, strict, v, pos, out, context);
}
void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
diff --git a/src/libexpr/value-to-json.hh b/src/libexpr/value-to-json.hh
index 67fed6487..c2f797b29 100644
--- a/src/libexpr/value-to-json.hh
+++ b/src/libexpr/value-to-json.hh
@@ -11,9 +11,9 @@ namespace nix {
class JSONPlaceholder;
void printValueAsJSON(EvalState & state, bool strict,
- Value & v, JSONPlaceholder & out, PathSet & context);
+ Value & v, const Pos & pos, JSONPlaceholder & out, PathSet & context);
void printValueAsJSON(EvalState & state, bool strict,
- Value & v, std::ostream & str, PathSet & context);
+ Value & v, const Pos & pos, std::ostream & str, PathSet & context);
}
diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc
index 7464455d8..54268ece0 100644
--- a/src/libexpr/value-to-xml.cc
+++ b/src/libexpr/value-to-xml.cc
@@ -18,7 +18,8 @@ static XMLAttrs singletonAttrs(const string & name, const string & value)
static void printValueAsXML(EvalState & state, bool strict, bool location,
- Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen);
+ Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
+ const Pos & pos);
static void posToXML(XMLAttrs & xmlAttrs, const Pos & pos)
@@ -42,21 +43,22 @@ static void showAttrs(EvalState & state, bool strict, bool location,
XMLAttrs xmlAttrs;
xmlAttrs["name"] = i;
- if (location && a.pos != &noPos) posToXML(xmlAttrs, *a.pos);
+ if (location && a.pos != ptr(&noPos)) posToXML(xmlAttrs, *a.pos);
XMLOpenElement _(doc, "attr", xmlAttrs);
printValueAsXML(state, strict, location,
- *a.value, doc, context, drvsSeen);
+ *a.value, doc, context, drvsSeen, *a.pos);
}
}
static void printValueAsXML(EvalState & state, bool strict, bool location,
- Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen)
+ Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
+ const Pos & pos)
{
checkInterrupt();
- if (strict) state.forceValue(v);
+ if (strict) state.forceValue(v, pos);
switch (v.type()) {
@@ -91,14 +93,14 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
Path drvPath;
a = v.attrs->find(state.sDrvPath);
if (a != v.attrs->end()) {
- if (strict) state.forceValue(*a->value);
+ if (strict) state.forceValue(*a->value, *a->pos);
if (a->value->type() == nString)
xmlAttrs["drvPath"] = drvPath = a->value->string.s;
}
a = v.attrs->find(state.sOutPath);
if (a != v.attrs->end()) {
- if (strict) state.forceValue(*a->value);
+ if (strict) state.forceValue(*a->value, *a->pos);
if (a->value->type() == nString)
xmlAttrs["outPath"] = a->value->string.s;
}
@@ -121,7 +123,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
case nList: {
XMLOpenElement _(doc, "list");
for (unsigned int n = 0; n < v.listSize(); ++n)
- printValueAsXML(state, strict, location, *v.listElems()[n], doc, context, drvsSeen);
+ printValueAsXML(state, strict, location, *v.listElems()[n], doc, context, drvsSeen, pos);
break;
}
@@ -135,7 +137,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
if (location) posToXML(xmlAttrs, v.lambda.fun->pos);
XMLOpenElement _(doc, "function", xmlAttrs);
- if (v.lambda.fun->matchAttrs) {
+ if (v.lambda.fun->hasFormals()) {
XMLAttrs attrs;
if (!v.lambda.fun->arg.empty()) attrs["name"] = v.lambda.fun->arg;
if (v.lambda.fun->formals->ellipsis) attrs["ellipsis"] = "1";
@@ -149,7 +151,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
}
case nExternal:
- v.external->printValueAsXML(state, strict, location, doc, context, drvsSeen);
+ v.external->printValueAsXML(state, strict, location, doc, context, drvsSeen, pos);
break;
case nFloat:
@@ -163,19 +165,20 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
void ExternalValueBase::printValueAsXML(EvalState & state, bool strict,
- bool location, XMLWriter & doc, PathSet & context, PathSet & drvsSeen) const
+ bool location, XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
+ const Pos & pos) const
{
doc.writeEmptyElement("unevaluated");
}
void printValueAsXML(EvalState & state, bool strict, bool location,
- Value & v, std::ostream & out, PathSet & context)
+ Value & v, std::ostream & out, PathSet & context, const Pos & pos)
{
XMLWriter doc(true, out);
XMLOpenElement root(doc, "expr");
PathSet drvsSeen;
- printValueAsXML(state, strict, location, v, doc, context, drvsSeen);
+ printValueAsXML(state, strict, location, v, doc, context, drvsSeen, pos);
}
diff --git a/src/libexpr/value-to-xml.hh b/src/libexpr/value-to-xml.hh
index 97657327e..cc778a2cb 100644
--- a/src/libexpr/value-to-xml.hh
+++ b/src/libexpr/value-to-xml.hh
@@ -9,6 +9,6 @@
namespace nix {
void printValueAsXML(EvalState & state, bool strict, bool location,
- Value & v, std::ostream & out, PathSet & context);
-
+ Value & v, std::ostream & out, PathSet & context, const Pos & pos);
+
}
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
index a1f131f9e..3bb97b3c2 100644
--- a/src/libexpr/value.hh
+++ b/src/libexpr/value.hh
@@ -94,7 +94,8 @@ class ExternalValueBase
/* Print the value as XML. Defaults to unevaluated */
virtual void printValueAsXML(EvalState & state, bool strict, bool location,
- XMLWriter & doc, PathSet & context, PathSet & drvsSeen) const;
+ XMLWriter & doc, PathSet & context, PathSet & drvsSeen,
+ const Pos & pos) const;
virtual ~ExternalValueBase()
{
diff --git a/src/libfetchers/attrs.hh b/src/libfetchers/attrs.hh
index a2d53a7bf..e41037633 100644
--- a/src/libfetchers/attrs.hh
+++ b/src/libfetchers/attrs.hh
@@ -6,6 +6,8 @@
#include <nlohmann/json_fwd.hpp>
+#include <optional>
+
namespace nix::fetchers {
typedef std::variant<std::string, uint64_t, Explicit<bool>> Attr;
diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc
index 916e0a8e8..e158d914b 100644
--- a/src/libfetchers/fetchers.cc
+++ b/src/libfetchers/fetchers.cc
@@ -200,12 +200,17 @@ void Input::markChangedFile(
return scheme->markChangedFile(*this, file, commitMsg);
}
+std::string Input::getName() const
+{
+ return maybeGetStrAttr(attrs, "name").value_or("source");
+}
+
StorePath Input::computeStorePath(Store & store) const
{
auto narHash = getNarHash();
if (!narHash)
throw Error("cannot compute store path for mutable input '%s'", to_string());
- return store.makeFixedOutputPath(FileIngestionMethod::Recursive, *narHash, "source");
+ return store.makeFixedOutputPath(FileIngestionMethod::Recursive, *narHash, getName());
}
std::string Input::getType() const
diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh
index a72cfafa4..c43b047a7 100644
--- a/src/libfetchers/fetchers.hh
+++ b/src/libfetchers/fetchers.hh
@@ -38,6 +38,9 @@ struct Input
bool immutable = false;
bool direct = true;
+ /* path of the parent of this input, used for relative path resolution */
+ std::optional<Path> parent;
+
public:
static Input fromURL(const std::string & url);
@@ -81,6 +84,8 @@ public:
std::string_view file,
std::optional<std::string> commitMsg) const;
+ std::string getName() const;
+
StorePath computeStorePath(Store & store) const;
// Convenience functions for common attributes.
diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc
index d8e0dbe0a..544d2ffbf 100644
--- a/src/libfetchers/git.cc
+++ b/src/libfetchers/git.cc
@@ -4,6 +4,7 @@
#include "tarfile.hh"
#include "store-api.hh"
#include "url-parts.hh"
+#include "pathlocks.hh"
#include <sys/time.h>
#include <sys/wait.h>
@@ -12,6 +13,12 @@ using namespace std::string_literals;
namespace nix::fetchers {
+// Explicit initial branch for our bare repo, to suppress warnings from newer versions of git.
+// The value itself does not matter, since we always fetch a specific revision or branch.
+// It is set with `-c init.defaultBranch=` instead of `--initial-branch=` to stay compatible with
+// older versions of git, which ignore unrecognized `-c` options.
+const std::string gitInitialBranch = "__nix_dummy_branch";
+
static std::string readHead(const Path & path)
{
return chomp(runProgram("git", true, { "-C", path, "rev-parse", "--abbrev-ref", "HEAD" }));
@@ -44,7 +51,7 @@ struct GitInputScheme : InputScheme
for (auto &[name, value] : url.query) {
if (name == "rev" || name == "ref")
attrs.emplace(name, value);
- else if (name == "shallow")
+ else if (name == "shallow" || name == "submodules")
attrs.emplace(name, Explicit<bool> { value == "1" });
else
url2.query.emplace(name, value);
@@ -60,7 +67,7 @@ struct GitInputScheme : InputScheme
if (maybeGetStrAttr(attrs, "type") != "git") return {};
for (auto & [name, value] : attrs)
- if (name != "type" && name != "url" && name != "ref" && name != "rev" && name != "shallow" && name != "submodules" && name != "lastModified" && name != "revCount" && name != "narHash" && name != "allRefs")
+ if (name != "type" && name != "url" && name != "ref" && name != "rev" && name != "shallow" && name != "submodules" && name != "lastModified" && name != "revCount" && name != "narHash" && name != "allRefs" && name != "name")
throw Error("unsupported Git input attribute '%s'", name);
parseURL(getStrAttr(attrs, "url"));
@@ -167,10 +174,10 @@ struct GitInputScheme : InputScheme
std::pair<Tree, Input> fetch(ref<Store> store, const Input & _input) override
{
- auto name = "source";
-
Input input(_input);
+ std::string name = input.getName();
+
bool shallow = maybeGetBoolAttr(input.attrs, "shallow").value_or(false);
bool submodules = maybeGetBoolAttr(input.attrs, "submodules").value_or(false);
bool allRefs = maybeGetBoolAttr(input.attrs, "allRefs").value_or(false);
@@ -270,7 +277,7 @@ struct GitInputScheme : InputScheme
return files.count(file);
};
- auto storePath = store->addToStore("source", actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);
+ auto storePath = store->addToStore(input.getName(), actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);
// FIXME: maybe we should use the timestamp of the last
// modified dirty file?
@@ -317,9 +324,11 @@ struct GitInputScheme : InputScheme
Path cacheDir = getCacheDir() + "/nix/gitv3/" + hashString(htSHA256, actualUrl).to_string(Base32, false);
repoDir = cacheDir;
+ createDirs(dirOf(cacheDir));
+ PathLocks cacheDirLock({cacheDir + ".lock"});
+
if (!pathExists(cacheDir)) {
- createDirs(dirOf(cacheDir));
- runProgram("git", true, { "init", "--bare", repoDir });
+ runProgram("git", true, { "-c", "init.defaultBranch=" + gitInitialBranch, "init", "--bare", repoDir });
}
Path localRefFile =
@@ -386,6 +395,8 @@ struct GitInputScheme : InputScheme
if (!input.getRev())
input.attrs.insert_or_assign("rev", Hash::parseAny(chomp(readFile(localRefFile)), htSHA1).gitRev());
+
+ // cache dir lock is removed at scope end; we will only use read-only operations on specific revisions in the remainder
}
bool isShallow = chomp(runProgram("git", true, { "-C", repoDir, "rev-parse", "--is-shallow-repository" })) == "true";
@@ -406,17 +417,14 @@ struct GitInputScheme : InputScheme
AutoDelete delTmpDir(tmpDir, true);
PathFilter filter = defaultPathFilter;
- RunOptions checkCommitOpts(
- "git",
- { "-C", repoDir, "cat-file", "commit", input.getRev()->gitRev() }
- );
- checkCommitOpts.searchPath = true;
- checkCommitOpts.mergeStderrToStdout = true;
-
- auto result = runProgram(checkCommitOpts);
+ auto result = runProgram(RunOptions {
+ .program = "git",
+ .args = { "-C", repoDir, "cat-file", "commit", input.getRev()->gitRev() },
+ .mergeStderrToStdout = true
+ });
if (WEXITSTATUS(result.first) == 128
- && result.second.find("bad file") != std::string::npos
- ) {
+ && result.second.find("bad file") != std::string::npos)
+ {
throw Error(
"Cannot find Git revision '%s' in ref '%s' of repository '%s'! "
"Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the "
@@ -432,7 +440,7 @@ struct GitInputScheme : InputScheme
Path tmpGitDir = createTempDir();
AutoDelete delTmpGitDir(tmpGitDir, true);
- runProgram("git", true, { "init", tmpDir, "--separate-git-dir", tmpGitDir });
+ runProgram("git", true, { "-c", "init.defaultBranch=" + gitInitialBranch, "init", tmpDir, "--separate-git-dir", tmpGitDir });
// TODO: repoDir might lack the ref (it only checks if rev
// exists, see FIXME above) so use a big hammer and fetch
// everything to ensure we get the rev.
@@ -448,9 +456,11 @@ struct GitInputScheme : InputScheme
// FIXME: should pipe this, or find some better way to extract a
// revision.
auto source = sinkToSource([&](Sink & sink) {
- RunOptions gitOptions("git", { "-C", repoDir, "archive", input.getRev()->gitRev() });
- gitOptions.standardOut = &sink;
- runProgram2(gitOptions);
+ runProgram2({
+ .program = "git",
+ .args = { "-C", repoDir, "archive", input.getRev()->gitRev() },
+ .standardOut = &sink
+ });
});
unpackTarfile(*source, tmpDir);
diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc
index 8352ef02d..1c539b80e 100644
--- a/src/libfetchers/github.cc
+++ b/src/libfetchers/github.cc
@@ -207,7 +207,7 @@ struct GitArchiveInputScheme : InputScheme
auto url = getDownloadUrl(input);
- auto [tree, lastModified] = downloadTarball(store, url.url, "source", true, url.headers);
+ auto [tree, lastModified] = downloadTarball(store, url.url, input.getName(), true, url.headers);
input.attrs.insert_or_assign("lastModified", uint64_t(lastModified));
@@ -273,9 +273,9 @@ struct GitHubInputScheme : GitArchiveInputScheme
void clone(const Input & input, const Path & destDir) override
{
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
- Input::fromURL(fmt("git+ssh://git@%s/%s/%s.git",
+ Input::fromURL(fmt("git+https://%s/%s/%s.git",
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
- .applyOverrides(input.getRef().value_or("HEAD"), input.getRev())
+ .applyOverrides(input.getRef(), input.getRev())
.clone(destDir);
}
};
@@ -300,7 +300,7 @@ struct GitLabInputScheme : GitArchiveInputScheme
if ("PAT" == token.substr(0, fldsplit))
return std::make_pair("Private-token", token.substr(fldsplit+1));
warn("Unrecognized GitLab token type %s", token.substr(0, fldsplit));
- return std::nullopt;
+ return std::make_pair(token.substr(0,fldsplit), token.substr(fldsplit+1));
}
Hash getRevFromRef(nix::ref<Store> store, const Input & input) const override
@@ -341,9 +341,9 @@ struct GitLabInputScheme : GitArchiveInputScheme
{
auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com");
// FIXME: get username somewhere
- Input::fromURL(fmt("git+ssh://git@%s/%s/%s.git",
+ Input::fromURL(fmt("git+https://%s/%s/%s.git",
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
- .applyOverrides(input.getRef().value_or("HEAD"), input.getRev())
+ .applyOverrides(input.getRef(), input.getRev())
.clone(destDir);
}
};
diff --git a/src/libfetchers/local.mk b/src/libfetchers/local.mk
index cfd705e22..2e8869d83 100644
--- a/src/libfetchers/local.mk
+++ b/src/libfetchers/local.mk
@@ -8,4 +8,6 @@ libfetchers_SOURCES := $(wildcard $(d)/*.cc)
libfetchers_CXXFLAGS += -I src/libutil -I src/libstore
+libfetchers_LDFLAGS += -pthread
+
libfetchers_LIBS = libutil libstore
diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc
index 0eb401e10..d52d4641b 100644
--- a/src/libfetchers/mercurial.cc
+++ b/src/libfetchers/mercurial.cc
@@ -11,34 +11,32 @@ using namespace std::string_literals;
namespace nix::fetchers {
-namespace {
-
-RunOptions hgOptions(const Strings & args) {
- RunOptions opts("hg", args);
- opts.searchPath = true;
-
- auto env = getEnv();
- // Set HGPLAIN: this means we get consistent output from hg and avoids leakage from a user or system .hgrc.
- env["HGPLAIN"] = "";
- opts.environment = env;
-
- return opts;
+static RunOptions hgOptions(const Strings & args)
+{
+ auto env = getEnv();
+ // Set HGPLAIN: this means we get consistent output from hg and avoids leakage from a user or system .hgrc.
+ env["HGPLAIN"] = "";
+
+ return {
+ .program = "hg",
+ .searchPath = true,
+ .args = args,
+ .environment = env
+ };
}
// runProgram wrapper that uses hgOptions instead of stock RunOptions.
-string runHg(const Strings & args, const std::optional<std::string> & input = {})
+static string runHg(const Strings & args, const std::optional<std::string> & input = {})
{
- RunOptions opts = hgOptions(args);
- opts.input = input;
+ RunOptions opts = hgOptions(args);
+ opts.input = input;
- auto res = runProgram(opts);
+ auto res = runProgram(std::move(opts));
- if (!statusOk(res.first))
- throw ExecError(res.first, fmt("hg %1%", statusToString(res.first)));
-
- return res.second;
-}
+ if (!statusOk(res.first))
+ throw ExecError(res.first, fmt("hg %1%", statusToString(res.first)));
+ return res.second;
}
struct MercurialInputScheme : InputScheme
@@ -74,7 +72,7 @@ struct MercurialInputScheme : InputScheme
if (maybeGetStrAttr(attrs, "type") != "hg") return {};
for (auto & [name, value] : attrs)
- if (name != "type" && name != "url" && name != "ref" && name != "rev" && name != "revCount" && name != "narHash")
+ if (name != "type" && name != "url" && name != "ref" && name != "rev" && name != "revCount" && name != "narHash" && name != "name")
throw Error("unsupported Mercurial input attribute '%s'", name);
parseURL(getStrAttr(attrs, "url"));
@@ -147,10 +145,10 @@ struct MercurialInputScheme : InputScheme
std::pair<Tree, Input> fetch(ref<Store> store, const Input & _input) override
{
- auto name = "source";
-
Input input(_input);
+ auto name = input.getName();
+
auto [isLocal, actualUrl_] = getActualUrl(input);
auto actualUrl = actualUrl_; // work around clang bug
@@ -193,7 +191,7 @@ struct MercurialInputScheme : InputScheme
return files.count(file);
};
- auto storePath = store->addToStore("source", actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);
+ auto storePath = store->addToStore(input.getName(), actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);
return {
Tree(store->toRealPath(storePath), std::move(storePath)),
@@ -253,9 +251,7 @@ struct MercurialInputScheme : InputScheme
have to pull again. */
if (!(input.getRev()
&& pathExists(cacheDir)
- && runProgram(
- hgOptions({ "log", "-R", cacheDir, "-r", input.getRev()->gitRev(), "--template", "1" })
- .killStderr(true)).second == "1"))
+ && runProgram(hgOptions({ "log", "-R", cacheDir, "-r", input.getRev()->gitRev(), "--template", "1" })).second == "1"))
{
Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Mercurial repository '%s'", actualUrl));
diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc
index d1003de57..fb5702c4c 100644
--- a/src/libfetchers/path.cc
+++ b/src/libfetchers/path.cc
@@ -82,18 +82,38 @@ struct PathInputScheme : InputScheme
std::pair<Tree, Input> fetch(ref<Store> store, const Input & input) override
{
+ std::string absPath;
auto path = getStrAttr(input.attrs, "path");
- // FIXME: check whether access to 'path' is allowed.
+ if (path[0] != '/') {
+ if (!input.parent)
+ throw Error("cannot fetch input '%s' because it uses a relative path", input.to_string());
+
+ auto parent = canonPath(*input.parent);
- auto storePath = store->maybeParseStorePath(path);
+ // the path is relative, so resolve it against the parent
+ absPath = nix::absPath(path, parent);
+
+ // for security, ensure that if the parent is a store path, it's inside it
+ if (store->isInStore(parent)) {
+ auto storePath = store->printStorePath(store->toStorePath(parent).first);
+ if (!isInDir(absPath, storePath))
+ throw BadStorePath("relative path '%s' points outside of its parent's store path '%s'", path, storePath);
+ }
+ } else
+ absPath = path;
+
+ Activity act(*logger, lvlTalkative, actUnknown, fmt("copying '%s'", absPath));
+
+ // FIXME: check whether access to 'path' is allowed.
+ auto storePath = store->maybeParseStorePath(absPath);
if (storePath)
store->addTempRoot(*storePath);
if (!storePath || storePath->name() != "source" || !store->isValidPath(*storePath))
// FIXME: try to substitute storePath.
- storePath = store->addToStore("source", path);
+ storePath = store->addToStore("source", absPath);
return {
Tree(store->toRealPath(*storePath), std::move(*storePath)),
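The new `parent` field used above exists so that relative `path:` inputs can be resolved; a sketch of the intended usage, assuming the flake machinery fills in `parent`:

```nix
# flake.nix — hypothetical layout with a subflake in ./sub
{
  inputs.sub.url = "path:./sub";   # resolved relative to this flake's directory
  outputs = { self, sub }: { };
}
```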
diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc
index 74376adc0..f35359d4b 100644
--- a/src/libfetchers/registry.cc
+++ b/src/libfetchers/registry.cc
@@ -124,6 +124,13 @@ std::shared_ptr<Registry> getUserRegistry()
return userRegistry;
}
+std::shared_ptr<Registry> getCustomRegistry(const Path & p)
+{
+ static auto customRegistry =
+ Registry::read(p, Registry::Custom);
+ return customRegistry;
+}
+
static std::shared_ptr<Registry> flagRegistry =
std::make_shared<Registry>(Registry::Flag);
diff --git a/src/libfetchers/registry.hh b/src/libfetchers/registry.hh
index 1077af020..260a2c460 100644
--- a/src/libfetchers/registry.hh
+++ b/src/libfetchers/registry.hh
@@ -14,6 +14,7 @@ struct Registry
User = 1,
System = 2,
Global = 3,
+ Custom = 4,
};
RegistryType type;
@@ -48,6 +49,8 @@ typedef std::vector<std::shared_ptr<Registry>> Registries;
std::shared_ptr<Registry> getUserRegistry();
+std::shared_ptr<Registry> getCustomRegistry(const Path & p);
+
Path getUserRegistryPath();
Registries getRegistries(ref<Store> store);
diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc
index b8d7d2c70..031ccc5f7 100644
--- a/src/libfetchers/tarball.cc
+++ b/src/libfetchers/tarball.cc
@@ -178,7 +178,8 @@ struct TarballInputScheme : InputScheme
&& !hasSuffix(url.path, ".tar")
&& !hasSuffix(url.path, ".tar.gz")
&& !hasSuffix(url.path, ".tar.xz")
- && !hasSuffix(url.path, ".tar.bz2"))
+ && !hasSuffix(url.path, ".tar.bz2")
+ && !hasSuffix(url.path, ".tar.zst"))
return {};
Input input;
@@ -195,7 +196,7 @@ struct TarballInputScheme : InputScheme
if (maybeGetStrAttr(attrs, "type") != "tarball") return {};
for (auto & [name, value] : attrs)
- if (name != "type" && name != "url" && /* name != "hash" && */ name != "narHash")
+ if (name != "type" && name != "url" && /* name != "hash" && */ name != "narHash" && name != "name")
throw Error("unsupported tarball input attribute '%s'", name);
Input input;
@@ -225,7 +226,7 @@ struct TarballInputScheme : InputScheme
std::pair<Tree, Input> fetch(ref<Store> store, const Input & input) override
{
- auto tree = downloadTarball(store, getStrAttr(input.attrs, "url"), "source", false).first;
+ auto tree = downloadTarball(store, getStrAttr(input.attrs, "url"), input.getName(), false).first;
return {std::move(tree), input};
}
};
diff --git a/src/libmain/local.mk b/src/libmain/local.mk
index a8eed6c65..99da95e27 100644
--- a/src/libmain/local.mk
+++ b/src/libmain/local.mk
@@ -8,10 +8,10 @@ libmain_SOURCES := $(wildcard $(d)/*.cc)
libmain_CXXFLAGS += -I src/libutil -I src/libstore
-libmain_LDFLAGS = $(OPENSSL_LIBS)
+libmain_LDFLAGS += $(OPENSSL_LIBS)
libmain_LIBS = libstore libutil
libmain_ALLOW_UNDEFINED = 1
-$(eval $(call install-file-in, $(d)/nix-main.pc, $(prefix)/lib/pkgconfig, 0644))
+$(eval $(call install-file-in, $(d)/nix-main.pc, $(libdir)/pkgconfig, 0644))
diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc
index 15354549a..63955eed1 100644
--- a/src/libmain/progress-bar.cc
+++ b/src/libmain/progress-bar.cc
@@ -103,17 +103,19 @@ public:
~ProgressBar()
{
stop();
- updateThread.join();
}
void stop() override
{
- auto state(state_.lock());
- if (!state->active) return;
- state->active = false;
- writeToStderr("\r\e[K");
- updateCV.notify_one();
- quitCV.notify_one();
+ {
+ auto state(state_.lock());
+ if (!state->active) return;
+ state->active = false;
+ writeToStderr("\r\e[K");
+ updateCV.notify_one();
+ quitCV.notify_one();
+ }
+ updateThread.join();
}
bool isVerbose() override {
@@ -484,7 +486,7 @@ Logger * makeProgressBar(bool printBuildLogs)
{
return new ProgressBar(
printBuildLogs,
- isatty(STDERR_FILENO) && getEnv("TERM").value_or("dumb") != "dumb"
+ shouldANSI()
);
}
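The `stop()` rework above fixes a classic shutdown hazard: `join()` must not wait on a thread that still needs the lock the caller is holding, so the notification happens in a nested scope that releases the lock before joining. A standalone sketch of the corrected pattern (illustrative names, not from the patch):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    struct Bar {
        std::mutex m;
        std::condition_variable cv;
        bool active = true;
        std::thread t{[this] {
            std::unique_lock<std::mutex> lk(m);
            cv.wait(lk, [this] { return !active; }); // needs m to finish
        }};

        void stop() {
            {   // notify under the lock, then release it...
                std::lock_guard<std::mutex> lk(m);
                active = false;
                cv.notify_one();
            }   // ...before joining, so the thread can reacquire m and exit
            t.join();
        }
    };

    int main() { Bar b; b.stop(); }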
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 86930c2e3..85f9f0d58 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -15,6 +15,9 @@
#include <sys/stat.h>
#include <unistd.h>
#include <signal.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netdb.h>
#include <openssl/crypto.h>
@@ -110,6 +113,31 @@ static void opensslLockCallback(int mode, int type, const char * file, int line)
}
#endif
+static std::once_flag dns_resolve_flag;
+
+static void preloadNSS() {
+ /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
+ one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
+ been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
+ load its lookup libraries in the parent before any child gets a chance to. */
+ std::call_once(dns_resolve_flag, []() {
+ struct addrinfo *res = NULL;
+
+ /* NSS will only force the "local" (not through nscd) DNS resolution if it's on the LOCALDOMAIN.
+ We need the resolution to be done locally, as the nscd socket will not be accessible in the
+ sandbox. */
+ char * previous_env = getenv("LOCALDOMAIN");
+ setenv("LOCALDOMAIN", "invalid", 1);
+ if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", NULL, &res) == 0) {
+ if (res) freeaddrinfo(res);
+ }
+ if (previous_env) {
+ setenv("LOCALDOMAIN", previous_env, 1);
+ } else {
+ unsetenv("LOCALDOMAIN");
+ }
+ });
+}
static void sigHandler(int signo) { }
@@ -176,6 +204,8 @@ void initNix()
if (hasPrefix(getEnv("TMPDIR").value_or("/tmp"), "/var/folders/"))
unsetenv("TMPDIR");
#endif
+
+ preloadNSS();
}
@@ -238,7 +268,7 @@ LegacyArgs::LegacyArgs(const std::string & programName,
addFlag({
.longName = "no-gc-warning",
.description = "Disable warnings about not using `--add-root`.",
- .handler = {&gcWarning, true},
+ .handler = {&gcWarning, false},
});
addFlag({
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 09e1c254b..13c086a46 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -52,9 +52,9 @@ void BinaryCacheStore::init()
throw Error("binary cache '%s' is for Nix stores with prefix '%s', not '%s'",
getUri(), value, storeDir);
} else if (name == "WantMassQuery") {
- wantMassQuery.setDefault(value == "1" ? "true" : "false");
+ wantMassQuery.setDefault(value == "1");
} else if (name == "Priority") {
- priority.setDefault(fmt("%d", std::stoi(value)));
+ priority.setDefault(std::stoi(value));
}
}
}
@@ -111,15 +111,15 @@ void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
upsertFile(narInfoFile, narInfo->to_string(*this), "text/x-nix-narinfo");
- std::string hashPart(narInfo->path.hashPart());
-
{
auto state_(state.lock());
- state_->pathInfoCache.upsert(hashPart, PathInfoCacheValue { .value = std::shared_ptr<NarInfo>(narInfo) });
+ state_->pathInfoCache.upsert(
+ std::string(narInfo->path.to_string()),
+ PathInfoCacheValue { .value = std::shared_ptr<NarInfo>(narInfo) });
}
if (diskCache)
- diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo));
+ diskCache->upsertNarInfo(getUri(), std::string(narInfo->path.hashPart()), std::shared_ptr<NarInfo>(narInfo));
}
AutoCloseFD openFile(const Path & path)
@@ -130,17 +130,6 @@ AutoCloseFD openFile(const Path & path)
return fd;
}
-struct FileSource : FdSource
-{
- AutoCloseFD fd2;
-
- FileSource(const Path & path)
- : fd2(openFile(path))
- {
- fd = fd2.get();
- }
-};
-
ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
Source & narSource, RepairFlag repair, CheckSigsFlag checkSigs,
std::function<ValidPathInfo(HashResult)> mkInfo)
@@ -160,7 +149,7 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
{
FdSink fileSink(fdTemp.get());
TeeSink teeSinkCompressed { fileSink, fileHashSink };
- auto compressionSink = makeCompressionSink(compression, teeSinkCompressed);
+ auto compressionSink = makeCompressionSink(compression, teeSinkCompressed, parallelCompression, compressionLevel);
TeeSink teeSinkUncompressed { *compressionSink, narHashSink };
TeeSource teeSource { narSource, teeSinkUncompressed };
narAccessor = makeNarAccessor(teeSource);
@@ -319,16 +308,17 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
}
StorePath BinaryCacheStore::addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method, HashType hashAlgo, RepairFlag repair)
+ FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references)
{
if (method != FileIngestionMethod::Recursive || hashAlgo != htSHA256)
unsupported("addToStoreFromDump");
return addToStoreCommon(dump, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
- makeFixedOutputPath(method, nar.first, name),
+ makeFixedOutputPath(method, nar.first, name, references),
nar.first,
};
info.narSize = nar.second;
+ info.references = references;
return info;
})->path;
}
@@ -396,7 +386,7 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
}
StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
- FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
+ FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair, const StorePathSet & references)
{
/* FIXME: Make BinaryCacheStore::addToStoreCommon support
non-recursive+sha256 so we can just use the default
@@ -415,10 +405,11 @@ StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath
});
return addToStoreCommon(*source, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
- makeFixedOutputPath(method, h, name),
+ makeFixedOutputPath(method, h, name, references),
nar.first,
};
info.narSize = nar.second;
+ info.references = references;
info.ca = FixedOutputHash {
.method = method,
.hash = h,
@@ -448,20 +439,34 @@ StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s
})->path;
}
-std::optional<const Realisation> BinaryCacheStore::queryRealisation(const DrvOutput & id)
+void BinaryCacheStore::queryRealisationUncached(const DrvOutput & id,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi";
- auto rawOutputInfo = getFile(outputInfoFilePath);
- if (rawOutputInfo) {
- return {Realisation::fromJSON(
- nlohmann::json::parse(*rawOutputInfo), outputInfoFilePath)};
- } else {
- return std::nullopt;
- }
+ auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
+
+ Callback<std::shared_ptr<std::string>> newCallback = {
+ [=](std::future<std::shared_ptr<std::string>> fut) {
+ try {
+ auto data = fut.get();
+ if (!data) return (*callbackPtr)(nullptr);
+
+ auto realisation = Realisation::fromJSON(
+ nlohmann::json::parse(*data), outputInfoFilePath);
+ return (*callbackPtr)(std::make_shared<const Realisation>(realisation));
+ } catch (...) {
+ callbackPtr->rethrow();
+ }
+ }
+ };
+
+ getFile(outputInfoFilePath, std::move(newCallback));
}
void BinaryCacheStore::registerDrvOutput(const Realisation& info) {
+ if (diskCache)
+ diskCache->upsertRealisation(getUri(), info);
auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi";
upsertFile(filePath, info.toJSON().dump(), "application/json");
}
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index c2163166c..9815af591 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -15,13 +15,17 @@ struct BinaryCacheStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
- const Setting<std::string> compression{(StoreConfig*) this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
+ const Setting<std::string> compression{(StoreConfig*) this, "xz", "compression", "NAR compression method ('xz', 'bzip2', 'gzip', 'zstd', or 'none')"};
const Setting<bool> writeNARListing{(StoreConfig*) this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
const Setting<bool> writeDebugInfo{(StoreConfig*) this, false, "index-debug-info", "whether to index DWARF debug info files by build ID"};
const Setting<Path> secretKeyFile{(StoreConfig*) this, "", "secret-key", "path to secret key used to sign the binary cache"};
const Setting<Path> localNarCache{(StoreConfig*) this, "", "local-nar-cache", "path to a local cache of NARs"};
const Setting<bool> parallelCompression{(StoreConfig*) this, false, "parallel-compression",
- "enable multi-threading compression, available for xz only currently"};
+ "enable multi-threading compression for NARs, available for xz and zstd only currently"};
+ const Setting<int> compressionLevel{(StoreConfig*) this, -1, "compression-level",
+ "specify 'preset level' of compression to be used with NARs: "
+ "meaning and accepted range of values depends on compression method selected, "
+ "other than -1 which we reserve to indicate Nix defaults should be used"};
};
class BinaryCacheStore : public virtual BinaryCacheStoreConfig, public virtual Store
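For context on the two settings added above: binary-cache store settings are ordinary store-URI query parameters, so `compression-level` can be set wherever a store URI is accepted. A hedged sketch (the cache path and the zstd preset `19` are made-up examples; `-1` keeps Nix's defaults):

    #include "store-api.hh"

    void example()
    {
        auto store = nix::openStore(
            "file:///tmp/cache"
            "?compression=zstd&compression-level=19&parallel-compression=true");
    }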
@@ -34,7 +38,7 @@ private:
protected:
// The prefix under which realisation infos will be stored
- const std::string realisationsPrefix = "/realisations";
+ const std::string realisationsPrefix = "realisations";
BinaryCacheStore(const Params & params);
@@ -93,18 +97,19 @@ public:
RepairFlag repair, CheckSigsFlag checkSigs) override;
StorePath addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method, HashType hashAlgo, RepairFlag repair) override;
+ FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references ) override;
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method, HashType hashAlgo,
- PathFilter & filter, RepairFlag repair) override;
+ PathFilter & filter, RepairFlag repair, const StorePathSet & references) override;
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair) override;
void registerDrvOutput(const Realisation & info) override;
- std::optional<const Realisation> queryRealisation(const DrvOutput &) override;
+ void queryRealisationUncached(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
void narFromPath(const StorePath & path, Sink & sink) override;
diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc
index 9100d3333..b924d23b2 100644
--- a/src/libstore/build/derivation-goal.cc
+++ b/src/libstore/build/derivation-goal.cc
@@ -143,7 +143,6 @@ void DerivationGoal::work()
(this->*state)();
}
-
void DerivationGoal::addWantedOutputs(const StringSet & outputs)
{
/* If we already want all outputs, there is nothing to do. */
@@ -166,7 +165,7 @@ void DerivationGoal::getDerivation()
/* The first thing to do is to make sure that the derivation
exists. If it doesn't, it may be created through a
substitute. */
- if (buildMode == bmNormal && worker.store.isValidPath(drvPath)) {
+ if (buildMode == bmNormal && worker.evalStore.isValidPath(drvPath)) {
loadDerivation();
return;
}
@@ -189,12 +188,12 @@ void DerivationGoal::loadDerivation()
/* `drvPath' should already be a root, but let's be on the safe
side: if the user forgot to make it a root, we wouldn't want
things being garbage collected while we're busy. */
- worker.store.addTempRoot(drvPath);
+ worker.evalStore.addTempRoot(drvPath);
- assert(worker.store.isValidPath(drvPath));
+ assert(worker.evalStore.isValidPath(drvPath));
/* Get the derivation. */
- drv = std::make_unique<Derivation>(worker.store.derivationFromPath(drvPath));
+ drv = std::make_unique<Derivation>(worker.evalStore.derivationFromPath(drvPath));
haveDerivation();
}
@@ -205,7 +204,7 @@ void DerivationGoal::haveDerivation()
trace("have derivation");
if (drv->type() == DerivationType::CAFloating)
- settings.requireExperimentalFeature("ca-derivations");
+ settings.requireExperimentalFeature(Xp::CaDerivations);
retrySubstitution = false;
@@ -213,8 +212,8 @@ void DerivationGoal::haveDerivation()
if (i.second.second)
worker.store.addTempRoot(*i.second.second);
- auto outputHashes = staticOutputHashes(worker.store, *drv);
- for (auto &[outputName, outputHash] : outputHashes)
+ auto outputHashes = staticOutputHashes(worker.evalStore, *drv);
+ for (auto & [outputName, outputHash] : outputHashes)
initialOutputs.insert({
outputName,
InitialOutput{
@@ -338,6 +337,15 @@ void DerivationGoal::gaveUpOnSubstitution()
for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs)
addWaitee(worker.makeDerivationGoal(i.first, i.second, buildMode == bmRepair ? bmRepair : bmNormal));
+ /* Copy the input sources from the eval store to the build
+ store. */
+ if (&worker.evalStore != &worker.store) {
+ RealisedPath::Set inputSrcs;
+ for (auto & i : drv->inputSrcs)
+ inputSrcs.insert(i);
+ copyClosure(worker.evalStore, worker.store, inputSrcs);
+ }
+
for (auto & i : drv->inputSrcs) {
if (worker.store.isValidPath(i)) continue;
if (!settings.useSubstitutes)
@@ -445,7 +453,7 @@ void DerivationGoal::inputsRealised()
if (useDerivation) {
auto & fullDrv = *dynamic_cast<Derivation *>(drv.get());
- if (settings.isExperimentalFeatureEnabled("ca-derivations") &&
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) &&
((!fullDrv.inputDrvs.empty() && derivationIsCA(fullDrv.type()))
|| fullDrv.type() == DerivationType::DeferredInputAddressed)) {
/* We are able to resolve this derivation based on the
@@ -479,8 +487,8 @@ void DerivationGoal::inputsRealised()
/* Add the relevant output closures of the input derivation
`i' as input paths. Only add the closures of output paths
that are specified as inputs. */
- assert(worker.store.isValidPath(drvPath));
- auto outputs = worker.store.queryPartialDerivationOutputMap(depDrvPath);
+ assert(worker.evalStore.isValidPath(drvPath));
+ auto outputs = worker.evalStore.queryPartialDerivationOutputMap(depDrvPath);
for (auto & j : wantedDepOutputs) {
if (outputs.count(j) > 0) {
auto optRealizedInput = outputs.at(j);
@@ -545,7 +553,7 @@ void DerivationGoal::tryToBuild()
PathSet lockFiles;
/* FIXME: Should lock something like the drv itself so we don't build same
CA drv concurrently */
- if (dynamic_cast<LocalStore *>(&worker.store))
+ if (dynamic_cast<LocalStore *>(&worker.store)) {
/* If we aren't a local store, we might need to use the local store as
a build remote, but that would cause a deadlock. */
/* FIXME: Make it so we can use ourselves as a build remote even if we
@@ -553,9 +561,15 @@ void DerivationGoal::tryToBuild()
/* FIXME: find some way to lock for scheduling for the other stores so
a forking daemon with --store still won't farm out redundant builds.
*/
- for (auto & i : drv->outputsAndOptPaths(worker.store))
+ for (auto & i : drv->outputsAndOptPaths(worker.store)) {
if (i.second.second)
lockFiles.insert(worker.store.Store::toRealPath(*i.second.second));
+ else
+ lockFiles.insert(
+ worker.store.Store::toRealPath(drvPath) + "." + i.first
+ );
+ }
+ }
if (!outputLocks.lockPaths(lockFiles, "", false)) {
if (!actLock)
@@ -602,7 +616,9 @@ void DerivationGoal::tryToBuild()
/* Don't do a remote build if the derivation has the attribute
`preferLocalBuild' set. Also, check and repair modes are only
supported for local builds. */
- bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store);
+ bool buildLocally =
+ (buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store))
+ && settings.maxBuildJobs.get() != 0;
if (!buildLocally) {
switch (tryBuildHook()) {
@@ -739,6 +755,64 @@ void DerivationGoal::cleanupPostOutputsRegisteredModeNonCheck()
{
}
+void runPostBuildHook(
+ Store & store,
+ Logger & logger,
+ const StorePath & drvPath,
+ StorePathSet outputPaths
+)
+{
+ auto hook = settings.postBuildHook;
+ if (hook == "")
+ return;
+
+ Activity act(logger, lvlInfo, actPostBuildHook,
+ fmt("running post-build-hook '%s'", settings.postBuildHook),
+ Logger::Fields{store.printStorePath(drvPath)});
+ PushActivity pact(act.id);
+ std::map<std::string, std::string> hookEnvironment = getEnv();
+
+ hookEnvironment.emplace("DRV_PATH", store.printStorePath(drvPath));
+ hookEnvironment.emplace("OUT_PATHS", chomp(concatStringsSep(" ", store.printStorePathSet(outputPaths))));
+ hookEnvironment.emplace("NIX_CONFIG", globalConfig.toKeyValue());
+
+ struct LogSink : Sink {
+ Activity & act;
+ std::string currentLine;
+
+ LogSink(Activity & act) : act(act) { }
+
+ void operator() (std::string_view data) override {
+ for (auto c : data) {
+ if (c == '\n') {
+ flushLine();
+ } else {
+ currentLine += c;
+ }
+ }
+ }
+
+ void flushLine() {
+ act.result(resPostBuildLogLine, currentLine);
+ currentLine.clear();
+ }
+
+ ~LogSink() {
+ if (currentLine != "") {
+ currentLine += '\n';
+ flushLine();
+ }
+ }
+ };
+ LogSink sink(act);
+
+ runProgram2({
+ .program = settings.postBuildHook,
+ .environment = hookEnvironment,
+ .standardOut = &sink,
+ .mergeStderrToStdout = true,
+ });
+}
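`runPostBuildHook` hands the hook its inputs purely through the environment: `DRV_PATH`, the space-separated `OUT_PATHS`, and the serialized `NIX_CONFIG`. As a rough illustration (not part of the patch), a minimal hook binary could look like:

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical post-build hook: Nix runs it after a successful build
    // with DRV_PATH, OUT_PATHS and NIX_CONFIG in the environment.
    int main()
    {
        const char * drv = std::getenv("DRV_PATH");
        const char * outs = std::getenv("OUT_PATHS");
        std::printf("built %s -> %s\n", drv ? drv : "?", outs ? outs : "?");
        return 0; // a non-zero exit marks the hook as failed
    }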
void DerivationGoal::buildDone()
{
@@ -804,57 +878,15 @@ void DerivationGoal::buildDone()
being valid. */
registerOutputs();
- if (settings.postBuildHook != "") {
- Activity act(*logger, lvlInfo, actPostBuildHook,
- fmt("running post-build-hook '%s'", settings.postBuildHook),
- Logger::Fields{worker.store.printStorePath(drvPath)});
- PushActivity pact(act.id);
- StorePathSet outputPaths;
- for (auto i : drv->outputs) {
- outputPaths.insert(finalOutputs.at(i.first));
- }
- std::map<std::string, std::string> hookEnvironment = getEnv();
-
- hookEnvironment.emplace("DRV_PATH", worker.store.printStorePath(drvPath));
- hookEnvironment.emplace("OUT_PATHS", chomp(concatStringsSep(" ", worker.store.printStorePathSet(outputPaths))));
-
- RunOptions opts(settings.postBuildHook, {});
- opts.environment = hookEnvironment;
-
- struct LogSink : Sink {
- Activity & act;
- std::string currentLine;
-
- LogSink(Activity & act) : act(act) { }
-
- void operator() (std::string_view data) override {
- for (auto c : data) {
- if (c == '\n') {
- flushLine();
- } else {
- currentLine += c;
- }
- }
- }
-
- void flushLine() {
- act.result(resPostBuildLogLine, currentLine);
- currentLine.clear();
- }
-
- ~LogSink() {
- if (currentLine != "") {
- currentLine += '\n';
- flushLine();
- }
- }
- };
- LogSink sink(act);
-
- opts.standardOut = &sink;
- opts.mergeStderrToStdout = true;
- runProgram2(opts);
- }
+ StorePathSet outputPaths;
+ for (auto & [_, path] : finalOutputs)
+ outputPaths.insert(path);
+ runPostBuildHook(
+ worker.store,
+ *logger,
+ drvPath,
+ outputPaths
+ );
if (buildMode == bmCheck) {
cleanupPostOutputsRegisteredModeCheck();
@@ -910,6 +942,8 @@ void DerivationGoal::resolvedFinished() {
auto resolvedHashes = staticOutputHashes(worker.store, *resolvedDrv);
+ StorePathSet outputPaths;
+
// `wantedOutputs` might be empty, which means “all the outputs”
auto realWantedOutputs = wantedOutputs;
if (realWantedOutputs.empty())
@@ -927,8 +961,10 @@ void DerivationGoal::resolvedFinished() {
auto newRealisation = *realisation;
newRealisation.id = DrvOutput{initialOutputs.at(wantedOutput).outputHash, wantedOutput};
newRealisation.signatures.clear();
+ newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath);
signRealisation(newRealisation);
worker.store.registerDrvOutput(newRealisation);
+ outputPaths.insert(realisation->outPath);
} else {
// If we don't have a realisation, then it must mean that something
// failed when building the resolved drv
@@ -936,6 +972,13 @@ void DerivationGoal::resolvedFinished() {
}
}
+ runPostBuildHook(
+ worker.store,
+ *logger,
+ drvPath,
+ outputPaths
+ );
+
// This is potentially a bit fishy in terms of error reporting. Not sure
// how to do it in a cleaner way
amDone(nrFailed == 0 ? ecSuccess : ecFailed, ex);
@@ -968,7 +1011,7 @@ HookReply DerivationGoal::tryBuildHook()
return readLine(worker.hook->fromHook.readSide.get());
} catch (Error & e) {
e.addTrace({}, "while reading the response from the build hook");
- throw e;
+ throw;
}
}();
if (handleJSONLogMessage(s, worker.act, worker.hook->activities, true))
@@ -1014,7 +1057,7 @@ HookReply DerivationGoal::tryBuildHook()
machineName = readLine(hook->fromHook.readSide.get());
} catch (Error & e) {
e.addTrace({}, "while reading the machine name from the build hook");
- throw e;
+ throw;
}
/* Tell the hook all the inputs that have to be copied to the
@@ -1048,42 +1091,6 @@ HookReply DerivationGoal::tryBuildHook()
}
-StorePathSet DerivationGoal::exportReferences(const StorePathSet & storePaths)
-{
- StorePathSet paths;
-
- for (auto & storePath : storePaths) {
- if (!inputPaths.count(storePath))
- throw BuildError("cannot export references of path '%s' because it is not in the input closure of the derivation", worker.store.printStorePath(storePath));
-
- worker.store.computeFSClosure({storePath}, paths);
- }
-
- /* If there are derivations in the graph, then include their
- outputs as well. This is useful if you want to do things
- like passing all build-time dependencies of some path to a
- derivation that builds a NixOS DVD image. */
- auto paths2 = paths;
-
- for (auto & j : paths2) {
- if (j.isDerivation()) {
- Derivation drv = worker.store.derivationFromPath(j);
- for (auto & k : drv.outputsAndOptPaths(worker.store)) {
- if (!k.second.second)
- /* FIXME: I am confused why we are calling
- `computeFSClosure` on the output path, rather than
- derivation itself. That doesn't seem right to me, so I
- won't try to implemented this for CA derivations. */
- throw UnimplementedError("exportReferences on CA derivations is not yet implemented");
- worker.store.computeFSClosure(*k.second.second, paths);
- }
- }
- }
-
- return paths;
-}
-
-
void DerivationGoal::registerOutputs()
{
/* When using a build hook, the build hook can register the output
@@ -1268,7 +1275,7 @@ void DerivationGoal::checkPathValidity()
: PathStatus::Corrupt,
};
}
- if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
auto drvOutput = DrvOutput{initialOutputs.at(i.first).outputHash, i.first};
if (auto real = worker.store.queryRealisation(drvOutput)) {
info.known = {
diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc
index a5ac4c49d..b9602e696 100644
--- a/src/libstore/build/drv-output-substitution-goal.cc
+++ b/src/libstore/build/drv-output-substitution-goal.cc
@@ -1,6 +1,8 @@
#include "drv-output-substitution-goal.hh"
+#include "finally.hh"
#include "worker.hh"
#include "substitution-goal.hh"
+#include "callback.hh"
namespace nix {
@@ -17,6 +19,13 @@ DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(const DrvOutput& id, Worker
void DrvOutputSubstitutionGoal::init()
{
trace("init");
+
+ /* If the derivation already exists, we’re done */
+ if (worker.store.queryRealisation(id)) {
+ amDone(ecSuccess);
+ return;
+ }
+
subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
tryNext();
}
@@ -43,14 +52,62 @@ void DrvOutputSubstitutionGoal::tryNext()
return;
}
- auto sub = subs.front();
+ sub = subs.front();
subs.pop_front();
// FIXME: Make async
- outputInfo = sub->queryRealisation(id);
+ // outputInfo = sub->queryRealisation(id);
+ outPipe.create();
+ promise = decltype(promise)();
+
+ sub->queryRealisation(
+ id, { [&](std::future<std::shared_ptr<const Realisation>> res) {
+ try {
+ Finally updateStats([this]() { outPipe.writeSide.close(); });
+ promise.set_value(res.get());
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ } });
+
+ worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
+
+ state = &DrvOutputSubstitutionGoal::realisationFetched;
+}
+
+void DrvOutputSubstitutionGoal::realisationFetched()
+{
+ worker.childTerminated(this);
+
+ try {
+ outputInfo = promise.get_future().get();
+ } catch (std::exception & e) {
+ printError(e.what());
+ substituterFailed = true;
+ }
+
if (!outputInfo) {
- tryNext();
- return;
+ return tryNext();
+ }
+
+ for (const auto & [depId, depPath] : outputInfo->dependentRealisations) {
+ if (depId != id) {
+ if (auto localOutputInfo = worker.store.queryRealisation(depId);
+ localOutputInfo && localOutputInfo->outPath != depPath) {
+ warn(
+ "substituter '%s' has an incompatible realisation for '%s', ignoring.\n"
+ "Local: %s\n"
+ "Remote: %s",
+ sub->getUri(),
+ depId.to_string(),
+ worker.store.printStorePath(localOutputInfo->outPath),
+ worker.store.printStorePath(depPath)
+ );
+ tryNext();
+ return;
+ }
+ addWaitee(worker.makeDrvOutputSubstitutionGoal(depId));
+ }
}
addWaitee(worker.makePathSubstitutionGoal(outputInfo->outPath));
@@ -92,4 +149,10 @@ void DrvOutputSubstitutionGoal::work()
(this->*state)();
}
+void DrvOutputSubstitutionGoal::handleEOF(int fd)
+{
+ if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
+}
+
+
}
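The pattern above bridges the callback-style `queryRealisation` into the worker's fd-based event loop: the result lands in a `std::promise`, the `Finally` closes the pipe's write side after the promise is fulfilled, and the resulting EOF wakes the worker, which resumes the goal in `realisationFetched()`. A generic standalone sketch of that handoff (raw `pipe()` instead of Nix's `Pipe`/`Finally` helpers):

    #include <future>
    #include <thread>
    #include <unistd.h>

    int main()
    {
        int fds[2];
        if (pipe(fds)) return 1;
        std::promise<int> promise;
        auto fut = promise.get_future();

        std::thread producer([&] {
            promise.set_value(42); // publish the result first...
            close(fds[1]);         // ...then signal completion via EOF
        });

        char c;
        while (read(fds[0], &c, 1) > 0) {} // event loop: wake on EOF
        int result = fut.get();            // safe: promise already fulfilled
        producer.join();
        close(fds[0]);
        return result == 42 ? 0 : 1;
    }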
diff --git a/src/libstore/build/drv-output-substitution-goal.hh b/src/libstore/build/drv-output-substitution-goal.hh
index 63ab53d89..67ae2624a 100644
--- a/src/libstore/build/drv-output-substitution-goal.hh
+++ b/src/libstore/build/drv-output-substitution-goal.hh
@@ -3,6 +3,8 @@
#include "store-api.hh"
#include "goal.hh"
#include "realisation.hh"
+#include <thread>
+#include <future>
namespace nix {
@@ -20,11 +22,18 @@ private:
// The realisation corresponding to the given output id.
// Will be filled once we can get it.
- std::optional<Realisation> outputInfo;
+ std::shared_ptr<const Realisation> outputInfo;
/* The remaining substituters. */
std::list<ref<Store>> subs;
+ /* The current substituter. */
+ std::shared_ptr<Store> sub;
+
+ Pipe outPipe;
+ std::thread thr;
+ std::promise<std::shared_ptr<const Realisation>> promise;
+
/* Whether a substituter failed. */
bool substituterFailed = false;
@@ -36,6 +45,7 @@ public:
void init();
void tryNext();
+ void realisationFetched();
void outPathValid();
void finished();
@@ -44,7 +54,7 @@ public:
string key() override;
void work() override;
-
+ void handleEOF(int fd) override;
};
}
diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc
index 732d4785d..9b4cfd835 100644
--- a/src/libstore/build/entry-points.cc
+++ b/src/libstore/build/entry-points.cc
@@ -1,4 +1,3 @@
-#include "machines.hh"
#include "worker.hh"
#include "substitution-goal.hh"
#include "derivation-goal.hh"
@@ -6,17 +5,17 @@
namespace nix {
-void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMode)
+void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMode, std::shared_ptr<Store> evalStore)
{
- Worker worker(*this);
+ Worker worker(*this, evalStore ? *evalStore : *this);
Goals goals;
- for (auto & br : reqs) {
+ for (const auto & br : reqs) {
std::visit(overloaded {
- [&](DerivedPath::Built bfd) {
+ [&](const DerivedPath::Built & bfd) {
goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode));
},
- [&](DerivedPath::Opaque bo) {
+ [&](const DerivedPath::Opaque & bo) {
goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair));
},
}, br.raw());
@@ -51,7 +50,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode)
{
- Worker worker(*this);
+ Worker worker(*this, *this);
auto goal = worker.makeBasicDerivationGoal(drvPath, drv, {}, buildMode);
BuildResult result;
@@ -74,7 +73,7 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat
outputId,
Realisation{ outputId, *staticOutput.second}
);
- if (settings.isExperimentalFeatureEnabled("ca-derivations") && !derivationHasKnownOutputPaths(drv.type())) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) && !derivationHasKnownOutputPaths(drv.type())) {
auto realisation = this->queryRealisation(outputId);
if (realisation)
result.builtOutputs.insert_or_assign(
@@ -93,7 +92,7 @@ void Store::ensurePath(const StorePath & path)
/* If the path is already valid, we're done. */
if (isValidPath(path)) return;
- Worker worker(*this);
+ Worker worker(*this, *this);
GoalPtr goal = worker.makePathSubstitutionGoal(path);
Goals goals = {goal};
@@ -111,7 +110,7 @@ void Store::ensurePath(const StorePath & path)
void LocalStore::repairPath(const StorePath & path)
{
- Worker worker(*this);
+ Worker worker(*this, *this);
GoalPtr goal = worker.makePathSubstitutionGoal(path, Repair);
Goals goals = {goal};
diff --git a/src/libstore/build/goal.cc b/src/libstore/build/goal.cc
index 9de40bdf2..7c985128b 100644
--- a/src/libstore/build/goal.cc
+++ b/src/libstore/build/goal.cc
@@ -13,11 +13,9 @@ bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const {
void addToWeakGoals(WeakGoals & goals, GoalPtr p)
{
- // FIXME: necessary?
- // FIXME: O(n)
- for (auto & i : goals)
- if (i.lock() == p) return;
- goals.push_back(p);
+ if (goals.find(p) != goals.end())
+ return;
+ goals.insert(p);
}
@@ -46,10 +44,7 @@ void Goal::waiteeDone(GoalPtr waitee, ExitCode result)
/* If we failed and keepGoing is not set, we remove all
remaining waitees. */
for (auto & goal : waitees) {
- WeakGoals waiters2;
- for (auto & j : goal->waiters)
- if (j.lock() != shared_from_this()) waiters2.push_back(j);
- goal->waiters = waiters2;
+ goal->waiters.extract(shared_from_this());
}
waitees.clear();
diff --git a/src/libstore/build/goal.hh b/src/libstore/build/goal.hh
index e6bf628cb..192e416d2 100644
--- a/src/libstore/build/goal.hh
+++ b/src/libstore/build/goal.hh
@@ -19,7 +19,7 @@ struct CompareGoalPtrs {
/* Set of goals. */
typedef set<GoalPtr, CompareGoalPtrs> Goals;
-typedef list<WeakGoalPtr> WeakGoals;
+typedef set<WeakGoalPtr, std::owner_less<WeakGoalPtr>> WeakGoals;
/* A map of paths to goals (and the other way around). */
typedef std::map<StorePath, WeakGoalPtr> WeakGoalMap;
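Keying `WeakGoals` with `std::owner_less` is what lets `addToWeakGoals` drop its O(n) duplicate scan and `waiteeDone` use `extract()`: the set orders `weak_ptr`s by owning control block, so even expired pointers compare consistently. A small self-contained illustration:

    #include <cassert>
    #include <memory>
    #include <set>

    int main()
    {
        auto p = std::make_shared<int>(1);
        std::set<std::weak_ptr<int>, std::owner_less<std::weak_ptr<int>>> goals;

        goals.insert(p);
        goals.insert(p);          // same owner: not inserted again
        assert(goals.size() == 1);

        goals.extract(std::weak_ptr<int>(p)); // O(log n) removal
        assert(goals.empty());
    }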
diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc
index 7c1402918..c9a4a31e7 100644
--- a/src/libstore/build/local-derivation-goal.cc
+++ b/src/libstore/build/local-derivation-goal.cc
@@ -17,16 +17,14 @@
#include <regex>
#include <queue>
-#include <sys/types.h>
-#include <sys/socket.h>
#include <sys/un.h>
-#include <netdb.h>
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/utsname.h>
#include <sys/resource.h>
+#include <sys/socket.h>
#if HAVE_STATVFS
#include <sys/statvfs.h>
@@ -34,7 +32,6 @@
/* Includes required for chroot support. */
#if __linux__
-#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/ip.h>
@@ -70,12 +67,14 @@ void handleDiffHook(
auto diffHook = settings.diffHook;
if (diffHook != "" && settings.runDiffHook) {
try {
- RunOptions diffHookOptions(diffHook,{tryA, tryB, drvPath, tmpDir});
- diffHookOptions.searchPath = true;
- diffHookOptions.uid = uid;
- diffHookOptions.gid = gid;
- diffHookOptions.chdir = "/";
- auto diffRes = runProgram(diffHookOptions);
+ auto diffRes = runProgram(RunOptions {
+ .program = diffHook,
+ .searchPath = true,
+ .args = {tryA, tryB, drvPath, tmpDir},
+ .uid = uid,
+ .gid = gid,
+ .chdir = "/"
+ });
if (!statusOk(diffRes.first))
throw ExecError(diffRes.first,
"diff-hook program '%1%' %2%",
@@ -153,6 +152,7 @@ void LocalDerivationGoal::killChild()
void LocalDerivationGoal::tryLocalBuild() {
unsigned int curBuilds = worker.getNrLocalBuilds();
if (curBuilds >= settings.maxBuildJobs) {
+ state = &DerivationGoal::tryToBuild;
worker.waitForBuildSlot(shared_from_this());
outputLocks.unlock();
return;
@@ -291,7 +291,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull()
auto & localStore = getLocalStore();
uint64_t required = 8ULL * 1024 * 1024; // FIXME: make configurable
struct statvfs st;
- if (statvfs(localStore.realStoreDir.c_str(), &st) == 0 &&
+ if (statvfs(localStore.realStoreDir.get().c_str(), &st) == 0 &&
(uint64_t) st.f_bavail * st.f_bsize < required)
diskFull = true;
if (statvfs(tmpDir.c_str(), &st) == 0 &&
@@ -342,24 +342,7 @@ int childEntry(void * arg)
return 1;
}
-
-static std::once_flag dns_resolve_flag;
-
-static void preloadNSS() {
- /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
- one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
- been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
- load its lookup libraries in the parent before any child gets a chance to. */
- std::call_once(dns_resolve_flag, []() {
- struct addrinfo *res = NULL;
-
- if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", NULL, &res) != 0) {
- if (res) freeaddrinfo(res);
- }
- });
-}
-
-
+#if __linux__
static void linkOrCopy(const Path & from, const Path & to)
{
if (link(from.c_str(), to.c_str()) == -1) {
@@ -375,6 +358,7 @@ static void linkOrCopy(const Path & from, const Path & to)
copyPath(from, to);
}
}
+#endif
void LocalDerivationGoal::startBuilder()
@@ -388,9 +372,6 @@ void LocalDerivationGoal::startBuilder()
settings.thisSystem,
concatStringsSep<StringSet>(", ", worker.store.systemFeatures));
- if (drv->isBuiltin())
- preloadNSS();
-
#if __APPLE__
additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
#endif
@@ -416,7 +397,7 @@ void LocalDerivationGoal::startBuilder()
}
auto & localStore = getLocalStore();
- if (localStore.storeDir != localStore.realStoreDir) {
+ if (localStore.storeDir != localStore.realStoreDir.get()) {
#if __linux__
useChroot = true;
#else
@@ -517,7 +498,7 @@ void LocalDerivationGoal::startBuilder()
/* Write closure info to <fileName>. */
writeFile(tmpDir + "/" + fileName,
worker.store.makeValidityRegistration(
- exportReferences({storePath}), false, false));
+ worker.store.exportReferences({storePath}, inputPaths), false, false));
}
}
@@ -581,7 +562,9 @@ void LocalDerivationGoal::startBuilder()
throw Error("derivation '%s' requested impure path '%s', but it was not in allowed-impure-host-deps",
worker.store.printStorePath(drvPath), i);
- dirsInChroot[i] = i;
+ /* Allow files in __impureHostDeps to be missing; e.g.
+ macOS 11+ has no /usr/lib/libSystem*.dylib */
+ dirsInChroot[i] = {i, true};
}
#if __linux__
@@ -729,6 +712,7 @@ void LocalDerivationGoal::startBuilder()
if (!builderOut.readSide)
throw SysError("opening pseudoterminal master");
+ // FIXME: not thread-safe, use ptsname_r
std::string slaveName(ptsname(builderOut.readSide.get()));
if (buildUser) {
@@ -772,7 +756,6 @@ void LocalDerivationGoal::startBuilder()
result.startTime = time(0);
/* Fork a child to build the package. */
- ProcessOptions options;
#if __linux__
if (useChroot) {
@@ -815,8 +798,6 @@ void LocalDerivationGoal::startBuilder()
userNamespaceSync.create();
- options.allowVfork = false;
-
Path maxUserNamespaces = "/proc/sys/user/max_user_namespaces";
static bool userNamespacesEnabled =
pathExists(maxUserNamespaces)
@@ -874,7 +855,7 @@ void LocalDerivationGoal::startBuilder()
writeFull(builderOut.writeSide.get(),
fmt("%d %d\n", usingUserNamespace, child));
_exit(0);
- }, options);
+ });
int res = helper.wait();
if (res != 0 && settings.sandboxFallback) {
@@ -937,11 +918,12 @@ void LocalDerivationGoal::startBuilder()
} else
#endif
{
+#if __linux__
fallback:
- options.allowVfork = !buildUser && !drv->isBuiltin();
+#endif
pid = startProcess([&]() {
runChild();
- }, options);
+ });
}
/* parent */
@@ -956,9 +938,12 @@ void LocalDerivationGoal::startBuilder()
try {
return readLine(builderOut.readSide.get());
} catch (Error & e) {
- e.addTrace({}, "while waiting for the build environment to initialize (previous messages: %s)",
+ auto status = pid.wait();
+ e.addTrace({}, "while waiting for the build environment for '%s' to initialize (%s, previous messages: %s)",
+ worker.store.printStorePath(drvPath),
+ statusToString(status),
concatStringsSep("|", msgs));
- throw e;
+ throw;
}
}();
if (string(msg, 0, 1) == "\2") break;
@@ -1081,123 +1066,38 @@ void LocalDerivationGoal::initEnv()
}
-static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
-
-
void LocalDerivationGoal::writeStructuredAttrs()
{
- auto structuredAttrs = parsedDrv->getStructuredAttrs();
- if (!structuredAttrs) return;
-
- auto json = *structuredAttrs;
-
- /* Add an "outputs" object containing the output paths. */
- nlohmann::json outputs;
- for (auto & i : drv->outputs) {
- /* The placeholder must have a rewrite, so we use it to cover both the
- cases where we know or don't know the output path ahead of time. */
- outputs[i.first] = rewriteStrings(hashPlaceholder(i.first), inputRewrites);
- }
- json["outputs"] = outputs;
-
- /* Handle exportReferencesGraph. */
- auto e = json.find("exportReferencesGraph");
- if (e != json.end() && e->is_object()) {
- for (auto i = e->begin(); i != e->end(); ++i) {
- std::ostringstream str;
- {
- JSONPlaceholder jsonRoot(str, true);
- StorePathSet storePaths;
- for (auto & p : *i)
- storePaths.insert(worker.store.parseStorePath(p.get<std::string>()));
- worker.store.pathInfoToJSON(jsonRoot,
- exportReferences(storePaths), false, true);
- }
- json[i.key()] = nlohmann::json::parse(str.str()); // urgh
- }
- }
-
- writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites));
- chownToBuilder(tmpDir + "/.attrs.json");
-
- /* As a convenience to bash scripts, write a shell file that
- maps all attributes that are representable in bash -
- namely, strings, integers, nulls, Booleans, and arrays and
- objects consisting entirely of those values. (So nested
- arrays or objects are not supported.) */
-
- auto handleSimpleType = [](const nlohmann::json & value) -> std::optional<std::string> {
- if (value.is_string())
- return shellEscape(value);
-
- if (value.is_number()) {
- auto f = value.get<float>();
- if (std::ceil(f) == f)
- return std::to_string(value.get<int>());
+ if (auto structAttrsJson = parsedDrv->prepareStructuredAttrs(worker.store, inputPaths)) {
+ auto json = structAttrsJson.value();
+ nlohmann::json rewritten;
+ for (auto & [i, v] : json["outputs"].get<nlohmann::json::object_t>()) {
+ /* The placeholder must have a rewrite, so we use it to cover both the
+ cases where we know or don't know the output path ahead of time. */
+ rewritten[i] = rewriteStrings(v, inputRewrites);
}
- if (value.is_null())
- return std::string("''");
-
- if (value.is_boolean())
- return value.get<bool>() ? std::string("1") : std::string("");
-
- return {};
- };
-
- std::string jsonSh;
+ json["outputs"] = rewritten;
- for (auto i = json.begin(); i != json.end(); ++i) {
+ auto jsonSh = writeStructuredAttrsShell(json);
- if (!std::regex_match(i.key(), shVarName)) continue;
-
- auto & value = i.value();
-
- auto s = handleSimpleType(value);
- if (s)
- jsonSh += fmt("declare %s=%s\n", i.key(), *s);
-
- else if (value.is_array()) {
- std::string s2;
- bool good = true;
-
- for (auto i = value.begin(); i != value.end(); ++i) {
- auto s3 = handleSimpleType(i.value());
- if (!s3) { good = false; break; }
- s2 += *s3; s2 += ' ';
- }
-
- if (good)
- jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2);
- }
-
- else if (value.is_object()) {
- std::string s2;
- bool good = true;
-
- for (auto i = value.begin(); i != value.end(); ++i) {
- auto s3 = handleSimpleType(i.value());
- if (!s3) { good = false; break; }
- s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3);
- }
-
- if (good)
- jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2);
- }
+ writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites));
+ chownToBuilder(tmpDir + "/.attrs.sh");
+ env["NIX_ATTRS_SH_FILE"] = tmpDir + "/.attrs.sh";
+ writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites));
+ chownToBuilder(tmpDir + "/.attrs.json");
+ env["NIX_ATTRS_JSON_FILE"] = tmpDir + "/.attrs.json";
}
-
- writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites));
- chownToBuilder(tmpDir + "/.attrs.sh");
}
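With this change a builder can locate the structured-attrs files through `NIX_ATTRS_SH_FILE` and `NIX_ATTRS_JSON_FILE` instead of assuming `.attrs.sh`/`.attrs.json` in the build directory. A hedged sketch of the JSON side from a builder's point of view, using `nlohmann::json` as the patch itself does:

    #include <cstdlib>
    #include <fstream>
    #include <nlohmann/json.hpp>

    int main()
    {
        const char * attrsFile = std::getenv("NIX_ATTRS_JSON_FILE");
        if (!attrsFile) return 1; // not built with structured attrs

        std::ifstream in(attrsFile);
        auto attrs = nlohmann::json::parse(in);

        // "outputs" is the object injected by writeStructuredAttrs() above.
        auto out = attrs.at("outputs").at("out").get<std::string>();
        return out.empty();
    }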
static StorePath pathPartOfReq(const DerivedPath & req)
{
return std::visit(overloaded {
- [&](DerivedPath::Opaque bo) {
+ [&](const DerivedPath::Opaque & bo) {
return bo.path;
},
- [&](DerivedPath::Built bfd) {
+ [&](const DerivedPath::Built & bfd) {
return bfd.drvPath;
},
}, req.raw());
@@ -1282,7 +1182,8 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
- PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override
+ PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair,
+ const StorePathSet & references = StorePathSet()) override
{ throw Error("addToStore"); }
void addToStore(const ValidPathInfo & info, Source & narSource,
@@ -1301,9 +1202,10 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
}
StorePath addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override
+ FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair,
+ const StorePathSet & references = StorePathSet()) override
{
- auto path = next->addToStoreFromDump(dump, name, method, hashAlgo, repair);
+ auto path = next->addToStoreFromDump(dump, name, method, hashAlgo, repair, references);
goal.addDependency(path);
return path;
}
@@ -1327,16 +1229,24 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
// corresponds to an allowed derivation
{ throw Error("registerDrvOutput"); }
- std::optional<const Realisation> queryRealisation(const DrvOutput & id) override
+ void queryRealisationUncached(const DrvOutput & id,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override
// XXX: This should probably be allowed if the realisation corresponds to
// an allowed derivation
- { throw Error("queryRealisation"); }
+ {
+ if (!goal.isAllowed(id))
+ return callback(nullptr);
+ next->queryRealisation(id, std::move(callback));
+ }
- void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode) override
+ void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override
{
+ assert(!evalStore);
+
if (buildMode != bmNormal) throw Error("unsupported build mode");
StorePathSet newPaths;
+ std::set<Realisation> newRealisations;
for (auto & req : paths) {
if (!goal.isAllowed(req))
@@ -1349,16 +1259,28 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
auto p = std::get_if<DerivedPath::Built>(&path);
if (!p) continue;
auto & bfd = *p;
+ auto drv = readDerivation(bfd.drvPath);
+ auto drvHashes = staticOutputHashes(*this, drv);
auto outputs = next->queryDerivationOutputMap(bfd.drvPath);
for (auto & [outputName, outputPath] : outputs)
- if (wantOutput(outputName, bfd.outputs))
+ if (wantOutput(outputName, bfd.outputs)) {
newPaths.insert(outputPath);
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
+ auto thisRealisation = next->queryRealisation(
+ DrvOutput{drvHashes.at(outputName), outputName}
+ );
+ assert(thisRealisation);
+ newRealisations.insert(*thisRealisation);
+ }
+ }
}
StorePathSet closure;
next->computeFSClosure(newPaths, closure);
for (auto & path : closure)
goal.addDependency(path);
+ for (auto & real : Realisation::closure(*next, newRealisations))
+ goal.addedDrvOutputs.insert(real.id);
}
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
@@ -1404,7 +1326,7 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
void LocalDerivationGoal::startDaemon()
{
- settings.requireExperimentalFeature("recursive-nix");
+ settings.requireExperimentalFeature(Xp::RecursiveNix);
Store::Params params;
params["path-info-cache-size"] = "0";
@@ -1437,7 +1359,7 @@ void LocalDerivationGoal::startDaemon()
AutoCloseFD remote = accept(daemonSocket.get(),
(struct sockaddr *) &remoteAddr, &remoteAddrLen);
if (!remote) {
- if (errno == EINTR) continue;
+ if (errno == EINTR || errno == EAGAIN) continue;
if (errno == EINVAL) break;
throw SysError("accepting connection");
}
@@ -1734,7 +1656,7 @@ void LocalDerivationGoal::runChild()
/* N.B. it is realistic that these paths might not exist. It
happens when testing Nix building fixed-output derivations
within a pure derivation. */
- for (auto & path : { "/etc/resolv.conf", "/etc/services", "/etc/hosts", "/var/run/nscd/socket" })
+ for (auto & path : { "/etc/resolv.conf", "/etc/services", "/etc/hosts" })
if (pathExists(path))
ss.push_back(path);
}
@@ -1916,7 +1838,7 @@ void LocalDerivationGoal::runChild()
/* Fill in the arguments. */
Strings args;
- const char *builder = "invalid";
+ std::string builder = "invalid";
if (drv->isBuiltin()) {
;
@@ -2042,13 +1964,13 @@ void LocalDerivationGoal::runChild()
}
args.push_back(drv->builder);
} else {
- builder = drv->builder.c_str();
+ builder = drv->builder;
args.push_back(std::string(baseNameOf(drv->builder)));
}
}
#else
else {
- builder = drv->builder.c_str();
+ builder = drv->builder;
args.push_back(std::string(baseNameOf(drv->builder)));
}
#endif
@@ -2075,7 +1997,7 @@ void LocalDerivationGoal::runChild()
else if (drv->builder == "builtin:unpack-channel")
builtinUnpackChannel(drv2);
else
- throw Error("unsupported builtin function '%1%'", string(drv->builder, 8));
+ throw Error("unsupported builtin builder '%1%'", string(drv->builder, 8));
_exit(0);
} catch (std::exception & e) {
writeFull(STDERR_FILENO, e.what() + std::string("\n"));
@@ -2104,9 +2026,9 @@ void LocalDerivationGoal::runChild()
posix_spawnattr_setbinpref_np(&attrp, 1, &cpu, NULL);
}
- posix_spawn(NULL, builder, NULL, &attrp, stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
+ posix_spawn(NULL, builder.c_str(), NULL, &attrp, stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
#else
- execve(builder, stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
+ execve(builder.c_str(), stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
#endif
throw SysError("executing '%1%'", drv->builder);
@@ -2221,8 +2143,7 @@ void LocalDerivationGoal::registerOutputs()
/* Pass blank Sink as we are not ready to hash data at this stage. */
NullSink blank;
- auto references = worker.store.parseStorePathSet(
- scanForReferences(blank, actualPath, worker.store.printStorePathSet(referenceablePaths)));
+ auto references = scanForReferences(blank, actualPath, referenceablePaths);
outputReferencesIfUnregistered.insert_or_assign(
outputName,
@@ -2236,8 +2157,8 @@ void LocalDerivationGoal::registerOutputs()
/* Since we'll use the already installed versions of these, we
can treat them as leaves and ignore any references they
have. */
- [&](AlreadyRegistered _) { return StringSet {}; },
- [&](PerhapsNeedToRegister refs) {
+ [&](const AlreadyRegistered &) { return StringSet {}; },
+ [&](const PerhapsNeedToRegister & refs) {
StringSet referencedOutputs;
/* FIXME build inverted map up front so no quadratic waste here */
for (auto & r : refs.refs)
@@ -2273,11 +2194,11 @@ void LocalDerivationGoal::registerOutputs()
};
std::optional<StorePathSet> referencesOpt = std::visit(overloaded {
- [&](AlreadyRegistered skippedFinalPath) -> std::optional<StorePathSet> {
+ [&](const AlreadyRegistered & skippedFinalPath) -> std::optional<StorePathSet> {
finish(skippedFinalPath.path);
return std::nullopt;
},
- [&](PerhapsNeedToRegister r) -> std::optional<StorePathSet> {
+ [&](const PerhapsNeedToRegister & r) -> std::optional<StorePathSet> {
return r.refs;
},
}, outputReferencesIfUnregistered.at(outputName));
@@ -2289,7 +2210,7 @@ void LocalDerivationGoal::registerOutputs()
auto rewriteOutput = [&]() {
/* Apply hash rewriting if necessary. */
if (!outputRewrites.empty()) {
- warn("rewriting hashes in '%1%'; cross fingers", actualPath);
+ debug("rewriting hashes in '%1%'; cross fingers", actualPath);
/* FIXME: this is in-memory. */
StringSink sink;
@@ -2298,10 +2219,6 @@ void LocalDerivationGoal::registerOutputs()
sink.s = make_ref<std::string>(rewriteStrings(*sink.s, outputRewrites));
StringSource source(*sink.s);
restorePath(actualPath, source);
-
- /* FIXME: set proper permissions in restorePath() so
- we don't have to do another traversal. */
- canonicalisePathMetaData(actualPath, -1, inodesSeen);
}
};
@@ -2355,32 +2272,19 @@ void LocalDerivationGoal::registerOutputs()
}
auto got = caSink.finish().first;
auto refs = rewriteRefs();
- HashModuloSink narSink { htSHA256, oldHashPart };
- dumpPath(actualPath, narSink);
- auto narHashAndSize = narSink.finish();
- ValidPathInfo newInfo0 {
- worker.store.makeFixedOutputPath(
+
+ auto finalPath = worker.store.makeFixedOutputPath(
outputHash.method,
got,
outputPathName(drv->name, outputName),
refs.second,
- refs.first),
- narHashAndSize.first,
- };
- newInfo0.narSize = narHashAndSize.second;
- newInfo0.ca = FixedOutputHash {
- .method = outputHash.method,
- .hash = got,
- };
- newInfo0.references = refs.second;
- if (refs.first)
- newInfo0.references.insert(newInfo0.path);
- if (scratchPath != newInfo0.path) {
+ refs.first);
+ if (scratchPath != finalPath) {
// Also rewrite the output path
auto source = sinkToSource([&](Sink & nextSink) {
StringSink sink;
dumpPath(actualPath, sink);
- RewritingSink rsink2(oldHashPart, std::string(newInfo0.path.hashPart()), nextSink);
+ RewritingSink rsink2(oldHashPart, std::string(finalPath.hashPart()), nextSink);
rsink2(*sink.s);
rsink2.flush();
});
@@ -2390,12 +2294,27 @@ void LocalDerivationGoal::registerOutputs()
movePath(tmpPath, actualPath);
}
+ HashResult narHashAndSize = hashPath(htSHA256, actualPath);
+ ValidPathInfo newInfo0 {
+ finalPath,
+ narHashAndSize.first,
+ };
+
+ newInfo0.narSize = narHashAndSize.second;
+ newInfo0.ca = FixedOutputHash {
+ .method = outputHash.method,
+ .hash = got,
+ };
+ newInfo0.references = refs.second;
+ if (refs.first)
+ newInfo0.references.insert(newInfo0.path);
+
assert(newInfo0.ca);
return newInfo0;
};
ValidPathInfo newInfo = std::visit(overloaded {
- [&](DerivationOutputInputAddressed output) {
+ [&](const DerivationOutputInputAddressed & output) {
/* input-addressed case */
auto requiredFinalPath = output.path;
/* Preemptively add rewrite rule for final hash, as that is
@@ -2414,14 +2333,14 @@ void LocalDerivationGoal::registerOutputs()
newInfo0.references.insert(newInfo0.path);
return newInfo0;
},
- [&](DerivationOutputCAFixed dof) {
+ [&](const DerivationOutputCAFixed & dof) {
auto newInfo0 = newInfoFromCA(DerivationOutputCAFloating {
.method = dof.hash.method,
.hashType = dof.hash.hash.type,
});
/* Check wanted hash */
- Hash & wanted = dof.hash.hash;
+ const Hash & wanted = dof.hash.hash;
assert(newInfo0.ca);
auto got = getContentAddressHash(*newInfo0.ca);
if (wanted != got) {
@@ -2450,6 +2369,10 @@ void LocalDerivationGoal::registerOutputs()
},
}, output.output);
+ /* FIXME: set proper permissions in restorePath() so
+ we don't have to do another traversal. */
+ canonicalisePathMetaData(actualPath, -1, inodesSeen);
+
/* Calculate where we'll move the output files. In the checking case we
will leave them where they are, for now, rather than move to
their usual "final destination" */
@@ -2459,6 +2382,7 @@ void LocalDerivationGoal::registerOutputs()
floating CA derivations and hash-mismatching fixed-output
derivations. */
PathLocks dynamicOutputLock;
+ dynamicOutputLock.setDeletion(true);
auto optFixedPath = output.path(worker.store, drv->name, outputName);
if (!optFixedPath ||
worker.store.printStorePath(*optFixedPath) != finalDestPath)
@@ -2482,6 +2406,7 @@ void LocalDerivationGoal::registerOutputs()
assert(newInfo.ca);
} else {
auto destPath = worker.store.toRealPath(finalDestPath);
+ deletePath(destPath);
movePath(actualPath, destPath);
actualPath = destPath;
}
@@ -2551,7 +2476,13 @@ void LocalDerivationGoal::registerOutputs()
infos.emplace(outputName, std::move(newInfo));
}
- if (buildMode == bmCheck) return;
+ if (buildMode == bmCheck) {
+ // In case of FOD mismatches on `--check`, an error must be thrown, as this is also
+ // a source of non-determinism.
+ if (delayedException)
+ std::rethrow_exception(delayedException);
+ return;
+ }
/* Apply output checks. */
checkOutputs(infos);
@@ -2636,7 +2567,7 @@ void LocalDerivationGoal::registerOutputs()
that for floating CA derivations, which otherwise couldn't be cached,
but it's fine to do in all cases. */
- if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
for (auto& [outputName, newInfo] : infos) {
auto thisRealisation = Realisation{
.id = DrvOutput{initialOutputs.at(outputName).outputHash,
diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh
index d30be2351..088a57209 100644
--- a/src/libstore/build/local-derivation-goal.hh
+++ b/src/libstore/build/local-derivation-goal.hh
@@ -108,6 +108,9 @@ struct LocalDerivationGoal : public DerivationGoal
/* Paths that were added via recursive Nix calls. */
StorePathSet addedPaths;
+ /* Realisations that were added via recursive Nix calls. */
+ std::set<DrvOutput> addedDrvOutputs;
+
/* Recursive Nix calls are only allowed to build or realize paths
in the original input closure or added via a recursive Nix call
(so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
@@ -116,6 +119,11 @@ struct LocalDerivationGoal : public DerivationGoal
{
return inputPaths.count(path) || addedPaths.count(path);
}
+ bool isAllowed(const DrvOutput & id)
+ {
+ return addedDrvOutputs.count(id);
+ }
+
bool isAllowed(const DerivedPath & req);
friend struct RestrictedStore;
diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc
index e56cfadbe..29a8cfb87 100644
--- a/src/libstore/build/substitution-goal.cc
+++ b/src/libstore/build/substitution-goal.cc
@@ -204,7 +204,7 @@ void PathSubstitutionGoal::tryToRun()
Activity act(*logger, actSubstitute, Logger::Fields{worker.store.printStorePath(storePath), sub->getUri()});
PushActivity pact(act.id);
- copyStorePath(ref<Store>(sub), ref<Store>(worker.store.shared_from_this()),
+ copyStorePath(*sub, worker.store,
subPath ? *subPath : storePath, repair, sub->isTrusted ? NoCheckSigs : CheckSigs);
promise.set_value();
diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc
index 0f2ade348..55afb5cca 100644
--- a/src/libstore/build/worker.cc
+++ b/src/libstore/build/worker.cc
@@ -9,11 +9,12 @@
namespace nix {
-Worker::Worker(Store & store)
+Worker::Worker(Store & store, Store & evalStore)
: act(*logger, actRealise)
, actDerivations(*logger, actBuilds)
, actSubstitutions(*logger, actCopyPaths)
, store(store)
+ , evalStore(evalStore)
{
/* Debugging: prevent recursive workers. */
nrLocalBuilds = 0;
@@ -238,7 +239,7 @@ void Worker::run(const Goals & _topGoals)
}
}
- /* Call queryMissing() efficiently query substitutes. */
+ /* Call queryMissing() to efficiently query substitutes. */
StorePathSet willBuild, willSubstitute, unknown;
uint64_t downloadSize, narSize;
store.queryMissing(topPaths, willBuild, willSubstitute, unknown, downloadSize, narSize);
diff --git a/src/libstore/build/worker.hh b/src/libstore/build/worker.hh
index 918de35f6..6a3b99c02 100644
--- a/src/libstore/build/worker.hh
+++ b/src/libstore/build/worker.hh
@@ -110,6 +110,7 @@ public:
bool checkMismatch;
Store & store;
+ Store & evalStore;
std::unique_ptr<HookInstance> hook;
@@ -131,7 +132,7 @@ public:
it answers with "decline-permanently", we don't try again. */
bool tryBuildHook = true;
- Worker(Store & store);
+ Worker(Store & store, Store & evalStore);
~Worker();
/* Make a goal (with caching). */
diff --git a/src/libstore/ca-specific-schema.sql b/src/libstore/ca-specific-schema.sql
index 20ee046a1..08af0cc1f 100644
--- a/src/libstore/ca-specific-schema.sql
+++ b/src/libstore/ca-specific-schema.sql
@@ -3,10 +3,19 @@
-- is enabled
create table if not exists Realisations (
+ id integer primary key autoincrement not null,
drvPath text not null,
outputName text not null, -- symbolic output id, usually "out"
outputPath integer not null,
signatures text, -- space-separated list
- primary key (drvPath, outputName),
foreign key (outputPath) references ValidPaths(id) on delete cascade
);
+
+create index if not exists IndexRealisations on Realisations(drvPath, outputName);
+
+create table if not exists RealisationsRefs (
+ referrer integer not null,
+ realisationReference integer,
+ foreign key (referrer) references Realisations(id) on delete cascade,
+ foreign key (realisationReference) references Realisations(id) on delete restrict
+);
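
With the new surrogate `id` column, realisation-to-realisation references can be stored as integer edges in `RealisationsRefs`. A sketch of inserting such an edge through the SQLite C API (the helper, function name, and ids are illustrative, not Nix code):

    #include <sqlite3.h>
    #include <stdexcept>
    #include <string>

    static void exec(sqlite3 * db, const std::string & sql)
    {
        char * err = nullptr;
        if (sqlite3_exec(db, sql.c_str(), nullptr, nullptr, &err) != SQLITE_OK) {
            std::string msg = err ? err : "unknown error";
            sqlite3_free(err);
            throw std::runtime_error("sqlite: " + msg);
        }
    }

    // Record that realisation `referrer` depends on realisation
    // `reference`; both values come from Realisations.id.
    void linkRealisations(sqlite3 * db, long long referrer, long long reference)
    {
        exec(db, "insert into RealisationsRefs (referrer, realisationReference) "
            "values (" + std::to_string(referrer) + ", "
            + std::to_string(reference) + ")");
    }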
diff --git a/src/libstore/content-address.cc b/src/libstore/content-address.cc
index 90a3ad1f5..cf32ccdc4 100644
--- a/src/libstore/content-address.cc
+++ b/src/libstore/content-address.cc
@@ -31,10 +31,10 @@ std::string makeFixedOutputCA(FileIngestionMethod method, const Hash & hash)
std::string renderContentAddress(ContentAddress ca)
{
return std::visit(overloaded {
- [](TextHash th) {
+ [](TextHash & th) {
return "text:" + th.hash.to_string(Base32, true);
},
- [](FixedOutputHash fsh) {
+ [](FixedOutputHash & fsh) {
return makeFixedOutputCA(fsh.method, fsh.hash);
}
}, ca);
@@ -43,10 +43,10 @@ std::string renderContentAddress(ContentAddress ca)
std::string renderContentAddressMethod(ContentAddressMethod cam)
{
return std::visit(overloaded {
- [](TextHashMethod &th) {
+ [](TextHashMethod & th) {
return std::string{"text:"} + printHashType(htSHA256);
},
- [](FixedOutputHashMethod &fshm) {
+ [](FixedOutputHashMethod & fshm) {
return "fixed:" + makeFileIngestionPrefix(fshm.fileIngestionMethod) + printHashType(fshm.hashType);
}
}, cam);
@@ -104,12 +104,12 @@ ContentAddress parseContentAddress(std::string_view rawCa) {
return std::visit(
overloaded {
- [&](TextHashMethod thm) {
+ [&](TextHashMethod & thm) {
return ContentAddress(TextHash {
.hash = Hash::parseNonSRIUnprefixed(rest, htSHA256)
});
},
- [&](FixedOutputHashMethod fohMethod) {
+ [&](FixedOutputHashMethod & fohMethod) {
return ContentAddress(FixedOutputHash {
.method = fohMethod.fileIngestionMethod,
.hash = Hash::parseNonSRIUnprefixed(rest, std::move(fohMethod.hashType)),
@@ -120,8 +120,10 @@ ContentAddress parseContentAddress(std::string_view rawCa) {
ContentAddressMethod parseContentAddressMethod(std::string_view caMethod)
{
- std::string_view asPrefix {std::string{caMethod} + ":"};
- return parseContentAddressMethodPrefix(asPrefix);
+ std::string asPrefix = std::string{caMethod} + ":";
+ // parseContentAddressMethodPrefix takes its argument by reference
+ std::string_view asPrefixView = asPrefix;
+ return parseContentAddressMethodPrefix(asPrefixView);
}
std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt)
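
The rewrite above fixes a lifetime bug: the old code bound a `std::string_view` to a temporary `std::string`, leaving the view dangling. The same bug in isolation (a standalone sketch, not Nix code):

    #include <string>
    #include <string_view>

    std::string_view bad(const std::string & s)
    {
        std::string_view v { s + ":" }; // temporary dies at end of statement
        return v;                       // dangling view: undefined behaviour
    }

    std::string good(const std::string & s)
    {
        std::string owned = s + ":";    // named object keeps the buffer alive
        std::string_view view = owned;  // safe while `owned` is in scope
        return std::string(view);       // copy out before `owned` dies
    }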
@@ -137,10 +139,10 @@ std::string renderContentAddress(std::optional<ContentAddress> ca)
Hash getContentAddressHash(const ContentAddress & ca)
{
return std::visit(overloaded {
- [](TextHash th) {
+ [](const TextHash & th) {
return th.hash;
},
- [](FixedOutputHash fsh) {
+ [](const FixedOutputHash & fsh) {
return fsh.hash;
}
}, ca);
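
All of these hunks switch the `std::visit` lambdas from pass-by-value to pass-by-(const-)reference, avoiding a copy of the held alternative on every visit. The `overloaded` helper they rely on is the standard C++17 idiom; a minimal self-contained version (a sketch, not Nix's actual header):

    #include <string>
    #include <variant>

    template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
    template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>; // deduction guide

    int main()
    {
        std::variant<int, std::string> v = std::string("abc");
        // Const references: the visited alternative is not copied.
        return std::visit(overloaded {
            [](const int & i) { return i; },
            [](const std::string & s) { return (int) s.size(); },
        }, v);
    }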
diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc
index 0be9d2c54..2eb566080 100644
--- a/src/libstore/daemon.cc
+++ b/src/libstore/daemon.cc
@@ -227,8 +227,15 @@ struct ClientSettings
try {
if (name == "ssh-auth-sock") // obsolete
;
+ else if (name == settings.experimentalFeatures.name) {
+ // We don’t want to forward the experimental features to
+ // the daemon, as that could cause some pretty weird stuff
+ if (parseFeatures(tokenizeString<StringSet>(value)) != settings.experimentalFeatures.get())
+ debug("Ignoring the client-specified experimental features");
+ }
else if (trusted
|| name == settings.buildTimeout.name
+ || name == settings.buildRepeat.name
|| name == "connect-timeout"
|| (name == "builders" && value == ""))
settings.set(name, value);
@@ -243,27 +250,10 @@ struct ClientSettings
}
};
-static void writeValidPathInfo(
- ref<Store> store,
- unsigned int clientVersion,
- Sink & to,
- std::shared_ptr<const ValidPathInfo> info)
-{
- to << (info->deriver ? store->printStorePath(*info->deriver) : "")
- << info->narHash.to_string(Base16, false);
- worker_proto::write(*store, to, info->references);
- to << info->registrationTime << info->narSize;
- if (GET_PROTOCOL_MINOR(clientVersion) >= 16) {
- to << info->ultimate
- << info->sigs
- << renderContentAddress(info->ca);
- }
-}
-
static std::vector<DerivedPath> readDerivedPaths(Store & store, unsigned int clientVersion, Source & from)
{
std::vector<DerivedPath> reqs;
- if (GET_PROTOCOL_MINOR(clientVersion) >= 29) {
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 30) {
reqs = worker_proto::read(store, from, Phantom<std::vector<DerivedPath>> {});
} else {
for (auto & s : readStrings<Strings>(from))
@@ -406,25 +396,21 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
FramedSource source(from);
// TODO this is essentially RemoteStore::addCAToStore. Move it up to Store.
return std::visit(overloaded {
- [&](TextHashMethod &_) {
+ [&](TextHashMethod &) {
// We could stream this by changing Store
std::string contents = source.drain();
auto path = store->addTextToStore(name, contents, refs, repair);
return store->queryPathInfo(path);
},
- [&](FixedOutputHashMethod &fohm) {
- if (!refs.empty())
- throw UnimplementedError("cannot yet have refs with flat or nar-hashed data");
- auto path = store->addToStoreFromDump(source, name, fohm.fileIngestionMethod, fohm.hashType, repair);
+ [&](FixedOutputHashMethod & fohm) {
+ auto path = store->addToStoreFromDump(source, name, fohm.fileIngestionMethod, fohm.hashType, repair, refs);
return store->queryPathInfo(path);
},
}, contentAddressMethod);
}();
logger->stopWork();
- to << store->printStorePath(pathInfo->path);
- writeValidPathInfo(store, clientVersion, to, pathInfo);
-
+ pathInfo->write(to, *store, GET_PROTOCOL_MINOR(clientVersion));
} else {
HashType hashAlgo;
std::string baseName;
@@ -471,6 +457,21 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
+ case wopAddMultipleToStore: {
+ bool repair, dontCheckSigs;
+ from >> repair >> dontCheckSigs;
+ if (!trusted && dontCheckSigs)
+ dontCheckSigs = false;
+
+ logger->startWork();
+ FramedSource source(from);
+ store->addMultipleToStore(source,
+ RepairFlag{repair},
+ dontCheckSigs ? NoCheckSigs : CheckSigs);
+ logger->stopWork();
+ break;
+ }
+
case wopAddTextToStore: {
string suffix = readString(from);
string s = readString(from);
@@ -622,9 +623,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
+ // Obsolete.
case wopSyncWithGC: {
logger->startWork();
- store->syncWithGC();
logger->stopWork();
to << 1;
break;
@@ -770,7 +771,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
if (info) {
if (GET_PROTOCOL_MINOR(clientVersion) >= 17)
to << 1;
- writeValidPathInfo(store, clientVersion, to, info);
+ info->write(to, *store, GET_PROTOCOL_MINOR(clientVersion), false);
} else {
assert(GET_PROTOCOL_MINOR(clientVersion) >= 17);
to << 0;
@@ -885,10 +886,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
case wopRegisterDrvOutput: {
logger->startWork();
- auto outputId = DrvOutput::parse(readString(from));
- auto outputPath = StorePath(readString(from));
- store->registerDrvOutput(Realisation{
- .id = outputId, .outPath = outputPath});
+ if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
+ auto outputId = DrvOutput::parse(readString(from));
+ auto outputPath = StorePath(readString(from));
+ store->registerDrvOutput(Realisation{
+ .id = outputId, .outPath = outputPath});
+ } else {
+ auto realisation = worker_proto::read(*store, from, Phantom<Realisation>());
+ store->registerDrvOutput(realisation);
+ }
logger->stopWork();
break;
}
@@ -898,9 +904,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
auto outputId = DrvOutput::parse(readString(from));
auto info = store->queryRealisation(outputId);
logger->stopWork();
- std::set<StorePath> outPaths;
- if (info) outPaths.insert(info->outPath);
- worker_proto::write(*store, to, outPaths);
+ if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
+ std::set<StorePath> outPaths;
+ if (info) outPaths.insert(info->outPath);
+ worker_proto::write(*store, to, outPaths);
+ } else {
+ std::set<Realisation> realisations;
+ if (info) realisations.insert(*info);
+ worker_proto::write(*store, to, realisations);
+ }
break;
}
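
The `GET_PROTOCOL_MINOR(clientVersion) < 31` guards above branch on the minor half of the packed protocol version. By the worker-protocol convention the version is a 0xMMmm word; a tiny standalone illustration (macro bodies written from that convention, not copied from this diff):

    #include <cassert>

    #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
    #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

    int main()
    {
        unsigned int clientVersion = 0x11f; // major 1, minor 31
        assert(GET_PROTOCOL_MINOR(clientVersion) == 31); // gets Realisation sets
        assert(GET_PROTOCOL_MAJOR(clientVersion) == 0x100);
        return 0;
    }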
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index f6defd98f..b926bb711 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -10,18 +10,18 @@ namespace nix {
std::optional<StorePath> DerivationOutput::path(const Store & store, std::string_view drvName, std::string_view outputName) const
{
return std::visit(overloaded {
- [](DerivationOutputInputAddressed doi) -> std::optional<StorePath> {
+ [](const DerivationOutputInputAddressed & doi) -> std::optional<StorePath> {
return { doi.path };
},
- [&](DerivationOutputCAFixed dof) -> std::optional<StorePath> {
+ [&](const DerivationOutputCAFixed & dof) -> std::optional<StorePath> {
return {
dof.path(store, drvName, outputName)
};
},
- [](DerivationOutputCAFloating dof) -> std::optional<StorePath> {
+ [](const DerivationOutputCAFloating & dof) -> std::optional<StorePath> {
return std::nullopt;
},
- [](DerivationOutputDeferred) -> std::optional<StorePath> {
+ [](const DerivationOutputDeferred &) -> std::optional<StorePath> {
return std::nullopt;
},
}, output);
@@ -187,7 +187,7 @@ static DerivationOutput parseDerivationOutput(const Store & store,
},
};
} else {
- settings.requireExperimentalFeature("ca-derivations");
+ settings.requireExperimentalFeature(Xp::CaDerivations);
assert(pathS == "");
return DerivationOutput {
.output = DerivationOutputCAFloating {
@@ -332,22 +332,22 @@ string Derivation::unparse(const Store & store, bool maskOutputs,
if (first) first = false; else s += ',';
s += '('; printUnquotedString(s, i.first);
std::visit(overloaded {
- [&](DerivationOutputInputAddressed doi) {
+ [&](const DerivationOutputInputAddressed & doi) {
s += ','; printUnquotedString(s, maskOutputs ? "" : store.printStorePath(doi.path));
s += ','; printUnquotedString(s, "");
s += ','; printUnquotedString(s, "");
},
- [&](DerivationOutputCAFixed dof) {
+ [&](const DerivationOutputCAFixed & dof) {
s += ','; printUnquotedString(s, maskOutputs ? "" : store.printStorePath(dof.path(store, name, i.first)));
s += ','; printUnquotedString(s, dof.hash.printMethodAlgo());
s += ','; printUnquotedString(s, dof.hash.hash.to_string(Base16, false));
},
- [&](DerivationOutputCAFloating dof) {
+ [&](const DerivationOutputCAFloating & dof) {
s += ','; printUnquotedString(s, "");
s += ','; printUnquotedString(s, makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
s += ','; printUnquotedString(s, "");
},
- [&](DerivationOutputDeferred) {
+ [&](const DerivationOutputDeferred &) {
s += ','; printUnquotedString(s, "");
s += ','; printUnquotedString(s, "");
s += ','; printUnquotedString(s, "");
@@ -420,13 +420,13 @@ DerivationType BasicDerivation::type() const
std::optional<HashType> floatingHashType;
for (auto & i : outputs) {
std::visit(overloaded {
- [&](DerivationOutputInputAddressed _) {
+ [&](const DerivationOutputInputAddressed &) {
inputAddressedOutputs.insert(i.first);
},
- [&](DerivationOutputCAFixed _) {
+ [&](const DerivationOutputCAFixed &) {
fixedCAOutputs.insert(i.first);
},
- [&](DerivationOutputCAFloating dof) {
+ [&](const DerivationOutputCAFloating & dof) {
floatingCAOutputs.insert(i.first);
if (!floatingHashType) {
floatingHashType = dof.hashType;
@@ -435,7 +435,7 @@ DerivationType BasicDerivation::type() const
throw Error("All floating outputs must use the same hash type");
}
},
- [&](DerivationOutputDeferred _) {
+ [&](const DerivationOutputDeferred &) {
deferredIAOutputs.insert(i.first);
},
}, i.second.output);
@@ -538,15 +538,15 @@ DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool m
const auto & res = pathDerivationModulo(store, i.first);
std::visit(overloaded {
// Regular non-CA derivation, replace derivation
- [&](Hash drvHash) {
+ [&](const Hash & drvHash) {
inputs2.insert_or_assign(drvHash.to_string(Base16, false), i.second);
},
- [&](DeferredHash deferredHash) {
+ [&](const DeferredHash & deferredHash) {
isDeferred = true;
inputs2.insert_or_assign(deferredHash.hash.to_string(Base16, false), i.second);
},
// CA derivation's output hashes
- [&](CaOutputHashes outputHashes) {
+ [&](const CaOutputHashes & outputHashes) {
std::set<std::string> justOut = { "out" };
for (auto & output : i.second) {
/* Put each one in with a single "out" output.. */
@@ -568,21 +568,21 @@ DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool m
}
-std::map<std::string, Hash> staticOutputHashes(Store& store, const Derivation& drv)
+std::map<std::string, Hash> staticOutputHashes(Store & store, const Derivation & drv)
{
std::map<std::string, Hash> res;
std::visit(overloaded {
- [&](Hash drvHash) {
+ [&](const Hash & drvHash) {
for (auto & outputName : drv.outputNames()) {
res.insert({outputName, drvHash});
}
},
- [&](DeferredHash deferredHash) {
+ [&](const DeferredHash & deferredHash) {
for (auto & outputName : drv.outputNames()) {
res.insert({outputName, deferredHash.hash});
}
},
- [&](CaOutputHashes outputHashes) {
+ [&](const CaOutputHashes & outputHashes) {
res = outputHashes;
},
}, hashDerivationModulo(store, drv, true));
@@ -666,22 +666,22 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr
for (auto & i : drv.outputs) {
out << i.first;
std::visit(overloaded {
- [&](DerivationOutputInputAddressed doi) {
+ [&](const DerivationOutputInputAddressed & doi) {
out << store.printStorePath(doi.path)
<< ""
<< "";
},
- [&](DerivationOutputCAFixed dof) {
+ [&](const DerivationOutputCAFixed & dof) {
out << store.printStorePath(dof.path(store, drv.name, i.first))
<< dof.hash.printMethodAlgo()
<< dof.hash.hash.to_string(Base16, false);
},
- [&](DerivationOutputCAFloating dof) {
+ [&](const DerivationOutputCAFloating & dof) {
out << ""
<< (makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType))
<< "";
},
- [&](DerivationOutputDeferred) {
+ [&](const DerivationOutputDeferred &) {
out << ""
<< ""
<< "";
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index 2df440536..b1cb68194 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -138,8 +138,8 @@ struct Derivation : BasicDerivation
/* Return the underlying basic derivation but with these changes:
- 1. Input drvs are emptied, but the outputs of them that were used are
- added directly to input sources.
+ 1. Input drvs are emptied, but the outputs of them that were used are
+ added directly to input sources.
2. Input placeholders are replaced with realized input store paths. */
std::optional<BasicDerivation> tryResolve(Store & store);
diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc
index 13833c58e..3d188e981 100644
--- a/src/libstore/derived-path.cc
+++ b/src/libstore/derived-path.cc
@@ -11,18 +11,33 @@ nlohmann::json DerivedPath::Opaque::toJSON(ref<Store> store) const {
return res;
}
-nlohmann::json DerivedPathWithHints::Built::toJSON(ref<Store> store) const {
+nlohmann::json BuiltPath::Built::toJSON(ref<Store> store) const {
nlohmann::json res;
res["drvPath"] = store->printStorePath(drvPath);
for (const auto& [output, path] : outputs) {
- res["outputs"][output] = path ? store->printStorePath(*path) : "";
+ res["outputs"][output] = store->printStorePath(path);
}
return res;
}
-nlohmann::json derivedPathsWithHintsToJSON(const DerivedPathsWithHints & buildables, ref<Store> store) {
+StorePathSet BuiltPath::outPaths() const
+{
+ return std::visit(
+ overloaded{
+ [](const BuiltPath::Opaque & p) { return StorePathSet{p.path}; },
+ [](const BuiltPath::Built & b) {
+ StorePathSet res;
+ for (auto & [_, path] : b.outputs)
+ res.insert(path);
+ return res;
+ },
+ }, raw()
+ );
+}
+
+nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store) {
auto res = nlohmann::json::array();
- for (const DerivedPathWithHints & buildable : buildables) {
+ for (const BuiltPath & buildable : buildables) {
std::visit([&res, store](const auto & buildable) {
res.push_back(buildable.toJSON(store));
}, buildable.raw());
@@ -62,7 +77,7 @@ DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_vi
auto outputsS = s.substr(n + 1);
std::set<string> outputs;
if (outputsS != "*")
- outputs = tokenizeString<std::set<string>>(outputsS);
+ outputs = tokenizeString<std::set<string>>(outputsS, ",");
return {drvPath, outputs};
}
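
The added `","` argument matters because `tokenizeString` splits on whitespace by default, so the outputs part of a spec like `<drvPath>!out,dev` previously came back as the single bogus name `out,dev`. A minimal stand-in with the corrected splitting behaviour (a sketch, not Nix's helper):

    #include <cassert>
    #include <set>
    #include <string>

    // Split `s` on any of the characters in `seps`, dropping empties.
    std::set<std::string> tokenize(const std::string & s, const std::string & seps)
    {
        std::set<std::string> res;
        std::string cur;
        for (char c : s) {
            if (seps.find(c) != std::string::npos) {
                if (!cur.empty()) res.insert(cur);
                cur.clear();
            } else cur += c;
        }
        if (!cur.empty()) res.insert(cur);
        return res;
    }

    int main()
    {
        assert((tokenize("out,dev", ",") == std::set<std::string>{"out", "dev"}));
        return 0;
    }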
@@ -74,4 +89,30 @@ DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
: (DerivedPath) DerivedPath::Built::parse(store, s);
}
+RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
+{
+ RealisedPath::Set res;
+ std::visit(
+ overloaded{
+ [&](const BuiltPath::Opaque & p) { res.insert(p.path); },
+ [&](const BuiltPath::Built & p) {
+ auto drvHashes =
+ staticOutputHashes(store, store.readDerivation(p.drvPath));
+ for (auto& [outputName, outputPath] : p.outputs) {
+ if (settings.isExperimentalFeatureEnabled(
+ Xp::CaDerivations)) {
+ auto thisRealisation = store.queryRealisation(
+ DrvOutput{drvHashes.at(outputName), outputName});
+ assert(thisRealisation); // We’ve built it, so we must
+ // have the realisation

+ res.insert(*thisRealisation);
+ } else {
+ res.insert(outputPath);
+ }
+ }
+ },
+ },
+ raw());
+ return res;
+}
}
diff --git a/src/libstore/derived-path.hh b/src/libstore/derived-path.hh
index 7a2fe59de..9d6ace069 100644
--- a/src/libstore/derived-path.hh
+++ b/src/libstore/derived-path.hh
@@ -2,6 +2,7 @@
#include "util.hh"
#include "path.hh"
+#include "realisation.hh"
#include <optional>
@@ -79,51 +80,44 @@ struct DerivedPath : _DerivedPathRaw {
/**
* A built derived path with hints in the form of optional concrete output paths.
*
- * See 'DerivedPathWithHints' for more an explanation.
+ * See 'BuiltPath' for an explanation.
*/
-struct DerivedPathWithHintsBuilt {
+struct BuiltPathBuilt {
StorePath drvPath;
- std::map<std::string, std::optional<StorePath>> outputs;
+ std::map<std::string, StorePath> outputs;
nlohmann::json toJSON(ref<Store> store) const;
- static DerivedPathWithHintsBuilt parse(const Store & store, std::string_view);
+ static BuiltPathBuilt parse(const Store & store, std::string_view);
};
-using _DerivedPathWithHintsRaw = std::variant<
+using _BuiltPathRaw = std::variant<
DerivedPath::Opaque,
- DerivedPathWithHintsBuilt
+ BuiltPathBuilt
>;
/**
- * A derived path with hints in the form of optional concrete output paths in the built case.
- *
- * This type is currently just used by the CLI. The paths are filled in
- * during evaluation for derivations that know what paths they will
- * produce in advanced, i.e. input-addressed or fixed-output content
- * addressed derivations.
- *
- * That isn't very good, because it puts floating content-addressed
- * derivations "at a disadvantage". It would be better to never rely on
- * the output path of unbuilt derivations, and exclusively use the
- * realizations types to work with built derivations' concrete output
- * paths.
+ * A built path. Similar to a `DerivedPath`, but enriched with the corresponding
+ * output path(s).
*/
-// FIXME Stop using and delete this, or if that is not possible move out of libstore to libcmd.
-struct DerivedPathWithHints : _DerivedPathWithHintsRaw {
- using Raw = _DerivedPathWithHintsRaw;
+struct BuiltPath : _BuiltPathRaw {
+ using Raw = _BuiltPathRaw;
using Raw::Raw;
using Opaque = DerivedPathOpaque;
- using Built = DerivedPathWithHintsBuilt;
+ using Built = BuiltPathBuilt;
inline const Raw & raw() const {
return static_cast<const Raw &>(*this);
}
+ StorePathSet outPaths() const;
+ RealisedPath::Set toRealisedPaths(Store & store) const;
+
};
-typedef std::vector<DerivedPathWithHints> DerivedPathsWithHints;
+typedef std::vector<DerivedPath> DerivedPaths;
+typedef std::vector<BuiltPath> BuiltPaths;
-nlohmann::json derivedPathsWithHintsToJSON(const DerivedPathsWithHints & buildables, ref<Store> store);
+nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store);
}
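
`BuiltPath` follows the same pattern as `DerivedPath`: a struct that inherits from a `std::variant` alias and re-exposes it through `raw()` for use with `std::visit`. The pattern in miniature (a standalone sketch with toy member types):

    #include <string>
    #include <variant>
    #include <vector>

    struct Opaque { std::string path; };
    struct Built { std::string drvPath; std::vector<std::string> outputs; };

    using _Raw = std::variant<Opaque, Built>;

    struct BuiltPathLike : _Raw {
        using Raw = _Raw;
        using Raw::Raw; // inherit the variant's constructors

        // Accessor for std::visit, as in the header above.
        const Raw & raw() const { return static_cast<const Raw &>(*this); }
    };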
diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc
index 8f26af685..62dc21c59 100644
--- a/src/libstore/dummy-store.cc
+++ b/src/libstore/dummy-store.cc
@@ -43,11 +43,6 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store
RepairFlag repair, CheckSigsFlag checkSigs) override
{ unsupported("addToStore"); }
- StorePath addToStore(const string & name, const Path & srcPath,
- FileIngestionMethod method, HashType hashAlgo,
- PathFilter & filter, RepairFlag repair) override
- { unsupported("addToStore"); }
-
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair) override
{ unsupported("addTextToStore"); }
@@ -55,8 +50,9 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store
void narFromPath(const StorePath & path, Sink & sink) override
{ unsupported("narFromPath"); }
- std::optional<const Realisation> queryRealisation(const DrvOutput&) override
- { unsupported("queryRealisation"); }
+ void queryRealisationUncached(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override
+ { callback(nullptr); }
};
static RegisterStoreImplementation<DummyStore, DummyStoreConfig> regDummyStore;
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index 514ab3bf9..4621a8217 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -7,7 +7,7 @@
#include "finally.hh"
#include "callback.hh"
-#ifdef ENABLE_S3
+#if ENABLE_S3
#include <aws/core/client/ClientConfiguration.h>
#endif
@@ -544,6 +544,14 @@ struct curlFileTransfer : public FileTransfer
stopWorkerThread();
});
+#ifdef __linux__
+ /* Cause this thread to not share any FS attributes with the main thread,
+ because this causes setns() in restoreMountNamespace() to fail.
+ Ideally, this would happen in the std::thread() constructor. */
+ if (unshare(CLONE_FS) != 0)
+ throw SysError("unsharing filesystem state in download thread");
+#endif
+
std::map<CURL *, std::shared_ptr<TransferItem>> items;
bool quit = false;
@@ -665,7 +673,7 @@ struct curlFileTransfer : public FileTransfer
writeFull(wakeupPipe.writeSide.get(), " ");
}
-#ifdef ENABLE_S3
+#if ENABLE_S3
std::tuple<std::string, std::string, Store::Params> parseS3Uri(std::string uri)
{
auto [path, params] = splitUriAndParams(uri);
@@ -688,7 +696,7 @@ struct curlFileTransfer : public FileTransfer
if (hasPrefix(request.uri, "s3://")) {
// FIXME: do this on a worker thread
try {
-#ifdef ENABLE_S3
+#if ENABLE_S3
auto [bucketName, key, params] = parseS3Uri(request.uri);
std::string profile = get(params, "profile").value_or("");
@@ -716,15 +724,24 @@ struct curlFileTransfer : public FileTransfer
}
};
+ref<curlFileTransfer> makeCurlFileTransfer()
+{
+ return make_ref<curlFileTransfer>();
+}
+
ref<FileTransfer> getFileTransfer()
{
- static ref<FileTransfer> fileTransfer = makeFileTransfer();
+ static ref<curlFileTransfer> fileTransfer = makeCurlFileTransfer();
+
+ if (fileTransfer->state_.lock()->quit)
+ fileTransfer = makeCurlFileTransfer();
+
return fileTransfer;
}
ref<FileTransfer> makeFileTransfer()
{
- return make_ref<curlFileTransfer>();
+ return makeCurlFileTransfer();
}
std::future<FileTransferResult> FileTransfer::enqueueFileTransfer(const FileTransferRequest & request)
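
`getFileTransfer()` now checks whether the cached worker has shut down (its `quit` flag is set, e.g. after a fork) and replaces it with a fresh instance instead of handing out a dead one. The pattern in miniature (a sketch using `std::shared_ptr` and a plain flag in place of Nix's `ref` and locked state):

    #include <memory>

    struct Transfer { bool quit = false; };

    std::shared_ptr<Transfer> getTransfer()
    {
        static std::shared_ptr<Transfer> t = std::make_shared<Transfer>();
        if (t->quit)                          // previous worker has exited
            t = std::make_shared<Transfer>(); // so start a fresh one
        return t;
    }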
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index bc692ca42..7a414da6b 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -10,48 +10,22 @@
#include <regex>
#include <random>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/statvfs.h>
+#include <climits>
#include <errno.h>
#include <fcntl.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/types.h>
+#include <sys/un.h>
#include <unistd.h>
-#include <climits>
namespace nix {
-static string gcLockName = "gc.lock";
-static string gcRootsDir = "gcroots";
-
-
-/* Acquire the global GC lock. This is used to prevent new Nix
- processes from starting after the temporary root files have been
- read. To be precise: when they try to create a new temporary root
- file, they will block until the garbage collector has finished /
- yielded the GC lock. */
-AutoCloseFD LocalStore::openGCLock(LockType lockType)
-{
- Path fnGCLock = (format("%1%/%2%")
- % stateDir % gcLockName).str();
-
- debug(format("acquiring global GC lock '%1%'") % fnGCLock);
-
- AutoCloseFD fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
- if (!fdGCLock)
- throw SysError("opening global GC lock '%1%'", fnGCLock);
-
- if (!lockFile(fdGCLock.get(), lockType, false)) {
- printInfo("waiting for the big garbage collector lock...");
- lockFile(fdGCLock.get(), lockType, true);
- }
-
- /* !!! Restrict read permission on the GC root. Otherwise any
- process that can open the file for reading can DoS the
- collector. */
-
- return fdGCLock;
-}
+static std::string gcSocketPath = "/gc-socket/socket";
+static std::string gcRootsDir = "gcroots";
static void makeSymlink(const Path & link, const Path & target)
@@ -71,12 +45,6 @@ static void makeSymlink(const Path & link, const Path & target)
}
-void LocalStore::syncWithGC()
-{
- AutoCloseFD fdGCLock = openGCLock(ltRead);
-}
-
-
void LocalStore::addIndirectRoot(const Path & path)
{
string hash = hashString(htSHA1, path).to_string(Base32, false);
@@ -95,6 +63,12 @@ Path LocalFSStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot
"creating a garbage collector root (%1%) in the Nix store is forbidden "
"(are you running nix-build inside the store?)", gcRoot);
+ /* Register this root with the garbage collector, if it's
+ running. This should be superfluous, since the caller should
+ already have registered this root, but let's be on the safe
+ side. */
+ addTempRoot(storePath);
+
/* Don't clobber the link if it already exists and doesn't
point to the Nix store. */
if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
@@ -102,11 +76,6 @@ Path LocalFSStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot
makeSymlink(gcRoot, printStorePath(storePath));
addIndirectRoot(gcRoot);
- /* Grab the global GC root, causing us to block while a GC is in
- progress. This prevents the set of permanent roots from
- increasing while a GC is in progress. */
- syncWithGC();
-
return gcRoot;
}
@@ -119,8 +88,6 @@ void LocalStore::addTempRoot(const StorePath & path)
if (!state->fdTempRoots) {
while (1) {
- AutoCloseFD fdGCLock = openGCLock(ltRead);
-
if (pathExists(fnTempRoots))
/* It *must* be stale, since there can be no two
processes with the same pid. */
@@ -128,10 +95,8 @@ void LocalStore::addTempRoot(const StorePath & path)
state->fdTempRoots = openLockFile(fnTempRoots, true);
- fdGCLock = -1;
-
- debug(format("acquiring read lock on '%1%'") % fnTempRoots);
- lockFile(state->fdTempRoots.get(), ltRead, true);
+ debug("acquiring write lock on '%s'", fnTempRoots);
+ lockFile(state->fdTempRoots.get(), ltWrite, true);
/* Check whether the garbage collector didn't get in our
way. */
@@ -147,24 +112,55 @@ void LocalStore::addTempRoot(const StorePath & path)
}
- /* Upgrade the lock to a write lock. This will cause us to block
- if the garbage collector is holding our lock. */
- debug(format("acquiring write lock on '%1%'") % fnTempRoots);
- lockFile(state->fdTempRoots.get(), ltWrite, true);
+ if (!state->fdGCLock)
+ state->fdGCLock = openGCLock();
+
+ restart:
+ FdLock gcLock(state->fdGCLock.get(), ltRead, false, "");
+
+ if (!gcLock.acquired) {
+ /* We couldn't get a shared global GC lock, so the garbage
+ collector is running. So we have to connect to the garbage
+ collector and inform it about our root. */
+ if (!state->fdRootsSocket) {
+ auto socketPath = stateDir.get() + gcSocketPath;
+ debug("connecting to '%s'", socketPath);
+ state->fdRootsSocket = createUnixDomainSocket();
+ nix::connect(state->fdRootsSocket.get(), socketPath);
+ }
+
+ try {
+ debug("sending GC root '%s'", printStorePath(path));
+ writeFull(state->fdRootsSocket.get(), printStorePath(path) + "\n", false);
+ char c;
+ readFull(state->fdRootsSocket.get(), &c, 1);
+ assert(c == '1');
+ debug("got ack for GC root '%s'", printStorePath(path));
+ } catch (SysError & e) {
+ /* The garbage collector may have exited, so we need to
+ restart. */
+ if (e.errNo == EPIPE) {
+ debug("GC socket disconnected");
+ state->fdRootsSocket.close();
+ goto restart;
+ }
+ } catch (EndOfFile & e) {
+ debug("GC socket disconnected");
+ state->fdRootsSocket.close();
+ goto restart;
+ }
+ }
+ /* Append the store path to the temporary roots file. */
string s = printStorePath(path) + '\0';
writeFull(state->fdTempRoots.get(), s);
-
- /* Downgrade to a read lock. */
- debug(format("downgrading to read lock on '%1%'") % fnTempRoots);
- lockFile(state->fdTempRoots.get(), ltRead, true);
}
static std::string censored = "{censored}";
-void LocalStore::findTempRoots(FDs & fds, Roots & tempRoots, bool censor)
+void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
{
/* Read the `temproots' directory for per-process temporary root
files. */
@@ -179,35 +175,25 @@ void LocalStore::findTempRoots(FDs & fds, Roots & tempRoots, bool censor)
pid_t pid = std::stoi(i.name);
debug(format("reading temporary root file '%1%'") % path);
- FDPtr fd(new AutoCloseFD(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666)));
- if (!*fd) {
+ AutoCloseFD fd(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666));
+ if (!fd) {
/* It's okay if the file has disappeared. */
if (errno == ENOENT) continue;
throw SysError("opening temporary roots file '%1%'", path);
}
- /* This should work, but doesn't, for some reason. */
- //FDPtr fd(new AutoCloseFD(openLockFile(path, false)));
- //if (*fd == -1) continue;
-
/* Try to acquire a write lock without blocking. This can
only succeed if the owning process has died. In that case
we don't care about its temporary roots. */
- if (lockFile(fd->get(), ltWrite, false)) {
+ if (lockFile(fd.get(), ltWrite, false)) {
printInfo("removing stale temporary roots file '%1%'", path);
unlink(path.c_str());
- writeFull(fd->get(), "d");
+ writeFull(fd.get(), "d");
continue;
}
- /* Acquire a read lock. This will prevent the owning process
- from upgrading to a write lock, therefore it will block in
- addTempRoot(). */
- debug(format("waiting for read lock on '%1%'") % path);
- lockFile(fd->get(), ltRead, true);
-
/* Read the entire file. */
- string contents = readFile(fd->get());
+ string contents = readFile(fd.get());
/* Extract the roots. */
string::size_type pos = 0, end;
@@ -218,8 +204,6 @@ void LocalStore::findTempRoots(FDs & fds, Roots & tempRoots, bool censor)
tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{temp:%d}", pid));
pos = end + 1;
}
-
- fds.push_back(fd); /* keep open */
}
}
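
When the GC holds the lock, `addTempRoot()` above registers roots over a Unix domain socket: one store path per line, acknowledged with a '1' byte. The client side of that handshake reduced to plain POSIX calls (a sketch; reconnection and partial-write handling omitted):

    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>
    #include <cstring>
    #include <string>

    bool sendRoot(const std::string & socketPath, const std::string & storePath)
    {
        int fd = socket(AF_UNIX, SOCK_STREAM, 0);
        if (fd < 0) return false;
        sockaddr_un addr{};
        addr.sun_family = AF_UNIX;
        strncpy(addr.sun_path, socketPath.c_str(), sizeof(addr.sun_path) - 1);
        if (connect(fd, (sockaddr *) &addr, sizeof(addr)) < 0) { close(fd); return false; }
        std::string msg = storePath + "\n";  // one root per line
        if (write(fd, msg.data(), msg.size()) != (ssize_t) msg.size()) { close(fd); return false; }
        char ack = 0;
        if (read(fd, &ack, 1) != 1) { close(fd); return false; }
        close(fd);
        return ack == '1';                   // server acks each root
    }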
@@ -304,8 +288,7 @@ Roots LocalStore::findRoots(bool censor)
Roots roots;
findRootsNoTemp(roots, censor);
- FDs fds;
- findTempRoots(fds, roots, censor);
+ findTempRoots(roots, censor);
return roots;
}
@@ -341,6 +324,7 @@ static string quoteRegexChars(const string & raw)
return std::regex_replace(raw, specialRegex, R"(\$&)");
}
+#if __linux__
static void readFileRoots(const char * path, UncheckedRoots & roots)
{
try {
@@ -350,6 +334,7 @@ static void readFileRoots(const char * path, UncheckedRoots & roots)
throw;
}
}
+#endif
void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
{
@@ -431,7 +416,7 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
}
#endif
-#if defined(__linux__)
+#if __linux__
readFileRoots("/proc/sys/kernel/modprobe", unchecked);
readFileRoots("/proc/sys/kernel/fbsplash", unchecked);
readFileRoots("/proc/sys/kernel/poweroff_cmd", unchecked);
@@ -455,391 +440,397 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
struct GCLimitReached { };
-struct LocalStore::GCState
-{
- const GCOptions & options;
- GCResults & results;
- StorePathSet roots;
- StorePathSet tempRoots;
- StorePathSet dead;
- StorePathSet alive;
- bool gcKeepOutputs;
- bool gcKeepDerivations;
- uint64_t bytesInvalidated;
- bool moveToTrash = true;
- bool shouldDelete;
- GCState(const GCOptions & options, GCResults & results)
- : options(options), results(results), bytesInvalidated(0) { }
-};
-
-
-bool LocalStore::isActiveTempFile(const GCState & state,
- const Path & path, const string & suffix)
+void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
{
- return hasSuffix(path, suffix)
- && state.tempRoots.count(parseStorePath(string(path, 0, path.size() - suffix.size())));
-}
+ bool shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
+ bool gcKeepOutputs = settings.gcKeepOutputs;
+ bool gcKeepDerivations = settings.gcKeepDerivations;
+ StorePathSet roots, dead, alive;
-void LocalStore::deleteGarbage(GCState & state, const Path & path)
-{
- uint64_t bytesFreed;
- deletePath(path, bytesFreed);
- state.results.bytesFreed += bytesFreed;
-}
+ struct Shared
+ {
+ // The temp roots only store the hash part to make it easier to
+ // ignore suffixes like '.lock', '.chroot' and '.check'.
+ std::unordered_set<std::string> tempRoots;
+ // Hash part of the store path currently being deleted, if
+ // any.
+ std::optional<std::string> pending;
+ };
-void LocalStore::deletePathRecursive(GCState & state, const Path & path)
-{
- checkInterrupt();
-
- uint64_t size = 0;
-
- auto storePath = maybeParseStorePath(path);
- if (storePath && isValidPath(*storePath)) {
- StorePathSet referrers;
- queryReferrers(*storePath, referrers);
- for (auto & i : referrers)
- if (printStorePath(i) != path) deletePathRecursive(state, printStorePath(i));
- size = queryPathInfo(*storePath)->narSize;
- invalidatePathChecked(*storePath);
- }
+ Sync<Shared> _shared;
- Path realPath = realStoreDir + "/" + std::string(baseNameOf(path));
+ std::condition_variable wakeup;
- struct stat st;
- if (lstat(realPath.c_str(), &st)) {
- if (errno == ENOENT) return;
- throw SysError("getting status of %1%", realPath);
+ /* Using `--ignore-liveness' with `--delete' can have unintended
+ consequences if `keep-outputs' or `keep-derivations' are true
+ (the garbage collector will recurse into deleting the outputs
+ or derivers, respectively). So disable them. */
+ if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
+ gcKeepOutputs = false;
+ gcKeepDerivations = false;
}
- printInfo(format("deleting '%1%'") % path);
-
- state.results.paths.insert(path);
+ if (shouldDelete)
+ deletePath(reservedPath);
- /* If the path is not a regular file or symlink, move it to the
- trash directory. The move is to ensure that later (when we're
- not holding the global GC lock) we can delete the path without
- being afraid that the path has become alive again. Otherwise
- delete it right away. */
- if (state.moveToTrash && S_ISDIR(st.st_mode)) {
- // Estimate the amount freed using the narSize field. FIXME:
- // if the path was not valid, need to determine the actual
- // size.
- try {
- if (chmod(realPath.c_str(), st.st_mode | S_IWUSR) == -1)
- throw SysError("making '%1%' writable", realPath);
- Path tmp = trashDir + "/" + std::string(baseNameOf(path));
- if (rename(realPath.c_str(), tmp.c_str()))
- throw SysError("unable to rename '%1%' to '%2%'", realPath, tmp);
- state.bytesInvalidated += size;
- } catch (SysError & e) {
- if (e.errNo == ENOSPC) {
- printInfo(format("note: can't create move '%1%': %2%") % realPath % e.msg());
- deleteGarbage(state, realPath);
+ /* Acquire the global GC root. Note: we don't use fdGCLock
+ here because then in auto-gc mode, another thread could
+ downgrade our exclusive lock. */
+ auto fdGCLock = openGCLock();
+ FdLock gcLock(fdGCLock.get(), ltWrite, true, "waiting for the big garbage collector lock...");
+
+ /* Start the server for receiving new roots. */
+ auto socketPath = stateDir.get() + gcSocketPath;
+ createDirs(dirOf(socketPath));
+ auto fdServer = createUnixDomainSocket(socketPath, 0666);
+
+ if (fcntl(fdServer.get(), F_SETFL, fcntl(fdServer.get(), F_GETFL) | O_NONBLOCK) == -1)
+ throw SysError("making socket '%1%' non-blocking", socketPath);
+
+ Pipe shutdownPipe;
+ shutdownPipe.create();
+
+ std::thread serverThread([&]() {
+ Sync<std::map<int, std::thread>> connections;
+
+ Finally cleanup([&]() {
+ debug("GC roots server shutting down");
+ while (true) {
+ auto item = remove_begin(*connections.lock());
+ if (!item) break;
+ auto & [fd, thread] = *item;
+ shutdown(fd, SHUT_RDWR);
+ thread.join();
}
- }
- } else
- deleteGarbage(state, realPath);
-
- if (state.results.bytesFreed + state.bytesInvalidated > state.options.maxFreed) {
- printInfo(format("deleted or invalidated more than %1% bytes; stopping") % state.options.maxFreed);
- throw GCLimitReached();
- }
-}
-
-
-bool LocalStore::canReachRoot(GCState & state, StorePathSet & visited, const StorePath & path)
-{
- if (visited.count(path)) return false;
-
- if (state.alive.count(path)) return true;
-
- if (state.dead.count(path)) return false;
-
- if (state.roots.count(path)) {
- debug("cannot delete '%1%' because it's a root", printStorePath(path));
- state.alive.insert(path);
- return true;
- }
-
- visited.insert(path);
-
- if (!isValidPath(path)) return false;
-
- StorePathSet incoming;
-
- /* Don't delete this path if any of its referrers are alive. */
- queryReferrers(path, incoming);
-
- /* If keep-derivations is set and this is a derivation, then
- don't delete the derivation if any of the outputs are alive. */
- if (state.gcKeepDerivations && path.isDerivation()) {
- for (auto & [name, maybeOutPath] : queryPartialDerivationOutputMap(path))
- if (maybeOutPath &&
- isValidPath(*maybeOutPath) &&
- queryPathInfo(*maybeOutPath)->deriver == path
- )
- incoming.insert(*maybeOutPath);
- }
-
- /* If keep-outputs is set, then don't delete this path if there
- are derivers of this path that are not garbage. */
- if (state.gcKeepOutputs) {
- auto derivers = queryValidDerivers(path);
- for (auto & i : derivers)
- incoming.insert(i);
- }
+ });
+
+ while (true) {
+ std::vector<struct pollfd> fds;
+ fds.push_back({.fd = shutdownPipe.readSide.get(), .events = POLLIN});
+ fds.push_back({.fd = fdServer.get(), .events = POLLIN});
+ auto count = poll(fds.data(), fds.size(), -1);
+ assert(count != -1);
+
+ if (fds[0].revents)
+ /* Parent is asking us to quit. */
+ break;
+
+ if (fds[1].revents) {
+ /* Accept a new connection. */
+ assert(fds[1].revents & POLLIN);
+ AutoCloseFD fdClient = accept(fdServer.get(), nullptr, nullptr);
+ if (!fdClient) continue;
+
+ /* Process the connection in a separate thread. */
+ auto fdClient_ = fdClient.get();
+ std::thread clientThread([&, fdClient = std::move(fdClient)]() {
+ Finally cleanup([&]() {
+ auto conn(connections.lock());
+ auto i = conn->find(fdClient.get());
+ if (i != conn->end()) {
+ i->second.detach();
+ conn->erase(i);
+ }
+ });
+
+ while (true) {
+ try {
+ auto path = readLine(fdClient.get());
+ auto storePath = maybeParseStorePath(path);
+ if (storePath) {
+ debug("got new GC root '%s'", path);
+ auto hashPart = std::string(storePath->hashPart());
+ auto shared(_shared.lock());
+ shared->tempRoots.insert(hashPart);
+ /* If this path is currently being
+ deleted, then we have to wait until
+ deletion is finished to ensure that
+ the client doesn't start
+ re-creating it before we're
+ done. FIXME: ideally we would use a
+ FD for this so we don't block the
+ poll loop. */
+ while (shared->pending == hashPart) {
+ debug("synchronising with deletion of path '%s'", path);
+ shared.wait(wakeup);
+ }
+ } else
+ printError("received garbage instead of a root from client");
+ writeFull(fdClient.get(), "1", false);
+ } catch (Error &) { break; }
+ }
+ });
- for (auto & i : incoming)
- if (i != path)
- if (canReachRoot(state, visited, i)) {
- state.alive.insert(path);
- return true;
+ connections.lock()->insert({fdClient_, std::move(clientThread)});
}
+ }
+ });
- return false;
-}
-
-
-void LocalStore::tryToDelete(GCState & state, const Path & path)
-{
- checkInterrupt();
-
- auto realPath = realStoreDir + "/" + std::string(baseNameOf(path));
- if (realPath == linksDir || realPath == trashDir) return;
-
- //Activity act(*logger, lvlDebug, format("considering whether to delete '%1%'") % path);
-
- auto storePath = maybeParseStorePath(path);
-
- if (!storePath || !isValidPath(*storePath)) {
- /* A lock file belonging to a path that we're building right
- now isn't garbage. */
- if (isActiveTempFile(state, path, ".lock")) return;
+ Finally stopServer([&]() {
+ writeFull(shutdownPipe.writeSide.get(), "x", false);
+ wakeup.notify_all();
+ if (serverThread.joinable()) serverThread.join();
+ });
- /* Don't delete .chroot directories for derivations that are
- currently being built. */
- if (isActiveTempFile(state, path, ".chroot")) return;
+ /* Find the roots. Since we've grabbed the GC lock, the set of
+ permanent roots cannot increase now. */
+ printInfo("finding garbage collector roots...");
+ Roots rootMap;
+ if (!options.ignoreLiveness)
+ findRootsNoTemp(rootMap, true);
- /* Don't delete .check directories for derivations that are
- currently being built, because we may need to run
- diff-hook. */
- if (isActiveTempFile(state, path, ".check")) return;
- }
+ for (auto & i : rootMap) roots.insert(i.first);
- StorePathSet visited;
-
- if (storePath && canReachRoot(state, visited, *storePath)) {
- debug("cannot delete '%s' because it's still reachable", path);
- } else {
- /* No path we visited was a root, so everything is garbage.
- But we only delete ‘path’ and its referrers here so that
- ‘nix-store --delete’ doesn't have the unexpected effect of
- recursing into derivations and outputs. */
- for (auto & i : visited)
- state.dead.insert(i);
- if (state.shouldDelete)
- deletePathRecursive(state, path);
+ /* Read the temporary roots created before we acquired the global
+ GC root. Any new roots will be sent to our socket. */
+ Roots tempRoots;
+ findTempRoots(tempRoots, true);
+ for (auto & root : tempRoots) {
+ _shared.lock()->tempRoots.insert(std::string(root.first.hashPart()));
+ roots.insert(root.first);
}
-}
+ /* Helper function that deletes a path from the store and throws
+ GCLimitReached if we've deleted enough garbage. */
+ auto deleteFromStore = [&](std::string_view baseName)
+ {
+ Path path = storeDir + "/" + std::string(baseName);
+ Path realPath = realStoreDir + "/" + std::string(baseName);
-/* Unlink all files in /nix/store/.links that have a link count of 1,
- which indicates that there are no other links and so they can be
- safely deleted. FIXME: race condition with optimisePath(): we
- might see a link count of 1 just before optimisePath() increases
- the link count. */
-void LocalStore::removeUnusedLinks(const GCState & state)
-{
- AutoCloseDir dir(opendir(linksDir.c_str()));
- if (!dir) throw SysError("opening directory '%1%'", linksDir);
-
- int64_t actualSize = 0, unsharedSize = 0;
+ printInfo("deleting '%1%'", path);
- struct dirent * dirent;
- while (errno = 0, dirent = readdir(dir.get())) {
- checkInterrupt();
- string name = dirent->d_name;
- if (name == "." || name == "..") continue;
- Path path = linksDir + "/" + name;
+ results.paths.insert(path);
- auto st = lstat(path);
+ uint64_t bytesFreed;
+ deletePath(realPath, bytesFreed);
+ results.bytesFreed += bytesFreed;
- if (st.st_nlink != 1) {
- actualSize += st.st_size;
- unsharedSize += (st.st_nlink - 1) * st.st_size;
- continue;
+ if (results.bytesFreed > options.maxFreed) {
+ printInfo("deleted more than %d bytes; stopping", options.maxFreed);
+ throw GCLimitReached();
}
+ };
- printMsg(lvlTalkative, format("deleting unused link '%1%'") % path);
-
- if (unlink(path.c_str()) == -1)
- throw SysError("deleting '%1%'", path);
+ std::map<StorePath, StorePathSet> referrersCache;
- state.results.bytesFreed += st.st_size;
- }
+ /* Helper function that visits all paths reachable from `start`
+ via the referrers edges and optionally derivers and derivation
+ output edges. If none of those paths are roots, then all
+ visited paths are garbage and are deleted. */
+ auto deleteReferrersClosure = [&](const StorePath & start) {
+ StorePathSet visited;
+ std::queue<StorePath> todo;
- struct stat st;
- if (stat(linksDir.c_str(), &st) == -1)
- throw SysError("statting '%1%'", linksDir);
- int64_t overhead = st.st_blocks * 512ULL;
+ /* Wake up any GC client waiting for deletion of the paths in
+ 'visited' to finish. */
+ Finally releasePending([&]() {
+ auto shared(_shared.lock());
+ shared->pending.reset();
+ wakeup.notify_all();
+ });
- printInfo("note: currently hard linking saves %.2f MiB",
- ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
-}
+ auto enqueue = [&](const StorePath & path) {
+ if (visited.insert(path).second)
+ todo.push(path);
+ };
+ enqueue(start);
-void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
-{
- GCState state(options, results);
- state.gcKeepOutputs = settings.gcKeepOutputs;
- state.gcKeepDerivations = settings.gcKeepDerivations;
+ while (auto path = pop(todo)) {
+ checkInterrupt();
- /* Using `--ignore-liveness' with `--delete' can have unintended
- consequences if `keep-outputs' or `keep-derivations' are true
- (the garbage collector will recurse into deleting the outputs
- or derivers, respectively). So disable them. */
- if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
- state.gcKeepOutputs = false;
- state.gcKeepDerivations = false;
- }
+ /* Bail out if we've previously discovered that this path
+ is alive. */
+ if (alive.count(*path)) {
+ alive.insert(start);
+ return;
+ }
- state.shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
+ /* If we've previously deleted this path, we don't have to
+ handle it again. */
+ if (dead.count(*path)) continue;
- if (state.shouldDelete)
- deletePath(reservedPath);
+ auto markAlive = [&]()
+ {
+ alive.insert(*path);
+ alive.insert(start);
+ try {
+ StorePathSet closure;
+ computeFSClosure(*path, closure);
+ for (auto & p : closure)
+ alive.insert(p);
+ } catch (InvalidPath &) { }
+ };
+
+ /* If this is a root, bail out. */
+ if (roots.count(*path)) {
+ debug("cannot delete '%s' because it's a root", printStorePath(*path));
+ return markAlive();
+ }
- /* Acquire the global GC root. This prevents
- a) New roots from being added.
- b) Processes from creating new temporary root files. */
- AutoCloseFD fdGCLock = openGCLock(ltWrite);
+ if (options.action == GCOptions::gcDeleteSpecific
+ && !options.pathsToDelete.count(*path))
+ return;
- /* Find the roots. Since we've grabbed the GC lock, the set of
- permanent roots cannot increase now. */
- printInfo("finding garbage collector roots...");
- Roots rootMap;
- if (!options.ignoreLiveness)
- findRootsNoTemp(rootMap, true);
+ {
+ auto hashPart = std::string(path->hashPart());
+ auto shared(_shared.lock());
+ if (shared->tempRoots.count(hashPart)) {
+ debug("cannot delete '%s' because it's a temporary root", printStorePath(*path));
+ return markAlive();
+ }
+ shared->pending = hashPart;
+ }
- for (auto & i : rootMap) state.roots.insert(i.first);
+ if (isValidPath(*path)) {
- /* Read the temporary roots. This acquires read locks on all
- per-process temporary root files. So after this point no paths
- can be added to the set of temporary roots. */
- FDs fds;
- Roots tempRoots;
- findTempRoots(fds, tempRoots, true);
- for (auto & root : tempRoots) {
- state.tempRoots.insert(root.first);
- state.roots.insert(root.first);
- }
+ /* Visit the referrers of this path. */
+ auto i = referrersCache.find(*path);
+ if (i == referrersCache.end()) {
+ StorePathSet referrers;
+ queryReferrers(*path, referrers);
+ referrersCache.emplace(*path, std::move(referrers));
+ i = referrersCache.find(*path);
+ }
+ for (auto & p : i->second)
+ enqueue(p);
+
+ /* If keep-derivations is set and this is a
+ derivation, then visit the derivation outputs. */
+ if (gcKeepDerivations && path->isDerivation()) {
+ for (auto & [name, maybeOutPath] : queryPartialDerivationOutputMap(*path))
+ if (maybeOutPath &&
+ isValidPath(*maybeOutPath) &&
+ queryPathInfo(*maybeOutPath)->deriver == *path)
+ enqueue(*maybeOutPath);
+ }
- /* After this point the set of roots or temporary roots cannot
- increase, since we hold locks on everything. So everything
- that is not reachable from `roots' is garbage. */
+ /* If keep-outputs is set, then visit the derivers. */
+ if (gcKeepOutputs) {
+ auto derivers = queryValidDerivers(*path);
+ for (auto & i : derivers)
+ enqueue(i);
+ }
+ }
+ }
- if (state.shouldDelete) {
- if (pathExists(trashDir)) deleteGarbage(state, trashDir);
- try {
- createDirs(trashDir);
- } catch (SysError & e) {
- if (e.errNo == ENOSPC) {
- printInfo("note: can't create trash directory: %s", e.msg());
- state.moveToTrash = false;
+ for (auto & path : topoSortPaths(visited)) {
+ if (!dead.insert(path).second) continue;
+ if (shouldDelete) {
+ invalidatePathChecked(path);
+ deleteFromStore(path.to_string());
+ referrersCache.erase(path);
}
}
- }
+ };
- /* Now either delete all garbage paths, or just the specified
- paths (for gcDeleteSpecific). */
+ /* Synchronisation point for testing, see tests/gc-concurrent.sh. */
+ if (auto p = getEnv("_NIX_TEST_GC_SYNC"))
+ readFile(*p);
+ /* Either delete all garbage paths, or just the specified
+ paths (for gcDeleteSpecific). */
if (options.action == GCOptions::gcDeleteSpecific) {
for (auto & i : options.pathsToDelete) {
- tryToDelete(state, printStorePath(i));
- if (state.dead.find(i) == state.dead.end())
+ deleteReferrersClosure(i);
+ if (!dead.count(i))
throw Error(
- "cannot delete path '%1%' since it is still alive. "
- "To find out why use: "
+ "Cannot delete path '%1%' since it is still alive. "
+ "To find out why, use: "
"nix-store --query --roots",
printStorePath(i));
}
} else if (options.maxFreed > 0) {
- if (state.shouldDelete)
+ if (shouldDelete)
printInfo("deleting garbage...");
else
printInfo("determining live/dead paths...");
try {
-
- AutoCloseDir dir(opendir(realStoreDir.c_str()));
+ AutoCloseDir dir(opendir(realStoreDir.get().c_str()));
if (!dir) throw SysError("opening directory '%1%'", realStoreDir);
- /* Read the store and immediately delete all paths that
- aren't valid. When using --max-freed etc., deleting
- invalid paths is preferred over deleting unreachable
- paths, since unreachable paths could become reachable
- again. We don't use readDirectory() here so that GCing
- can start faster. */
+ /* Read the store and delete all paths that are invalid or
+ unreachable. We don't use readDirectory() here so that
+ GCing can start faster. */
+ auto linksName = baseNameOf(linksDir);
Paths entries;
struct dirent * dirent;
while (errno = 0, dirent = readdir(dir.get())) {
checkInterrupt();
string name = dirent->d_name;
- if (name == "." || name == "..") continue;
- Path path = storeDir + "/" + name;
- auto storePath = maybeParseStorePath(path);
- if (storePath && isValidPath(*storePath))
- entries.push_back(path);
- else
- tryToDelete(state, path);
- }
-
- dir.reset();
-
- /* Now delete the unreachable valid paths. Randomise the
- order in which we delete entries to make the collector
- less biased towards deleting paths that come
- alphabetically first (e.g. /nix/store/000...). This
- matters when using --max-freed etc. */
- vector<Path> entries_(entries.begin(), entries.end());
- std::mt19937 gen(1);
- std::shuffle(entries_.begin(), entries_.end(), gen);
+ if (name == "." || name == ".." || name == linksName) continue;
- for (auto & i : entries_)
- tryToDelete(state, i);
+ if (auto storePath = maybeParseStorePath(storeDir + "/" + name))
+ deleteReferrersClosure(*storePath);
+ else
+ deleteFromStore(name);
+ }
} catch (GCLimitReached & e) {
}
}
- if (state.options.action == GCOptions::gcReturnLive) {
- for (auto & i : state.alive)
- state.results.paths.insert(printStorePath(i));
+ if (options.action == GCOptions::gcReturnLive) {
+ for (auto & i : alive)
+ results.paths.insert(printStorePath(i));
return;
}
- if (state.options.action == GCOptions::gcReturnDead) {
- for (auto & i : state.dead)
- state.results.paths.insert(printStorePath(i));
+ if (options.action == GCOptions::gcReturnDead) {
+ for (auto & i : dead)
+ results.paths.insert(printStorePath(i));
return;
}
- /* Allow other processes to add to the store from here on. */
- fdGCLock = -1;
- fds.clear();
-
- /* Delete the trash directory. */
- printInfo(format("deleting '%1%'") % trashDir);
- deleteGarbage(state, trashDir);
-
- /* Clean up the links directory. */
+ /* Unlink all files in /nix/store/.links that have a link count of 1,
+ which indicates that there are no other links and so they can be
+ safely deleted. FIXME: race condition with optimisePath(): we
+ might see a link count of 1 just before optimisePath() increases
+ the link count. */
if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
printInfo("deleting unused links...");
- removeUnusedLinks(state);
+
+ AutoCloseDir dir(opendir(linksDir.c_str()));
+ if (!dir) throw SysError("opening directory '%1%'", linksDir);
+
+ int64_t actualSize = 0, unsharedSize = 0;
+
+ struct dirent * dirent;
+ while (errno = 0, dirent = readdir(dir.get())) {
+ checkInterrupt();
+ string name = dirent->d_name;
+ if (name == "." || name == "..") continue;
+ Path path = linksDir + "/" + name;
+
+ auto st = lstat(path);
+
+ if (st.st_nlink != 1) {
+ actualSize += st.st_size;
+ unsharedSize += (st.st_nlink - 1) * st.st_size;
+ continue;
+ }
+
+ printMsg(lvlTalkative, format("deleting unused link '%1%'") % path);
+
+ if (unlink(path.c_str()) == -1)
+ throw SysError("deleting '%1%'", path);
+
+ results.bytesFreed += st.st_size;
+ }
+
+ struct stat st;
+ if (stat(linksDir.c_str(), &st) == -1)
+ throw SysError("statting '%1%'", linksDir);
+ int64_t overhead = st.st_blocks * 512ULL;
+
+ printInfo("note: currently hard linking saves %.2f MiB",
+ ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
}
/* While we're at it, vacuum the database. */
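
`deleteReferrersClosure` is the heart of the rewrite: a breadth-first walk over referrer (and optionally deriver/output) edges that either reaches a root, proving the start path alive, or exhausts the closure, proving it all dead. The core walk in miniature (a sketch with `std::string` standing in for `StorePath` and a toy edge map):

    #include <map>
    #include <queue>
    #include <set>
    #include <string>
    #include <vector>

    using Path = std::string;

    // Returns the dead closure of `start`, or {} if a root is reachable.
    std::set<Path> referrersClosure(
        const Path & start,
        const std::map<Path, std::vector<Path>> & referrers,
        const std::set<Path> & roots)
    {
        std::set<Path> visited;
        std::queue<Path> todo;
        visited.insert(start);
        todo.push(start);
        while (!todo.empty()) {
            Path p = todo.front(); todo.pop();
            if (roots.count(p)) return {}; // alive: nothing here is garbage
            auto i = referrers.find(p);
            if (i == referrers.end()) continue;
            for (auto & q : i->second)
                if (visited.insert(q).second) todo.push(q);
        }
        return visited; // no root reached: the whole closure is dead
    }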
@@ -856,7 +847,7 @@ void LocalStore::autoGC(bool sync)
return std::stoll(readFile(*fakeFreeSpaceFile));
struct statvfs st;
- if (statvfs(realStoreDir.c_str(), &st))
+ if (statvfs(realStoreDir.get().c_str(), &st))
throw SysError("getting filesystem info about '%s'", realStoreDir);
return (uint64_t) st.f_bavail * st.f_frsize;
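
The free-space probe used by `autoGC()`, isolated into a standalone helper (a sketch mirroring the statvfs call above):

    #include <sys/statvfs.h>
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    uint64_t freeSpace(const std::string & path)
    {
        struct statvfs st;
        if (statvfs(path.c_str(), &st))
            throw std::runtime_error("getting filesystem info about '" + path + "'");
        // f_bavail: blocks available to unprivileged users;
        // f_frsize: fundamental block size in bytes.
        return (uint64_t) st.f_bavail * st.f_frsize;
    }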
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index d3b27d7be..9f1a88130 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -148,7 +148,8 @@ StringSet Settings::getDefaultExtraPlatforms()
// machines. Note that we can’t force processes from executing
// x86_64 in aarch64 environments or vice versa since they can
// always exec with their own binary preferences.
- if (pathExists("/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist")) {
+ if (pathExists("/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist") ||
+ pathExists("/System/Library/LaunchDaemons/com.apple.oahd.plist")) {
if (std::string{SYSTEM} == "x86_64-darwin")
extraPlatforms.insert("aarch64-darwin");
else if (std::string{SYSTEM} == "aarch64-darwin")
@@ -159,21 +160,16 @@ StringSet Settings::getDefaultExtraPlatforms()
return extraPlatforms;
}
-bool Settings::isExperimentalFeatureEnabled(const std::string & name)
+bool Settings::isExperimentalFeatureEnabled(const ExperimentalFeature & feature)
{
auto & f = experimentalFeatures.get();
- return std::find(f.begin(), f.end(), name) != f.end();
+ return std::find(f.begin(), f.end(), feature) != f.end();
}
-MissingExperimentalFeature::MissingExperimentalFeature(std::string feature)
- : Error("experimental Nix feature '%1%' is disabled; use '--experimental-features %1%' to override", feature)
- , missingFeature(feature)
- {}
-
-void Settings::requireExperimentalFeature(const std::string & name)
+void Settings::requireExperimentalFeature(const ExperimentalFeature & feature)
{
- if (!isExperimentalFeatureEnabled(name))
- throw MissingExperimentalFeature(name);
+ if (!isExperimentalFeatureEnabled(feature))
+ throw MissingExperimentalFeature(feature);
}
bool Settings::isWSL1()
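
Replacing the string-typed feature names with an `ExperimentalFeature` enum moves typos from runtime to compile time. A minimal model of the new scheme (a sketch; the real enum and parser live in experimental-features.hh, which is not shown in this section):

    #include <optional>
    #include <set>
    #include <string>

    enum struct ExperimentalFeature { CaDerivations, Flakes, NixCommand };
    using Xp = ExperimentalFeature;

    // Map a user-supplied name to the enum; unknown names yield nullopt.
    std::optional<Xp> parseFeature(const std::string & name)
    {
        if (name == "ca-derivations") return Xp::CaDerivations;
        if (name == "flakes") return Xp::Flakes;
        if (name == "nix-command") return Xp::NixCommand;
        return std::nullopt;
    }

    bool isEnabled(const std::set<Xp> & enabled, Xp f)
    {
        return enabled.count(f) != 0;
    }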
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 6f8749254..a50eb6803 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -3,6 +3,7 @@
#include "types.hh"
#include "config.hh"
#include "util.hh"
+#include "experimental-features.hh"
#include <map>
#include <limits>
@@ -45,15 +46,6 @@ struct PluginFilesSetting : public BaseSetting<Paths>
void set(const std::string & str, bool append = false) override;
};
-class MissingExperimentalFeature: public Error
-{
-public:
- std::string missingFeature;
-
- MissingExperimentalFeature(std::string feature);
- virtual const char* sname() const override { return "MissingExperimentalFeature"; }
-};
-
class Settings : public Config {
unsigned int getDefaultCores();
@@ -617,8 +609,10 @@ public:
Strings{"https://cache.nixos.org/"},
"substituters",
R"(
- A list of URLs of substituters, separated by whitespace. The default
- is `https://cache.nixos.org`.
+ A list of URLs of substituters, separated by whitespace. Substituters
+ are tried based on their priority value, which each substituter can set
+ independently. A lower value means a higher priority.
+ The default is `https://cache.nixos.org`, with a priority of 40.
)",
{"binary-caches"}};
@@ -923,12 +917,12 @@ public:
value.
)"};
- Setting<Strings> experimentalFeatures{this, {}, "experimental-features",
+ Setting<std::set<ExperimentalFeature>> experimentalFeatures{this, {}, "experimental-features",
"Experimental Nix features to enable."};
- bool isExperimentalFeatureEnabled(const std::string & name);
+ bool isExperimentalFeatureEnabled(const ExperimentalFeature &);
- void requireExperimentalFeature(const std::string & name);
+ void requireExperimentalFeature(const ExperimentalFeature &);
Setting<bool> allowDirty{this, true, "allow-dirty",
"Whether to allow dirty Git/Mercurial trees."};
@@ -954,6 +948,12 @@ public:
resolves to a different location from that of the build machine. You
can enable this setting if you are sure you're not going to do that.
)"};
+
+ Setting<bool> useRegistries{this, true, "use-registries",
+ "Whether to use flake registries to resolve flake references."};
+
+ Setting<bool> acceptFlakeConfig{this, false, "accept-flake-config",
+ "Whether to accept nix configuration from a flake without prompting."};
};
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index 0a3afcd51..605ec4b28 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -57,8 +57,8 @@ public:
{
// FIXME: do this lazily?
if (auto cacheInfo = diskCache->cacheExists(cacheUri)) {
- wantMassQuery.setDefault(cacheInfo->wantMassQuery ? "true" : "false");
- priority.setDefault(fmt("%d", cacheInfo->priority));
+ wantMassQuery.setDefault(cacheInfo->wantMassQuery);
+ priority.setDefault(cacheInfo->priority);
} else {
try {
BinaryCacheStore::init();
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index edaf75136..4861d185e 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -82,9 +82,20 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION;
conn->to.flush();
- unsigned int magic = readInt(conn->from);
- if (magic != SERVE_MAGIC_2)
- throw Error("protocol mismatch with 'nix-store --serve' on '%s'", host);
+ StringSink saved;
+ try {
+ TeeSource tee(conn->from, saved);
+ unsigned int magic = readInt(tee);
+ if (magic != SERVE_MAGIC_2)
+ throw Error("'nix-store --serve' protocol mismatch from '%s'", host);
+ } catch (SerialisationError & e) {
+ /* In case the other side is waiting for our input,
+ close it. */
+ conn->sshConn->in.close();
+ auto msg = conn->from.drain();
+ throw Error("'nix-store --serve' protocol mismatch from '%s', got '%s'",
+ host, chomp(*saved.s + msg));
+ }
conn->remoteVersion = readInt(conn->from);
if (GET_PROTOCOL_MAJOR(conn->remoteVersion) != 0x200)
throw Error("unsupported 'nix-store --serve' protocol version on '%s'", host);
@@ -216,7 +227,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method, HashType hashAlgo,
- PathFilter & filter, RepairFlag repair) override
+ PathFilter & filter, RepairFlag repair, const StorePathSet & references) override
{ unsupported("addToStore"); }
StorePath addTextToStore(const string & name, const string & s,
@@ -237,6 +248,10 @@ private:
conn.to
<< settings.buildRepeat
<< settings.enforceDeterminism;
+
+ if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 7) {
+ conn.to << ((int) settings.keepFailed);
+ }
}
public:
@@ -267,8 +282,11 @@ public:
return status;
}
- void buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode) override
+ void buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override
{
+ if (evalStore && evalStore.get() != this)
+ throw Error("building on an SSH store is incompatible with '--eval-store'");
+
auto conn(connections->get());
conn->to << cmdBuildPaths;
@@ -276,10 +294,10 @@ public:
for (auto & p : drvPaths) {
auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
std::visit(overloaded {
- [&](StorePathWithOutputs s) {
+ [&](const StorePathWithOutputs & s) {
ss.push_back(s.to_string(*this));
},
- [&](StorePath drvPath) {
+ [&](const StorePath & drvPath) {
throw Error("wanted to fetch '%s' but the legacy ssh protocol doesn't support merely substituting drv files via the build paths command. It would build them instead. Try using ssh-ng://", printStorePath(drvPath));
},
}, sOrDrvPath);
@@ -349,7 +367,8 @@ public:
return conn->remoteVersion;
}
- std::optional<const Realisation> queryRealisation(const DrvOutput&) override
+ void queryRealisationUncached(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override
// TODO: Implement
{ unsupported("queryRealisation"); }
};
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
index 964c4017e..f93111fce 100644
--- a/src/libstore/local-binary-cache-store.cc
+++ b/src/libstore/local-binary-cache-store.cc
@@ -93,7 +93,7 @@ protected:
void LocalBinaryCacheStore::init()
{
createDirs(binaryCacheDir + "/nar");
- createDirs(binaryCacheDir + realisationsPrefix);
+ createDirs(binaryCacheDir + "/" + realisationsPrefix);
if (writeDebugInfo)
createDirs(binaryCacheDir + "/debuginfo");
BinaryCacheStore::init();
diff --git a/src/libstore/local-fs-store.hh b/src/libstore/local-fs-store.hh
index 55941b771..f8b19d00d 100644
--- a/src/libstore/local-fs-store.hh
+++ b/src/libstore/local-fs-store.hh
@@ -18,6 +18,9 @@ struct LocalFSStoreConfig : virtual StoreConfig
const PathSetting logDir{(StoreConfig*) this, false,
rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
"log", "directory where Nix will store state"};
+ const PathSetting realStoreDir{(StoreConfig*) this, false,
+ rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
+ "physical path to the Nix store"};
};
class LocalFSStore : public virtual LocalFSStoreConfig, public virtual Store
@@ -34,7 +37,7 @@ public:
/* Register a permanent GC root. */
Path addPermRoot(const StorePath & storePath, const Path & gcRoot);
- virtual Path getRealStoreDir() { return storeDir; }
+ virtual Path getRealStoreDir() { return realStoreDir; }
Path toRealPath(const Path & storePath) override
{
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 83daa7506..64019314f 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -53,12 +53,15 @@ struct LocalStore::State::Stmts {
SQLiteStmt InvalidatePath;
SQLiteStmt AddDerivationOutput;
SQLiteStmt RegisterRealisedOutput;
+ SQLiteStmt UpdateRealisedOutput;
SQLiteStmt QueryValidDerivers;
SQLiteStmt QueryDerivationOutputs;
SQLiteStmt QueryRealisedOutput;
SQLiteStmt QueryAllRealisedOutputs;
SQLiteStmt QueryPathFromHashPart;
SQLiteStmt QueryValidPaths;
+ SQLiteStmt QueryRealisationReferences;
+ SQLiteStmt AddRealisationReference;
};
int getSchema(Path schemaPath)
@@ -76,7 +79,7 @@ int getSchema(Path schemaPath)
void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
{
- const int nixCASchemaVersion = 1;
+ const int nixCASchemaVersion = 2;
int curCASchema = getSchema(schemaPath);
if (curCASchema != nixCASchemaVersion) {
if (curCASchema > nixCASchemaVersion) {
@@ -94,7 +97,39 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
#include "ca-specific-schema.sql.gen.hh"
;
db.exec(schema);
+ curCASchema = nixCASchemaVersion;
}
+
+ if (curCASchema < 2) {
+ SQLiteTxn txn(db);
+ // Ugly little SQL dance to add a new `id` column and make it the primary key
+ db.exec(R"(
+ create table Realisations2 (
+ id integer primary key autoincrement not null,
+ drvPath text not null,
+ outputName text not null, -- symbolic output id, usually "out"
+ outputPath integer not null,
+ signatures text, -- space-separated list
+ foreign key (outputPath) references ValidPaths(id) on delete cascade
+ );
+ insert into Realisations2 (drvPath, outputName, outputPath, signatures)
+ select drvPath, outputName, outputPath, signatures from Realisations;
+ drop table Realisations;
+ alter table Realisations2 rename to Realisations;
+ )");
+ db.exec(R"(
+ create index if not exists IndexRealisations on Realisations(drvPath, outputName);
+
+ create table if not exists RealisationsRefs (
+ referrer integer not null,
+ realisationReference integer,
+ foreign key (referrer) references Realisations(id) on delete cascade,
+ foreign key (realisationReference) references Realisations(id) on delete restrict
+ );
+ )");
+ txn.commit();
+ }
+
writeFile(schemaPath, fmt("%d", nixCASchemaVersion));
lockFile(lockFd.get(), ltRead, true);
}
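SQLite cannot add a primary-key column to an existing table in place, hence the create/copy/drop/rename sequence above. The same pattern reduced to a self-contained sketch against the raw sqlite3 C API (table and column names are illustrative):

    #include <sqlite3.h>
    #include <stdexcept>
    #include <string>

    // Rebuild table `t` with a new autoincrement primary key, keeping its rows.
    void addIdColumn(sqlite3 * db)
    {
        const char * sql =
            "begin;"
            "create table t2 (id integer primary key autoincrement not null, x text);"
            "insert into t2 (x) select x from t;"
            "drop table t;"
            "alter table t2 rename to t;"
            "commit;";
        char * err = nullptr;
        if (sqlite3_exec(db, sql, nullptr, nullptr, &err) != SQLITE_OK) {
            std::string msg = err ? err : "unknown error";
            sqlite3_free(err);
            throw std::runtime_error("migration failed: " + msg);
        }
    }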
@@ -106,14 +141,10 @@ LocalStore::LocalStore(const Params & params)
, LocalStoreConfig(params)
, Store(params)
, LocalFSStore(params)
- , realStoreDir_{this, false, rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
- "physical path to the Nix store"}
- , realStoreDir(realStoreDir_)
, dbDir(stateDir + "/db")
, linksDir(realStoreDir + "/.links")
, reservedPath(dbDir + "/reserved")
, schemaPath(dbDir + "/schema")
- , trashDir(realStoreDir + "/trash")
, tempRootsDir(stateDir + "/temproots")
, fnTempRoots(fmt("%s/%d", tempRootsDir, getpid()))
, locksHeld(tokenizeString<PathSet>(getEnv("NIX_HELD_LOCKS").value_or("")))
@@ -153,13 +184,13 @@ LocalStore::LocalStore(const Params & params)
printError("warning: the group '%1%' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
else {
struct stat st;
- if (stat(realStoreDir.c_str(), &st))
+ if (stat(realStoreDir.get().c_str(), &st))
throw SysError("getting attributes of path '%1%'", realStoreDir);
if (st.st_uid != 0 || st.st_gid != gr->gr_gid || (st.st_mode & ~S_IFMT) != perm) {
- if (chown(realStoreDir.c_str(), 0, gr->gr_gid) == -1)
+ if (chown(realStoreDir.get().c_str(), 0, gr->gr_gid) == -1)
throw SysError("changing ownership of path '%1%'", realStoreDir);
- if (chmod(realStoreDir.c_str(), perm) == -1)
+ if (chmod(realStoreDir.get().c_str(), perm) == -1)
throw SysError("changing permissions on path '%1%'", realStoreDir);
}
}
@@ -277,7 +308,7 @@ LocalStore::LocalStore(const Params & params)
else openDB(*state, false);
- if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
migrateCASchema(state->db, dbDir + "/ca-schema", globalLock);
}
@@ -307,16 +338,25 @@ LocalStore::LocalStore(const Params & params)
state->stmts->QueryPathFromHashPart.create(state->db,
"select path from ValidPaths where path >= ? limit 1;");
state->stmts->QueryValidPaths.create(state->db, "select path from ValidPaths");
- if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
state->stmts->RegisterRealisedOutput.create(state->db,
R"(
- insert or replace into Realisations (drvPath, outputName, outputPath, signatures)
+ insert into Realisations (drvPath, outputName, outputPath, signatures)
values (?, ?, (select id from ValidPaths where path = ?), ?)
;
)");
+ state->stmts->UpdateRealisedOutput.create(state->db,
+ R"(
+ update Realisations
+ set signatures = ?
+ where
+ drvPath = ? and
+ outputName = ?
+ ;
+ )");
state->stmts->QueryRealisedOutput.create(state->db,
R"(
- select Output.path, Realisations.signatures from Realisations
+ select Realisations.id, Output.path, Realisations.signatures from Realisations
inner join ValidPaths as Output on Output.id = Realisations.outputPath
where drvPath = ? and outputName = ?
;
@@ -328,10 +368,33 @@ LocalStore::LocalStore(const Params & params)
where drvPath = ?
;
)");
+ state->stmts->QueryRealisationReferences.create(state->db,
+ R"(
+ select drvPath, outputName from Realisations
+ join RealisationsRefs on realisationReference = Realisations.id
+ where referrer = ?;
+ )");
+ state->stmts->AddRealisationReference.create(state->db,
+ R"(
+ insert or replace into RealisationsRefs (referrer, realisationReference)
+ values (
+ (select id from Realisations where drvPath = ? and outputName = ?),
+ (select id from Realisations where drvPath = ? and outputName = ?));
+ )");
}
}
+AutoCloseFD LocalStore::openGCLock()
+{
+ Path fnGCLock = stateDir + "/gc.lock";
+ auto fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
+ if (fdGCLock == -1)
+ throw SysError("opening global GC lock '%1%'", fnGCLock);
+ return fdGCLock;
+}
+
+
LocalStore::~LocalStore()
{
std::shared_future<void> future;
@@ -437,14 +500,11 @@ void LocalStore::makeStoreWritable()
if (getuid() != 0) return;
/* Check if /nix/store is on a read-only mount. */
struct statvfs stat;
- if (statvfs(realStoreDir.c_str(), &stat) != 0)
+ if (statvfs(realStoreDir.get().c_str(), &stat) != 0)
throw SysError("getting info about the Nix store mount point");
if (stat.f_flag & ST_RDONLY) {
- if (unshare(CLONE_NEWNS) == -1)
- throw SysError("setting up a private mount namespace");
-
- if (mount(0, realStoreDir.c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
+ if (mount(0, realStoreDir.get().c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
throw SysError("remounting %1% writable", realStoreDir);
}
#endif
@@ -627,7 +687,7 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
std::optional<Hash> h;
for (auto & i : drv.outputs) {
std::visit(overloaded {
- [&](DerivationOutputInputAddressed doia) {
+ [&](const DerivationOutputInputAddressed & doia) {
if (!h) {
// somewhat expensive so we do lazily
auto temp = hashDerivationModulo(*this, drv, true);
@@ -639,14 +699,14 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
printStorePath(drvPath), printStorePath(doia.path), printStorePath(recomputed));
envHasRightPath(doia.path, i.first);
},
- [&](DerivationOutputCAFixed dof) {
+ [&](const DerivationOutputCAFixed & dof) {
StorePath path = makeFixedOutputPath(dof.hash.method, dof.hash.hash, drvName);
envHasRightPath(path, i.first);
},
- [&](DerivationOutputCAFloating _) {
+ [&](const DerivationOutputCAFloating &) {
/* Nothing to check */
},
- [&](DerivationOutputDeferred) {
+ [&](const DerivationOutputDeferred &) {
},
}, i.second.output);
}
@@ -654,7 +714,7 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs)
{
- settings.requireExperimentalFeature("ca-derivations");
+ settings.requireExperimentalFeature(Xp::CaDerivations);
if (checkSigs == NoCheckSigs || !realisationIsUntrusted(info))
registerDrvOutput(info);
else
@@ -663,15 +723,55 @@ void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag check
void LocalStore::registerDrvOutput(const Realisation & info)
{
- settings.requireExperimentalFeature("ca-derivations");
- auto state(_state.lock());
+ settings.requireExperimentalFeature(Xp::CaDerivations);
retrySQLite<void>([&]() {
- state->stmts->RegisterRealisedOutput.use()
- (info.id.strHash())
- (info.id.outputName)
- (printStorePath(info.outPath))
- (concatStringsSep(" ", info.signatures))
- .exec();
+ auto state(_state.lock());
+ if (auto oldR = queryRealisation_(*state, info.id)) {
+ if (info.isCompatibleWith(*oldR)) {
+ auto combinedSignatures = oldR->signatures;
+ combinedSignatures.insert(info.signatures.begin(),
+ info.signatures.end());
+ state->stmts->UpdateRealisedOutput.use()
+ (concatStringsSep(" ", combinedSignatures))
+ (info.id.strHash())
+ (info.id.outputName)
+ .exec();
+ } else {
+ throw Error("Trying to register a realisation of '%s', but we already "
+ "have another one locally.\n"
+ "Local: %s\n"
+ "Remote: %s",
+ info.id.to_string(),
+ printStorePath(oldR->outPath),
+ printStorePath(info.outPath)
+ );
+ }
+ } else {
+ state->stmts->RegisterRealisedOutput.use()
+ (info.id.strHash())
+ (info.id.outputName)
+ (printStorePath(info.outPath))
+ (concatStringsSep(" ", info.signatures))
+ .exec();
+ }
+ for (auto & [outputId, depPath] : info.dependentRealisations) {
+ auto localRealisation = queryRealisationCore_(*state, outputId);
+ if (!localRealisation)
+ throw Error("unable to register the derivation '%s' as it "
+ "depends on the non existent '%s'",
+ info.id.to_string(), outputId.to_string());
+ if (localRealisation->second.outPath != depPath)
+ throw Error("unable to register the derivation '%s' as it "
+ "depends on a realisation of '%s' that doesn’t"
+ "match what we have locally",
+ info.id.to_string(), outputId.to_string());
+ state->stmts->AddRealisationReference.use()
+ (info.id.strHash())
+ (info.id.outputName)
+ (outputId.strHash())
+ (outputId.outputName)
+ .exec();
+ }
});
}
@@ -731,7 +831,7 @@ uint64_t LocalStore::addValidPath(State & state,
{
auto state_(Store::state.lock());
- state_->pathInfoCache.upsert(std::string(info.path.hashPart()),
+ state_->pathInfoCache.upsert(std::string(info.path.to_string()),
PathInfoCacheValue{ .value = std::make_shared<const ValidPathInfo>(info) });
}
@@ -909,7 +1009,7 @@ LocalStore::queryPartialDerivationOutputMap(const StorePath & path_)
return outputs;
});
- if (!settings.isExperimentalFeatureEnabled("ca-derivations"))
+ if (!settings.isExperimentalFeatureEnabled(Xp::CaDerivations))
return outputs;
auto drv = readInvalidDerivation(path);
@@ -977,14 +1077,19 @@ StorePathSet LocalStore::querySubstitutablePaths(const StorePathSet & paths)
}
+// FIXME: move this, it's not specific to LocalStore.
void LocalStore::querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos)
{
if (!settings.useSubstitutes) return;
for (auto & sub : getDefaultSubstituters()) {
for (auto & path : paths) {
+ if (infos.count(path.first))
+ // Choose first succeeding substituter.
+ continue;
+
auto subPath(path.first);
- // recompute store path so that we can use a different store root
+ // Recompute store path so that we can use a different store root.
if (path.second) {
subPath = makeFixedOutputPathFromCA(path.first.name(), *path.second);
if (sub->storeDir == storeDir)
@@ -1099,7 +1204,7 @@ void LocalStore::invalidatePath(State & state, const StorePath & path)
{
auto state_(Store::state.lock());
- state_->pathInfoCache.erase(std::string(path.hashPart()));
+ state_->pathInfoCache.erase(std::string(path.to_string()));
}
}
@@ -1145,24 +1250,15 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
deletePath(realPath);
- // text hashing has long been allowed to have non-self-references because it is used for drv files.
- bool refersToSelf = info.references.count(info.path) > 0;
- if (info.ca.has_value() && !info.references.empty() && !(std::holds_alternative<TextHash>(*info.ca) && !refersToSelf))
- settings.requireExperimentalFeature("ca-references");
-
/* While restoring the path from the NAR, compute the hash
of the NAR. */
- std::unique_ptr<AbstractHashSink> hashSink;
- if (!info.ca.has_value() || !info.references.count(info.path))
- hashSink = std::make_unique<HashSink>(htSHA256);
- else
- hashSink = std::make_unique<HashModuloSink>(htSHA256, std::string(info.path.hashPart()));
+ HashSink hashSink(htSHA256);
- TeeSource wrapperSource { source, *hashSink };
+ TeeSource wrapperSource { source, hashSink };
restorePath(realPath, wrapperSource);
- auto hashResult = hashSink->finish();
+ auto hashResult = hashSink.finish();
if (hashResult.first != info.narHash)
throw Error("hash mismatch importing path '%s';\n specified: %s\n got: %s",
@@ -1172,6 +1268,31 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
throw Error("size mismatch importing path '%s';\n specified: %s\n got: %s",
printStorePath(info.path), info.narSize, hashResult.second);
+ if (info.ca) {
+ if (auto foHash = std::get_if<FixedOutputHash>(&*info.ca)) {
+ auto actualFoHash = hashCAPath(
+ foHash->method,
+ foHash->hash.type,
+ info.path
+ );
+ if (foHash->hash != actualFoHash.hash) {
+ throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s",
+ printStorePath(info.path),
+ foHash->hash.to_string(Base32, true),
+ actualFoHash.hash.to_string(Base32, true));
+ }
+ }
+ if (auto textHash = std::get_if<TextHash>(&*info.ca)) {
+ auto actualTextHash = hashString(htSHA256, readFile(realPath));
+ if (textHash->hash != actualTextHash) {
+ throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s",
+ printStorePath(info.path),
+ textHash->hash.to_string(Base32, true),
+ actualTextHash.to_string(Base32, true));
+ }
+ }
+ }
+
autoGC();
canonicalisePathMetaData(realPath, -1);
@@ -1187,7 +1308,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
- FileIngestionMethod method, HashType hashAlgo, RepairFlag repair)
+ FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references)
{
/* For computing the store path. */
auto hashSink = std::make_unique<HashSink>(hashAlgo);
@@ -1243,7 +1364,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
auto [hash, size] = hashSink->finish();
- auto dstPath = makeFixedOutputPath(method, hash, name);
+ auto dstPath = makeFixedOutputPath(method, hash, name, references);
addTempRoot(dstPath);
@@ -1290,6 +1411,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
ValidPathInfo info { dstPath, narHash.first };
info.narSize = narHash.second;
+ info.references = references;
info.ca = FixedOutputHash { .method = method, .hash = hash };
registerValidPath(info);
}
@@ -1390,7 +1512,8 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
/* Acquire the global GC lock to get a consistent snapshot of
existing and valid paths. */
- AutoCloseFD fdGCLock = openGCLock(ltWrite);
+ auto fdGCLock = openGCLock();
+ FdLock gcLock(fdGCLock.get(), ltRead, true, "waiting for the big garbage collector lock...");
StringSet store;
for (auto & i : readDirectory(realStoreDir)) store.insert(i.name);
@@ -1401,8 +1524,6 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
StorePathSet validPaths;
PathSet done;
- fdGCLock = -1;
-
for (auto & i : queryAllValidPaths())
verifyPath(printStorePath(i), store, done, validPaths, repair, errors);
@@ -1440,14 +1561,10 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
/* Check the content hash (optionally - slow). */
printMsg(lvlTalkative, "checking contents of '%s'", printStorePath(i));
- std::unique_ptr<AbstractHashSink> hashSink;
- if (!info->ca || !info->references.count(info->path))
- hashSink = std::make_unique<HashSink>(info->narHash.type);
- else
- hashSink = std::make_unique<HashModuloSink>(info->narHash.type, std::string(info->path.hashPart()));
+ auto hashSink = HashSink(info->narHash.type);
- dumpPath(Store::toRealPath(i), *hashSink);
- auto current = hashSink->finish();
+ dumpPath(Store::toRealPath(i), hashSink);
+ auto current = hashSink.finish();
if (info->narHash != nullHash && info->narHash != current.first) {
printError("path '%s' was modified! expected hash '%s', got '%s'",
@@ -1665,19 +1782,108 @@ void LocalStore::createUser(const std::string & userName, uid_t userId)
}
}
-std::optional<const Realisation> LocalStore::queryRealisation(
- const DrvOutput& id) {
- typedef std::optional<const Realisation> Ret;
- return retrySQLite<Ret>([&]() -> Ret {
- auto state(_state.lock());
- auto use(state->stmts->QueryRealisedOutput.use()(id.strHash())(
- id.outputName));
- if (!use.next())
- return std::nullopt;
- auto outputPath = parseStorePath(use.getStr(0));
- auto signatures = tokenizeString<StringSet>(use.getStr(1));
- return Ret{Realisation{
- .id = id, .outPath = outputPath, .signatures = signatures}};
- });
+std::optional<std::pair<int64_t, Realisation>> LocalStore::queryRealisationCore_(
+ LocalStore::State & state,
+ const DrvOutput & id)
+{
+ auto useQueryRealisedOutput(
+ state.stmts->QueryRealisedOutput.use()
+ (id.strHash())
+ (id.outputName));
+ if (!useQueryRealisedOutput.next())
+ return std::nullopt;
+ auto realisationDbId = useQueryRealisedOutput.getInt(0);
+ auto outputPath = parseStorePath(useQueryRealisedOutput.getStr(1));
+ auto signatures =
+ tokenizeString<StringSet>(useQueryRealisedOutput.getStr(2));
+
+ return {{
+ realisationDbId,
+ Realisation{
+ .id = id,
+ .outPath = outputPath,
+ .signatures = signatures,
+ }
+ }};
+}
+
+std::optional<const Realisation> LocalStore::queryRealisation_(
+ LocalStore::State & state,
+ const DrvOutput & id)
+{
+ auto maybeCore = queryRealisationCore_(state, id);
+ if (!maybeCore)
+ return std::nullopt;
+ auto [realisationDbId, res] = *maybeCore;
+
+ std::map<DrvOutput, StorePath> dependentRealisations;
+ auto useRealisationRefs(
+ state.stmts->QueryRealisationReferences.use()
+ (realisationDbId));
+ while (useRealisationRefs.next()) {
+ auto depId = DrvOutput {
+ Hash::parseAnyPrefixed(useRealisationRefs.getStr(0)),
+ useRealisationRefs.getStr(1),
+ };
+ auto dependentRealisation = queryRealisationCore_(state, depId);
+ assert(dependentRealisation); // Enforced by the db schema
+ auto outputPath = dependentRealisation->second.outPath;
+ dependentRealisations.insert({depId, outputPath});
+ }
+
+ res.dependentRealisations = dependentRealisations;
+
+ return { res };
+}
+
+void LocalStore::queryRealisationUncached(const DrvOutput & id,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept
+{
+ try {
+ auto maybeRealisation
+ = retrySQLite<std::optional<const Realisation>>([&]() {
+ auto state(_state.lock());
+ return queryRealisation_(*state, id);
+ });
+ if (maybeRealisation)
+ callback(
+ std::make_shared<const Realisation>(maybeRealisation.value()));
+ else
+ callback(nullptr);
+
+ } catch (...) {
+ callback.rethrow();
+ }
+}
+
+FixedOutputHash LocalStore::hashCAPath(
+ const FileIngestionMethod & method, const HashType & hashType,
+ const StorePath & path)
+{
+ return hashCAPath(method, hashType, Store::toRealPath(path), path.hashPart());
+}
+
+FixedOutputHash LocalStore::hashCAPath(
+ const FileIngestionMethod & method,
+ const HashType & hashType,
+ const Path & path,
+ const std::string_view pathHash
+)
+{
+ HashModuloSink caSink(hashType, std::string(pathHash));
+ switch (method) {
+ case FileIngestionMethod::Recursive:
+ dumpPath(path, caSink);
+ break;
+ case FileIngestionMethod::Flat:
+ readFile(path, caSink);
+ break;
+ }
+ auto hash = caSink.finish().first;
+ return FixedOutputHash{
+ .method = method,
+ .hash = hash,
+ };
}
+
} // namespace nix
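`hashCAPath` recomputes a path's content hash with self-references zeroed out (that is what `HashModuloSink` does), which is what the new fixed-output check in `addToStore` above relies on. A hedged sketch of that check in isolation, ignoring that `hashCAPath` is private in this patch:

    // Pseudocode for the verification addToStore performs on CA paths.
    void checkFixedOutputCA(LocalStore & store, const ValidPathInfo & info)
    {
        if (!info.ca) return; // not content-addressed, nothing to verify
        if (auto foHash = std::get_if<FixedOutputHash>(&*info.ca)) {
            auto actual = store.hashCAPath(foHash->method, foHash->hash.type, info.path);
            if (foHash->hash != actual.hash)
                throw Error("ca hash mismatch for '%s'", store.printStorePath(info.path));
        }
    }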
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 26e034a82..115ea046a 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -58,9 +58,15 @@ private:
struct Stmts;
std::unique_ptr<Stmts> stmts;
+ /* The global GC lock */
+ AutoCloseFD fdGCLock;
+
/* The file to which we write our temporary roots. */
AutoCloseFD fdTempRoots;
+ /* Connection to the garbage collector. */
+ AutoCloseFD fdRootsSocket;
+
/* The last time we checked whether to do an auto-GC, or an
auto-GC finished. */
std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;
@@ -83,14 +89,10 @@ private:
public:
- PathSetting realStoreDir_;
-
- const Path realStoreDir;
const Path dbDir;
const Path linksDir;
const Path reservedPath;
const Path schemaPath;
- const Path trashDir;
const Path tempRootsDir;
const Path fnTempRoots;
@@ -143,7 +145,7 @@ public:
RepairFlag repair, CheckSigsFlag checkSigs) override;
StorePath addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method, HashType hashAlgo, RepairFlag repair) override;
+ FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references) override;
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair) override;
@@ -152,14 +154,11 @@ public:
void addIndirectRoot(const Path & path) override;
- void syncWithGC() override;
-
private:
- typedef std::shared_ptr<AutoCloseFD> FDPtr;
- typedef list<FDPtr> FDs;
+ void findTempRoots(Roots & roots, bool censor);
- void findTempRoots(FDs & fds, Roots & roots, bool censor);
+ AutoCloseFD openGCLock();
public:
@@ -206,7 +205,10 @@ public:
void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
void cacheDrvOutputMapping(State & state, const uint64_t deriver, const string & outputName, const StorePath & output);
- std::optional<const Realisation> queryRealisation(const DrvOutput&) override;
+ std::optional<const Realisation> queryRealisation_(State & state, const DrvOutput & id);
+ std::optional<std::pair<int64_t, Realisation>> queryRealisationCore_(State & state, const DrvOutput & id);
+ void queryRealisationUncached(const DrvOutput&,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
private:
@@ -237,29 +239,12 @@ private:
PathSet queryValidPathsOld();
ValidPathInfo queryPathInfoOld(const Path & path);
- struct GCState;
-
- void deleteGarbage(GCState & state, const Path & path);
-
- void tryToDelete(GCState & state, const Path & path);
-
- bool canReachRoot(GCState & state, StorePathSet & visited, const StorePath & path);
-
- void deletePathRecursive(GCState & state, const Path & path);
-
- bool isActiveTempFile(const GCState & state,
- const Path & path, const string & suffix);
-
- AutoCloseFD openGCLock(LockType lockType);
-
void findRoots(const Path & path, unsigned char type, Roots & roots);
void findRootsNoTemp(Roots & roots, bool censor);
void findRuntimeRoots(Roots & roots, bool censor);
- void removeUnusedLinks(const GCState & state);
-
Path createTempDirInStore();
void checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv);
@@ -279,10 +264,21 @@ private:
void signPathInfo(ValidPathInfo & info);
void signRealisation(Realisation &);
- Path getRealStoreDir() override { return realStoreDir; }
-
void createUser(const std::string & userName, uid_t userId) override;
+ // XXX: Make a generic `Store` method
+ FixedOutputHash hashCAPath(
+ const FileIngestionMethod & method,
+ const HashType & hashType,
+ const StorePath & path);
+
+ FixedOutputHash hashCAPath(
+ const FileIngestionMethod & method,
+ const HashType & hashType,
+ const Path & path,
+ const std::string_view pathHash
+ );
+
friend struct LocalDerivationGoal;
friend struct PathSubstitutionGoal;
friend struct SubstitutionGoal;
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
index cf0933705..b992bcbc0 100644
--- a/src/libstore/local.mk
+++ b/src/libstore/local.mk
@@ -8,12 +8,12 @@ libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc)
libstore_LIBS = libutil
-libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
-ifneq ($(OS), FreeBSD)
+libstore_LDFLAGS += $(SQLITE3_LIBS) $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
+ifdef HOST_LINUX
libstore_LDFLAGS += -ldl
endif
-ifeq ($(OS), Darwin)
+ifdef HOST_DARWIN
libstore_FILES = sandbox-defaults.sb sandbox-minimal.sb sandbox-network.sb
endif
@@ -23,7 +23,7 @@ ifeq ($(ENABLE_S3), 1)
libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core
endif
-ifeq ($(OS), SunOS)
+ifdef HOST_SOLARIS
libstore_LDFLAGS += -lsocket
endif
@@ -60,7 +60,7 @@ $(d)/build.cc:
clean-files += $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh
-$(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644))
+$(eval $(call install-file-in, $(d)/nix-store.pc, $(libdir)/pkgconfig, 0644))
$(foreach i, $(wildcard src/libstore/builtins/*.hh), \
$(eval $(call install-file-in, $(i), $(includedir)/nix/builtins, 0644)))
diff --git a/src/libstore/machines.cc b/src/libstore/machines.cc
index b42e5e434..b6270a81b 100644
--- a/src/libstore/machines.cc
+++ b/src/libstore/machines.cc
@@ -16,13 +16,18 @@ Machine::Machine(decltype(storeUri) storeUri,
decltype(mandatoryFeatures) mandatoryFeatures,
decltype(sshPublicHostKey) sshPublicHostKey) :
storeUri(
- // Backwards compatibility: if the URI is a hostname,
- // prepend ssh://.
+ // Backwards compatibility: if the URI is schemeless, is not a path,
+ // and is not one of the special store connection words, prepend
+ // ssh://.
storeUri.find("://") != std::string::npos
- || hasPrefix(storeUri, "local")
- || hasPrefix(storeUri, "remote")
- || hasPrefix(storeUri, "auto")
- || hasPrefix(storeUri, "/")
+ || storeUri.find("/") != std::string::npos
+ || storeUri == "auto"
+ || storeUri == "daemon"
+ || storeUri == "local"
+ || hasPrefix(storeUri, "auto?")
+ || hasPrefix(storeUri, "daemon?")
+ || hasPrefix(storeUri, "local?")
+ || hasPrefix(storeUri, "?")
? storeUri
: "ssh://" + storeUri),
systemTypes(systemTypes),
@@ -34,7 +39,8 @@ Machine::Machine(decltype(storeUri) storeUri,
sshPublicHostKey(sshPublicHostKey)
{}
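In other words, a builder URI is left alone if it has a scheme, contains a slash, or is one of the special store names (`auto`, `daemon`, `local`, optionally with `?params`); only bare hostnames get `ssh://` prepended. A hypothetical helper mirroring that predicate:

    static std::string normalizeBuilderUri(const std::string & s)
    {
        bool complete =
            s.find("://") != std::string::npos
            || s.find('/') != std::string::npos
            || s == "auto" || s == "daemon" || s == "local"
            || hasPrefix(s, "auto?") || hasPrefix(s, "daemon?")
            || hasPrefix(s, "local?") || hasPrefix(s, "?");
        return complete ? s : "ssh://" + s;
    }
    // normalizeBuilderUri("example.org")      == "ssh://example.org"
    // normalizeBuilderUri("daemon?root=/tmp") == "daemon?root=/tmp"
    // normalizeBuilderUri("/nix/store")       == "/nix/store"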
-bool Machine::allSupported(const std::set<string> & features) const {
+bool Machine::allSupported(const std::set<string> & features) const
+{
return std::all_of(features.begin(), features.end(),
[&](const string & feature) {
return supportedFeatures.count(feature) ||
@@ -42,14 +48,16 @@ bool Machine::allSupported(const std::set<string> & features) const {
});
}
-bool Machine::mandatoryMet(const std::set<string> & features) const {
+bool Machine::mandatoryMet(const std::set<string> & features) const
+{
return std::all_of(mandatoryFeatures.begin(), mandatoryFeatures.end(),
[&](const string & feature) {
return features.count(feature);
});
}
-ref<Store> Machine::openStore() const {
+ref<Store> Machine::openStore() const
+{
Store::Params storeParams;
if (hasPrefix(storeUri, "ssh://")) {
storeParams["max-connections"] = "1";
@@ -78,53 +86,86 @@ ref<Store> Machine::openStore() const {
return nix::openStore(storeUri, storeParams);
}
-void parseMachines(const std::string & s, Machines & machines)
+static std::vector<std::string> expandBuilderLines(const std::string & builders)
{
- for (auto line : tokenizeString<std::vector<string>>(s, "\n;")) {
+ std::vector<std::string> result;
+ for (auto line : tokenizeString<std::vector<string>>(builders, "\n;")) {
trim(line);
line.erase(std::find(line.begin(), line.end(), '#'), line.end());
if (line.empty()) continue;
if (line[0] == '@') {
- auto file = trim(std::string(line, 1));
+ const std::string path = trim(std::string(line, 1));
+ std::string text;
try {
- parseMachines(readFile(file), machines);
+ text = readFile(path);
} catch (const SysError & e) {
if (e.errNo != ENOENT)
throw;
- debug("cannot find machines file '%s'", file);
+ debug("cannot find machines file '%s'", path);
}
+
+ const auto lines = expandBuilderLines(text);
+ result.insert(end(result), begin(lines), end(lines));
continue;
}
- auto tokens = tokenizeString<std::vector<string>>(line);
- auto sz = tokens.size();
- if (sz < 1)
- throw FormatError("bad machine specification '%s'", line);
+ result.emplace_back(line);
+ }
+ return result;
+}
- auto isSet = [&](size_t n) {
- return tokens.size() > n && tokens[n] != "" && tokens[n] != "-";
- };
+static Machine parseBuilderLine(const std::string & line)
+{
+ const auto tokens = tokenizeString<std::vector<string>>(line);
- machines.emplace_back(tokens[0],
- isSet(1) ? tokenizeString<std::vector<string>>(tokens[1], ",") : std::vector<string>{settings.thisSystem},
- isSet(2) ? tokens[2] : "",
- isSet(3) ? std::stoull(tokens[3]) : 1LL,
- isSet(4) ? std::stoull(tokens[4]) : 1LL,
- isSet(5) ? tokenizeString<std::set<string>>(tokens[5], ",") : std::set<string>{},
- isSet(6) ? tokenizeString<std::set<string>>(tokens[6], ",") : std::set<string>{},
- isSet(7) ? tokens[7] : "");
- }
+ auto isSet = [&](size_t fieldIndex) {
+ return tokens.size() > fieldIndex && tokens[fieldIndex] != "" && tokens[fieldIndex] != "-";
+ };
+
+ auto parseUnsignedIntField = [&](size_t fieldIndex) {
+ const auto result = string2Int<unsigned int>(tokens[fieldIndex]);
+ if (!result) {
+ throw FormatError("bad machine specification: failed to convert column #%lu in a row: '%s' to 'unsigned int'", fieldIndex, line);
+ }
+ return result.value();
+ };
+
+ auto ensureBase64 = [&](size_t fieldIndex) {
+ const auto & str = tokens[fieldIndex];
+ try {
+ base64Decode(str);
+ } catch (const Error & e) {
+ throw FormatError("bad machine specification: a column #%lu in a row: '%s' is not valid base64 string: %s", fieldIndex, line, e.what());
+ }
+ return str;
+ };
+
+ if (!isSet(0))
+ throw FormatError("bad machine specification: store URL was not found at the first column of a row: '%s'", line);
+
+ return {
+ tokens[0],
+ isSet(1) ? tokenizeString<std::vector<string>>(tokens[1], ",") : std::vector<string>{settings.thisSystem},
+ isSet(2) ? tokens[2] : "",
+ isSet(3) ? parseUnsignedIntField(3) : 1U,
+ isSet(4) ? parseUnsignedIntField(4) : 1U,
+ isSet(5) ? tokenizeString<std::set<string>>(tokens[5], ",") : std::set<string>{},
+ isSet(6) ? tokenizeString<std::set<string>>(tokens[6], ",") : std::set<string>{},
+ isSet(7) ? ensureBase64(7) : ""
+ };
+}
+
+static Machines parseBuilderLines(const std::vector<std::string> & builders)
+{
+ Machines result;
+ std::transform(builders.begin(), builders.end(), std::back_inserter(result), parseBuilderLine);
+ return result;
}
Machines getMachines()
{
- static auto machines = [&]() {
- Machines machines;
- parseMachines(settings.builders, machines);
- return machines;
- }();
- return machines;
+ const auto builderLines = expandBuilderLines(settings.builders);
+ return parseBuilderLines(builderLines);
}
}
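For reference, the eight whitespace-separated columns a builder line may carry, which `parseBuilderLine` feeds to the `Machine` constructor; the values in this sketch are made up:

    // One entry of the `builders` setting, as consumed by parseBuilderLine():
    //
    //   ssh://mac x86_64-darwin /etc/nix/id_mac 2 1 big-parallel - c3NoLWVkMjU1MTkgQUFBQS4uLg==
    //
    // Columns: 1 store URI, 2 comma-separated systems, 3 SSH identity file,
    // 4 max jobs, 5 speed factor, 6 supported features, 7 mandatory features,
    // 8 base64-encoded SSH host key; "-" (or an empty field) keeps the default.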
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
index a99a2fc78..32786e963 100644
--- a/src/libstore/misc.cc
+++ b/src/libstore/misc.cc
@@ -6,98 +6,73 @@
#include "thread-pool.hh"
#include "topo-sort.hh"
#include "callback.hh"
+#include "closure.hh"
namespace nix {
-
void Store::computeFSClosure(const StorePathSet & startPaths,
StorePathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
{
- struct State
- {
- size_t pending;
- StorePathSet & paths;
- std::exception_ptr exc;
- };
-
- Sync<State> state_(State{0, paths_, 0});
-
- std::function<void(const StorePath &)> enqueue;
-
- std::condition_variable done;
-
- enqueue = [&](const StorePath & path) -> void {
- {
- auto state(state_.lock());
- if (state->exc) return;
- if (!state->paths.insert(path).second) return;
- state->pending++;
- }
-
- queryPathInfo(path, {[&](std::future<ref<const ValidPathInfo>> fut) {
- // FIXME: calls to isValidPath() should be async
-
- try {
- auto info = fut.get();
-
- if (flipDirection) {
-
- StorePathSet referrers;
- queryReferrers(path, referrers);
- for (auto & ref : referrers)
- if (ref != path)
- enqueue(ref);
-
- if (includeOutputs)
- for (auto & i : queryValidDerivers(path))
- enqueue(i);
-
- if (includeDerivers && path.isDerivation())
- for (auto & i : queryDerivationOutputs(path))
- if (isValidPath(i) && queryPathInfo(i)->deriver == path)
- enqueue(i);
-
- } else {
-
- for (auto & ref : info->references)
- if (ref != path)
- enqueue(ref);
-
- if (includeOutputs && path.isDerivation())
- for (auto & i : queryDerivationOutputs(path))
- if (isValidPath(i)) enqueue(i);
-
- if (includeDerivers && info->deriver && isValidPath(*info->deriver))
- enqueue(*info->deriver);
-
- }
-
- {
- auto state(state_.lock());
- assert(state->pending);
- if (!--state->pending) done.notify_one();
- }
-
- } catch (...) {
- auto state(state_.lock());
- if (!state->exc) state->exc = std::current_exception();
- assert(state->pending);
- if (!--state->pending) done.notify_one();
- };
- }});
- };
-
- for (auto & startPath : startPaths)
- enqueue(startPath);
-
- {
- auto state(state_.lock());
- while (state->pending) state.wait(done);
- if (state->exc) std::rethrow_exception(state->exc);
- }
+ std::function<std::set<StorePath>(const StorePath & path, std::future<ref<const ValidPathInfo>> &)> queryDeps;
+ if (flipDirection)
+ queryDeps = [&](const StorePath& path,
+ std::future<ref<const ValidPathInfo>> & fut) {
+ StorePathSet res;
+ StorePathSet referrers;
+ queryReferrers(path, referrers);
+ for (auto& ref : referrers)
+ if (ref != path)
+ res.insert(ref);
+
+ if (includeOutputs)
+ for (auto& i : queryValidDerivers(path))
+ res.insert(i);
+
+ if (includeDerivers && path.isDerivation())
+ for (auto& [_, maybeOutPath] : queryPartialDerivationOutputMap(path))
+ if (maybeOutPath && isValidPath(*maybeOutPath))
+ res.insert(*maybeOutPath);
+ return res;
+ };
+ else
+ queryDeps = [&](const StorePath& path,
+ std::future<ref<const ValidPathInfo>> & fut) {
+ StorePathSet res;
+ auto info = fut.get();
+ for (auto& ref : info->references)
+ if (ref != path)
+ res.insert(ref);
+
+ if (includeOutputs && path.isDerivation())
+ for (auto& [_, maybeOutPath] : queryPartialDerivationOutputMap(path))
+ if (maybeOutPath && isValidPath(*maybeOutPath))
+ res.insert(*maybeOutPath);
+
+ if (includeDerivers && info->deriver && isValidPath(*info->deriver))
+ res.insert(*info->deriver);
+ return res;
+ };
+
+ computeClosure<StorePath>(
+ startPaths, paths_,
+ [&](const StorePath& path,
+ std::function<void(std::promise<std::set<StorePath>>&)>
+ processEdges) {
+ std::promise<std::set<StorePath>> promise;
+ std::function<void(std::future<ref<const ValidPathInfo>>)>
+ getDependencies =
+ [&](std::future<ref<const ValidPathInfo>> fut) {
+ try {
+ promise.set_value(queryDeps(path, fut));
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ };
+ queryPathInfo(path, getDependencies);
+ processEdges(promise);
+ });
}
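The traversal itself is now delegated to the generic `computeClosure` from `closure.hh`: given a start set and a callback that delivers each node's edges through a promise, it accumulates the transitive closure. A minimal synchronous sketch, assuming the interface used above:

    std::set<int> closure;
    computeClosure<int>(
        {1}, closure,
        [&](const int & node,
            std::function<void(std::promise<std::set<int>> &)> processEdges) {
            std::promise<std::set<int>> promise;
            // Each node points at its successor, up to 3.
            promise.set_value(node < 3 ? std::set<int>{node + 1} : std::set<int>{});
            processEdges(promise);
        });
    // closure is now {1, 2, 3}.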
-
void Store::computeFSClosure(const StorePath & startPath,
StorePathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
{
@@ -191,7 +166,7 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
}
std::visit(overloaded {
- [&](DerivedPath::Built bfd) {
+ [&](const DerivedPath::Built & bfd) {
if (!isValidPath(bfd.drvPath)) {
// FIXME: we could try to substitute the derivation.
auto state(state_.lock());
@@ -224,7 +199,7 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
mustBuildDrv(bfd.drvPath, *drv);
},
- [&](DerivedPath::Opaque bo) {
+ [&](const DerivedPath::Opaque & bo) {
if (isValidPath(bo.path)) return;
@@ -264,12 +239,11 @@ StorePaths Store::topoSortPaths(const StorePathSet & paths)
{
return topoSort(paths,
{[&](const StorePath & path) {
- StorePathSet references;
try {
- references = queryPathInfo(path)->references;
+ return queryPathInfo(path)->references;
} catch (InvalidPath &) {
+ return StorePathSet();
}
- return references;
}},
{[&](const StorePath & path, const StorePath & parent) {
return BuildError(
@@ -279,5 +253,44 @@ StorePaths Store::topoSortPaths(const StorePathSet & paths)
}});
}
+std::map<DrvOutput, StorePath> drvOutputReferences(
+ const std::set<Realisation> & inputRealisations,
+ const StorePathSet & pathReferences)
+{
+ std::map<DrvOutput, StorePath> res;
+ for (const auto & input : inputRealisations) {
+ if (pathReferences.count(input.outPath)) {
+ res.insert({input.id, input.outPath});
+ }
+ }
+
+ return res;
+}
+
+std::map<DrvOutput, StorePath> drvOutputReferences(
+ Store & store,
+ const Derivation & drv,
+ const StorePath & outputPath)
+{
+ std::set<Realisation> inputRealisations;
+
+ for (const auto& [inputDrv, outputNames] : drv.inputDrvs) {
+ auto outputHashes =
+ staticOutputHashes(store, store.readDerivation(inputDrv));
+ for (const auto& outputName : outputNames) {
+ auto thisRealisation = store.queryRealisation(
+ DrvOutput{outputHashes.at(outputName), outputName});
+ if (!thisRealisation)
+ throw Error(
+ "output '%s' of derivation '%s' isn’t built", outputName,
+ store.printStorePath(inputDrv));
+ inputRealisations.insert(*thisRealisation);
+ }
+ }
+
+ auto info = store.queryPathInfo(outputPath);
+
+ return drvOutputReferences(Realisation::closure(store, inputRealisations), info->references);
+}
}
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
index 1d8d2d57e..9dd81ddfb 100644
--- a/src/libstore/nar-info-disk-cache.cc
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -4,6 +4,7 @@
#include "globals.hh"
#include <sqlite3.h>
+#include <nlohmann/json.hpp>
namespace nix {
@@ -38,6 +39,15 @@ create table if not exists NARs (
foreign key (cache) references BinaryCaches(id) on delete cascade
);
+create table if not exists Realisations (
+ cache integer not null,
+ outputId text not null,
+ content blob, -- JSON serialisation of the realisation, or null if the realisation is absent
+ timestamp integer not null,
+ primary key (cache, outputId),
+ foreign key (cache) references BinaryCaches(id) on delete cascade
+);
+
create table if not exists LastPurge (
dummy text primary key,
value integer
@@ -63,7 +73,9 @@ public:
struct State
{
SQLite db;
- SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR, purgeCache;
+ SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR,
+ queryNAR, insertRealisation, insertMissingRealisation,
+ queryRealisation, purgeCache;
std::map<std::string, Cache> caches;
};
@@ -98,6 +110,26 @@ public:
state->queryNAR.create(state->db,
"select present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))");
+ state->insertRealisation.create(state->db,
+ R"(
+ insert or replace into Realisations(cache, outputId, content, timestamp)
+ values (?, ?, ?, ?)
+ )");
+
+ state->insertMissingRealisation.create(state->db,
+ R"(
+ insert or replace into Realisations(cache, outputId, timestamp)
+ values (?, ?, ?)
+ )");
+
+ state->queryRealisation.create(state->db,
+ R"(
+ select content from Realisations
+ where cache = ? and outputId = ? and
+ ((content is null and timestamp > ?) or
+ (content is not null and timestamp > ?))
+ )");
+
/* Periodically purge expired entries from the database. */
retrySQLite<void>([&]() {
auto now = time(0);
@@ -212,6 +244,38 @@ public:
});
}
+ std::pair<Outcome, std::shared_ptr<Realisation>> lookupRealisation(
+ const std::string & uri, const DrvOutput & id) override
+ {
+ return retrySQLite<std::pair<Outcome, std::shared_ptr<Realisation>>>(
+ [&]() -> std::pair<Outcome, std::shared_ptr<Realisation>> {
+ auto state(_state.lock());
+
+ auto & cache(getCache(*state, uri));
+
+ auto now = time(0);
+
+ auto queryRealisation(state->queryRealisation.use()
+ (cache.id)
+ (id.to_string())
+ (now - settings.ttlNegativeNarInfoCache)
+ (now - settings.ttlPositiveNarInfoCache));
+
+ if (!queryRealisation.next())
+ return {oUnknown, 0};
+
+ if (queryRealisation.isNull(0))
+ return {oInvalid, 0};
+
+ auto realisation =
+ std::make_shared<Realisation>(Realisation::fromJSON(
+ nlohmann::json::parse(queryRealisation.getStr(0)),
+ "Local disk cache"));
+
+ return {oValid, realisation};
+ });
+ }
+
void upsertNarInfo(
const std::string & uri, const std::string & hashPart,
std::shared_ptr<const ValidPathInfo> info) override
@@ -251,6 +315,39 @@ public:
}
});
}
+
+ void upsertRealisation(
+ const std::string & uri,
+ const Realisation & realisation) override
+ {
+ retrySQLite<void>([&]() {
+ auto state(_state.lock());
+
+ auto & cache(getCache(*state, uri));
+
+ state->insertRealisation.use()
+ (cache.id)
+ (realisation.id.to_string())
+ (realisation.toJSON().dump())
+ (time(0)).exec();
+ });
+
+ }
+
+ virtual void upsertAbsentRealisation(
+ const std::string & uri,
+ const DrvOutput & id) override
+ {
+ retrySQLite<void>([&]() {
+ auto state(_state.lock());
+
+ auto & cache(getCache(*state, uri));
+ state->insertMissingRealisation.use()
+ (cache.id)
+ (id.to_string())
+ (time(0)).exec();
+ });
+ }
};
ref<NarInfoDiskCache> getNarInfoDiskCache()
diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh
index 04de2c5eb..2dcaa76a4 100644
--- a/src/libstore/nar-info-disk-cache.hh
+++ b/src/libstore/nar-info-disk-cache.hh
@@ -2,6 +2,7 @@
#include "ref.hh"
#include "nar-info.hh"
+#include "realisation.hh"
namespace nix {
@@ -29,6 +30,15 @@ public:
virtual void upsertNarInfo(
const std::string & uri, const std::string & hashPart,
std::shared_ptr<const ValidPathInfo> info) = 0;
+
+ virtual void upsertRealisation(
+ const std::string & uri,
+ const Realisation & realisation) = 0;
+ virtual void upsertAbsentRealisation(
+ const std::string & uri,
+ const DrvOutput & id) = 0;
+ virtual std::pair<Outcome, std::shared_ptr<Realisation>> lookupRealisation(
+ const std::string & uri, const DrvOutput & id) = 0;
};
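Cached realisations are tri-state, like the existing NAR-info rows: a row with content is a positive hit, a row with null content is a cached miss, and no fresh row means unknown. A sketch of how a caller is expected to branch, assuming `uri` and `drvOutput` in scope and the `Outcome` values (`oValid`, `oInvalid`, `oUnknown`) used by the implementation above:

    auto cache = getNarInfoDiskCache();
    auto [outcome, realisation] = cache->lookupRealisation(uri, drvOutput);
    switch (outcome) {
    case oValid:   /* cached hit: use *realisation */             break;
    case oInvalid: /* cached miss: skip the network round-trip */ break;
    case oUnknown: /* no fresh entry: ask the substituter */      break;
    }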
/* Return a singleton cache object that can be used concurrently by
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index 78d587139..d95e54af1 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -198,7 +198,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
/* Make the containing directory writable, but only if it's not
the store itself (we don't want or need to mess with its
permissions). */
- bool mustToggle = dirOf(path) != realStoreDir;
+ bool mustToggle = dirOf(path) != realStoreDir.get();
if (mustToggle) makeWritable(dirOf(path));
/* When we're done, make the directory read-only again and reset
diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc
index c5c3ae3dc..caddba9b1 100644
--- a/src/libstore/parsed-derivations.cc
+++ b/src/libstore/parsed-derivations.cc
@@ -1,6 +1,8 @@
#include "parsed-derivations.hh"
#include <nlohmann/json.hpp>
+#include <regex>
+#include "json.hh"
namespace nix {
@@ -91,6 +93,8 @@ StringSet ParsedDerivation::getRequiredSystemFeatures() const
StringSet res;
for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
res.insert(i);
+ if (!derivationHasKnownOutputPaths(drv.type()))
+ res.insert("ca-derivations");
return res;
}
@@ -121,4 +125,107 @@ bool ParsedDerivation::substitutesAllowed() const
return getBoolAttr("allowSubstitutes", true);
}
+static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
+
+std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths)
+{
+ auto structuredAttrs = getStructuredAttrs();
+ if (!structuredAttrs) return std::nullopt;
+
+ auto json = *structuredAttrs;
+
+ /* Add an "outputs" object containing the output paths. */
+ nlohmann::json outputs;
+ for (auto & i : drv.outputs)
+ outputs[i.first] = hashPlaceholder(i.first);
+ json["outputs"] = outputs;
+
+ /* Handle exportReferencesGraph. */
+ auto e = json.find("exportReferencesGraph");
+ if (e != json.end() && e->is_object()) {
+ for (auto i = e->begin(); i != e->end(); ++i) {
+ std::ostringstream str;
+ {
+ JSONPlaceholder jsonRoot(str, true);
+ StorePathSet storePaths;
+ for (auto & p : *i)
+ storePaths.insert(store.parseStorePath(p.get<std::string>()));
+ store.pathInfoToJSON(jsonRoot,
+ store.exportReferences(storePaths, inputPaths), false, true);
+ }
+ json[i.key()] = nlohmann::json::parse(str.str()); // urgh
+ }
+ }
+
+ return json;
+}
+
+/* As a convenience to bash scripts, write a shell file that
+ maps all attributes that are representable in bash -
+ namely, strings, integers, nulls, Booleans, and arrays and
+ objects consisting entirely of those values. (So nested
+ arrays or objects are not supported.) */
+std::string writeStructuredAttrsShell(const nlohmann::json & json)
+{
+
+ auto handleSimpleType = [](const nlohmann::json & value) -> std::optional<std::string> {
+ if (value.is_string())
+ return shellEscape(value);
+
+ if (value.is_number()) {
+ auto f = value.get<float>();
+ if (std::ceil(f) == f)
+ return std::to_string(value.get<int>());
+ }
+
+ if (value.is_null())
+ return std::string("''");
+
+ if (value.is_boolean())
+ return value.get<bool>() ? std::string("1") : std::string("");
+
+ return {};
+ };
+
+ std::string jsonSh;
+
+ for (auto & [key, value] : json.items()) {
+
+ if (!std::regex_match(key, shVarName)) continue;
+
+ auto s = handleSimpleType(value);
+ if (s)
+ jsonSh += fmt("declare %s=%s\n", key, *s);
+
+ else if (value.is_array()) {
+ std::string s2;
+ bool good = true;
+
+ for (auto & value2 : value) {
+ auto s3 = handleSimpleType(value2);
+ if (!s3) { good = false; break; }
+ s2 += *s3; s2 += ' ';
+ }
+
+ if (good)
+ jsonSh += fmt("declare -a %s=(%s)\n", key, s2);
+ }
+
+ else if (value.is_object()) {
+ std::string s2;
+ bool good = true;
+
+ for (auto & [key2, value2] : value.items()) {
+ auto s3 = handleSimpleType(value2);
+ if (!s3) { good = false; break; }
+ s2 += fmt("[%s]=%s ", shellEscape(key2), *s3);
+ }
+
+ if (good)
+ jsonSh += fmt("declare -A %s=(%s)\n", key, s2);
+ }
+ }
+
+ return jsonSh;
+}
}
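Concretely, a structured-attrs set such as `{ "foo": "bar", "n": 3, "xs": [1, 2], "m": { "a": "b" } }` would come out of the function above roughly as follows (illustrative; note that `shellEscape` single-quotes both string values and associative-array keys):

    // declare foo='bar'
    // declare n=3
    // declare -a xs=(1 2 )
    // declare -A m=(['a']='b' )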
diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh
index c9fbe68c4..effcf099d 100644
--- a/src/libstore/parsed-derivations.hh
+++ b/src/libstore/parsed-derivations.hh
@@ -36,6 +36,10 @@ public:
bool willBuildLocally(Store & localStore) const;
bool substitutesAllowed() const;
+
+ std::optional<nlohmann::json> prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths);
};
+std::string writeStructuredAttrsShell(const nlohmann::json & json);
+
}
diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc
new file mode 100644
index 000000000..fda55b2b6
--- /dev/null
+++ b/src/libstore/path-info.cc
@@ -0,0 +1,46 @@
+#include "path-info.hh"
+#include "worker-protocol.hh"
+
+namespace nix {
+
+ValidPathInfo ValidPathInfo::read(Source & source, const Store & store, unsigned int format)
+{
+ return read(source, store, format, store.parseStorePath(readString(source)));
+}
+
+ValidPathInfo ValidPathInfo::read(Source & source, const Store & store, unsigned int format, StorePath && path)
+{
+ auto deriver = readString(source);
+ auto narHash = Hash::parseAny(readString(source), htSHA256);
+ ValidPathInfo info(path, narHash);
+ if (deriver != "") info.deriver = store.parseStorePath(deriver);
+ info.references = worker_proto::read(store, source, Phantom<StorePathSet> {});
+ source >> info.registrationTime >> info.narSize;
+ if (format >= 16) {
+ source >> info.ultimate;
+ info.sigs = readStrings<StringSet>(source);
+ info.ca = parseContentAddressOpt(readString(source));
+ }
+ return info;
+}
+
+void ValidPathInfo::write(
+ Sink & sink,
+ const Store & store,
+ unsigned int format,
+ bool includePath) const
+{
+ if (includePath)
+ sink << store.printStorePath(path);
+ sink << (deriver ? store.printStorePath(*deriver) : "")
+ << narHash.to_string(Base16, false);
+ worker_proto::write(store, sink, references);
+ sink << registrationTime << narSize;
+ if (format >= 16) {
+ sink << ultimate
+ << sigs
+ << renderContentAddress(ca);
+ }
+}
+
+}
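The wire format is positional: store path (when included), deriver, base-16 NAR hash, references, registration time and NAR size, then from format 16 onward the ultimate flag, signatures, and content address. A round-trip sketch assuming the libutil `StringSink`/`StringSource` helpers:

    StringSink sink;
    info.write(sink, store, 16);                  // includePath defaults to true
    StringSource source(*sink.s);
    auto info2 = ValidPathInfo::read(source, store, 16);
    assert(info.path == info2.path && info.narHash == info2.narHash);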
diff --git a/src/libstore/path-info.hh b/src/libstore/path-info.hh
index de87f8b33..b4b54e593 100644
--- a/src/libstore/path-info.hh
+++ b/src/libstore/path-info.hh
@@ -105,6 +105,11 @@ struct ValidPathInfo
ValidPathInfo(const StorePath & path, Hash narHash) : path(path), narHash(narHash) { };
virtual ~ValidPathInfo() { }
+
+ static ValidPathInfo read(Source & source, const Store & store, unsigned int format);
+ static ValidPathInfo read(Source & source, const Store & store, unsigned int format, StorePath && path);
+
+ void write(Sink & sink, const Store & store, unsigned int format, bool includePath = true) const;
};
typedef std::map<StorePath, ValidPathInfo> ValidPathInfos;
diff --git a/src/libstore/path-with-outputs.cc b/src/libstore/path-with-outputs.cc
index 865d64cf2..e5a121e00 100644
--- a/src/libstore/path-with-outputs.cc
+++ b/src/libstore/path-with-outputs.cc
@@ -31,14 +31,14 @@ std::vector<DerivedPath> toDerivedPaths(const std::vector<StorePathWithOutputs>
std::variant<StorePathWithOutputs, StorePath> StorePathWithOutputs::tryFromDerivedPath(const DerivedPath & p)
{
return std::visit(overloaded {
- [&](DerivedPath::Opaque bo) -> std::variant<StorePathWithOutputs, StorePath> {
+ [&](const DerivedPath::Opaque & bo) -> std::variant<StorePathWithOutputs, StorePath> {
if (bo.path.isDerivation()) {
// drv path gets interpreted as "build", not "get drv file itself"
return bo.path;
}
return StorePathWithOutputs { bo.path };
},
- [&](DerivedPath::Built bfd) -> std::variant<StorePathWithOutputs, StorePath> {
+ [&](const DerivedPath::Built & bfd) -> std::variant<StorePathWithOutputs, StorePath> {
return StorePathWithOutputs { bfd.drvPath, bfd.outputs };
},
}, p.raw());
diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc
index 926f4ea1e..2da74e262 100644
--- a/src/libstore/pathlocks.cc
+++ b/src/libstore/pathlocks.cc
@@ -176,4 +176,17 @@ void PathLocks::setDeletion(bool deletePaths)
}
+FdLock::FdLock(int fd, LockType lockType, bool wait, std::string_view waitMsg)
+ : fd(fd)
+{
+ if (wait) {
+ if (!lockFile(fd, lockType, false)) {
+ printInfo("%s", waitMsg);
+ acquired = lockFile(fd, lockType, true);
+ }
+ } else
+ acquired = lockFile(fd, lockType, false);
+}
+
+
}
diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh
index 411da0222..919c8904c 100644
--- a/src/libstore/pathlocks.hh
+++ b/src/libstore/pathlocks.hh
@@ -35,4 +35,18 @@ public:
void setDeletion(bool deletePaths);
};
+struct FdLock
+{
+ int fd;
+ bool acquired = false;
+
+ FdLock(int fd, LockType lockType, bool wait, std::string_view waitMsg);
+
+ ~FdLock()
+ {
+ if (acquired)
+ lockFile(fd, ltNone, false);
+ }
+};
+
}
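
`FdLock` is a small RAII guard over `lockFile()`: the constructor takes the lock (printing `waitMsg` first if it would block), and the destructor releases it by downgrading to `ltNone`. A hypothetical usage sketch (the lock path is made up for illustration):

    {
        AutoCloseFD fd = open("/nix/var/nix/some-lock", O_RDWR | O_CLOEXEC);
        FdLock lock(fd.get(), ltWrite, true, "waiting for the lock...");
        /* ... critical section ... */
    }   // destructor releases the lock here
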
diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc
index 5d1723886..73163424c 100644
--- a/src/libstore/profiles.cc
+++ b/src/libstore/profiles.cc
@@ -126,9 +126,9 @@ void deleteGeneration(const Path & profile, GenerationNumber gen)
static void deleteGeneration2(const Path & profile, GenerationNumber gen, bool dryRun)
{
if (dryRun)
- printInfo(format("would remove generation %1%") % gen);
+ notice("would remove profile version %1%", gen);
else {
- printInfo(format("removing generation %1%") % gen);
+ notice("removing profile version %1%", gen);
deleteGeneration(profile, gen);
}
}
@@ -142,7 +142,7 @@ void deleteGenerations(const Path & profile, const std::set<GenerationNumber> &
auto [gens, curGen] = findGenerations(profile);
if (gensToDelete.count(*curGen))
- throw Error("cannot delete current generation of profile %1%'", profile);
+ throw Error("cannot delete current version of profile %1%'", profile);
for (auto & i : gens) {
if (!gensToDelete.count(i.number)) continue;
@@ -211,12 +211,15 @@ void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun)
void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, bool dryRun)
{
+ if (timeSpec.empty() || timeSpec[timeSpec.size() - 1] != 'd')
+ throw UsageError("invalid number of days specifier '%1%', expected something like '14d'", timeSpec);
+
time_t curTime = time(0);
string strDays = string(timeSpec, 0, timeSpec.size() - 1);
auto days = string2Int<int>(strDays);
if (!days || *days < 1)
- throw Error("invalid number of days specifier '%1%'", timeSpec);
+ throw UsageError("invalid number of days specifier '%1%'", timeSpec);
time_t oldTime = curTime - *days * 24 * 3600;
@@ -233,6 +236,37 @@ void switchLink(Path link, Path target)
}
+void switchGeneration(
+ const Path & profile,
+ std::optional<GenerationNumber> dstGen,
+ bool dryRun)
+{
+ PathLocks lock;
+ lockProfile(lock, profile);
+
+ auto [gens, curGen] = findGenerations(profile);
+
+ std::optional<Generation> dst;
+ for (auto & i : gens)
+ if ((!dstGen && i.number < curGen) ||
+ (dstGen && i.number == *dstGen))
+ dst = i;
+
+ if (!dst) {
+ if (dstGen)
+ throw Error("profile version %1% does not exist", *dstGen);
+ else
+ throw Error("no profile version older than the current (%1%) exists", curGen.value_or(0));
+ }
+
+ notice("switching profile from version %d to %d", curGen.value_or(0), dst->number);
+
+ if (dryRun) return;
+
+ switchLink(profile, dst->path);
+}
+
+
void lockProfile(PathLocks & lock, const Path & profile)
{
lock.lockPaths({profile}, (format("waiting for lock on profile '%1%'") % profile).str());
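
`switchGeneration` generalises rollback: passing `std::nullopt` selects the newest version older than the current one, while an explicit number selects exactly that version (or throws). Illustrative calls (the profile path is an example):

    // Roll back one version:
    switchGeneration("/nix/var/nix/profiles/default", std::nullopt, false);

    // Switch to version 42, or throw if it doesn't exist:
    switchGeneration("/nix/var/nix/profiles/default", GenerationNumber{42}, false);
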
diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh
index be55a65d4..d100c970c 100644
--- a/src/libstore/profiles.hh
+++ b/src/libstore/profiles.hh
@@ -11,7 +11,7 @@ namespace nix {
class StorePath;
-typedef unsigned int GenerationNumber;
+typedef uint64_t GenerationNumber;
struct Generation
{
@@ -46,6 +46,13 @@ void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, b
void switchLink(Path link, Path target);
+/* Roll back a profile to the specified generation, or to the most
+ recent one older than the current. */
+void switchGeneration(
+ const Path & profile,
+ std::optional<GenerationNumber> dstGen,
+ bool dryRun);
+
/* Ensure exclusive access to a profile. Any command that modifies
the profile first acquires this lock. */
void lockProfile(PathLocks & lock, const Path & profile);
diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc
index 638065547..f871e6437 100644
--- a/src/libstore/realisation.cc
+++ b/src/libstore/realisation.cc
@@ -1,5 +1,6 @@
#include "realisation.hh"
#include "store-api.hh"
+#include "closure.hh"
#include <nlohmann/json.hpp>
namespace nix {
@@ -21,11 +22,52 @@ std::string DrvOutput::to_string() const {
return strHash() + "!" + outputName;
}
+std::set<Realisation> Realisation::closure(Store & store, const std::set<Realisation> & startOutputs)
+{
+ std::set<Realisation> res;
+ Realisation::closure(store, startOutputs, res);
+ return res;
+}
+
+void Realisation::closure(Store & store, const std::set<Realisation> & startOutputs, std::set<Realisation> & res)
+{
+ auto getDeps = [&](const Realisation& current) -> std::set<Realisation> {
+ std::set<Realisation> res;
+ for (auto& [currentDep, _] : current.dependentRealisations) {
+ if (auto currentRealisation = store.queryRealisation(currentDep))
+ res.insert(*currentRealisation);
+ else
+ throw Error(
+ "Unrealised derivation '%s'", currentDep.to_string());
+ }
+ return res;
+ };
+
+ computeClosure<Realisation>(
+ startOutputs, res,
+ [&](const Realisation& current,
+ std::function<void(std::promise<std::set<Realisation>>&)>
+ processEdges) {
+ std::promise<std::set<Realisation>> promise;
+ try {
+ auto res = getDeps(current);
+ promise.set_value(res);
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ return processEdges(promise);
+ });
+}
+
nlohmann::json Realisation::toJSON() const {
+ auto jsonDependentRealisations = nlohmann::json::object();
+ for (auto & [depId, depOutPath] : dependentRealisations)
+ jsonDependentRealisations.emplace(depId.to_string(), depOutPath.to_string());
return nlohmann::json{
{"id", id.to_string()},
{"outPath", outPath.to_string()},
{"signatures", signatures},
+ {"dependentRealisations", jsonDependentRealisations},
};
}
@@ -51,10 +93,16 @@ Realisation Realisation::fromJSON(
if (auto signaturesIterator = json.find("signatures"); signaturesIterator != json.end())
signatures.insert(signaturesIterator->begin(), signaturesIterator->end());
+ std::map <DrvOutput, StorePath> dependentRealisations;
+ if (auto jsonDependencies = json.find("dependentRealisations"); jsonDependencies != json.end())
+ for (auto & [jsonDepId, jsonDepOutPath] : jsonDependencies->get<std::map<std::string, std::string>>())
+ dependentRealisations.insert({DrvOutput::parse(jsonDepId), StorePath(jsonDepOutPath)});
+
return Realisation{
.id = DrvOutput::parse(getField("id")),
.outPath = StorePath(getField("outPath")),
.signatures = signatures,
+ .dependentRealisations = dependentRealisations,
};
}
@@ -92,6 +140,24 @@ StorePath RealisedPath::path() const {
return std::visit([](auto && arg) { return arg.getPath(); }, raw);
}
+bool Realisation::isCompatibleWith(const Realisation & other) const
+{
+ assert (id == other.id);
+ if (outPath == other.outPath) {
+ if (dependentRealisations.empty() != other.dependentRealisations.empty()) {
+ warn(
+ "Encountered a realisation for '%s' with an empty set of "
+ "dependencies. This is likely an artifact from an older Nix. "
+ "I’ll try to fix the realisation if I can",
+ id.to_string());
+ return true;
+ } else if (dependentRealisations == other.dependentRealisations) {
+ return true;
+ }
+ }
+ return false;
+}
+
void RealisedPath::closure(
Store& store,
const RealisedPath::Set& startPaths,
diff --git a/src/libstore/realisation.hh b/src/libstore/realisation.hh
index f5049c9e9..9070a6ee2 100644
--- a/src/libstore/realisation.hh
+++ b/src/libstore/realisation.hh
@@ -28,6 +28,14 @@ struct Realisation {
StringSet signatures;
+ /**
+ * The realisations that are required for the current one to be valid.
+ *
+ * When importing this realisation, the store will first check that all its
+ * dependencies exist and map to the correct output paths.
+ */
+ std::map<DrvOutput, StorePath> dependentRealisations;
+
nlohmann::json toJSON() const;
static Realisation fromJSON(const nlohmann::json& json, const std::string& whence);
@@ -36,6 +44,11 @@ struct Realisation {
bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const;
size_t checkSignatures(const PublicKeys & publicKeys) const;
+ static std::set<Realisation> closure(Store &, const std::set<Realisation> &);
+ static void closure(Store &, const std::set<Realisation> &, std::set<Realisation> & res);
+
+ bool isCompatibleWith(const Realisation & other) const;
+
StorePath getPath() const { return outPath; }
GENERATE_CMP(Realisation, me->id, me->outPath);
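
The new `dependentRealisations` field is carried through the JSON (de)serializers above as an object keyed by `DrvOutput::to_string()` with store-path values. A minimal round-trip sketch, assuming `r` is a valid `Realisation`:

    auto j = r.toJSON();                    // now includes "dependentRealisations"
    auto r2 = Realisation::fromJSON(j, "example");
    assert(r2.dependentRealisations == r.dependentRealisations);
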
diff --git a/src/libstore/references.cc b/src/libstore/references.cc
index 39c4970c6..91b3fc142 100644
--- a/src/libstore/references.cc
+++ b/src/libstore/references.cc
@@ -5,27 +5,29 @@
#include <map>
#include <cstdlib>
+#include <mutex>
namespace nix {
-static unsigned int refLength = 32; /* characters */
+static size_t refLength = 32; /* characters */
-static void search(const unsigned char * s, size_t len,
- StringSet & hashes, StringSet & seen)
+static void search(
+ std::string_view s,
+ StringSet & hashes,
+ StringSet & seen)
{
- static bool initialised = false;
+ static std::once_flag initialised;
static bool isBase32[256];
- if (!initialised) {
+ std::call_once(initialised, [](){
for (unsigned int i = 0; i < 256; ++i) isBase32[i] = false;
for (unsigned int i = 0; i < base32Chars.size(); ++i)
isBase32[(unsigned char) base32Chars[i]] = true;
- initialised = true;
- }
+ });
- for (size_t i = 0; i + refLength <= len; ) {
+ for (size_t i = 0; i + refLength <= s.size(); ) {
int j;
bool match = true;
for (j = refLength - 1; j >= 0; --j)
@@ -35,7 +37,7 @@ static void search(const unsigned char * s, size_t len,
break;
}
if (!match) continue;
- string ref((const char *) s + i, refLength);
+ std::string ref(s.substr(i, refLength));
if (hashes.erase(ref)) {
debug(format("found reference to '%1%' at offset '%2%'")
% ref % i);
@@ -46,69 +48,60 @@ static void search(const unsigned char * s, size_t len,
}
-struct RefScanSink : Sink
+void RefScanSink::operator () (std::string_view data)
{
- StringSet hashes;
- StringSet seen;
-
- string tail;
-
- RefScanSink() { }
-
- void operator () (std::string_view data) override
- {
- /* It's possible that a reference spans the previous and current
- fragment, so search in the concatenation of the tail of the
- previous fragment and the start of the current fragment. */
- string s = tail + std::string(data, 0, refLength);
- search((const unsigned char *) s.data(), s.size(), hashes, seen);
-
- search((const unsigned char *) data.data(), data.size(), hashes, seen);
-
- size_t tailLen = data.size() <= refLength ? data.size() : refLength;
- tail = std::string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen));
- tail.append({data.data() + data.size() - tailLen, tailLen});
- }
-};
+ /* It's possible that a reference spans the previous and current
+ fragment, so search in the concatenation of the tail of the
+ previous fragment and the start of the current fragment. */
+ auto s = tail;
+ auto tailLen = std::min(data.size(), refLength);
+ s.append(data.data(), tailLen);
+ search(s, hashes, seen);
+
+ search(data, hashes, seen);
+
+ auto rest = refLength - tailLen;
+ if (rest < tail.size())
+ tail = tail.substr(tail.size() - rest);
+ tail.append(data.data() + data.size() - tailLen, tailLen);
+}
-std::pair<PathSet, HashResult> scanForReferences(const string & path,
- const PathSet & refs)
+std::pair<StorePathSet, HashResult> scanForReferences(
+ const string & path,
+ const StorePathSet & refs)
{
HashSink hashSink { htSHA256 };
auto found = scanForReferences(hashSink, path, refs);
auto hash = hashSink.finish();
- return std::pair<PathSet, HashResult>(found, hash);
+ return std::pair<StorePathSet, HashResult>(found, hash);
}
-PathSet scanForReferences(Sink & toTee,
- const string & path, const PathSet & refs)
+StorePathSet scanForReferences(
+ Sink & toTee,
+ const Path & path,
+ const StorePathSet & refs)
{
- RefScanSink refsSink;
- TeeSink sink { refsSink, toTee };
- std::map<string, Path> backMap;
+ StringSet hashes;
+ std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
- auto baseName = std::string(baseNameOf(i));
- string::size_type pos = baseName.find('-');
- if (pos == string::npos)
- throw Error("bad reference '%1%'", i);
- string s = string(baseName, 0, pos);
- assert(s.size() == refLength);
- assert(backMap.find(s) == backMap.end());
- // parseHash(htSHA256, s);
- refsSink.hashes.insert(s);
- backMap[s] = i;
+ std::string hashPart(i.hashPart());
+ auto inserted = backMap.emplace(hashPart, i).second;
+ assert(inserted);
+ hashes.insert(hashPart);
}
/* Look for the hashes in the NAR dump of the path. */
+ RefScanSink refsSink(std::move(hashes));
+ TeeSink sink { refsSink, toTee };
dumpPath(path, sink);
/* Map the hashes found back to their store paths. */
- PathSet found;
- for (auto & i : refsSink.seen) {
- std::map<string, Path>::iterator j;
- if ((j = backMap.find(i)) == backMap.end()) abort();
+ StorePathSet found;
+ for (auto & i : refsSink.getResult()) {
+ auto j = backMap.find(i);
+ assert(j != backMap.end());
found.insert(j->second);
}
diff --git a/src/libstore/references.hh b/src/libstore/references.hh
index 4f12e6b21..a6119c861 100644
--- a/src/libstore/references.hh
+++ b/src/libstore/references.hh
@@ -1,13 +1,31 @@
#pragma once
-#include "types.hh"
#include "hash.hh"
+#include "path.hh"
namespace nix {
-std::pair<PathSet, HashResult> scanForReferences(const Path & path, const PathSet & refs);
+std::pair<StorePathSet, HashResult> scanForReferences(const Path & path, const StorePathSet & refs);
-PathSet scanForReferences(Sink & toTee, const Path & path, const PathSet & refs);
+StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathSet & refs);
+
+class RefScanSink : public Sink
+{
+ StringSet hashes;
+ StringSet seen;
+
+ std::string tail;
+
+public:
+
+ RefScanSink(StringSet && hashes) : hashes(hashes)
+ { }
+
+ StringSet & getResult()
+ { return seen; }
+
+ void operator () (std::string_view data) override;
+};
struct RewritingSink : Sink
{
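
`RefScanSink` is now part of the public header: it is constructed with the set of (32-character base-32) hash parts to look for, fed fragments like any other `Sink`, and queried for the hashes actually seen. A hedged usage sketch (`somePath` is an assumed valid `StorePath`):

    StringSet hashes;
    hashes.insert(std::string(somePath.hashPart()));

    RefScanSink refsSink(std::move(hashes));
    refsSink("first fragment...");
    refsSink("...second fragment; matches may span the boundary");

    for (auto & hashPart : refsSink.getResult())
        printInfo("found reference to '%s'", hashPart);
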
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 761b4a087..a627e9cf1 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -162,8 +162,19 @@ void RemoteStore::initConnection(Connection & conn)
try {
conn.to << WORKER_MAGIC_1;
conn.to.flush();
- unsigned int magic = readInt(conn.from);
- if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
+ StringSink saved;
+ try {
+ TeeSource tee(conn.from, saved);
+ unsigned int magic = readInt(tee);
+ if (magic != WORKER_MAGIC_2)
+ throw Error("protocol mismatch");
+ } catch (SerialisationError & e) {
+ /* In case the other side is waiting for our input, close
+ it. */
+ conn.closeWrite();
+ auto msg = conn.from.drain();
+ throw Error("protocol mismatch, got '%s'", chomp(*saved.s + msg));
+ }
conn.from >> conn.daemonVersion;
if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
@@ -222,6 +233,7 @@ void RemoteStore::setOptions(Connection & conn)
overrides.erase(settings.buildCores.name);
overrides.erase(settings.useSubstitutes.name);
overrides.erase(loggerSettings.showTrace.name);
+ overrides.erase(settings.experimentalFeatures.name);
conn.to << overrides.size();
for (auto & i : overrides)
conn.to << i.first << i.second.value;
@@ -278,6 +290,10 @@ ConnectionHandle RemoteStore::getConnection()
return ConnectionHandle(connections->get());
}
+void RemoteStore::setOptions()
+{
+ setOptions(*(getConnection().handle));
+}
bool RemoteStore::isValidPathUncached(const StorePath & path)
{
@@ -386,23 +402,6 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
}
-ref<const ValidPathInfo> RemoteStore::readValidPathInfo(ConnectionHandle & conn, const StorePath & path)
-{
- auto deriver = readString(conn->from);
- auto narHash = Hash::parseAny(readString(conn->from), htSHA256);
- auto info = make_ref<ValidPathInfo>(path, narHash);
- if (deriver != "") info->deriver = parseStorePath(deriver);
- info->references = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
- conn->from >> info->registrationTime >> info->narSize;
- if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
- conn->from >> info->ultimate;
- info->sigs = readStrings<StringSet>(conn->from);
- info->ca = parseContentAddressOpt(readString(conn->from));
- }
- return info;
-}
-
-
void RemoteStore::queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
{
@@ -423,7 +422,8 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path,
bool valid; conn->from >> valid;
if (!valid) throw InvalidPath("path '%s' is not valid", printStorePath(path));
}
- info = readValidPathInfo(conn, path);
+ info = std::make_shared<ValidPathInfo>(
+ ValidPathInfo::read(conn->from, *this, GET_PROTOCOL_MINOR(conn->daemonVersion), StorePath{path}));
}
callback(std::move(info));
} catch (...) { callback.rethrow(); }
@@ -525,20 +525,20 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
});
}
- auto path = parseStorePath(readString(conn->from));
- return readValidPathInfo(conn, path);
+ return make_ref<ValidPathInfo>(
+ ValidPathInfo::read(conn->from, *this, GET_PROTOCOL_MINOR(conn->daemonVersion)));
}
else {
if (repair) throw Error("repairing is not supported when building through the Nix daemon protocol < 1.25");
std::visit(overloaded {
- [&](TextHashMethod thm) -> void {
+ [&](const TextHashMethod & thm) -> void {
std::string s = dump.drain();
conn->to << wopAddTextToStore << name << s;
worker_proto::write(*this, conn->to, references);
conn.processStderr();
},
- [&](FixedOutputHashMethod fohm) -> void {
+ [&](const FixedOutputHashMethod & fohm) -> void {
conn->to
<< wopAddToStore
<< name
@@ -582,9 +582,8 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
StorePath RemoteStore::addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method, HashType hashType, RepairFlag repair)
+ FileIngestionMethod method, HashType hashType, RepairFlag repair, const StorePathSet & references)
{
- StorePathSet references;
return addCAToStore(dump, name, FixedOutputHashMethod{ .fileIngestionMethod = method, .hashType = hashType }, references, repair)->path;
}
@@ -642,6 +641,25 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
}
+void RemoteStore::addMultipleToStore(
+ Source & source,
+ RepairFlag repair,
+ CheckSigsFlag checkSigs)
+{
+ if (GET_PROTOCOL_MINOR(getConnection()->daemonVersion) >= 32) {
+ auto conn(getConnection());
+ conn->to
+ << wopAddMultipleToStore
+ << repair
+ << !checkSigs;
+ conn.withFramedSink([&](Sink & sink) {
+ source.drainInto(sink);
+ });
+ } else
+ Store::addMultipleToStore(source, repair, checkSigs);
+}
+
+
StorePath RemoteStore::addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair)
{
@@ -653,36 +671,57 @@ void RemoteStore::registerDrvOutput(const Realisation & info)
{
auto conn(getConnection());
conn->to << wopRegisterDrvOutput;
- conn->to << info.id.to_string();
- conn->to << std::string(info.outPath.to_string());
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
+ conn->to << info.id.to_string();
+ conn->to << std::string(info.outPath.to_string());
+ } else {
+ worker_proto::write(*this, conn->to, info);
+ }
conn.processStderr();
}
-std::optional<const Realisation> RemoteStore::queryRealisation(const DrvOutput & id)
+void RemoteStore::queryRealisationUncached(const DrvOutput & id,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
auto conn(getConnection());
conn->to << wopQueryRealisation;
conn->to << id.to_string();
conn.processStderr();
- auto outPaths = worker_proto::read(*this, conn->from, Phantom<std::set<StorePath>>{});
- if (outPaths.empty())
- return std::nullopt;
- return {Realisation{.id = id, .outPath = *outPaths.begin()}};
+
+ auto real = [&]() -> std::shared_ptr<const Realisation> {
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
+ auto outPaths = worker_proto::read(
+ *this, conn->from, Phantom<std::set<StorePath>> {});
+ if (outPaths.empty())
+ return nullptr;
+ return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
+ } else {
+ auto realisations = worker_proto::read(
+ *this, conn->from, Phantom<std::set<Realisation>> {});
+ if (realisations.empty())
+ return nullptr;
+ return std::make_shared<const Realisation>(*realisations.begin());
+ }
+ }();
+
+ try {
+ callback(std::shared_ptr<const Realisation>(real));
+ } catch (...) { return callback.rethrow(); }
}
static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, const std::vector<DerivedPath> & reqs)
{
- if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 29) {
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 30) {
worker_proto::write(store, conn->to, reqs);
} else {
Strings ss;
for (auto & p : reqs) {
auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
std::visit(overloaded {
- [&](StorePathWithOutputs s) {
+ [&](const StorePathWithOutputs & s) {
ss.push_back(s.to_string(store));
},
- [&](StorePath drvPath) {
+ [&](const StorePath & drvPath) {
throw Error("trying to request '%s', but daemon protocol %d.%d is too old (< 1.29) to request a derivation file",
store.printStorePath(drvPath),
GET_PROTOCOL_MAJOR(conn->daemonVersion),
@@ -694,8 +733,18 @@ static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, cons
}
}
-void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode)
+void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore)
{
+ if (evalStore && evalStore.get() != this) {
+ /* The remote doesn't have a way to access evalStore, so copy
+ the .drvs. */
+ RealisedPath::Set drvPaths2;
+ for (auto & i : drvPaths)
+ if (auto p = std::get_if<DerivedPath::Built>(&i))
+ drvPaths2.insert(p->drvPath);
+ copyClosure(*evalStore, *this, drvPaths2);
+ }
+
auto conn(getConnection());
conn->to << wopBuildPaths;
assert(GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13);
@@ -761,15 +810,6 @@ void RemoteStore::addIndirectRoot(const Path & path)
}
-void RemoteStore::syncWithGC()
-{
- auto conn(getConnection());
- conn->to << wopSyncWithGC;
- conn.processStderr();
- readInt(conn->from);
-}
-
-
Roots RemoteStore::findRoots(bool censor)
{
auto conn(getConnection());
@@ -990,14 +1030,14 @@ std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source *
return nullptr;
}
-void ConnectionHandle::withFramedSink(std::function<void(Sink &sink)> fun)
+void ConnectionHandle::withFramedSink(std::function<void(Sink & sink)> fun)
{
(*this)->to.flush();
std::exception_ptr ex;
- /* Handle log messages / exceptions from the remote on a
- separate thread. */
+ /* Handle log messages / exceptions from the remote on a separate
+ thread. */
std::thread stderrThread([&]()
{
try {
@@ -1030,7 +1070,6 @@ void ConnectionHandle::withFramedSink(std::function<void(Sink &sink)> fun)
stderrThread.join();
if (ex)
std::rethrow_exception(ex);
-
}
}
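
Most of the changes in this file follow one pattern: feature-gate the new wire formats on the daemon's minor protocol version negotiated in `initConnection()`, and fall back to the old behaviour (or the generic `Store::` implementation) otherwise. Sketched:

    auto conn(getConnection());
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 32) {
        // new format, e.g. wopAddMultipleToStore with a framed sink
    } else {
        // fall back, e.g. Store::addMultipleToStore() copying path by path
    }
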
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 6cf76a46d..0fd67f371 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -73,19 +73,25 @@ public:
/* Add a content-addressable store path. Does not support references. `dump` will be drained. */
StorePath addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override;
+ FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair, const StorePathSet & references = StorePathSet()) override;
void addToStore(const ValidPathInfo & info, Source & nar,
RepairFlag repair, CheckSigsFlag checkSigs) override;
+ void addMultipleToStore(
+ Source & source,
+ RepairFlag repair,
+ CheckSigsFlag checkSigs) override;
+
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair) override;
void registerDrvOutput(const Realisation & info) override;
- std::optional<const Realisation> queryRealisation(const DrvOutput &) override;
+ void queryRealisationUncached(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
- void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode) override;
+ void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;
BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override;
@@ -96,8 +102,6 @@ public:
void addIndirectRoot(const Path & path) override;
- void syncWithGC() override;
-
Roots findRoots(bool censor) override;
void collectGarbage(const GCOptions & options, GCResults & results) override;
@@ -120,7 +124,6 @@ public:
struct Connection
{
- AutoCloseFD fd;
FdSink to;
FdSource from;
unsigned int daemonVersion;
@@ -128,6 +131,8 @@ public:
virtual ~Connection();
+ virtual void closeWrite() = 0;
+
std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0, bool flush = true);
};
@@ -143,6 +148,8 @@ protected:
virtual void setOptions(Connection & conn);
+ void setOptions() override;
+
ConnectionHandle getConnection();
friend struct ConnectionHandle;
@@ -151,8 +158,6 @@ protected:
virtual void narFromPath(const StorePath & path, Sink & sink) override;
- ref<const ValidPathInfo> readValidPathInfo(ConnectionHandle & conn, const StorePath & path);
-
private:
std::atomic_bool failed{false};
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 6bfbee044..7accad7f4 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -209,7 +209,7 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
S3Helper s3Helper;
S3BinaryCacheStoreImpl(
- const std::string & scheme,
+ const std::string & uriScheme,
const std::string & bucketName,
const Params & params)
: StoreConfig(params)
@@ -232,8 +232,8 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
void init() override
{
if (auto cacheInfo = diskCache->cacheExists(getUri())) {
- wantMassQuery.setDefault(cacheInfo->wantMassQuery ? "true" : "false");
- priority.setDefault(fmt("%d", cacheInfo->priority));
+ wantMassQuery.setDefault(cacheInfo->wantMassQuery);
+ priority.setDefault(cacheInfo->priority);
} else {
BinaryCacheStore::init();
diskCache->createCache(getUri(), storeDir, wantMassQuery, priority);
diff --git a/src/libstore/sandbox-defaults.sb b/src/libstore/sandbox-defaults.sb
index 351037822..56b35c3fe 100644
--- a/src/libstore/sandbox-defaults.sb
+++ b/src/libstore/sandbox-defaults.sb
@@ -32,7 +32,9 @@
(literal "/tmp") (subpath TMPDIR))
; Some packages like to read the system version.
-(allow file-read* (literal "/System/Library/CoreServices/SystemVersion.plist"))
+(allow file-read*
+ (literal "/System/Library/CoreServices/SystemVersion.plist")
+ (literal "/System/Library/CoreServices/SystemVersionCompat.plist"))
; Without this line clang cannot write to /dev/null, breaking some configure tests.
(allow file-read-metadata (literal "/dev"))
@@ -95,3 +97,8 @@
; This is used by /bin/sh on macOS 10.15 and later.
(allow file*
(literal "/private/var/select/sh"))
+
+; Allow Rosetta 2 to run x86_64 binaries on aarch64-darwin.
+(allow file-read*
+ (subpath "/Library/Apple/usr/libexec/oah")
+ (subpath "/System/Library/Apple/usr/libexec/oah"))
diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh
index 02d0810cc..3f76baa82 100644
--- a/src/libstore/serve-protocol.hh
+++ b/src/libstore/serve-protocol.hh
@@ -5,7 +5,7 @@ namespace nix {
#define SERVE_MAGIC_1 0x390c9deb
#define SERVE_MAGIC_2 0x5452eecb
-#define SERVE_PROTOCOL_VERSION (2 << 8 | 6)
+#define SERVE_PROTOCOL_VERSION (2 << 8 | 7)
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
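
For reference, the version macros pack major and minor into one word, so bumping `2 << 8 | 6` to `2 << 8 | 7` takes the serve protocol from 2.6 to 2.7:

    static_assert(SERVE_PROTOCOL_VERSION == 0x0207);
    static_assert(GET_PROTOCOL_MAJOR(SERVE_PROTOCOL_VERSION) == 0x0200); // major 2
    static_assert(GET_PROTOCOL_MINOR(SERVE_PROTOCOL_VERSION) == 7);      // minor 7
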
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 447b4179b..1d6baf02d 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -1,4 +1,5 @@
#include "sqlite.hh"
+#include "globals.hh"
#include "util.hh"
#include <sqlite3.h>
@@ -27,8 +28,12 @@ namespace nix {
SQLite::SQLite(const Path & path, bool create)
{
+ // useSQLiteWAL also indicates what virtual file system we need. Using
+ // `unix-dotfile` is needed on NFS file systems and on the Windows
+ // Subsystem for Linux (WSL), where useSQLiteWAL should be false by default.
+ const char *vfs = settings.useSQLiteWAL ? 0 : "unix-dotfile";
if (sqlite3_open_v2(path.c_str(), &db,
- SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
+ SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), vfs) != SQLITE_OK)
throw Error("cannot open SQLite database '%s'", path);
if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
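
Passing a VFS name as the fourth argument of `sqlite3_open_v2()` selects SQLite's locking strategy; `unix-dotfile` replaces POSIX locks with lock files, which is what makes the database usable on NFS and WSL. A standalone sketch (`useWal` stands in for `settings.useSQLiteWAL`):

    #include <sqlite3.h>

    sqlite3 * db = nullptr;
    const char * vfs = useWal ? nullptr : "unix-dotfile";
    if (sqlite3_open_v2("/tmp/example.db", &db,
            SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, vfs) != SQLITE_OK) {
        // handle the error
    }
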
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
index f2caf2aeb..bb03daef4 100644
--- a/src/libstore/ssh-store.cc
+++ b/src/libstore/ssh-store.cc
@@ -57,6 +57,11 @@ private:
struct Connection : RemoteStore::Connection
{
std::unique_ptr<SSHMaster::Connection> sshConn;
+
+ void closeWrite() override
+ {
+ sshConn->in.close();
+ }
};
ref<RemoteStore::Connection> openConnection() override;
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 93fcb068f..c88dfe179 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -9,6 +9,7 @@
#include "url.hh"
#include "archive.hh"
#include "callback.hh"
+#include "remote-store.hh"
#include <regex>
@@ -198,10 +199,10 @@ StorePath Store::makeFixedOutputPathFromCA(std::string_view name, ContentAddress
{
// New template
return std::visit(overloaded {
- [&](TextHash th) {
+ [&](const TextHash & th) {
return makeTextPath(name, th.hash, references);
},
- [&](FixedOutputHash fsh) {
+ [&](const FixedOutputHash & fsh) {
return makeFixedOutputPath(fsh.method, fsh.hash, name, references, hasSelfReference);
}
}, ca);
@@ -236,7 +237,7 @@ StorePath Store::computeStorePathForText(const string & name, const string & s,
StorePath Store::addToStore(const string & name, const Path & _srcPath,
- FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
+ FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair, const StorePathSet & references)
{
Path srcPath(absPath(_srcPath));
auto source = sinkToSource([&](Sink & sink) {
@@ -245,7 +246,21 @@ StorePath Store::addToStore(const string & name, const Path & _srcPath,
else
readFile(srcPath, sink);
});
- return addToStoreFromDump(*source, name, method, hashAlgo, repair);
+ return addToStoreFromDump(*source, name, method, hashAlgo, repair, references);
+}
+
+
+void Store::addMultipleToStore(
+ Source & source,
+ RepairFlag repair,
+ CheckSigsFlag checkSigs)
+{
+ auto expected = readNum<uint64_t>(source);
+ for (uint64_t i = 0; i < expected; ++i) {
+ auto info = ValidPathInfo::read(source, *this, 16);
+ info.ultimate = false;
+ addToStore(info, source, repair, checkSigs);
+ }
}
@@ -337,6 +352,13 @@ ValidPathInfo Store::addToStoreSlow(std::string_view name, const Path & srcPath,
return info;
}
+StringSet StoreConfig::getDefaultSystemFeatures()
+{
+ auto res = settings.systemFeatures.get();
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations))
+ res.insert("ca-derivations");
+ return res;
+}
Store::Store(const Params & params)
: StoreConfig(params)
@@ -392,11 +414,9 @@ StorePathSet Store::queryDerivationOutputs(const StorePath & path)
bool Store::isValidPath(const StorePath & storePath)
{
- std::string hashPart(storePath.hashPart());
-
{
auto state_(state.lock());
- auto res = state_->pathInfoCache.get(hashPart);
+ auto res = state_->pathInfoCache.get(std::string(storePath.to_string()));
if (res && res->isKnownNow()) {
stats.narInfoReadAverted++;
return res->didExist();
@@ -404,11 +424,11 @@ bool Store::isValidPath(const StorePath & storePath)
}
if (diskCache) {
- auto res = diskCache->lookupNarInfo(getUri(), hashPart);
+ auto res = diskCache->lookupNarInfo(getUri(), std::string(storePath.hashPart()));
if (res.first != NarInfoDiskCache::oUnknown) {
stats.narInfoReadAverted++;
auto state_(state.lock());
- state_->pathInfoCache.upsert(hashPart,
+ state_->pathInfoCache.upsert(std::string(storePath.to_string()),
res.first == NarInfoDiskCache::oInvalid ? PathInfoCacheValue{} : PathInfoCacheValue { .value = res.second });
return res.first == NarInfoDiskCache::oValid;
}
@@ -418,7 +438,7 @@ bool Store::isValidPath(const StorePath & storePath)
if (diskCache && !valid)
// FIXME: handle valid = true case.
- diskCache->upsertNarInfo(getUri(), hashPart, 0);
+ diskCache->upsertNarInfo(getUri(), std::string(storePath.hashPart()), 0);
return valid;
}
@@ -465,13 +485,11 @@ static bool goodStorePath(const StorePath & expected, const StorePath & actual)
void Store::queryPathInfo(const StorePath & storePath,
Callback<ref<const ValidPathInfo>> callback) noexcept
{
- std::string hashPart;
+ auto hashPart = std::string(storePath.hashPart());
try {
- hashPart = storePath.hashPart();
-
{
- auto res = state.lock()->pathInfoCache.get(hashPart);
+ auto res = state.lock()->pathInfoCache.get(std::string(storePath.to_string()));
if (res && res->isKnownNow()) {
stats.narInfoReadAverted++;
if (!res->didExist())
@@ -486,7 +504,7 @@ void Store::queryPathInfo(const StorePath & storePath,
stats.narInfoReadAverted++;
{
auto state_(state.lock());
- state_->pathInfoCache.upsert(hashPart,
+ state_->pathInfoCache.upsert(std::string(storePath.to_string()),
res.first == NarInfoDiskCache::oInvalid ? PathInfoCacheValue{} : PathInfoCacheValue{ .value = res.second });
if (res.first == NarInfoDiskCache::oInvalid ||
!goodStorePath(storePath, res.second->path))
@@ -501,7 +519,7 @@ void Store::queryPathInfo(const StorePath & storePath,
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
queryPathInfoUncached(storePath,
- {[this, storePathS{printStorePath(storePath)}, hashPart, callbackPtr](std::future<std::shared_ptr<const ValidPathInfo>> fut) {
+ {[this, storePath, hashPart, callbackPtr](std::future<std::shared_ptr<const ValidPathInfo>> fut) {
try {
auto info = fut.get();
@@ -511,14 +529,12 @@ void Store::queryPathInfo(const StorePath & storePath,
{
auto state_(state.lock());
- state_->pathInfoCache.upsert(hashPart, PathInfoCacheValue { .value = info });
+ state_->pathInfoCache.upsert(std::string(storePath.to_string()), PathInfoCacheValue { .value = info });
}
- auto storePath = parseStorePath(storePathS);
-
if (!info || !goodStorePath(storePath, info->path)) {
stats.narInfoMissing++;
- throw InvalidPath("path '%s' is not valid", storePathS);
+ throw InvalidPath("path '%s' is not valid", printStorePath(storePath));
}
(*callbackPtr)(ref<const ValidPathInfo>(info));
@@ -526,6 +542,74 @@ void Store::queryPathInfo(const StorePath & storePath,
}});
}
+void Store::queryRealisation(const DrvOutput & id,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept
+{
+
+ try {
+ if (diskCache) {
+ auto [cacheOutcome, maybeCachedRealisation]
+ = diskCache->lookupRealisation(getUri(), id);
+ switch (cacheOutcome) {
+ case NarInfoDiskCache::oValid:
+ debug("Returning a cached realisation for %s", id.to_string());
+ callback(maybeCachedRealisation);
+ return;
+ case NarInfoDiskCache::oInvalid:
+ debug(
+ "Returning a cached missing realisation for %s",
+ id.to_string());
+ callback(nullptr);
+ return;
+ case NarInfoDiskCache::oUnknown:
+ break;
+ }
+ }
+ } catch (...) {
+ return callback.rethrow();
+ }
+
+ auto callbackPtr
+ = std::make_shared<decltype(callback)>(std::move(callback));
+
+ queryRealisationUncached(
+ id,
+ { [this, id, callbackPtr](
+ std::future<std::shared_ptr<const Realisation>> fut) {
+ try {
+ auto info = fut.get();
+
+ if (diskCache) {
+ if (info)
+ diskCache->upsertRealisation(getUri(), *info);
+ else
+ diskCache->upsertAbsentRealisation(getUri(), id);
+ }
+
+ (*callbackPtr)(std::shared_ptr<const Realisation>(info));
+
+ } catch (...) {
+ callbackPtr->rethrow();
+ }
+ } });
+}
+
+std::shared_ptr<const Realisation> Store::queryRealisation(const DrvOutput & id)
+{
+ using RealPtr = std::shared_ptr<const Realisation>;
+ std::promise<RealPtr> promise;
+
+ queryRealisation(id,
+ {[&](std::future<RealPtr> result) {
+ try {
+ promise.set_value(result.get());
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ }});
+
+ return promise.get_future().get();
+}
void Store::substitutePaths(const StorePathSet & paths)
{
@@ -627,6 +711,42 @@ string Store::makeValidityRegistration(const StorePathSet & paths,
}
+StorePathSet Store::exportReferences(const StorePathSet & storePaths, const StorePathSet & inputPaths)
+{
+ StorePathSet paths;
+
+ for (auto & storePath : storePaths) {
+ if (!inputPaths.count(storePath))
+ throw BuildError("cannot export references of path '%s' because it is not in the input closure of the derivation", printStorePath(storePath));
+
+ computeFSClosure({storePath}, paths);
+ }
+
+ /* If there are derivations in the graph, then include their
+ outputs as well. This is useful if you want to do things
+ like passing all build-time dependencies of some path to a
+ derivation that builds a NixOS DVD image. */
+ auto paths2 = paths;
+
+ for (auto & j : paths2) {
+ if (j.isDerivation()) {
+ Derivation drv = derivationFromPath(j);
+ for (auto & k : drv.outputsAndOptPaths(*this)) {
+ if (!k.second.second)
+ /* FIXME: I am confused why we are calling
+ `computeFSClosure` on the output path, rather than
+ the derivation itself. That doesn't seem right to me, so I
+ won't try to implement this for CA derivations. */
+ throw UnimplementedError("exportReferences on CA derivations is not yet implemented");
+ computeFSClosure(*k.second.second, paths);
+ }
+ }
+ }
+
+ return paths;
+}
+
+
void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase,
@@ -727,30 +847,43 @@ const Store::Stats & Store::getStats()
}
-void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
- const StorePath & storePath, RepairFlag repair, CheckSigsFlag checkSigs)
+static std::string makeCopyPathMessage(
+ std::string_view srcUri,
+ std::string_view dstUri,
+ std::string_view storePath)
{
- auto srcUri = srcStore->getUri();
- auto dstUri = dstStore->getUri();
+ return srcUri == "local" || srcUri == "daemon"
+ ? fmt("copying path '%s' to '%s'", storePath, dstUri)
+ : dstUri == "local" || dstUri == "daemon"
+ ? fmt("copying path '%s' from '%s'", storePath, srcUri)
+ : fmt("copying path '%s' from '%s' to '%s'", storePath, srcUri, dstUri);
+}
+
+void copyStorePath(
+ Store & srcStore,
+ Store & dstStore,
+ const StorePath & storePath,
+ RepairFlag repair,
+ CheckSigsFlag checkSigs)
+{
+ auto srcUri = srcStore.getUri();
+ auto dstUri = dstStore.getUri();
+ auto storePathS = srcStore.printStorePath(storePath);
Activity act(*logger, lvlInfo, actCopyPath,
- srcUri == "local" || srcUri == "daemon"
- ? fmt("copying path '%s' to '%s'", srcStore->printStorePath(storePath), dstUri)
- : dstUri == "local" || dstUri == "daemon"
- ? fmt("copying path '%s' from '%s'", srcStore->printStorePath(storePath), srcUri)
- : fmt("copying path '%s' from '%s' to '%s'", srcStore->printStorePath(storePath), srcUri, dstUri),
- {srcStore->printStorePath(storePath), srcUri, dstUri});
+ makeCopyPathMessage(srcUri, dstUri, storePathS),
+ {storePathS, srcUri, dstUri});
PushActivity pact(act.id);
- auto info = srcStore->queryPathInfo(storePath);
+ auto info = srcStore.queryPathInfo(storePath);
uint64_t total = 0;
// recompute store path on the chance dstStore does it differently
if (info->ca && info->references.empty()) {
auto info2 = make_ref<ValidPathInfo>(*info);
- info2->path = dstStore->makeFixedOutputPathFromCA(info->path.name(), *info->ca);
- if (dstStore->storeDir == srcStore->storeDir)
+ info2->path = dstStore.makeFixedOutputPathFromCA(info->path.name(), *info->ca);
+ if (dstStore.storeDir == srcStore.storeDir)
assert(info->path == info2->path);
info = info2;
}
@@ -767,37 +900,61 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
act.progress(total, info->narSize);
});
TeeSink tee { sink, progressSink };
- srcStore->narFromPath(storePath, tee);
+ srcStore.narFromPath(storePath, tee);
}, [&]() {
- throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", srcStore->printStorePath(storePath), srcStore->getUri());
+ throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", srcStore.printStorePath(storePath), srcStore.getUri());
});
- dstStore->addToStore(*info, *source, repair, checkSigs);
+ dstStore.addToStore(*info, *source, repair, checkSigs);
}
-std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore, const RealisedPath::Set & paths,
- RepairFlag repair, CheckSigsFlag checkSigs, SubstituteFlag substitute)
+std::map<StorePath, StorePath> copyPaths(
+ Store & srcStore,
+ Store & dstStore,
+ const RealisedPath::Set & paths,
+ RepairFlag repair,
+ CheckSigsFlag checkSigs,
+ SubstituteFlag substitute)
{
StorePathSet storePaths;
- std::set<Realisation> realisations;
+ std::set<Realisation> toplevelRealisations;
for (auto & path : paths) {
storePaths.insert(path.path());
if (auto realisation = std::get_if<Realisation>(&path.raw)) {
- settings.requireExperimentalFeature("ca-derivations");
- realisations.insert(*realisation);
+ settings.requireExperimentalFeature(Xp::CaDerivations);
+ toplevelRealisations.insert(*realisation);
}
}
auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute);
+
+ ThreadPool pool;
+
try {
- for (auto & realisation : realisations) {
- dstStore->registerDrvOutput(realisation, checkSigs);
- }
+ // Copy the realisation closure
+ processGraph<Realisation>(
+ pool, Realisation::closure(srcStore, toplevelRealisations),
+ [&](const Realisation & current) -> std::set<Realisation> {
+ std::set<Realisation> children;
+ for (const auto & [drvOutput, _] : current.dependentRealisations) {
+ auto currentChild = srcStore.queryRealisation(drvOutput);
+ if (!currentChild)
+ throw Error(
+ "incomplete realisation closure: '%s' is a "
+ "dependency of '%s' but isn't registered",
+ drvOutput.to_string(), current.id.to_string());
+ children.insert(*currentChild);
+ }
+ return children;
+ },
+ [&](const Realisation& current) -> void {
+ dstStore.registerDrvOutput(current, checkSigs);
+ });
} catch (MissingExperimentalFeature & e) {
// Don't fail if the remote doesn't support CA derivations, as it might
// not be within our control to change that, and we might still want
// to at least copy the output paths.
- if (e.missingFeature == "ca-derivations")
+ if (e.missingFeature == Xp::CaDerivations)
ignoreException();
else
throw;
@@ -806,10 +963,15 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
return pathsMap;
}
-std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore, const StorePathSet & storePaths,
- RepairFlag repair, CheckSigsFlag checkSigs, SubstituteFlag substitute)
+std::map<StorePath, StorePath> copyPaths(
+ Store & srcStore,
+ Store & dstStore,
+ const StorePathSet & storePaths,
+ RepairFlag repair,
+ CheckSigsFlag checkSigs,
+ SubstituteFlag substitute)
{
- auto valid = dstStore->queryValidPaths(storePaths, substitute);
+ auto valid = dstStore.queryValidPaths(storePaths, substitute);
StorePathSet missing;
for (auto & path : storePaths)
@@ -819,9 +981,31 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
for (auto & path : storePaths)
pathsMap.insert_or_assign(path, path);
-
Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));
+ auto sorted = srcStore.topoSortPaths(missing);
+ std::reverse(sorted.begin(), sorted.end());
+
+ auto source = sinkToSource([&](Sink & sink) {
+ sink << sorted.size();
+ for (auto & storePath : sorted) {
+ auto srcUri = srcStore.getUri();
+ auto dstUri = dstStore.getUri();
+ auto storePathS = srcStore.printStorePath(storePath);
+ Activity act(*logger, lvlInfo, actCopyPath,
+ makeCopyPathMessage(srcUri, dstUri, storePathS),
+ {storePathS, srcUri, dstUri});
+ PushActivity pact(act.id);
+
+ auto info = srcStore.queryPathInfo(storePath);
+ info->write(sink, srcStore, 16);
+ srcStore.narFromPath(storePath, sink);
+ }
+ });
+
+ dstStore.addMultipleToStore(*source, repair, checkSigs);
+
+ #if 0
std::atomic<size_t> nrDone{0};
std::atomic<size_t> nrFailed{0};
std::atomic<uint64_t> bytesExpected{0};
@@ -837,18 +1021,21 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
StorePathSet(missing.begin(), missing.end()),
[&](const StorePath & storePath) {
- auto info = srcStore->queryPathInfo(storePath);
+ auto info = srcStore.queryPathInfo(storePath);
auto storePathForDst = storePath;
if (info->ca && info->references.empty()) {
- storePathForDst = dstStore->makeFixedOutputPathFromCA(storePath.name(), *info->ca);
- if (dstStore->storeDir == srcStore->storeDir)
+ storePathForDst = dstStore.makeFixedOutputPathFromCA(storePath.name(), *info->ca);
+ if (dstStore.storeDir == srcStore.storeDir)
assert(storePathForDst == storePath);
if (storePathForDst != storePath)
- debug("replaced path '%s' to '%s' for substituter '%s'", srcStore->printStorePath(storePath), dstStore->printStorePath(storePathForDst), dstStore->getUri());
+ debug("replaced path '%s' to '%s' for substituter '%s'",
+ srcStore.printStorePath(storePath),
+ dstStore.printStorePath(storePathForDst),
+ dstStore.getUri());
}
pathsMap.insert_or_assign(storePath, storePathForDst);
- if (dstStore->isValidPath(storePath)) {
+ if (dstStore.isValidPath(storePath)) {
nrDone++;
showProgress();
return StorePathSet();
@@ -863,19 +1050,22 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
[&](const StorePath & storePath) {
checkInterrupt();
- auto info = srcStore->queryPathInfo(storePath);
+ auto info = srcStore.queryPathInfo(storePath);
auto storePathForDst = storePath;
if (info->ca && info->references.empty()) {
- storePathForDst = dstStore->makeFixedOutputPathFromCA(storePath.name(), *info->ca);
- if (dstStore->storeDir == srcStore->storeDir)
+ storePathForDst = dstStore.makeFixedOutputPathFromCA(storePath.name(), *info->ca);
+ if (dstStore.storeDir == srcStore.storeDir)
assert(storePathForDst == storePath);
if (storePathForDst != storePath)
- debug("replaced path '%s' to '%s' for substituter '%s'", srcStore->printStorePath(storePath), dstStore->printStorePath(storePathForDst), dstStore->getUri());
+ debug("replaced path '%s' to '%s' for substituter '%s'",
+ srcStore.printStorePath(storePath),
+ dstStore.printStorePath(storePathForDst),
+ dstStore.getUri());
}
pathsMap.insert_or_assign(storePath, storePathForDst);
- if (!dstStore->isValidPath(storePathForDst)) {
+ if (!dstStore.isValidPath(storePathForDst)) {
MaintainCount<decltype(nrRunning)> mc(nrRunning);
showProgress();
try {
@@ -884,7 +1074,7 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
nrFailed++;
if (!settings.keepGoing)
throw e;
- logger->log(lvlError, fmt("could not copy %s: %s", dstStore->printStorePath(storePath), e.what()));
+ logger->log(lvlError, fmt("could not copy %s: %s", dstStore.printStorePath(storePath), e.what()));
showProgress();
return;
}
@@ -893,9 +1083,27 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
nrDone++;
showProgress();
});
+ #endif
+
return pathsMap;
}
+void copyClosure(
+ Store & srcStore,
+ Store & dstStore,
+ const RealisedPath::Set & paths,
+ RepairFlag repair,
+ CheckSigsFlag checkSigs,
+ SubstituteFlag substitute)
+{
+ if (&srcStore == &dstStore) return;
+
+ RealisedPath::Set closure;
+ RealisedPath::closure(srcStore, paths, closure);
+
+ copyPaths(srcStore, dstStore, closure, repair, checkSigs, substitute);
+}
+
std::optional<ValidPathInfo> decodeValidPathInfo(const Store & store, std::istream & str, std::optional<HashResult> hashGiven)
{
std::string path;
@@ -968,10 +1176,10 @@ bool ValidPathInfo::isContentAddressed(const Store & store) const
if (! ca) return false;
auto caPath = std::visit(overloaded {
- [&](TextHash th) {
+ [&](const TextHash & th) {
return store.makeTextPath(path.name(), th.hash, references);
},
- [&](FixedOutputHash fsh) {
+ [&](const FixedOutputHash & fsh) {
auto refs = references;
bool hasSelfReference = false;
if (refs.count(path)) {
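
With the copy API now taking `Store &` instead of `ref<Store>`, a typical call dereferences the `ref` returned by `openStore()`. A hypothetical sketch (the URL is made up, and `somePathString` is an assumed valid store path string):

    auto src = openStore("ssh-ng://builder.example.org");
    auto dst = openStore();   // the local store

    RealisedPath::Set paths;
    paths.insert(RealisedPath(src->parseStorePath(somePathString)));

    copyClosure(*src, *dst, paths);   // repair/checkSigs/substitute keep their defaults
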
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index f66298991..aa44651d4 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -180,6 +180,8 @@ struct StoreConfig : public Config
StoreConfig() = delete;
+ StringSet getDefaultSystemFeatures();
+
virtual ~StoreConfig() { }
virtual const std::string name() = 0;
@@ -196,7 +198,7 @@ struct StoreConfig : public Config
Setting<bool> wantMassQuery{this, false, "want-mass-query", "whether this substituter can be queried efficiently for path validity"};
- Setting<StringSet> systemFeatures{this, settings.systemFeatures,
+ Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(),
"system-features",
"Optional features that the system this store builds on implements (like \"kvm\")."};
@@ -230,7 +232,6 @@ protected:
struct State
{
- // FIXME: fix key
LRUCache<std::string, PathInfoCacheValue> pathInfoCache;
};
@@ -368,6 +369,14 @@ public:
void queryPathInfo(const StorePath & path,
Callback<ref<const ValidPathInfo>> callback) noexcept;
+ /* Query the information about a realisation. */
+ std::shared_ptr<const Realisation> queryRealisation(const DrvOutput &);
+
+ /* Asynchronous version of queryRealisation(). */
+ void queryRealisation(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept;
+
+
/* Check whether the given valid path info is sufficiently attested, by
either being signed by a trusted public key or content-addressed, in
order to be included in the given store.
@@ -392,11 +401,11 @@ protected:
virtual void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept = 0;
+ virtual void queryRealisationUncached(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept = 0;
public:
- virtual std::optional<const Realisation> queryRealisation(const DrvOutput &) = 0;
-
/* Queries the set of incoming FS references for a store path.
The result is not cleared. */
virtual void queryReferrers(const StorePath & path, StorePathSet & referrers)
@@ -428,9 +437,10 @@ public:
virtual StorePathSet querySubstitutablePaths(const StorePathSet & paths) { return {}; };
/* Query substitute info (i.e. references, derivers and download
- sizes) of a map of paths to their optional ca values. If a path
- does not have substitute info, it's omitted from the resulting
- ‘infos’ map. */
+ sizes) of a map of paths to their optional ca values. The info
+ of the first succeeding substituter for each path will be
+ returned. If a path does not have substitute info, it's omitted
+ from the resulting ‘infos’ map. */
virtual void querySubstitutablePathInfos(const StorePathCAMap & paths,
SubstitutablePathInfos & infos) { return; };
@@ -438,13 +448,19 @@ public:
virtual void addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs) = 0;
+ /* Import multiple paths into the store. */
+ virtual void addMultipleToStore(
+ Source & source,
+ RepairFlag repair = NoRepair,
+ CheckSigsFlag checkSigs = CheckSigs);
+
/* Copy the contents of a path to the store and register the
validity of the resulting path. The resulting path is returned.
The function object `filter' can be used to exclude files (see
libutil/archive.hh). */
virtual StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
- PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair);
+ PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair, const StorePathSet & references = StorePathSet());
/* Copy the contents of a path to the store and register the
validity of the resulting path, using a constant amount of
@@ -460,7 +476,8 @@ public:
`dump` may be drained */
// FIXME: remove?
virtual StorePath addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair)
+ FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair,
+ const StorePathSet & references = StorePathSet())
{ unsupported("addToStoreFromDump"); }
/* Like addToStore, but the contents written to the output path is
@@ -495,7 +512,8 @@ public:
not derivations, substitute them. */
virtual void buildPaths(
const std::vector<DerivedPath> & paths,
- BuildMode buildMode = bmNormal);
+ BuildMode buildMode = bmNormal,
+ std::shared_ptr<Store> evalStore = nullptr);
/* Build a single non-materialized derivation (i.e. not from an
on-disk .drv file).
@@ -541,7 +559,7 @@ public:
/* Add a store path as a temporary root of the garbage collector.
The root disappears as soon as we exit. */
virtual void addTempRoot(const StorePath & path)
- { warn("not creating temp root, store doesn't support GC"); }
+ { debug("not creating temporary root, store doesn't support GC"); }
/* Add an indirect root, which is merely a symlink to `path' from
/nix/var/nix/gcroots/auto/<hash of `path'>. `path' is supposed
@@ -551,26 +569,6 @@ public:
virtual void addIndirectRoot(const Path & path)
{ unsupported("addIndirectRoot"); }
- /* Acquire the global GC lock, then immediately release it. This
- function must be called after registering a new permanent root,
- but before exiting. Otherwise, it is possible that a running
- garbage collector doesn't see the new root and deletes the
- stuff we've just built. By acquiring the lock briefly, we
- ensure that either:
-
- - The collector is already running, and so we block until the
- collector is finished. The collector will know about our
- *temporary* locks, which should include whatever it is we
- want to register as a permanent lock.
-
- - The collector isn't running, or it's just started but hasn't
- acquired the GC lock yet. In that case we get and release
- the lock right away, then exit. The collector scans the
- permanent root and sees ours.
-
- In either case the permanent root is seen by the collector. */
- virtual void syncWithGC() { };
-
/* Find the roots of the garbage collector. Each root is a pair
(link, storepath) where `link' is the path of the symlink
outside of the Nix store that points to `storePath'. If
@@ -695,6 +693,11 @@ public:
const Stats & getStats();
+ /* Computes the full closure of a set of store paths, e.g. for
+ derivations that need this information for `exportReferencesGraph`.
+ */
+ StorePathSet exportReferences(const StorePathSet & storePaths, const StorePathSet & inputPaths);
+
/* Return the build log of the specified store path, if available,
or null otherwise. */
virtual std::shared_ptr<std::string> getBuildLog(const StorePath & path)
@@ -730,6 +733,11 @@ public:
virtual void createUser(const std::string & userName, uid_t userId)
{ }
+ /*
+ * Synchronises the options of the client with those of the daemon
+ * (a no-op when there’s no daemon)
+ */
+ virtual void setOptions() { }
protected:
Stats stats;
@@ -744,8 +752,12 @@ protected:
/* Copy a path from one store to another. */
-void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
- const StorePath & storePath, RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs);
+void copyStorePath(
+ Store & srcStore,
+ Store & dstStore,
+ const StorePath & storePath,
+ RepairFlag repair = NoRepair,
+ CheckSigsFlag checkSigs = CheckSigs);
/* Copy store paths from one store to another. The paths may be copied
@@ -754,17 +766,27 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
of store paths is not automatically closed; use copyClosure() for
that. Returns a map of what each path was copied to the dstStore
as. */
-std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore,
+std::map<StorePath, StorePath> copyPaths(
+ Store & srcStore, Store & dstStore,
const RealisedPath::Set &,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs,
SubstituteFlag substitute = NoSubstitute);
-std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore,
- const StorePathSet& paths,
+
+std::map<StorePath, StorePath> copyPaths(
+ Store & srcStore, Store & dstStore,
+ const StorePathSet & paths,
RepairFlag repair = NoRepair,
CheckSigsFlag checkSigs = CheckSigs,
SubstituteFlag substitute = NoSubstitute);
+/* Copy the closure of `paths` from `srcStore` to `dstStore`. */
+void copyClosure(
+ Store & srcStore, Store & dstStore,
+ const RealisedPath::Set & paths,
+ RepairFlag repair = NoRepair,
+ CheckSigsFlag checkSigs = CheckSigs,
+ SubstituteFlag substitute = NoSubstitute);
/* Remove the temporary roots file for this process. Any temporary
root becomes garbage after this point unless it has been registered
@@ -864,4 +886,9 @@ std::pair<std::string, Store::Params> splitUriAndParams(const std::string & uri)
std::optional<ContentAddress> getDerivationCA(const BasicDerivation & drv);
+std::map<DrvOutput, StorePath> drvOutputReferences(
+ Store & store,
+ const Derivation & drv,
+ const StorePath & outputPath);
+
}
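
The copy helpers above now take `Store &` rather than `ref<Store>`, which lets callers pass any store they hold, however it is owned. A minimal sketch of the new `copyClosure` overload, assuming a local source store; the remote URI and store path are illustrative:

```cpp
// Sketch only; the URI and store path are hypothetical.
ref<Store> srcStore = openStore();                        // local store
ref<Store> dstStore = openStore("ssh-ng://cache.example.org");

RealisedPath::Set paths;
paths.insert(srcStore->parseStorePath(
    "/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-hello-2.10"));

copyClosure(*srcStore, *dstStore, paths);  // note: Store &, not ref<Store>
```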
diff --git a/src/libstore/tests/local.mk b/src/libstore/tests/local.mk
new file mode 100644
index 000000000..f74295d97
--- /dev/null
+++ b/src/libstore/tests/local.mk
@@ -0,0 +1,15 @@
+check: libstore-tests_RUN
+
+programs += libstore-tests
+
+libstore-tests_DIR := $(d)
+
+libstore-tests_INSTALL_DIR :=
+
+libstore-tests_SOURCES := $(wildcard $(d)/*.cc)
+
+libstore-tests_CXXFLAGS += -I src/libstore -I src/libutil
+
+libstore-tests_LIBS = libstore libutil
+
+libstore-tests_LDFLAGS := $(GTEST_LIBS)
diff --git a/src/libstore/tests/machines.cc b/src/libstore/tests/machines.cc
new file mode 100644
index 000000000..f51052b14
--- /dev/null
+++ b/src/libstore/tests/machines.cc
@@ -0,0 +1,169 @@
+#include "machines.hh"
+#include "globals.hh"
+
+#include <gmock/gmock-matchers.h>
+
+using testing::Contains;
+using testing::ElementsAre;
+using testing::EndsWith;
+using testing::Eq;
+using testing::Field;
+using testing::SizeIs;
+
+using nix::absPath;
+using nix::FormatError;
+using nix::getMachines;
+using nix::Machine;
+using nix::Machines;
+using nix::pathExists;
+using nix::Settings;
+using nix::settings;
+
+class Environment : public ::testing::Environment {
+ public:
+ void SetUp() override { settings.thisSystem = "TEST_ARCH-TEST_OS"; }
+};
+
+testing::Environment* const foo_env =
+ testing::AddGlobalTestEnvironment(new Environment);
+
+TEST(machines, getMachinesWithEmptyBuilders) {
+ settings.builders = "";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(0));
+}
+
+TEST(machines, getMachinesUriOnly) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(1));
+ EXPECT_THAT(actual[0], Field(&Machine::storeUri, Eq("ssh://nix@scratchy.labs.cs.uu.nl")));
+ EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("TEST_ARCH-TEST_OS")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshKey, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(1)));
+ EXPECT_THAT(actual[0], Field(&Machine::speedFactor, Eq(1)));
+ EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::sshPublicHostKey, SizeIs(0)));
+}
+
+TEST(machines, getMachinesDefaults) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - - - - - -";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(1));
+ EXPECT_THAT(actual[0], Field(&Machine::storeUri, Eq("ssh://nix@scratchy.labs.cs.uu.nl")));
+ EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("TEST_ARCH-TEST_OS")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshKey, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(1)));
+ EXPECT_THAT(actual[0], Field(&Machine::speedFactor, Eq(1)));
+ EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::sshPublicHostKey, SizeIs(0)));
+}
+
+TEST(machines, getMachinesWithNewLineSeparator) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl\nnix@itchy.labs.cs.uu.nl";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(2));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl"))));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@itchy.labs.cs.uu.nl"))));
+}
+
+TEST(machines, getMachinesWithSemicolonSeparator) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl ; nix@itchy.labs.cs.uu.nl";
+ Machines actual = getMachines();
+ EXPECT_THAT(actual, SizeIs(2));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl"))));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@itchy.labs.cs.uu.nl"))));
+}
+
+TEST(machines, getMachinesWithCorrectCompleteSingleBuilder) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl i686-linux "
+ "/home/nix/.ssh/id_scratchy_auto 8 3 kvm "
+ "benchmark SSH+HOST+PUBLIC+KEY+BASE64+ENCODED==";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(1));
+ EXPECT_THAT(actual[0], Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl")));
+ EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("i686-linux")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshKey, Eq("/home/nix/.ssh/id_scratchy_auto")));
+ EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(8)));
+ EXPECT_THAT(actual[0], Field(&Machine::speedFactor, Eq(3)));
+ EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, ElementsAre("kvm")));
+ EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, ElementsAre("benchmark")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshPublicHostKey, Eq("SSH+HOST+PUBLIC+KEY+BASE64+ENCODED==")));
+}
+
+TEST(machines,
+ getMachinesWithCorrectCompleteSingleBuilderWithTabColumnDelimiter) {
+ settings.builders =
+ "nix@scratchy.labs.cs.uu.nl\ti686-linux\t/home/nix/.ssh/"
+ "id_scratchy_auto\t8\t3\tkvm\tbenchmark\tSSH+HOST+PUBLIC+"
+ "KEY+BASE64+ENCODED==";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(1));
+ EXPECT_THAT(actual[0], Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl")));
+ EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("i686-linux")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshKey, Eq("/home/nix/.ssh/id_scratchy_auto")));
+ EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(8)));
+ EXPECT_THAT(actual[0], Field(&Machine::speedFactor, Eq(3)));
+ EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, ElementsAre("kvm")));
+ EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, ElementsAre("benchmark")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshPublicHostKey, Eq("SSH+HOST+PUBLIC+KEY+BASE64+ENCODED==")));
+}
+
+TEST(machines, getMachinesWithMultiOptions) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl Arch1,Arch2 - - - "
+ "SupportedFeature1,SupportedFeature2 "
+ "MandatoryFeature1,MandatoryFeature2";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(1));
+ EXPECT_THAT(actual[0], Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl")));
+ EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("Arch1", "Arch2")));
+ EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, ElementsAre("SupportedFeature1", "SupportedFeature2")));
+ EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, ElementsAre("MandatoryFeature1", "MandatoryFeature2")));
+}
+
+TEST(machines, getMachinesWithIncorrectFormat) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - eight";
+ EXPECT_THROW(getMachines(), FormatError);
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - -1";
+ EXPECT_THROW(getMachines(), FormatError);
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - 8 three";
+ EXPECT_THROW(getMachines(), FormatError);
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - 8 -3";
+ EXPECT_THROW(getMachines(), FormatError);
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - 8 3 - - BAD_BASE64";
+ EXPECT_THROW(getMachines(), FormatError);
+}
+
+TEST(machines, getMachinesWithCorrectFileReference) {
+ auto path = absPath("src/libstore/tests/test-data/machines.valid");
+ ASSERT_TRUE(pathExists(path));
+
+ settings.builders = std::string("@") + path;
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(3));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl"))));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@itchy.labs.cs.uu.nl"))));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@poochie.labs.cs.uu.nl"))));
+}
+
+TEST(machines, getMachinesWithCorrectFileReferenceToEmptyFile) {
+ auto path = "/dev/null";
+ ASSERT_TRUE(pathExists(path));
+
+ settings.builders = std::string("@") + path;
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(0));
+}
+
+TEST(machines, getMachinesWithIncorrectFileReference) {
+ settings.builders = std::string("@") + absPath("/not/a/file");
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(0));
+}
+
+TEST(machines, getMachinesWithCorrectFileReferenceToIncorrectFile) {
+ settings.builders = std::string("@") + absPath("src/libstore/tests/test-data/machines.bad_format");
+ EXPECT_THROW(getMachines(), FormatError);
+}
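
These cases double as documentation of the `builders` format: up to eight whitespace-separated fields per machine — URI, system types, SSH key, max jobs, speed factor, supported features, mandatory features, base64 SSH host key — with `-` selecting a field's default, and `;`, newlines or an `@file` reference separating entries. A hedged sketch of driving the parser directly; the URI is illustrative:

```cpp
#include "machines.hh"
#include "globals.hh"
#include <iostream>

// Sketch: parse one builder entry and inspect the result.
int main()
{
    nix::settings.builders = "ssh://nix@builder.example x86_64-linux - 4 2 kvm";
    for (auto & m : nix::getMachines())
        std::cerr << m.storeUri << " maxJobs=" << m.maxJobs
                  << " speedFactor=" << m.speedFactor << "\n";
}
```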
diff --git a/src/libstore/tests/references.cc b/src/libstore/tests/references.cc
new file mode 100644
index 000000000..d91d1cedd
--- /dev/null
+++ b/src/libstore/tests/references.cc
@@ -0,0 +1,45 @@
+#include "references.hh"
+
+#include <gtest/gtest.h>
+
+namespace nix {
+
+TEST(references, scan)
+{
+ std::string hash1 = "dc04vv14dak1c1r48qa0m23vr9jy8sm0";
+ std::string hash2 = "zc842j0rz61mjsp3h3wp5ly71ak6qgdn";
+
+ {
+ RefScanSink scanner(StringSet{hash1});
+ auto s = "foobar";
+ scanner(s);
+ ASSERT_EQ(scanner.getResult(), StringSet{});
+ }
+
+ {
+ RefScanSink scanner(StringSet{hash1});
+ auto s = "foobar" + hash1 + "xyzzy";
+ scanner(s);
+ ASSERT_EQ(scanner.getResult(), StringSet{hash1});
+ }
+
+ {
+ RefScanSink scanner(StringSet{hash1, hash2});
+ auto s = "foobar" + hash1 + "xyzzy" + hash2;
+ scanner(((std::string_view) s).substr(0, 10));
+ scanner(((std::string_view) s).substr(10, 5));
+ scanner(((std::string_view) s).substr(15, 5));
+ scanner(((std::string_view) s).substr(20));
+ ASSERT_EQ(scanner.getResult(), StringSet({hash1, hash2}));
+ }
+
+ {
+ RefScanSink scanner(StringSet{hash1, hash2});
+ auto s = "foobar" + hash1 + "xyzzy" + hash2;
+ for (auto & i : s)
+ scanner(std::string(1, i));
+ ASSERT_EQ(scanner.getResult(), StringSet({hash1, hash2}));
+ }
+}
+
+}
diff --git a/src/libstore/tests/test-data/machines.bad_format b/src/libstore/tests/test-data/machines.bad_format
new file mode 100644
index 000000000..7255a1216
--- /dev/null
+++ b/src/libstore/tests/test-data/machines.bad_format
@@ -0,0 +1 @@
+nix@scratchy.labs.cs.uu.nl - - eight
diff --git a/src/libstore/tests/test-data/machines.valid b/src/libstore/tests/test-data/machines.valid
new file mode 100644
index 000000000..1a6c8017c
--- /dev/null
+++ b/src/libstore/tests/test-data/machines.valid
@@ -0,0 +1,3 @@
+nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm
+nix@itchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 2
+nix@poochie.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 1 2 kvm benchmark c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFDQVFDWWV5R1laNTNzd1VjMUZNSHBWL1BCcXlKaFR5S1JoRkpWWVRpRHlQN2h5c1JGa0w4VDlLOGdhL2Y2L3c3QjN2SjNHSFRIUFkybENiUEdZbGNLd2h6M2ZRbFNNOEViNi95b3ZLajdvM1FsMEx5Y0dzdGJvRmcwWkZKNldncUxsR0ltS0NobUlxOGZ3TW5ZTWUxbnRQeTBUZFZjSU1tOTV3YzF3SjBMd2c3cEVMRmtHazdkeTVvYnM4a3lGZ0pORDVRSmFwQWJjeWp4Z1QzdzdMcktNZ2xzeWhhd01JNVpkMGZsQTVudW5OZ3pid3plYVhLaUsyTW0vdGJXYTU1YTd4QmNYdHpIZGlPSWdSajJlRWxaMGh5bk10YjBmcklsdmxIcEtLaVFaZ3pQdCtIVXQ2bXpRMkRVME52MGYyYnNSU0krOGpJU2pQcmdlcVVHRldMUzVIUTg2N2xSMlpiaWtyclhZNTdqbVFEZk5DRHY1VFBHZU9UekFEd2pjMDc2aFZ3VFJCd3VTZFhtaWNxTS95b3lrWitkV1dnZ25MenE5QU1tdlNZcDhmZkZDcS9CSDBZNUFXWTFHay9vS3hMVTNaOWt3ZDd2UWNFQWFCQ2dxdnVZRGdTaHE1RlhndDM3OVZESWtEL05ZSTg2QXVvajVDRmVNTzlRM2pJSlRadlh6c1VldjVoSnA2djcxSVh5ODVtbTY5R20zcXdicVE1SjVQZDU1Um56SitpaW5BNjZxTEFSc0Y4amNsSnd5ekFXclBoYU9DRVY2bjVMeVhVazhzMW9EVVR4V1pWN25rVkFTbHJ0MllGcjN5dzdjRTRXQVhsemhHcDhocmdLMVVkMUlyeDVnZWRaSnBWcy9uNWVybmJFMUxmb2x5UHUvRUFIWlh6VGd4dHVDUFNobXc9PQo=
diff --git a/src/libstore/uds-remote-store.cc b/src/libstore/uds-remote-store.cc
index cac4fa036..5c38323cd 100644
--- a/src/libstore/uds-remote-store.cc
+++ b/src/libstore/uds-remote-store.cc
@@ -45,30 +45,20 @@ std::string UDSRemoteStore::getUri()
}
+void UDSRemoteStore::Connection::closeWrite()
+{
+ shutdown(fd.get(), SHUT_WR);
+}
+
+
ref<RemoteStore::Connection> UDSRemoteStore::openConnection()
{
auto conn = make_ref<Connection>();
/* Connect to a daemon that does the privileged work for us. */
- conn->fd = socket(PF_UNIX, SOCK_STREAM
- #ifdef SOCK_CLOEXEC
- | SOCK_CLOEXEC
- #endif
- , 0);
- if (!conn->fd)
- throw SysError("cannot create Unix domain socket");
- closeOnExec(conn->fd.get());
-
- string socketPath = path ? *path : settings.nixDaemonSocketFile;
-
- struct sockaddr_un addr;
- addr.sun_family = AF_UNIX;
- if (socketPath.size() + 1 >= sizeof(addr.sun_path))
- throw Error("socket path '%1%' is too long", socketPath);
- strcpy(addr.sun_path, socketPath.c_str());
-
- if (::connect(conn->fd.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1)
- throw SysError("cannot connect to daemon at '%1%'", socketPath);
+ conn->fd = createUnixDomainSocket();
+
+ nix::connect(conn->fd.get(), path ? *path : settings.nixDaemonSocketFile);
conn->from.fd = conn->fd.get();
conn->to.fd = conn->fd.get();
diff --git a/src/libstore/uds-remote-store.hh b/src/libstore/uds-remote-store.hh
index ddc7716cd..f8dfcca70 100644
--- a/src/libstore/uds-remote-store.hh
+++ b/src/libstore/uds-remote-store.hh
@@ -40,6 +40,12 @@ public:
private:
+ struct Connection : RemoteStore::Connection
+ {
+ AutoCloseFD fd;
+ void closeWrite() override;
+ };
+
ref<RemoteStore::Connection> openConnection() override;
std::optional<std::string> path;
};
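
With the socket boilerplate factored into `createUnixDomainSocket()` and `nix::connect()` (see the `util.cc` hunks later in this diff), opening a client connection shrinks to two calls, and `closeWrite()` is a plain half-close. A sketch of the same sequence outside the class:

```cpp
// Sketch: connect to the daemon socket with the new helpers.
AutoCloseFD fd = createUnixDomainSocket();
nix::connect(fd.get(), settings.nixDaemonSocketFile);
// ... send the request on fd ...
shutdown(fd.get(), SHUT_WR);  // what closeWrite() does: the peer sees EOF,
                              // but its replies remain readable
```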
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index 001ed25e3..93cf546d2 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -9,7 +9,7 @@ namespace nix {
#define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f
-#define PROTOCOL_VERSION (1 << 8 | 29)
+#define PROTOCOL_VERSION (1 << 8 | 32)
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
@@ -55,6 +55,7 @@ typedef enum {
wopQueryDerivationOutputMap = 41,
wopRegisterDrvOutput = 42,
wopQueryRealisation = 43,
+ wopAddMultipleToStore = 44,
} WorkerOp;
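
The version word packs major and minor into one integer, so the bump from 29 to 32 only touches the low byte. Worked out against the macros above:

```cpp
// (1 << 8 | 32) == 0x0120
static_assert(GET_PROTOCOL_MAJOR(PROTOCOL_VERSION) == 0x0100); // major 1
static_assert(GET_PROTOCOL_MINOR(PROTOCOL_VERSION) == 32);     // minor 32
```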
diff --git a/src/libutil/ansicolor.hh b/src/libutil/ansicolor.hh
index ae741f867..38305e71c 100644
--- a/src/libutil/ansicolor.hh
+++ b/src/libutil/ansicolor.hh
@@ -9,7 +9,7 @@ namespace nix {
#define ANSI_ITALIC "\e[3m"
#define ANSI_RED "\e[31;1m"
#define ANSI_GREEN "\e[32;1m"
-#define ANSI_YELLOW "\e[33;1m"
+#define ANSI_WARNING "\e[35;1m"
#define ANSI_BLUE "\e[34;1m"
#define ANSI_MAGENTA "\e[35;1m"
#define ANSI_CYAN "\e[36;1m"
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index ed0eb2fb5..d78ec2b93 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -42,7 +42,7 @@ static string caseHackSuffix = "~nix~case~hack~";
PathFilter defaultPathFilter = [](const Path &) { return true; };
-static void dumpContents(const Path & path, size_t size,
+static void dumpContents(const Path & path, off_t size,
Sink & sink)
{
sink << "contents" << size;
@@ -76,7 +76,7 @@ static void dump(const Path & path, Sink & sink, PathFilter & filter)
sink << "type" << "regular";
if (st.st_mode & S_IXUSR)
sink << "executable" << "";
- dumpContents(path, (size_t) st.st_size, sink);
+ dumpContents(path, st.st_size, sink);
}
else if (S_ISDIR(st.st_mode)) {
diff --git a/src/libutil/args.cc b/src/libutil/args.cc
index afed0670f..9df279faf 100644
--- a/src/libutil/args.cc
+++ b/src/libutil/args.cc
@@ -331,6 +331,7 @@ MultiCommand::MultiCommand(const Commands & commands_)
if (i == commands.end())
throw UsageError("'%s' is not a recognised command", s);
command = {s, i->second()};
+ command->second->parent = this;
}}
});
diff --git a/src/libutil/args.hh b/src/libutil/args.hh
index c08ba8abd..7521b3065 100644
--- a/src/libutil/args.hh
+++ b/src/libutil/args.hh
@@ -12,6 +12,8 @@ namespace nix {
enum HashType : char;
+class MultiCommand;
+
class Args
{
public:
@@ -89,6 +91,14 @@ protected:
})
, arity(1)
{ }
+
+ template<class I>
+ Handler(std::optional<I> * dest)
+ : fun([=](std::vector<std::string> ss) {
+ *dest = string2IntWithUnitPrefix<I>(ss[0]);
+ })
+ , arity(1)
+ { }
};
/* Options. */
@@ -169,11 +179,13 @@ public:
virtual nlohmann::json toJSON();
friend class MultiCommand;
+
+ MultiCommand * parent = nullptr;
};
/* A command is an argument parser that can be executed by calling its
run() method. */
-struct Command : virtual Args
+struct Command : virtual public Args
{
friend class MultiCommand;
@@ -193,7 +205,7 @@ typedef std::map<std::string, std::function<ref<Command>()>> Commands;
/* An argument parser that supports multiple subcommands,
i.e. ‘<command> <subcommand>’. */
-class MultiCommand : virtual Args
+class MultiCommand : virtual public Args
{
public:
Commands commands;
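
The new `Handler` overload targets a `std::optional`, so an omitted flag stays `std::nullopt` while a supplied value goes through `string2IntWithUnitPrefix` (accepting suffixes such as `K`, `M`, `G`). A hedged sketch inside an `Args` subclass; the flag name is an assumption:

```cpp
// Sketch: "--buffer-size 64K" yields 65536; omitting the flag
// leaves bufferSize == std::nullopt.
std::optional<uint64_t> bufferSize;

addFlag({
    .longName = "buffer-size",
    .description = "I/O buffer size in bytes.",
    .labels = {"bytes"},
    .handler = {&bufferSize},  // the new optional<I> overload
});
```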
diff --git a/src/libutil/closure.hh b/src/libutil/closure.hh
new file mode 100644
index 000000000..779b9b2d5
--- /dev/null
+++ b/src/libutil/closure.hh
@@ -0,0 +1,69 @@
+#include <set>
+#include <future>
+#include "sync.hh"
+
+using std::set;
+
+namespace nix {
+
+template<typename T>
+using GetEdgesAsync = std::function<void(const T &, std::function<void(std::promise<set<T>> &)>)>;
+
+template<typename T>
+void computeClosure(
+ const set<T> startElts,
+ set<T> & res,
+ GetEdgesAsync<T> getEdgesAsync
+)
+{
+ struct State
+ {
+ size_t pending;
+ set<T> & res;
+ std::exception_ptr exc;
+ };
+
+ Sync<State> state_(State{0, res, 0});
+
+ std::function<void(const T &)> enqueue;
+
+ std::condition_variable done;
+
+ enqueue = [&](const T & current) -> void {
+ {
+ auto state(state_.lock());
+ if (state->exc) return;
+ if (!state->res.insert(current).second) return;
+ state->pending++;
+ }
+
+ getEdgesAsync(current, [&](std::promise<set<T>> & prom) {
+ try {
+ auto children = prom.get_future().get();
+ for (auto & child : children)
+ enqueue(child);
+ {
+ auto state(state_.lock());
+ assert(state->pending);
+ if (!--state->pending) done.notify_one();
+ }
+ } catch (...) {
+ auto state(state_.lock());
+ if (!state->exc) state->exc = std::current_exception();
+ assert(state->pending);
+ if (!--state->pending) done.notify_one();
+ };
+ });
+ };
+
+ for (auto & startElt : startElts)
+ enqueue(startElt);
+
+ {
+ auto state(state_.lock());
+ while (state->pending) state.wait(done);
+ if (state->exc) std::rethrow_exception(state->exc);
+ }
+}
+
+}
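
`computeClosure` counts outstanding edge queries under a lock and waits on a condition variable until all of them have been folded into `res`; a failure delivered through a promise is stashed and rethrown once the traversal drains. The unit test later in this diff exercises it; a minimal synchronous sketch, where `edgesOf` is an assumed lookup of a node's out-edges:

```cpp
// Sketch: fulfil the promise inline and hand it straight back.
std::set<std::string> closure;
computeClosure<std::string>(
    {"A"}, closure,
    [&](const std::string & node,
        std::function<void(std::promise<std::set<std::string>> &)> done) {
        std::promise<std::set<std::string>> p;
        p.set_value(edgesOf(node));  // edgesOf: hypothetical helper
        done(p);
    });
```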
diff --git a/src/libutil/comparator.hh b/src/libutil/comparator.hh
index 0315dc506..eecd5b819 100644
--- a/src/libutil/comparator.hh
+++ b/src/libutil/comparator.hh
@@ -25,6 +25,8 @@
}
#define GENERATE_EQUAL(args...) GENERATE_ONE_CMP(==, args)
#define GENERATE_LEQ(args...) GENERATE_ONE_CMP(<, args)
+#define GENERATE_NEQ(args...) GENERATE_ONE_CMP(!=, args)
#define GENERATE_CMP(args...) \
GENERATE_EQUAL(args) \
- GENERATE_LEQ(args)
+ GENERATE_LEQ(args) \
+ GENERATE_NEQ(args)
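
With `GENERATE_NEQ` added, a single `GENERATE_CMP` now emits `==`, `<` and `!=`, each comparing the listed members as a tuple, in order. A sketch:

```cpp
// Sketch: expands to the three operators over (me->x, me->y).
struct Point
{
    int x, y;
    GENERATE_CMP(Point, me->x, me->y)
};
```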
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
index 7e725cae1..f80ca664c 100644
--- a/src/libutil/compression.cc
+++ b/src/libutil/compression.cc
@@ -12,12 +12,12 @@
#include <brotli/decode.h>
#include <brotli/encode.h>
-#include <zlib.h>
-
#include <iostream>
namespace nix {
+static const int COMPRESSION_LEVEL_DEFAULT = -1;
+
// Don't feed brotli too much at once.
struct ChunkedCompressionSink : CompressionSink
{
@@ -67,14 +67,16 @@ struct ArchiveCompressionSink : CompressionSink
Sink & nextSink;
struct archive * archive;
- ArchiveCompressionSink(Sink & nextSink, std::string format, bool parallel) : nextSink(nextSink) {
+ ArchiveCompressionSink(Sink & nextSink, std::string format, bool parallel, int level = COMPRESSION_LEVEL_DEFAULT) : nextSink(nextSink)
+ {
archive = archive_write_new();
if (!archive) throw Error("failed to initialize libarchive");
check(archive_write_add_filter_by_name(archive, format.c_str()), "couldn't initialize compression (%s)");
check(archive_write_set_format_raw(archive));
- if (format == "xz" && parallel) {
+ if (parallel)
check(archive_write_set_filter_option(archive, format.c_str(), "threads", "0"));
- }
+ if (level != COMPRESSION_LEVEL_DEFAULT)
+ check(archive_write_set_filter_option(archive, format.c_str(), "compression-level", std::to_string(level).c_str()));
// disable internal buffering
check(archive_write_set_bytes_per_block(archive, 0));
// disable output padding
@@ -128,7 +130,11 @@ private:
struct NoneSink : CompressionSink
{
Sink & nextSink;
- NoneSink(Sink & nextSink) : nextSink(nextSink) { }
+ NoneSink(Sink & nextSink, int level = COMPRESSION_LEVEL_DEFAULT) : nextSink(nextSink)
+ {
+ if (level != COMPRESSION_LEVEL_DEFAULT)
+ warn("requested compression level '%d' not supported by compression method 'none'", level);
+ }
void finish() override { flush(); }
void write(std::string_view data) override { nextSink(data); }
};
@@ -259,13 +265,13 @@ struct BrotliCompressionSink : ChunkedCompressionSink
}
};
-ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel)
+ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel, int level)
{
std::vector<std::string> la_supports = {
"bzip2", "compress", "grzip", "gzip", "lrzip", "lz4", "lzip", "lzma", "lzop", "xz", "zstd"
};
if (std::find(la_supports.begin(), la_supports.end(), method) != la_supports.end()) {
- return make_ref<ArchiveCompressionSink>(nextSink, method, parallel);
+ return make_ref<ArchiveCompressionSink>(nextSink, method, parallel, level);
}
if (method == "none")
return make_ref<NoneSink>(nextSink);
@@ -275,10 +281,10 @@ ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & next
throw UnknownCompressionMethod("unknown compression method '%s'", method);
}
-ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel)
+ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel, int level)
{
StringSink ssink;
- auto sink = makeCompressionSink(method, ssink, parallel);
+ auto sink = makeCompressionSink(method, ssink, parallel, level);
(*sink)(in);
sink->finish();
return ssink.s;
diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh
index 338a0d9f2..9b1e4a9d4 100644
--- a/src/libutil/compression.hh
+++ b/src/libutil/compression.hh
@@ -19,9 +19,9 @@ ref<std::string> decompress(const std::string & method, const std::string & in);
std::unique_ptr<FinishSink> makeDecompressionSink(const std::string & method, Sink & nextSink);
-ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel = false);
+ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel = false, int level = -1);
-ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel = false);
+ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel = false, int level = -1);
MakeError(UnknownCompressionMethod, Error);
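
Callers can now pick a per-method compression level; `-1` (the default) keeps the method's own default. A sketch against the updated signature:

```cpp
// Sketch: xz at an explicit level vs. the method default.
std::string data = "example payload";
auto small  = compress("xz", data, /*parallel=*/false, /*level=*/9);
auto normal = compress("xz", data);  // level -1 => xz's default
```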
diff --git a/src/libutil/config.cc b/src/libutil/config.cc
index bda07cd55..92ab265d3 100644
--- a/src/libutil/config.cc
+++ b/src/libutil/config.cc
@@ -1,6 +1,7 @@
#include "config.hh"
#include "args.hh"
#include "abstract-setting-to-json.hh"
+#include "experimental-features.hh"
#include <nlohmann/json.hpp>
@@ -152,6 +153,16 @@ nlohmann::json Config::toJSON()
return res;
}
+std::string Config::toKeyValue()
+{
+ auto res = std::string();
+ for (auto & s : _settings)
+ if (!s.second.isAlias) {
+ res += fmt("%s = %s\n", s.first, s.second.setting->to_string());
+ }
+ return res;
+}
+
void Config::convertToArgs(Args & args, const std::string & category)
{
for (auto & s : _settings)
@@ -167,11 +178,6 @@ AbstractSetting::AbstractSetting(
{
}
-void AbstractSetting::setDefault(const std::string & str)
-{
- if (!overridden) set(str);
-}
-
nlohmann::json AbstractSetting::toJSON()
{
return nlohmann::json(toJSONObject());
@@ -308,6 +314,31 @@ template<> std::string BaseSetting<StringSet>::to_string() const
return concatStringsSep(" ", value);
}
+template<> void BaseSetting<std::set<ExperimentalFeature>>::set(const std::string & str, bool append)
+{
+ if (!append) value.clear();
+ for (auto & s : tokenizeString<StringSet>(str)) {
+ auto thisXpFeature = parseExperimentalFeature(s);
+ if (thisXpFeature)
+ value.insert(thisXpFeature.value());
+ else
+ warn("unknown experimental feature '%s'", s);
+ }
+}
+
+template<> bool BaseSetting<std::set<ExperimentalFeature>>::isAppendable()
+{
+ return true;
+}
+
+template<> std::string BaseSetting<std::set<ExperimentalFeature>>::to_string() const
+{
+ StringSet stringifiedXpFeatures;
+ for (auto & feature : value)
+ stringifiedXpFeatures.insert(std::string(showExperimentalFeature(feature)));
+ return concatStringsSep(" ", stringifiedXpFeatures);
+}
+
template<> void BaseSetting<StringMap>::set(const std::string & str, bool append)
{
if (!append) value.clear();
@@ -343,6 +374,7 @@ template class BaseSetting<std::string>;
template class BaseSetting<Strings>;
template class BaseSetting<StringSet>;
template class BaseSetting<StringMap>;
+template class BaseSetting<std::set<ExperimentalFeature>>;
void PathSetting::set(const std::string & str, bool append)
{
@@ -385,6 +417,16 @@ nlohmann::json GlobalConfig::toJSON()
return res;
}
+std::string GlobalConfig::toKeyValue()
+{
+ std::string res;
+ std::map<std::string, Config::SettingInfo> settings;
+ globalConfig.getSettings(settings);
+ for (auto & s : settings)
+ res += fmt("%s = %s\n", s.first, s.second.value);
+ return res;
+}
+
void GlobalConfig::convertToArgs(Args & args, const std::string & category)
{
for (auto & config : *configRegistrations)
diff --git a/src/libutil/config.hh b/src/libutil/config.hh
index bf81b4892..736810bf3 100644
--- a/src/libutil/config.hh
+++ b/src/libutil/config.hh
@@ -100,6 +100,12 @@ public:
virtual nlohmann::json toJSON() = 0;
/**
+ * Outputs all settings in a key-value pair format suitable for use in
+ * `nix.conf`
+ */
+ virtual std::string toKeyValue() = 0;
+
+ /**
* Converts settings to `Args` to be used on the command line interface
* - args: args to write to
* - category: category of the settings
@@ -169,6 +175,8 @@ public:
nlohmann::json toJSON() override;
+ std::string toKeyValue() override;
+
void convertToArgs(Args & args, const std::string & category) override;
};
@@ -186,8 +194,6 @@ public:
bool overridden = false;
- void setDefault(const std::string & str);
-
protected:
AbstractSetting(
@@ -245,6 +251,7 @@ public:
bool operator !=(const T & v2) const { return value != v2; }
void operator =(const T & v) { assign(v); }
virtual void assign(const T & v) { value = v; }
+ void setDefault(const T & v) { if (!overridden) value = v; }
void set(const std::string & str, bool append = false) override;
@@ -330,6 +337,8 @@ struct GlobalConfig : public AbstractConfig
nlohmann::json toJSON() override;
+ std::string toKeyValue() override;
+
void convertToArgs(Args & args, const std::string & category) override;
struct Register
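
`toKeyValue()` emits one `key = value` line per setting — the same syntax `nix.conf` is parsed from — so a configuration can be dumped and fed back in. A sketch:

```cpp
// Sketch: dump the effective configuration in nix.conf syntax.
std::string conf = globalConfig.toKeyValue();
// conf now holds lines such as:
//   cores = 0
//   max-jobs = 16
std::cout << conf;
```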
diff --git a/src/libutil/error.cc b/src/libutil/error.cc
index 0eea3455d..203d79087 100644
--- a/src/libutil/error.cc
+++ b/src/libutil/error.cc
@@ -185,15 +185,15 @@ void printAtPos(const ErrPos & pos, std::ostream & out)
if (pos) {
switch (pos.origin) {
case foFile: {
- out << fmt(ANSI_BLUE "at " ANSI_YELLOW "%s:%s" ANSI_NORMAL ":", pos.file, showErrPos(pos));
+ out << fmt(ANSI_BLUE "at " ANSI_WARNING "%s:%s" ANSI_NORMAL ":", pos.file, showErrPos(pos));
break;
}
case foString: {
- out << fmt(ANSI_BLUE "at " ANSI_YELLOW "«string»:%s" ANSI_NORMAL ":", showErrPos(pos));
+ out << fmt(ANSI_BLUE "at " ANSI_WARNING "«string»:%s" ANSI_NORMAL ":", showErrPos(pos));
break;
}
case foStdin: {
- out << fmt(ANSI_BLUE "at " ANSI_YELLOW "«stdin»:%s" ANSI_NORMAL ":", showErrPos(pos));
+ out << fmt(ANSI_BLUE "at " ANSI_WARNING "«stdin»:%s" ANSI_NORMAL ":", showErrPos(pos));
break;
}
default:
@@ -232,7 +232,7 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
break;
}
case Verbosity::lvlWarn: {
- prefix = ANSI_YELLOW "warning";
+ prefix = ANSI_WARNING "warning";
break;
}
case Verbosity::lvlInfo: {
@@ -252,7 +252,7 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
break;
}
case Verbosity::lvlDebug: {
- prefix = ANSI_YELLOW "debug";
+ prefix = ANSI_WARNING "debug";
break;
}
default:
diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc
new file mode 100644
index 000000000..b49f47e1d
--- /dev/null
+++ b/src/libutil/experimental-features.cc
@@ -0,0 +1,59 @@
+#include "experimental-features.hh"
+#include "util.hh"
+
+#include "nlohmann/json.hpp"
+
+namespace nix {
+
+std::map<ExperimentalFeature, std::string> stringifiedXpFeatures = {
+ { Xp::CaDerivations, "ca-derivations" },
+ { Xp::Flakes, "flakes" },
+ { Xp::NixCommand, "nix-command" },
+ { Xp::RecursiveNix, "recursive-nix" },
+ { Xp::NoUrlLiterals, "no-url-literals" },
+};
+
+const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::string_view & name)
+{
+ using ReverseXpMap = std::map<std::string_view, ExperimentalFeature>;
+
+ static auto reverseXpMap = []()
+ {
+ auto reverseXpMap = std::make_unique<ReverseXpMap>();
+ for (auto & [feature, name] : stringifiedXpFeatures)
+ (*reverseXpMap)[name] = feature;
+ return reverseXpMap;
+ }();
+
+ if (auto feature = get(*reverseXpMap, name))
+ return *feature;
+ else
+ return std::nullopt;
+}
+
+std::string_view showExperimentalFeature(const ExperimentalFeature feature)
+{
+ return stringifiedXpFeatures.at(feature);
+}
+
+std::set<ExperimentalFeature> parseFeatures(const std::set<std::string> & rawFeatures)
+{
+ std::set<ExperimentalFeature> res;
+ for (auto & rawFeature : rawFeatures) {
+ if (auto feature = parseExperimentalFeature(rawFeature))
+ res.insert(*feature);
+ }
+ return res;
+}
+
+MissingExperimentalFeature::MissingExperimentalFeature(ExperimentalFeature feature)
+ : Error("experimental Nix feature '%1%' is disabled; use '--extra-experimental-features %1%' to override", showExperimentalFeature(feature))
+ , missingFeature(feature)
+{}
+
+std::ostream & operator <<(std::ostream & str, const ExperimentalFeature & feature)
+{
+ return str << showExperimentalFeature(feature);
+}
+
+}
diff --git a/src/libutil/experimental-features.hh b/src/libutil/experimental-features.hh
new file mode 100644
index 000000000..291a58e32
--- /dev/null
+++ b/src/libutil/experimental-features.hh
@@ -0,0 +1,56 @@
+#pragma once
+
+#include "comparator.hh"
+#include "error.hh"
+#include "nlohmann/json_fwd.hpp"
+#include "types.hh"
+
+namespace nix {
+
+/**
+ * The list of available experimental features.
+ *
+ * If you update this, don’t forget to also change the map defining their
+ * string representation in the corresponding `.cc` file.
+ **/
+enum struct ExperimentalFeature
+{
+ CaDerivations,
+ Flakes,
+ NixCommand,
+ RecursiveNix,
+ NoUrlLiterals
+};
+
+/**
+ * Shorthand, because writing `ExperimentalFeature::CaDerivations` in full is way too long
+ */
+using Xp = ExperimentalFeature;
+
+const std::optional<ExperimentalFeature> parseExperimentalFeature(
+ const std::string_view & name);
+std::string_view showExperimentalFeature(const ExperimentalFeature);
+
+std::ostream & operator<<(
+ std::ostream & str,
+ const ExperimentalFeature & feature);
+
+/**
+ * Parse a set of strings to the corresponding set of experimental features,
+ * ignoring (but warning about) any unknown feature.
+ */
+std::set<ExperimentalFeature> parseFeatures(const std::set<std::string> &);
+
+class MissingExperimentalFeature : public Error
+{
+public:
+ ExperimentalFeature missingFeature;
+
+ MissingExperimentalFeature(ExperimentalFeature);
+ virtual const char * sname() const override
+ {
+ return "MissingExperimentalFeature";
+ }
+};
+
+}
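
As the header warns, the enum and its string map are kept in sync by hand. A sketch of the round trip, including the warn-and-ignore behaviour of `parseFeatures`; the unknown name is illustrative:

```cpp
#include "experimental-features.hh"
#include <cassert>

// Sketch: parsing and printing feature names.
assert(parseExperimentalFeature("flakes") == Xp::Flakes);
assert(showExperimentalFeature(Xp::CaDerivations) == "ca-derivations");

// Unknown names are dropped with a warning rather than an error:
auto fs = parseFeatures({"flakes", "frobnicate"});  // fs == {Xp::Flakes}
```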
diff --git a/src/libutil/fmt.hh b/src/libutil/fmt.hh
index 85c0e9429..fd335b811 100644
--- a/src/libutil/fmt.hh
+++ b/src/libutil/fmt.hh
@@ -82,7 +82,7 @@ struct yellowtxt
template <class T>
std::ostream & operator<<(std::ostream & out, const yellowtxt<T> & y)
{
- return out << ANSI_YELLOW << y.value << ANSI_NORMAL;
+ return out << ANSI_WARNING << y.value << ANSI_NORMAL;
}
template <class T>
diff --git a/src/libutil/local.mk b/src/libutil/local.mk
index 3a6415ee3..f880c0fc5 100644
--- a/src/libutil/local.mk
+++ b/src/libutil/local.mk
@@ -6,7 +6,7 @@ libutil_DIR := $(d)
libutil_SOURCES := $(wildcard $(d)/*.cc)
-libutil_LDFLAGS = -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) $(LIBARCHIVE_LIBS) $(BOOST_LDFLAGS) -lboost_context
+libutil_LDFLAGS += -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) $(LIBARCHIVE_LIBS) $(BOOST_LDFLAGS) -lboost_context
ifeq ($(HAVE_LIBCPUID), 1)
libutil_LDFLAGS += -lcpuid
diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc
index d2e801175..f8a121ed1 100644
--- a/src/libutil/logging.cc
+++ b/src/libutil/logging.cc
@@ -27,7 +27,7 @@ Logger * logger = makeSimpleLogger(true);
void Logger::warn(const std::string & msg)
{
- log(lvlWarn, ANSI_YELLOW "warning:" ANSI_NORMAL " " + msg);
+ log(lvlWarn, ANSI_WARNING "warning:" ANSI_NORMAL " " + msg);
}
void Logger::writeToStdout(std::string_view s)
@@ -46,7 +46,7 @@ public:
: printBuildLogs(printBuildLogs)
{
systemd = getEnv("IN_SYSTEMD") == "1";
- tty = isatty(STDERR_FILENO);
+ tty = shouldANSI();
}
bool isVerbose() override {
@@ -163,7 +163,7 @@ struct JSONLogger : Logger {
void write(const nlohmann::json & json)
{
- prevLogger.log(lvlError, "@nix " + json.dump());
+ prevLogger.log(lvlError, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace));
}
void log(Verbosity lvl, const FormatOrString & fs) override
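
Build output is not guaranteed to be valid UTF-8, and `dump()` throws on bad bytes by default; passing `error_handler_t::replace` substitutes U+FFFD instead, so one rogue byte no longer kills the JSON log stream. A minimal sketch of the difference:

```cpp
#include <nlohmann/json.hpp>

nlohmann::json j = std::string("bad \xff byte");

// j.dump() would throw nlohmann::json::type_error (error 316) here.
auto s = j.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace);
// The invalid byte comes out as U+FFFD instead.
```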
diff --git a/src/libutil/ref.hh b/src/libutil/ref.hh
index 0be2a7e74..347b81f73 100644
--- a/src/libutil/ref.hh
+++ b/src/libutil/ref.hh
@@ -17,7 +17,7 @@ private:
public:
- ref<T>(const ref<T> & r)
+ ref(const ref<T> & r)
: p(r.p)
{ }
@@ -73,6 +73,16 @@ public:
return ref<T2>((std::shared_ptr<T2>) p);
}
+ bool operator == (const ref<T> & other) const
+ {
+ return p == other.p;
+ }
+
+ bool operator != (const ref<T> & other) const
+ {
+ return p != other.p;
+ }
+
private:
template<typename T2, typename... Args>
@@ -89,4 +99,47 @@ make_ref(Args&&... args)
return ref<T>(p);
}
+
+/* A non-nullable pointer.
+ This is similar to a C++ "&" reference, but reseatable (it can be
+ copied and reassigned).
+ This is similar to ref<T>, but backed by a plain pointer instead of a
+ smart pointer. */
+template<typename T>
+class ptr {
+private:
+ T * p;
+
+public:
+ ptr<T>(const ptr<T> & r)
+ : p(r.p)
+ { }
+
+ explicit ptr<T>(T * p)
+ : p(p)
+ {
+ if (!p)
+ throw std::invalid_argument("null pointer cast to ptr");
+ }
+
+ T* operator ->() const
+ {
+ return &*p;
+ }
+
+ T& operator *() const
+ {
+ return *p;
+ }
+
+ bool operator == (const ptr<T> & other) const
+ {
+ return p == other.p;
+ }
+
+ bool operator != (const ptr<T> & other) const
+ {
+ return p != other.p;
+ }
+};
+
}
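
`ptr<T>` sits between `T &` (non-null, not reseatable) and `T *` (reseatable, nullable): copyable, comparable, and guaranteed non-null from construction onwards. A sketch:

```cpp
// Sketch: never null, checked once at construction.
int x = 42;
ptr<int> p(&x);
assert(*p == 42);
ptr<int> q = p;              // copying is fine
// ptr<int> bad(nullptr);    // would throw std::invalid_argument
```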
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index 374b48d79..16f3476c2 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -244,7 +244,8 @@ std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
if (!cur.empty()) (*coro)(false);
}
- void finish() {
+ void finish() override
+ {
if (!coro) return;
if (!*coro) abort();
(*coro)(true);
diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc
index 24905130d..50e691a3d 100644
--- a/src/libutil/tarfile.cc
+++ b/src/libutil/tarfile.cc
@@ -39,32 +39,30 @@ void TarArchive::check(int err, const std::string & reason)
throw Error(reason, archive_error_string(this->archive));
}
-TarArchive::TarArchive(Source & source, bool raw) : buffer(4096)
+TarArchive::TarArchive(Source & source, bool raw)
+ : source(&source), buffer(4096)
{
- this->archive = archive_read_new();
- this->source = &source;
-
- if (!raw) {
- archive_read_support_filter_all(archive);
+ init();
+ if (!raw)
archive_read_support_format_all(archive);
- } else {
- archive_read_support_filter_all(archive);
+ else
archive_read_support_format_raw(archive);
- archive_read_support_format_empty(archive);
- }
check(archive_read_open(archive, (void *)this, callback_open, callback_read, callback_close), "Failed to open archive (%s)");
}
-
TarArchive::TarArchive(const Path & path)
{
- this->archive = archive_read_new();
-
- archive_read_support_filter_all(archive);
+ init();
archive_read_support_format_all(archive);
check(archive_read_open_filename(archive, path.c_str(), 16384), "failed to open archive: %s");
}
+void TarArchive::init()
+{
+ archive = archive_read_new();
+ archive_read_support_filter_all(archive);
+}
+
void TarArchive::close()
{
check(archive_read_close(this->archive), "Failed to close archive (%s)");
@@ -87,13 +85,16 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
struct archive_entry * entry;
int r = archive_read_next_header(archive.archive, &entry);
if (r == ARCHIVE_EOF) break;
- else if (r == ARCHIVE_WARN)
+ auto name = archive_entry_pathname(entry);
+ if (!name)
+ throw Error("cannot get archive member name: %s", archive_error_string(archive.archive));
+ if (r == ARCHIVE_WARN)
warn(archive_error_string(archive.archive));
else
archive.check(r);
archive_entry_set_pathname(entry,
- (destDir + "/" + archive_entry_pathname(entry)).c_str());
+ (destDir + "/" + name).c_str());
archive.check(archive_read_extract(archive.archive, entry, flags));
}
diff --git a/src/libutil/tarfile.hh b/src/libutil/tarfile.hh
index 4d9141fd4..f107a7e2e 100644
--- a/src/libutil/tarfile.hh
+++ b/src/libutil/tarfile.hh
@@ -17,10 +17,13 @@ struct TarArchive {
// disable copy constructor
TarArchive(const TarArchive &) = delete;
+ void init();
+
void close();
~TarArchive();
};
+
void unpackTarfile(Source & source, const Path & destDir);
void unpackTarfile(const Path & tarFile, const Path & destDir);
diff --git a/src/libutil/tests/closure.cc b/src/libutil/tests/closure.cc
new file mode 100644
index 000000000..7597e7807
--- /dev/null
+++ b/src/libutil/tests/closure.cc
@@ -0,0 +1,70 @@
+#include "closure.hh"
+#include <gtest/gtest.h>
+
+namespace nix {
+
+using namespace std;
+
+map<string, set<string>> testGraph = {
+ { "A", { "B", "C", "G" } },
+ { "B", { "A" } }, // Loops back to A
+ { "C", { "F" } }, // Indirect reference
+ { "D", { "A" } }, // Not reachable, but has backreferences
+ { "E", {} }, // Just not reachable
+ { "F", {} },
+ { "G", { "G" } }, // Self reference
+};
+
+TEST(closure, correctClosure) {
+ set<string> aClosure;
+ set<string> expectedClosure = {"A", "B", "C", "F", "G"};
+ computeClosure<string>(
+ {"A"},
+ aClosure,
+ [&](const string currentNode, function<void(promise<set<string>> &)> processEdges) {
+ promise<set<string>> promisedNodes;
+ promisedNodes.set_value(testGraph[currentNode]);
+ processEdges(promisedNodes);
+ }
+ );
+
+ ASSERT_EQ(aClosure, expectedClosure);
+}
+
+TEST(closure, properlyHandlesDirectExceptions) {
+ struct TestExn {};
+ set<string> aClosure;
+ EXPECT_THROW(
+ computeClosure<string>(
+ {"A"},
+ aClosure,
+ [&](const string currentNode, function<void(promise<set<string>> &)> processEdges) {
+ throw TestExn();
+ }
+ ),
+ TestExn
+ );
+}
+
+TEST(closure, properlyHandlesExceptionsInPromise) {
+ struct TestExn {};
+ set<string> aClosure;
+ EXPECT_THROW(
+ computeClosure<string>(
+ {"A"},
+ aClosure,
+ [&](const string currentNode, function<void(promise<set<string>> &)> processEdges) {
+ promise<set<string>> promise;
+ try {
+ throw TestExn();
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ processEdges(promise);
+ }
+ ),
+ TestExn
+ );
+}
+
+}
diff --git a/src/libutil/tests/logging.cc b/src/libutil/tests/logging.cc
index d990e5499..cef3bd481 100644
--- a/src/libutil/tests/logging.cc
+++ b/src/libutil/tests/logging.cc
@@ -336,7 +336,7 @@ namespace nix {
ASSERT_STREQ(
hintfmt("only one arg %1% %2%", "fulfilled").str().c_str(),
- "only one arg " ANSI_YELLOW "fulfilled" ANSI_NORMAL " ");
+ "only one arg " ANSI_WARNING "fulfilled" ANSI_NORMAL " ");
}
@@ -344,7 +344,7 @@ namespace nix {
ASSERT_STREQ(
hintfmt("what about this %1% %2%", "%3%", "one", "two").str().c_str(),
- "what about this " ANSI_YELLOW "%3%" ANSI_NORMAL " " ANSI_YELLOW "one" ANSI_NORMAL);
+ "what about this " ANSI_WARNING "%3%" ANSI_NORMAL " " ANSI_YELLOW "one" ANSI_NORMAL);
}
diff --git a/src/libutil/tests/tests.cc b/src/libutil/tests/tests.cc
index 58df9c5ac..92972ed14 100644
--- a/src/libutil/tests/tests.cc
+++ b/src/libutil/tests/tests.cc
@@ -4,6 +4,8 @@
#include <limits.h>
#include <gtest/gtest.h>
+#include <numeric>
+
namespace nix {
/* ----------- tests for util.hh ------------------------------------------------*/
@@ -282,6 +284,17 @@ namespace nix {
ASSERT_EQ(decoded, s);
}
+ TEST(base64Encode, encodeAndDecodeNonPrintable) {
+ char s[256];
+ std::iota(std::rbegin(s), std::rend(s), 0);
+
+ auto encoded = base64Encode(s);
+ auto decoded = base64Decode(encoded);
+
+ EXPECT_EQ(decoded.length(), 255);
+ ASSERT_EQ(decoded, s);
+ }
+
/* ----------------------------------------------------------------------------
* base64Decode
* --------------------------------------------------------------------------*/
@@ -294,6 +307,10 @@ namespace nix {
ASSERT_EQ(base64Decode("cXVvZCBlcmF0IGRlbW9uc3RyYW5kdW0="), "quod erat demonstrandum");
}
+ TEST(base64Decode, decodeThrowsOnInvalidChar) {
+ ASSERT_THROW(base64Decode("cXVvZCBlcm_0IGRlbW9uc3RyYW5kdW0="), Error);
+ }
+
/* ----------------------------------------------------------------------------
* toLower
* --------------------------------------------------------------------------*/
diff --git a/src/libutil/url.cc b/src/libutil/url.cc
index c1bab866c..f6232d255 100644
--- a/src/libutil/url.cc
+++ b/src/libutil/url.cc
@@ -32,7 +32,7 @@ ParsedURL parseURL(const std::string & url)
auto isFile = scheme.find("file") != std::string::npos;
if (authority && *authority != "" && isFile)
- throw Error("file:// URL '%s' has unexpected authority '%s'",
+ throw BadURL("file:// URL '%s' has unexpected authority '%s'",
url, *authority);
if (isFile && path.empty())
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 5f597bf06..defb77a10 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -4,16 +4,18 @@
#include "finally.hh"
#include "serialise.hh"
+#include <array>
#include <cctype>
#include <cerrno>
+#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
-#include <climits>
+#include <future>
#include <iostream>
+#include <mutex>
#include <sstream>
#include <thread>
-#include <future>
#include <fcntl.h>
#include <grp.h>
@@ -155,6 +157,9 @@ Path canonPath(const Path & path, bool resolveSymlinks)
s.clear(); /* restart for symlinks pointing to absolute path */
} else {
s = dirOf(s);
+ if (s == "/") { // we don’t want trailing slashes here, which dirOf only produces if s = /
+ s.clear();
+ }
}
}
}
@@ -410,7 +415,7 @@ static void _deletePath(int parentfd, const Path & path, uint64_t & bytesFreed)
}
int fd = openat(parentfd, path.c_str(), O_RDONLY);
- if (!fd)
+ if (fd == -1)
throw SysError("opening directory '%1%'", path);
AutoCloseDir dir(fdopendir(fd));
if (!dir)
@@ -432,12 +437,9 @@ static void _deletePath(const Path & path, uint64_t & bytesFreed)
if (dir == "")
dir = "/";
- AutoCloseFD dirfd(open(dir.c_str(), O_RDONLY));
+ AutoCloseFD dirfd{open(dir.c_str(), O_RDONLY)};
if (!dirfd) {
- // This really shouldn't fail silently, but it's left this way
- // for backwards compatibility.
if (errno == ENOENT) return;
-
throw SysError("opening directory '%1%'", path);
}
@@ -560,7 +562,7 @@ Path getConfigDir()
std::vector<Path> getConfigDirs()
{
Path configHome = getConfigDir();
- string configDirs = getEnv("XDG_CONFIG_DIRS").value_or("");
+ string configDirs = getEnv("XDG_CONFIG_DIRS").value_or("/etc/xdg");
std::vector<Path> result = tokenizeString<std::vector<string>>(configDirs, ":");
result.insert(result.begin(), configHome);
return result;
@@ -901,7 +903,7 @@ int Pid::wait()
return status;
}
if (errno != EINTR)
- throw SysError("cannot get child exit status");
+ throw SysError("cannot get exit status of PID %d", pid);
checkInterrupt();
}
}
@@ -937,9 +939,6 @@ void killUser(uid_t uid)
users to which the current process can send signals. So we
fork a process, switch to uid, and send a mass kill. */
- ProcessOptions options;
- options.allowVfork = false;
-
Pid pid = startProcess([&]() {
if (setuid(uid) == -1)
@@ -962,7 +961,7 @@ void killUser(uid_t uid)
}
_exit(0);
- }, options);
+ });
int status = pid.wait();
if (status != 0)
@@ -1032,17 +1031,10 @@ std::vector<char *> stringsToCharPtrs(const Strings & ss)
return res;
}
-// Output = "standard out" output stream
string runProgram(Path program, bool searchPath, const Strings & args,
const std::optional<std::string> & input)
{
- RunOptions opts(program, args);
- opts.searchPath = searchPath;
- // This allows you to refer to a program with a pathname relative to the
- // PATH variable.
- opts.input = input;
-
- auto res = runProgram(opts);
+ auto res = runProgram(RunOptions {.program = program, .searchPath = searchPath, .args = args, .input = input});
if (!statusOk(res.first))
throw ExecError(res.first, fmt("program '%1%' %2%", program, statusToString(res.first)));
@@ -1051,9 +1043,8 @@ string runProgram(Path program, bool searchPath, const Strings & args,
}
// Output = error code + "standard out" output stream
-std::pair<int, std::string> runProgram(const RunOptions & options_)
+std::pair<int, std::string> runProgram(RunOptions && options)
{
- RunOptions options(options_);
StringSink sink;
options.standardOut = &sink;
@@ -1091,8 +1082,7 @@ void runProgram2(const RunOptions & options)
// vfork implies that the environment of the main process and the fork will
// be shared (technically this is undefined, but in practice that's the
// case), so we can't use it if we alter the environment
- if (options.environment)
- processOptions.allowVfork = false;
+ processOptions.allowVfork = !options.environment;
/* Fork. */
Pid pid = startProcess([&]() {
@@ -1215,7 +1205,7 @@ void closeOnExec(int fd)
//////////////////////////////////////////////////////////////////////
-bool _isInterrupted = false;
+std::atomic<bool> _isInterrupted = false;
static thread_local bool interruptThrown = false;
thread_local std::function<bool()> interruptCheck;
@@ -1369,6 +1359,12 @@ void ignoreException()
}
}
+bool shouldANSI()
+{
+ return isatty(STDERR_FILENO)
+ && getEnv("TERM").value_or("dumb") != "dumb"
+ && !getEnv("NO_COLOR").has_value();
+}
std::string filterANSIEscapes(const std::string & s, bool filterAll, unsigned int width)
{
@@ -1440,8 +1436,7 @@ std::string filterANSIEscapes(const std::string & s, bool filterAll, unsigned in
}
-static char base64Chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
-
+constexpr char base64Chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
string base64Encode(std::string_view s)
{
@@ -1466,15 +1461,15 @@ string base64Encode(std::string_view s)
string base64Decode(std::string_view s)
{
- bool init = false;
- char decode[256];
- if (!init) {
- // FIXME: not thread-safe.
- memset(decode, -1, sizeof(decode));
+ constexpr char npos = -1;
+ constexpr std::array<char, 256> base64DecodeChars = [&]() {
+ std::array<char, 256> result{};
+ for (auto& c : result)
+ c = npos;
for (int i = 0; i < 64; i++)
- decode[(int) base64Chars[i]] = i;
- init = true;
- }
+ result[base64Chars[i]] = i;
+ return result;
+ }();
string res;
unsigned int d = 0, bits = 0;
@@ -1483,8 +1478,8 @@ string base64Decode(std::string_view s)
if (c == '=') break;
if (c == '\n') continue;
- char digit = decode[(unsigned char) c];
- if (digit == -1)
+ char digit = base64DecodeChars[(unsigned char) c];
+ if (digit == npos)
throw Error("invalid character in Base64 string: '%c'", c);
bits += 6;
@@ -1637,9 +1632,39 @@ void setStackSize(size_t stackSize)
#endif
}
-void restoreProcessContext()
+static AutoCloseFD fdSavedMountNamespace;
+
+void saveMountNamespace()
+{
+#if __linux__
+ static std::once_flag done;
+ std::call_once(done, []() {
+ AutoCloseFD fd = open("/proc/self/ns/mnt", O_RDONLY);
+ if (!fd)
+ throw SysError("saving parent mount namespace");
+ fdSavedMountNamespace = std::move(fd);
+ });
+#endif
+}
+
+void restoreMountNamespace()
+{
+#if __linux__
+ try {
+ if (fdSavedMountNamespace && setns(fdSavedMountNamespace.get(), CLONE_NEWNS) == -1)
+ throw SysError("restoring parent mount namespace");
+ } catch (Error & e) {
+ debug(e.msg());
+ }
+#endif
+}
+
+void restoreProcessContext(bool restoreMounts)
{
restoreSignals();
+ if (restoreMounts) {
+ restoreMountNamespace();
+ }
restoreAffinity();
@@ -1677,7 +1702,7 @@ std::unique_ptr<InterruptCallback> createInterruptCallback(std::function<void()>
}
-AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode)
+AutoCloseFD createUnixDomainSocket()
{
AutoCloseFD fdSocket = socket(PF_UNIX, SOCK_STREAM
#ifdef SOCK_CLOEXEC
@@ -1686,19 +1711,16 @@ AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode)
, 0);
if (!fdSocket)
throw SysError("cannot create Unix domain socket");
-
closeOnExec(fdSocket.get());
+ return fdSocket;
+}
- struct sockaddr_un addr;
- addr.sun_family = AF_UNIX;
- if (path.size() + 1 >= sizeof(addr.sun_path))
- throw Error("socket path '%1%' is too long", path);
- strcpy(addr.sun_path, path.c_str());
- unlink(path.c_str());
+AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode)
+{
+ auto fdSocket = nix::createUnixDomainSocket();
- if (bind(fdSocket.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1)
- throw SysError("cannot bind to socket '%1%'", path);
+ bind(fdSocket.get(), path);
if (chmod(path.c_str(), mode) == -1)
throw SysError("changing permissions on '%1%'", path);
@@ -1710,6 +1732,66 @@ AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode)
}
+void bind(int fd, const std::string & path)
+{
+ unlink(path.c_str());
+
+ struct sockaddr_un addr;
+ addr.sun_family = AF_UNIX;
+
+ if (path.size() + 1 >= sizeof(addr.sun_path)) {
+ Pid pid = startProcess([&]() {
+ auto dir = dirOf(path);
+ if (chdir(dir.c_str()) == -1)
+ throw SysError("chdir to '%s' failed", dir);
+ std::string base(baseNameOf(path));
+ if (base.size() + 1 >= sizeof(addr.sun_path))
+ throw Error("socket path '%s' is too long", base);
+ memcpy(addr.sun_path, base.c_str(), base.size() + 1);
+ if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1)
+ throw SysError("cannot bind to socket '%s'", path);
+ _exit(0);
+ });
+ int status = pid.wait();
+ if (status != 0)
+ throw Error("cannot bind to socket '%s'", path);
+ } else {
+ memcpy(addr.sun_path, path.c_str(), path.size() + 1);
+ if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1)
+ throw SysError("cannot bind to socket '%s'", path);
+ }
+}
+
+
+void connect(int fd, const std::string & path)
+{
+ struct sockaddr_un addr;
+ addr.sun_family = AF_UNIX;
+
+ if (path.size() + 1 >= sizeof(addr.sun_path)) {
+ Pid pid = startProcess([&]() {
+ auto dir = dirOf(path);
+ if (chdir(dir.c_str()) == -1)
+ throw SysError("chdir to '%s' failed", dir);
+ std::string base(baseNameOf(path));
+ if (base.size() + 1 >= sizeof(addr.sun_path))
+ throw Error("socket path '%s' is too long", base);
+ memcpy(addr.sun_path, base.c_str(), base.size() + 1);
+ if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1)
+ throw SysError("cannot connect to socket at '%s'", path);
+ _exit(0);
+ });
+ int status = pid.wait();
+ if (status != 0)
+ throw Error("cannot connect to socket at '%s'", path);
+ } else {
+ memcpy(addr.sun_path, path.c_str(), path.size() + 1);
+ if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1)
+ throw SysError("cannot connect to socket at '%s'", path);
+ }
+}
+
+
string showBytes(uint64_t bytes)
{
return fmt("%.2f MiB", bytes / (1024.0 * 1024.0));
@@ -1719,8 +1801,10 @@ string showBytes(uint64_t bytes)
// FIXME: move to libstore/build
void commonChildInit(Pipe & logPipe)
{
+ logger = makeSimpleLogger();
+
const static string pathNullDevice = "/dev/null";
- restoreProcessContext();
+ restoreProcessContext(false);
/* Put the child in a separate session (and thus a separate
process group) so that it has no controlling terminal (meaning
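
The new `bind`/`connect` helpers in this file work around the small `sockaddr_un::sun_path` limit (typically 104–108 bytes): for longer paths they fork a child that `chdir()`s into the socket's directory and binds or connects by basename, which the kernel resolves relative to the child's working directory. From the caller's side nothing changes; a sketch with an illustrative path:

```cpp
// Sketch: path length no longer matters to the caller.
AutoCloseFD fd = createUnixDomainSocket();
nix::connect(fd.get(),
    "/some/very/deeply/nested/runtime/directory/exceeding/sun_path/daemon.sock");
// Short paths take the direct route; long ones go through the
// fork + chdir + basename dance shown above.
```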
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index f84d0fb31..0bdb37a79 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -259,10 +259,10 @@ void killUser(uid_t uid);
pid to the caller. */
struct ProcessOptions
{
- string errorPrefix = "error: ";
+ string errorPrefix = "";
bool dieWithParent = true;
bool runExitHandlers = false;
- bool allowVfork = true;
+ bool allowVfork = false;
};
pid_t startProcess(std::function<void()> fun, const ProcessOptions & options = ProcessOptions());
@@ -276,26 +276,20 @@ string runProgram(Path program, bool searchPath = false,
struct RunOptions
{
+ Path program;
+ bool searchPath = true;
+ Strings args;
std::optional<uid_t> uid;
std::optional<uid_t> gid;
std::optional<Path> chdir;
std::optional<std::map<std::string, std::string>> environment;
- Path program;
- bool searchPath = true;
- Strings args;
std::optional<std::string> input;
Source * standardIn = nullptr;
Sink * standardOut = nullptr;
bool mergeStderrToStdout = false;
- bool _killStderr = false;
-
- RunOptions(const Path & program, const Strings & args)
- : program(program), args(args) { };
-
- RunOptions & killStderr(bool v) { _killStderr = true; return *this; }
};
-std::pair<int, std::string> runProgram(const RunOptions & options);
+std::pair<int, std::string> runProgram(RunOptions && options);
void runProgram2(const RunOptions & options);
@@ -306,7 +300,15 @@ void setStackSize(size_t stackSize);
/* Restore the original inherited Unix process context (such as signal
masks, stack size, CPU affinity). */
-void restoreProcessContext();
+void restoreProcessContext(bool restoreMounts = true);
+
+/* Save the current mount namespace. Ignored if called more than
+ once. */
+void saveMountNamespace();
+
+/* Restore the mount namespace saved by saveMountNamespace(). Ignored
+ if saveMountNamespace() was never called. */
+void restoreMountNamespace();
class ExecError : public Error
@@ -335,7 +337,7 @@ void closeOnExec(int fd);
/* User interruption. */
-extern bool _isInterrupted;
+extern std::atomic<bool> _isInterrupted;
extern thread_local std::function<bool()> interruptCheck;
@@ -482,6 +484,9 @@ constexpr char treeLast[] = "└───";
constexpr char treeLine[] = "│ ";
constexpr char treeNull[] = " ";
+/* Determine whether ANSI escape sequences are appropriate for the
+ present output. */
+bool shouldANSI();
/* Truncate a string to 'width' printable characters. If 'filterAll'
is true, all ANSI escape sequences are filtered out. Otherwise,
@@ -514,6 +519,29 @@ std::optional<typename T::mapped_type> get(const T & map, const typename T::key_
}
+/* Remove and return the first item from a container. */
+template <class T>
+std::optional<typename T::value_type> remove_begin(T & c)
+{
+ auto i = c.begin();
+ if (i == c.end()) return {};
+ auto v = std::move(*i);
+ c.erase(i);
+ return v;
+}
+
+
+/* Remove and return the front item from a queue-like container. */
+template <class T>
+std::optional<typename T::value_type> pop(T & c)
+{
+ if (c.empty()) return {};
+ auto v = std::move(c.front());
+ c.pop();
+ return v;
+}
+
+
template<typename T>
class Callback;
@@ -574,9 +602,18 @@ extern PathFilter defaultPathFilter;
/* Common initialisation performed in child processes. */
void commonChildInit(Pipe & logPipe);
+/* Create a Unix domain socket. */
+AutoCloseFD createUnixDomainSocket();
+
/* Create a Unix domain socket in listen mode. */
AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode);
+/* Bind a Unix domain socket to a path. */
+void bind(int fd, const std::string & path);
+
+/* Connect to a Unix domain socket. */
+void connect(int fd, const std::string & path);
+
// A Rust/Python-like enumerate() iterator adapter.
// Borrowed from http://reedbeta.com/blog/python-like-enumerate-in-cpp17.
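
`RunOptions` lost its constructor and the dead `killStderr` plumbing, so call sites now aggregate-initialise it and name only the fields they need, as the rewritten `runProgram` above shows. A sketch; the program and arguments are illustrative:

```cpp
// Sketch: run a program with designated initialisers.
auto [status, output] = runProgram(RunOptions {
    .program = "git",               // searchPath defaults to true
    .args = {"rev-parse", "HEAD"},
});
if (statusOk(status))
    std::cout << output;
```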
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
index 9acbedda2..75576ef8a 100755
--- a/src/nix-build/nix-build.cc
+++ b/src/nix-build/nix-build.cc
@@ -1,10 +1,15 @@
#include <cstring>
#include <fstream>
#include <iostream>
+#include <filesystem>
#include <regex>
#include <sstream>
#include <vector>
+#include <map>
+#include <nlohmann/json.hpp>
+
+#include "parsed-derivations.hh"
#include "store-api.hh"
#include "local-fs-store.hh"
#include "globals.hh"
@@ -100,7 +105,8 @@ static void main_nix_build(int argc, char * * argv)
// List of environment variables kept for --pure
std::set<string> keepVars{
- "HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL",
+ "HOME", "XDG_RUNTIME_DIR", "USER", "LOGNAME", "DISPLAY",
+ "WAYLAND_DISPLAY", "WAYLAND_SOCKET", "PATH", "TERM", "IN_NIX_SHELL",
"NIX_SHELL_PRESERVE_PROMPT", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL",
"http_proxy", "https_proxy", "ftp_proxy", "all_proxy", "no_proxy"
};
@@ -245,8 +251,9 @@ static void main_nix_build(int argc, char * * argv)
throw UsageError("'-p' and '-E' are mutually exclusive");
auto store = openStore();
+ auto evalStore = myArgs.evalStoreUrl ? openStore(*myArgs.evalStoreUrl) : store;
- auto state = std::make_unique<EvalState>(myArgs.searchPath, store);
+ auto state = std::make_unique<EvalState>(myArgs.searchPath, evalStore, store);
state->repair = repair;
auto autoArgs = myArgs.getAutoArgs(*state);
@@ -263,7 +270,7 @@ static void main_nix_build(int argc, char * * argv)
if (packages) {
std::ostringstream joined;
- joined << "with import <nixpkgs> { }; (pkgs.runCommandCC or pkgs.runCommand) \"shell\" { buildInputs = [ ";
+ joined << "{...}@args: with import <nixpkgs> args; (pkgs.runCommandCC or pkgs.runCommand) \"shell\" { buildInputs = [ ";
for (const auto & i : left)
joined << '(' << i << ") ";
joined << "]; } \"\"";
@@ -296,8 +303,8 @@ static void main_nix_build(int argc, char * * argv)
absolute = canonPath(absPath(i), true);
} catch (Error & e) {};
auto [path, outputNames] = parsePathWithOutputs(absolute);
- if (store->isStorePath(path) && hasSuffix(path, ".drv"))
- drvs.push_back(DrvInfo(*state, store, absolute));
+ if (evalStore->isStorePath(path) && hasSuffix(path, ".drv"))
+ drvs.push_back(DrvInfo(*state, evalStore, absolute));
else
/* If we're in a #! script, interpret filenames
relative to the script. */
@@ -335,7 +342,7 @@ static void main_nix_build(int argc, char * * argv)
printMissing(ref<Store>(store), willBuild, willSubstitute, unknown, downloadSize, narSize);
if (!dryRun)
- store->buildPaths(paths, buildMode);
+ store->buildPaths(paths, buildMode, evalStore);
};
if (runEnv) {
@@ -343,9 +350,10 @@ static void main_nix_build(int argc, char * * argv)
throw UsageError("nix-shell requires a single derivation");
auto & drvInfo = drvs.front();
- auto drv = store->derivationFromPath(store->parseStorePath(drvInfo.queryDrvPath()));
+ auto drv = evalStore->derivationFromPath(evalStore->parseStorePath(drvInfo.queryDrvPath()));
std::vector<StorePathWithOutputs> pathsToBuild;
+ RealisedPath::Set pathsToCopy;
/* Figure out what bash shell to use. If $NIX_BUILD_SHELL
is not set, then build bashInteractive from
@@ -364,7 +372,9 @@ static void main_nix_build(int argc, char * * argv)
if (!drv)
throw Error("the 'bashInteractive' attribute in <nixpkgs> did not evaluate to a derivation");
- pathsToBuild.push_back({store->parseStorePath(drv->queryDrvPath())});
+ auto bashDrv = store->parseStorePath(drv->queryDrvPath());
+ pathsToBuild.push_back({bashDrv});
+ pathsToCopy.insert(bashDrv);
shell = drv->queryOutPath() + "/bin/bash";
@@ -379,14 +389,25 @@ static void main_nix_build(int argc, char * * argv)
for (const auto & input : drv.inputDrvs)
if (std::all_of(envExclude.cbegin(), envExclude.cend(),
[&](const string & exclude) { return !std::regex_search(store->printStorePath(input.first), std::regex(exclude)); }))
+ {
pathsToBuild.push_back({input.first, input.second});
- for (const auto & src : drv.inputSrcs)
+ pathsToCopy.insert(input.first);
+ }
+ for (const auto & src : drv.inputSrcs) {
pathsToBuild.push_back({src});
+ pathsToCopy.insert(src);
+ }
buildPaths(pathsToBuild);
if (dryRun) return;
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
+ auto resolvedDrv = drv.tryResolve(*store);
+ assert(resolvedDrv && "Successfully resolved the derivation");
+ drv = *resolvedDrv;
+ }
+
// Set the environment.
auto env = getEnv();
@@ -422,12 +443,45 @@ static void main_nix_build(int argc, char * * argv)
} else
env[var.first] = var.second;
+ std::string structuredAttrsRC;
+
+ if (env.count("__json")) {
+ StorePathSet inputs;
+ for (auto & [depDrvPath, wantedDepOutputs] : drv.inputDrvs) {
+ auto outputs = evalStore->queryPartialDerivationOutputMap(depDrvPath);
+ for (auto & i : wantedDepOutputs) {
+ auto o = outputs.at(i);
+ store->computeFSClosure(*o, inputs);
+ }
+ }
+
+ ParsedDerivation parsedDrv(
+ StorePath(store->parseStorePath(drvInfo.queryDrvPath())),
+ drv
+ );
+
+ if (auto structAttrs = parsedDrv.prepareStructuredAttrs(*store, inputs)) {
+ auto json = structAttrs.value();
+ structuredAttrsRC = writeStructuredAttrsShell(json);
+
+ auto attrsJSON = (Path) tmpDir + "/.attrs.json";
+ writeFile(attrsJSON, json.dump());
+
+ auto attrsSH = (Path) tmpDir + "/.attrs.sh";
+ writeFile(attrsSH, structuredAttrsRC);
+
+ env["NIX_ATTRS_SH_FILE"] = attrsSH;
+ env["NIX_ATTRS_JSON_FILE"] = attrsJSON;
+ keepTmp = true;
+ }
+ }
+
/* Run a shell using the derivation's environment. For
convenience, source $stdenv/setup to setup additional
environment variables and shell functions. Also don't
lose the current $PATH directories. */
auto rcfile = (Path) tmpDir + "/rc";
- writeFile(rcfile, fmt(
+ std::string rc = fmt(
R"(_nix_shell_clean_tmpdir() { rm -rf %1%; }; )"s +
(keepTmp ?
"trap _nix_shell_clean_tmpdir EXIT; "
@@ -436,8 +490,12 @@ static void main_nix_build(int argc, char * * argv)
"_nix_shell_clean_tmpdir; ") +
(pure ? "" : "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;") +
"%2%"
- "dontAddDisableDepTrack=1; "
- "[ -e $stdenv/setup ] && source $stdenv/setup; "
+                // Always clear PATH.
+                // When nix-shell is run impure, we rehydrate it with the `p=$PATH` above.
+ "unset PATH;"
+ "dontAddDisableDepTrack=1;\n"
+ + structuredAttrsRC +
+ "\n[ -e $stdenv/setup ] && source $stdenv/setup; "
"%3%"
"PATH=%4%:\"$PATH\"; "
"SHELL=%5%; "
@@ -455,7 +513,9 @@ static void main_nix_build(int argc, char * * argv)
shellEscape(dirOf(*shell)),
shellEscape(*shell),
(getenv("TZ") ? (string("export TZ=") + shellEscape(getenv("TZ")) + "; ") : ""),
- envCommand));
+ envCommand);
+ vomit("Sourcing nix-shell with file %s and contents:\n%s", rcfile, rc);
+ writeFile(rcfile, rc);
Strings envStrs;
for (auto & i : env)
@@ -484,6 +544,7 @@ static void main_nix_build(int argc, char * * argv)
std::vector<StorePathWithOutputs> pathsToBuild;
std::vector<std::pair<StorePath, std::string>> pathsToBuildOrdered;
+ RealisedPath::Set drvsToCopy;
std::map<StorePath, std::pair<size_t, StringSet>> drvMap;
@@ -496,13 +557,13 @@ static void main_nix_build(int argc, char * * argv)
pathsToBuild.push_back({drvPath, {outputName}});
pathsToBuildOrdered.push_back({drvPath, {outputName}});
+ drvsToCopy.insert(drvPath);
auto i = drvMap.find(drvPath);
if (i != drvMap.end())
i->second.second.insert(outputName);
- else {
+ else
drvMap[drvPath] = {drvMap.size(), {outputName}};
- }
}
buildPaths(pathsToBuild);
@@ -517,7 +578,7 @@ static void main_nix_build(int argc, char * * argv)
if (counter)
drvPrefix += fmt("-%d", counter + 1);
- auto builtOutputs = store->queryPartialDerivationOutputMap(drvPath);
+ auto builtOutputs = evalStore->queryPartialDerivationOutputMap(drvPath);
auto maybeOutputPath = builtOutputs.at(outputName);
assert(maybeOutputPath);
diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc
index 02ccbe541..841d50fd3 100755
--- a/src/nix-copy-closure/nix-copy-closure.cc
+++ b/src/nix-copy-closure/nix-copy-closure.cc
@@ -54,10 +54,7 @@ static int main_nix_copy_closure(int argc, char ** argv)
for (auto & path : storePaths)
storePaths2.insert(from->followLinksToStorePath(path));
- RealisedPath::Set closure;
- RealisedPath::closure(*from, storePaths2, closure);
-
- copyPaths(from, to, closure, NoRepair, NoCheckSigs, useSubstitutes);
+ copyClosure(*from, *to, storePaths2, NoRepair, NoCheckSigs, useSubstitutes);
return 0;
}
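Read side by side, this change is a pure simplification: copyClosure() now takes both stores by reference and computes the realised-path closure internally. With the names already in scope above:

```c++
// Before: compute the realised-path closure by hand, then copy it.
RealisedPath::Set closure;
RealisedPath::closure(*from, storePaths2, closure);
copyPaths(from, to, closure, NoRepair, NoCheckSigs, useSubstitutes);

// After: copyClosure() subsumes the closure computation.
copyClosure(*from, *to, storePaths2, NoRepair, NoCheckSigs, useSubstitutes);
```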
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index e04954d45..4056d973d 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -879,7 +879,7 @@ static void queryJSON(Globals & globals, vector<DrvInfo> & elems)
placeholder.write(nullptr);
} else {
PathSet context;
- printValueAsJSON(*globals.state, true, *v, placeholder, context);
+ printValueAsJSON(*globals.state, true, *v, noPos, placeholder, context);
}
}
}
@@ -1204,37 +1204,6 @@ static void opSwitchProfile(Globals & globals, Strings opFlags, Strings opArgs)
}
-static constexpr GenerationNumber prevGen = std::numeric_limits<GenerationNumber>::max();
-
-
-static void switchGeneration(Globals & globals, GenerationNumber dstGen)
-{
- PathLocks lock;
- lockProfile(lock, globals.profile);
-
- auto [gens, curGen] = findGenerations(globals.profile);
-
- std::optional<Generation> dst;
- for (auto & i : gens)
- if ((dstGen == prevGen && i.number < curGen) ||
- (dstGen >= 0 && i.number == dstGen))
- dst = i;
-
- if (!dst) {
- if (dstGen == prevGen)
- throw Error("no generation older than the current (%1%) exists", curGen.value_or(0));
- else
- throw Error("generation %1% does not exist", dstGen);
- }
-
- printInfo("switching from generation %1% to %2%", curGen.value_or(0), dst->number);
-
- if (globals.dryRun) return;
-
- switchLink(globals.profile, dst->path);
-}
-
-
static void opSwitchGeneration(Globals & globals, Strings opFlags, Strings opArgs)
{
if (opFlags.size() > 0)
@@ -1243,7 +1212,7 @@ static void opSwitchGeneration(Globals & globals, Strings opFlags, Strings opArg
throw UsageError("exactly one argument expected");
if (auto dstGen = string2Int<GenerationNumber>(opArgs.front()))
- switchGeneration(globals, *dstGen);
+ switchGeneration(globals.profile, *dstGen, globals.dryRun);
else
throw UsageError("expected a generation number");
}
@@ -1256,7 +1225,7 @@ static void opRollback(Globals & globals, Strings opFlags, Strings opArgs)
if (opArgs.size() != 0)
throw UsageError("no arguments expected");
- switchGeneration(globals, prevGen);
+ switchGeneration(globals.profile, {}, globals.dryRun);
}
@@ -1296,12 +1265,12 @@ static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opAr
} else if (opArgs.size() == 1 && opArgs.front().find('d') != string::npos) {
deleteGenerationsOlderThan(globals.profile, opArgs.front(), globals.dryRun);
} else if (opArgs.size() == 1 && opArgs.front().find('+') != string::npos) {
- if(opArgs.front().size() < 2)
- throw Error("invalid number of generations ‘%1%’", opArgs.front());
+ if (opArgs.front().size() < 2)
+ throw Error("invalid number of generations '%1%'", opArgs.front());
string str_max = string(opArgs.front(), 1, opArgs.front().size());
auto max = string2Int<GenerationNumber>(str_max);
if (!max || *max == 0)
- throw Error("invalid number of generations to keep ‘%1%’", opArgs.front());
+ throw Error("invalid number of generations to keep '%1%'", opArgs.front());
deleteGenerationsGreaterThan(globals.profile, *max, globals.dryRun);
} else {
std::set<GenerationNumber> gens;
diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc
index 5ceb2ae67..1fd4bcbd3 100644
--- a/src/nix-env/user-env.cc
+++ b/src/nix-env/user-env.cc
@@ -131,9 +131,9 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
state.forceValue(topLevel);
PathSet context;
Attr & aDrvPath(*topLevel.attrs->find(state.sDrvPath));
- auto topLevelDrv = state.store->parseStorePath(state.coerceToPath(aDrvPath.pos ? *(aDrvPath.pos) : noPos, *(aDrvPath.value), context));
+ auto topLevelDrv = state.store->parseStorePath(state.coerceToPath(*aDrvPath.pos, *aDrvPath.value, context));
Attr & aOutPath(*topLevel.attrs->find(state.sOutPath));
- Path topLevelOut = state.coerceToPath(aOutPath.pos ? *(aOutPath.pos) : noPos, *(aOutPath.value), context);
+ Path topLevelOut = state.coerceToPath(*aOutPath.pos, *aOutPath.value, context);
/* Realise the resulting store expression. */
debug("building user environment");
diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc
index 95903d882..19a954ddd 100644
--- a/src/nix-instantiate/nix-instantiate.cc
+++ b/src/nix-instantiate/nix-instantiate.cc
@@ -50,9 +50,9 @@ void processExpr(EvalState & state, const Strings & attrPaths,
else
state.autoCallFunction(autoArgs, v, vRes);
if (output == okXML)
- printValueAsXML(state, strict, location, vRes, std::cout, context);
+ printValueAsXML(state, strict, location, vRes, std::cout, context, noPos);
else if (output == okJSON)
- printValueAsJSON(state, strict, vRes, std::cout, context);
+ printValueAsJSON(state, strict, vRes, v.determinePos(noPos), std::cout, context);
else {
if (strict) state.forceValueDeep(vRes);
std::cout << vRes << std::endl;
@@ -153,8 +153,9 @@ static int main_nix_instantiate(int argc, char * * argv)
settings.readOnlyMode = true;
auto store = openStore();
+ auto evalStore = myArgs.evalStoreUrl ? openStore(*myArgs.evalStoreUrl) : store;
- auto state = std::make_unique<EvalState>(myArgs.searchPath, store);
+ auto state = std::make_unique<EvalState>(myArgs.searchPath, evalStore, store);
state->repair = repair;
Bindings & autoArgs = *myArgs.getAutoArgs(*state);
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index b327793e7..f0ce0368a 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -801,6 +801,9 @@ static void opServe(Strings opFlags, Strings opArgs)
settings.enforceDeterminism = readInt(in);
settings.runDiffHook = true;
}
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 7) {
+ settings.keepFailed = (bool) readInt(in);
+ }
settings.printRepeatedBuilds = false;
};
diff --git a/src/nix/app.cc b/src/nix/app.cc
index cf147c631..9719a65dd 100644
--- a/src/nix/app.cc
+++ b/src/nix/app.cc
@@ -3,34 +3,79 @@
#include "eval-inline.hh"
#include "eval-cache.hh"
#include "names.hh"
+#include "command.hh"
namespace nix {
-App Installable::toApp(EvalState & state)
+struct InstallableDerivedPath : Installable
{
- auto [cursor, attrPath] = getCursor(state);
+ ref<Store> store;
+ const DerivedPath derivedPath;
- auto type = cursor->getAttr("type")->getString();
+ InstallableDerivedPath(ref<Store> store, const DerivedPath & derivedPath)
+ : store(store)
+ , derivedPath(derivedPath)
+ {
+ }
+
+
+ std::string what() override { return derivedPath.to_string(*store); }
+
+ DerivedPaths toDerivedPaths() override
+ {
+ return {derivedPath};
+ }
- auto checkProgram = [&](const Path & program)
+ std::optional<StorePath> getStorePath() override
{
- if (!state.store->isInStore(program))
- throw Error("app program '%s' is not in the Nix store", program);
- };
+ return std::nullopt;
+ }
+};
+
+/**
+ * Return the rewrites that are needed to resolve a string whose context is
+ * included in `dependencies`
+ */
+StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies)
+{
+ StringPairs res;
+ for (auto & dep : dependencies)
+ if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep))
+ for (auto & [ outputName, outputPath ] : drvDep->outputs)
+ res.emplace(
+ downstreamPlaceholder(store, drvDep->drvPath, outputName),
+ store.printStorePath(outputPath)
+ );
+ return res;
+}
+
+/**
+ * Resolve the given string assuming the given context
+ */
+std::string resolveString(Store & store, const std::string & toResolve, const BuiltPaths dependencies)
+{
+ auto rewrites = resolveRewrites(store, dependencies);
+ return rewriteStrings(toResolve, rewrites);
+}
+
+UnresolvedApp Installable::toApp(EvalState & state)
+{
+ auto [cursor, attrPath] = getCursor(state);
+
+ auto type = cursor->getAttr("type")->getString();
if (type == "app") {
auto [program, context] = cursor->getAttr("program")->getStringWithContext();
- checkProgram(program);
std::vector<StorePathWithOutputs> context2;
for (auto & [path, name] : context)
context2.push_back({state.store->parseStorePath(path), {name}});
- return App {
+ return UnresolvedApp{App {
.context = std::move(context2),
.program = program,
- };
+ }};
}
else if (type == "derivation") {
@@ -45,15 +90,33 @@ App Installable::toApp(EvalState & state)
? aMainProgram->getString()
: DrvName(name).name;
auto program = outPath + "/bin/" + mainProgram;
- checkProgram(program);
- return App {
+ return UnresolvedApp { App {
.context = { { drvPath, {outputName} } },
.program = program,
- };
+ }};
}
else
throw Error("attribute '%s' has unsupported type '%s'", attrPath, type);
}
+// FIXME: move to libcmd
+App UnresolvedApp::resolve(ref<Store> evalStore, ref<Store> store)
+{
+ auto res = unresolved;
+
+ std::vector<std::shared_ptr<Installable>> installableContext;
+
+ for (auto & ctxElt : unresolved.context)
+ installableContext.push_back(
+ std::make_shared<InstallableDerivedPath>(store, ctxElt.toDerivedPath()));
+
+ auto builtContext = build(evalStore, store, Realise::Outputs, installableContext);
+ res.program = resolveString(*store, unresolved.program, builtContext);
+ if (!store->isInStore(res.program))
+ throw Error("app program '%s' is not in the Nix store", res.program);
+
+ return res;
+}
+
}
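The net effect of the refactor above is a two-phase app resolution: toApp() may now return a program string that still contains downstreamPlaceholder() markers, and resolve() builds the required context and rewrites them into concrete store paths. A sketch of the flow, using only names introduced in this patch (bundle.cc below follows exactly this pattern):

```c++
// Resolve an installable's app in two phases; resolve() throws if the
// final program path is not inside the store.
nix::App realiseApp(
    std::shared_ptr<nix::Installable> installable,
    nix::EvalState & state,
    nix::ref<nix::Store> evalStore,
    nix::ref<nix::Store> store)
{
    auto unresolved = installable->toApp(state); // may contain placeholders
    return unresolved.resolve(evalStore, store); // builds context, rewrites
}
```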
diff --git a/src/nix/build.cc b/src/nix/build.cc
index 03159b6cc..6e31757a2 100644
--- a/src/nix/build.cc
+++ b/src/nix/build.cc
@@ -52,7 +52,12 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
void run(ref<Store> store) override
{
- auto buildables = build(store, dryRun ? Realise::Nothing : Realise::Outputs, installables, buildMode);
+ auto buildables = build(
+ getEvalStore(), store,
+ dryRun ? Realise::Derivation : Realise::Outputs,
+ installables, buildMode);
+
+ if (json) logger->cout("%s", derivedPathsWithHintsToJSON(buildables, store).dump());
if (dryRun) return;
@@ -61,14 +66,13 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
for (const auto & [_i, buildable] : enumerate(buildables)) {
auto i = _i;
std::visit(overloaded {
- [&](DerivedPathWithHints::Opaque bo) {
+ [&](const BuiltPath::Opaque & bo) {
std::string symlink = outLink;
if (i) symlink += fmt("-%d", i);
store2->addPermRoot(bo.path, absPath(symlink));
},
- [&](DerivedPathWithHints::Built bfd) {
- auto builtOutputs = store->queryDerivationOutputMap(bfd.drvPath);
- for (auto & output : builtOutputs) {
+ [&](const BuiltPath::Built & bfd) {
+ for (auto & output : bfd.outputs) {
std::string symlink = outLink;
if (i) symlink += fmt("-%d", i);
if (output.first != "out") symlink += fmt("-%s", output.first);
@@ -79,8 +83,6 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
}
updateProfile(buildables);
-
- if (json) logger->cout("%s", derivedPathsWithHintsToJSON(buildables, store).dump());
}
};
diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc
index 53dccc63a..aca024bca 100644
--- a/src/nix/bundle.cc
+++ b/src/nix/bundle.cc
@@ -59,7 +59,7 @@ struct CmdBundle : InstallableCommand
Strings getDefaultFlakeAttrPathPrefixes() override
{
- Strings res{"apps." + settings.thisSystem.get() + ".", "packages"};
+ Strings res{"apps." + settings.thisSystem.get() + "."};
for (auto & s : SourceExprCommand::getDefaultFlakeAttrPathPrefixes())
res.push_back(s);
return res;
@@ -69,8 +69,7 @@ struct CmdBundle : InstallableCommand
{
auto evalState = getEvalState();
- auto app = installable->toApp(*evalState);
- store->buildPaths(toDerivedPaths(app.context));
+ auto app = installable->toApp(*evalState).resolve(getEvalStore(), store);
auto [bundlerFlakeRef, bundlerName] = parseFlakeRefWithFragment(bundler, absPath("."));
const flake::LockFlags lockFlags{ .writeLockFile = false };
diff --git a/src/nix/copy.cc b/src/nix/copy.cc
index f59f7c76b..197c85316 100644
--- a/src/nix/copy.cc
+++ b/src/nix/copy.cc
@@ -8,7 +8,7 @@
using namespace nix;
-struct CmdCopy : RealisedPathsCommand
+struct CmdCopy : BuiltPathsCommand
{
std::string srcUri, dstUri;
@@ -16,10 +16,10 @@ struct CmdCopy : RealisedPathsCommand
SubstituteFlag substitute = NoSubstitute;
- using RealisedPathsCommand::run;
+ using BuiltPathsCommand::run;
CmdCopy()
- : RealisedPathsCommand(true)
+ : BuiltPathsCommand(true)
{
addFlag({
.longName = "from",
@@ -75,16 +75,22 @@ struct CmdCopy : RealisedPathsCommand
if (srcUri.empty() && dstUri.empty())
throw UsageError("you must pass '--from' and/or '--to'");
- RealisedPathsCommand::run(store);
+ BuiltPathsCommand::run(store);
}
- void run(ref<Store> srcStore, std::vector<RealisedPath> paths) override
+ void run(ref<Store> srcStore, BuiltPaths && paths) override
{
ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
+ RealisedPath::Set stuffToCopy;
+
+ for (auto & builtPath : paths) {
+ auto theseRealisations = builtPath.toRealisedPaths(*srcStore);
+ stuffToCopy.insert(theseRealisations.begin(), theseRealisations.end());
+ }
+
copyPaths(
- srcStore, dstStore, RealisedPath::Set(paths.begin(), paths.end()),
- NoRepair, checkSigs, substitute);
+ *srcStore, *dstStore, stuffToCopy, NoRepair, checkSigs, substitute);
}
};
diff --git a/src/nix/daemon.cc b/src/nix/daemon.cc
index 2cf2a04c9..6a40a0bd3 100644
--- a/src/nix/daemon.cc
+++ b/src/nix/daemon.cc
@@ -156,9 +156,6 @@ static void daemonLoop()
if (chdir("/") == -1)
throw SysError("cannot change current directory");
- // Get rid of children automatically; don't let them become zombies.
- setSigChldAction(true);
-
AutoCloseFD fdSocket;
// Handle socket-based activation by systemd.
@@ -176,6 +173,9 @@ static void daemonLoop()
fdSocket = createUnixDomainSocket(settings.nixDaemonSocketFile, 0666);
}
+ // Get rid of children automatically; don't let them become zombies.
+ setSigChldAction(true);
+
// Loop accepting connections.
while (1) {
diff --git a/src/nix/develop.cc b/src/nix/develop.cc
index 498a7b45c..5aad53919 100644
--- a/src/nix/develop.cc
+++ b/src/nix/develop.cc
@@ -7,8 +7,10 @@
#include "derivations.hh"
#include "affinity.hh"
#include "progress-bar.hh"
+#include "run.hh"
-#include <regex>
+#include <memory>
+#include <nlohmann/json.hpp>
using namespace nix;
@@ -25,94 +27,142 @@ static DevelopSettings developSettings;
static GlobalConfig::Register rDevelopSettings(&developSettings);
-struct Var
-{
- bool exported = true;
- bool associative = false;
- std::string quoted; // quoted string or array
-};
-
struct BuildEnvironment
{
- std::map<std::string, Var> env;
- std::string bashFunctions;
-};
+ struct String
+ {
+ bool exported;
+ std::string value;
-BuildEnvironment readEnvironment(const Path & path)
-{
- BuildEnvironment res;
+ bool operator == (const String & other) const
+ {
+ return exported == other.exported && value == other.value;
+ }
+ };
- std::set<std::string> exported;
+ using Array = std::vector<std::string>;
- debug("reading environment file '%s'", path);
+ using Associative = std::map<std::string, std::string>;
- auto file = readFile(path);
+ using Value = std::variant<String, Array, Associative>;
- auto pos = file.cbegin();
+ std::map<std::string, Value> vars;
+ std::map<std::string, std::string> bashFunctions;
- static std::string varNameRegex =
- R"re((?:[a-zA-Z_][a-zA-Z0-9_]*))re";
+ static BuildEnvironment fromJSON(std::string_view in)
+ {
+ BuildEnvironment res;
- static std::string simpleStringRegex =
- R"re((?:[a-zA-Z0-9_/:\.\-\+=]*))re";
+ std::set<std::string> exported;
- static std::string dquotedStringRegex =
- R"re((?:\$?"(?:[^"\\]|\\[$`"\\\n])*"))re";
+ auto json = nlohmann::json::parse(in);
- static std::string squotedStringRegex =
- R"re((?:\$?(?:'(?:[^'\\]|\\[abeEfnrtv\\'"?])*'|\\')+))re";
+ for (auto & [name, info] : json["variables"].items()) {
+ std::string type = info["type"];
+ if (type == "var" || type == "exported")
+ res.vars.insert({name, BuildEnvironment::String { .exported = type == "exported", .value = info["value"] }});
+ else if (type == "array")
+ res.vars.insert({name, (Array) info["value"]});
+ else if (type == "associative")
+ res.vars.insert({name, (Associative) info["value"]});
+ }
- static std::string indexedArrayRegex =
- R"re((?:\(( *\[[0-9]+\]="(?:[^"\\]|\\.)*")*\)))re";
+ for (auto & [name, def] : json["bashFunctions"].items()) {
+ res.bashFunctions.insert({name, def});
+ }
- static std::regex declareRegex(
- "^declare -a?x (" + varNameRegex + ")(=(" +
- dquotedStringRegex + "|" + indexedArrayRegex + "))?\n");
+ return res;
+ }
- static std::regex varRegex(
- "^(" + varNameRegex + ")=(" + simpleStringRegex + "|" + squotedStringRegex + "|" + indexedArrayRegex + ")\n");
+ std::string toJSON() const
+ {
+ auto res = nlohmann::json::object();
+
+ auto vars2 = nlohmann::json::object();
+ for (auto & [name, value] : vars) {
+ auto info = nlohmann::json::object();
+ if (auto str = std::get_if<String>(&value)) {
+ info["type"] = str->exported ? "exported" : "var";
+ info["value"] = str->value;
+ }
+ else if (auto arr = std::get_if<Array>(&value)) {
+ info["type"] = "array";
+ info["value"] = *arr;
+ }
+ else if (auto arr = std::get_if<Associative>(&value)) {
+ info["type"] = "associative";
+ info["value"] = *arr;
+ }
+ vars2[name] = std::move(info);
+ }
+ res["variables"] = std::move(vars2);
- /* Note: we distinguish between an indexed and associative array
- using the space before the closing parenthesis. Will
- undoubtedly regret this some day. */
- static std::regex assocArrayRegex(
- "^(" + varNameRegex + ")=" + R"re((?:\(( *\[[^\]]+\]="(?:[^"\\]|\\.)*")* *\)))re" + "\n");
+ res["bashFunctions"] = bashFunctions;
- static std::regex functionRegex(
- "^" + varNameRegex + " \\(\\) *\n");
+ auto json = res.dump();
- while (pos != file.end()) {
+ assert(BuildEnvironment::fromJSON(json) == *this);
- std::smatch match;
+ return json;
+ }
- if (std::regex_search(pos, file.cend(), match, declareRegex, std::regex_constants::match_continuous)) {
- pos = match[0].second;
- exported.insert(match[1]);
+ void toBash(std::ostream & out, const std::set<std::string> & ignoreVars) const
+ {
+ for (auto & [name, value] : vars) {
+ if (!ignoreVars.count(name)) {
+ if (auto str = std::get_if<String>(&value)) {
+ out << fmt("%s=%s\n", name, shellEscape(str->value));
+ if (str->exported)
+ out << fmt("export %s\n", name);
+ }
+ else if (auto arr = std::get_if<Array>(&value)) {
+ out << "declare -a " << name << "=(";
+ for (auto & s : *arr)
+ out << shellEscape(s) << " ";
+ out << ")\n";
+ }
+ else if (auto arr = std::get_if<Associative>(&value)) {
+ out << "declare -A " << name << "=(";
+ for (auto & [n, v] : *arr)
+ out << "[" << shellEscape(n) << "]=" << shellEscape(v) << " ";
+ out << ")\n";
+ }
+ }
}
- else if (std::regex_search(pos, file.cend(), match, varRegex, std::regex_constants::match_continuous)) {
- pos = match[0].second;
- res.env.insert({match[1], Var { .exported = exported.count(match[1]) > 0, .quoted = match[2] }});
+ for (auto & [name, def] : bashFunctions) {
+ out << name << " ()\n{\n" << def << "}\n";
}
+ }
- else if (std::regex_search(pos, file.cend(), match, assocArrayRegex, std::regex_constants::match_continuous)) {
- pos = match[0].second;
- res.env.insert({match[1], Var { .associative = true, .quoted = match[2] }});
- }
+ static std::string getString(const Value & value)
+ {
+ if (auto str = std::get_if<String>(&value))
+ return str->value;
+ else
+ throw Error("bash variable is not a string");
+ }
- else if (std::regex_search(pos, file.cend(), match, functionRegex, std::regex_constants::match_continuous)) {
- res.bashFunctions = std::string(pos, file.cend());
- break;
+ static Array getStrings(const Value & value)
+ {
+ if (auto str = std::get_if<String>(&value))
+ return tokenizeString<Array>(str->value);
+ else if (auto arr = std::get_if<Array>(&value)) {
+ return *arr;
+ } else if (auto assoc = std::get_if<Associative>(&value)) {
+ Array assocKeys;
+ std::for_each(assoc->begin(), assoc->end(), [&](auto & n) { assocKeys.push_back(n.first); });
+ return assocKeys;
}
-
- else throw Error("shell environment '%s' has unexpected line '%s'",
- path, file.substr(pos - file.cbegin(), 60));
+ else
+ throw Error("bash variable is not a string or array");
}
- res.env.erase("__output");
-
- return res;
-}
+ bool operator == (const BuildEnvironment & other) const
+ {
+ return vars == other.vars && bashFunctions == other.bashFunctions;
+ }
+};
const static std::string getEnvSh =
#include "get-env.sh.gen.hh"
@@ -123,15 +173,15 @@ const static std::string getEnvSh =
modified derivation with the same dependencies and nearly the same
initial environment variables, that just writes the resulting
environment to a file and exits. */
-StorePath getDerivationEnvironment(ref<Store> store, const StorePath & drvPath)
+static StorePath getDerivationEnvironment(ref<Store> store, ref<Store> evalStore, const StorePath & drvPath)
{
- auto drv = store->derivationFromPath(drvPath);
+ auto drv = evalStore->derivationFromPath(drvPath);
auto builder = baseNameOf(drv.builder);
if (builder != "bash")
throw Error("'nix develop' only works on derivations that use 'bash' as their builder");
- auto getEnvShPath = store->addTextToStore("get-env.sh", getEnvSh, {});
+ auto getEnvShPath = evalStore->addTextToStore("get-env.sh", getEnvSh, {});
drv.args = {store->printStorePath(getEnvShPath)};
@@ -144,26 +194,34 @@ StorePath getDerivationEnvironment(ref<Store> store, const StorePath & drvPath)
/* Rehash and write the derivation. FIXME: would be nice to use
'buildDerivation', but that's privileged. */
drv.name += "-env";
- for (auto & output : drv.outputs) {
- output.second = { .output = DerivationOutputInputAddressed { .path = StorePath::dummy } };
- drv.env[output.first] = "";
- }
drv.inputSrcs.insert(std::move(getEnvShPath));
- Hash h = std::get<0>(hashDerivationModulo(*store, drv, true));
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
+ for (auto & output : drv.outputs) {
+ output.second = {
+ .output = DerivationOutputDeferred{},
+ };
+ drv.env[output.first] = hashPlaceholder(output.first);
+ }
+ } else {
+ for (auto & output : drv.outputs) {
+ output.second = { .output = DerivationOutputInputAddressed { .path = StorePath::dummy } };
+ drv.env[output.first] = "";
+ }
+ Hash h = std::get<0>(hashDerivationModulo(*evalStore, drv, true));
- for (auto & output : drv.outputs) {
- auto outPath = store->makeOutputPath(output.first, h, drv.name);
- output.second = { .output = DerivationOutputInputAddressed { .path = outPath } };
- drv.env[output.first] = store->printStorePath(outPath);
+ for (auto & output : drv.outputs) {
+ auto outPath = store->makeOutputPath(output.first, h, drv.name);
+ output.second = { .output = DerivationOutputInputAddressed { .path = outPath } };
+ drv.env[output.first] = store->printStorePath(outPath);
+ }
}
- auto shellDrvPath = writeDerivation(*store, drv);
+ auto shellDrvPath = writeDerivation(*evalStore, drv);
/* Build the derivation. */
- store->buildPaths({DerivedPath::Built{shellDrvPath}});
+ store->buildPaths({DerivedPath::Built{shellDrvPath}}, bmNormal, evalStore);
- for (auto & [_0, outputAndOptPath] : drv.outputsAndOptPaths(*store)) {
- auto & [_1, optPath] = outputAndOptPath;
+ for (auto & [_0, optPath] : evalStore->queryPartialDerivationOutputMap(shellDrvPath)) {
assert(optPath);
auto & outPath = *optPath;
assert(store->isValidPath(outPath));
@@ -177,18 +235,15 @@ StorePath getDerivationEnvironment(ref<Store> store, const StorePath & drvPath)
struct Common : InstallableCommand, MixProfile
{
- std::set<string> ignoreVars{
+ std::set<std::string> ignoreVars{
"BASHOPTS",
- "EUID",
"HOME", // FIXME: don't ignore in pure mode?
- "HOSTNAME",
"NIX_BUILD_TOP",
"NIX_ENFORCE_PURITY",
"NIX_LOG_FD",
+ "NIX_REMOTE",
"PPID",
- "PWD",
"SHELLOPTS",
- "SHLVL",
"SSL_CERT_FILE", // FIXME: only want to ignore /no-cert-file.crt
"TEMP",
"TEMPDIR",
@@ -224,22 +279,10 @@ struct Common : InstallableCommand, MixProfile
out << "nix_saved_PATH=\"$PATH\"\n";
- for (auto & i : buildEnvironment.env) {
- if (!ignoreVars.count(i.first) && !hasPrefix(i.first, "BASH_")) {
- if (i.second.associative)
- out << fmt("declare -A %s=(%s)\n", i.first, i.second.quoted);
- else {
- out << fmt("%s=%s\n", i.first, i.second.quoted);
- if (i.second.exported)
- out << fmt("export %s\n", i.first);
- }
- }
- }
+ buildEnvironment.toBash(out, ignoreVars);
out << "PATH=\"$PATH:$nix_saved_PATH\"\n";
- out << buildEnvironment.bashFunctions << "\n";
-
out << "export NIX_BUILD_TOP=\"$(mktemp -d -t nix-shell.XXXXXX)\"\n";
for (auto & i : {"TMP", "TMPDIR", "TEMP", "TEMPDIR"})
out << fmt("export %s=\"$NIX_BUILD_TOP\"\n", i);
@@ -249,25 +292,25 @@ struct Common : InstallableCommand, MixProfile
auto script = out.str();
/* Substitute occurrences of output paths. */
- auto outputs = buildEnvironment.env.find("outputs");
- assert(outputs != buildEnvironment.env.end());
+ auto outputs = buildEnvironment.vars.find("outputs");
+ assert(outputs != buildEnvironment.vars.end());
// FIXME: properly unquote 'outputs'.
StringMap rewrites;
- for (auto & outputName : tokenizeString<std::vector<std::string>>(replaceStrings(outputs->second.quoted, "'", ""))) {
- auto from = buildEnvironment.env.find(outputName);
- assert(from != buildEnvironment.env.end());
+ for (auto & outputName : BuildEnvironment::getStrings(outputs->second)) {
+ auto from = buildEnvironment.vars.find(outputName);
+ assert(from != buildEnvironment.vars.end());
// FIXME: unquote
- rewrites.insert({from->second.quoted, outputsDir + "/" + outputName});
+ rewrites.insert({BuildEnvironment::getString(from->second), outputsDir + "/" + outputName});
}
/* Substitute redirects. */
for (auto & [installable_, dir_] : redirects) {
auto dir = absPath(dir_);
auto installable = parseInstallable(store, installable_);
- auto buildable = installable->toDerivedPathWithHints();
- auto doRedirect = [&](const StorePath & path)
- {
+ auto builtPaths = toStorePaths(
+ getEvalStore(), store, Realise::Nothing, OperateOn::Output, {installable});
+ for (auto & path: builtPaths) {
auto from = store->printStorePath(path);
if (script.find(from) == std::string::npos)
warn("'%s' (path '%s') is not used by this build environment", installable->what(), from);
@@ -275,16 +318,7 @@ struct Common : InstallableCommand, MixProfile
printInfo("redirecting '%s' to '%s'", from, dir);
rewrites.insert({from, dir});
}
- };
- std::visit(overloaded {
- [&](const DerivedPathWithHints::Opaque & bo) {
- doRedirect(bo.path);
- },
- [&](const DerivedPathWithHints::Built & bfd) {
- for (auto & [outputName, path] : bfd.outputs)
- if (path) doRedirect(*path);
- },
- }, buildable.raw());
+ }
}
return rewriteStrings(script, rewrites);
@@ -294,6 +328,12 @@ struct Common : InstallableCommand, MixProfile
{
return {"devShell." + settings.thisSystem.get(), "defaultPackage." + settings.thisSystem.get()};
}
+ Strings getDefaultFlakeAttrPathPrefixes() override
+ {
+ auto res = SourceExprCommand::getDefaultFlakeAttrPathPrefixes();
+ res.emplace_front("devShells." + settings.thisSystem.get() + ".");
+ return res;
+ }
StorePath getShellOutPath(ref<Store> store)
{
@@ -309,7 +349,7 @@ struct Common : InstallableCommand, MixProfile
auto & drvPath = *drvs.begin();
- return getDerivationEnvironment(store, drvPath);
+ return getDerivationEnvironment(store, getEvalStore(), drvPath);
}
}
@@ -321,7 +361,9 @@ struct Common : InstallableCommand, MixProfile
updateProfile(shellOutPath);
- return {readEnvironment(strPath), strPath};
+ debug("reading environment file '%s'", strPath);
+
+ return {BuildEnvironment::fromJSON(readFile(store->toRealPath(shellOutPath))), strPath};
}
};
@@ -351,6 +393,12 @@ struct CmdDevelop : Common, MixEnvironment
});
addFlag({
+ .longName = "unpack",
+ .description = "Run the `unpack` phase.",
+ .handler = {&phase, {"unpack"}},
+ });
+
+ addFlag({
.longName = "configure",
.description = "Run the `configure` phase.",
.handler = {&phase, {"configure"}},
@@ -404,7 +452,7 @@ struct CmdDevelop : Common, MixEnvironment
if (verbosity >= lvlDebug)
script += "set -x\n";
- script += fmt("rm -f '%s'\n", rcFilePath);
+ script += fmt("command rm -f '%s'\n", rcFilePath);
if (phase) {
if (!command.empty())
@@ -423,7 +471,7 @@ struct CmdDevelop : Common, MixEnvironment
}
else {
- script += "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;\n";
+ script = "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;\n" + script;
if (developSettings.bashPrompt != "")
script += fmt("[ -n \"$PS1\" ] && PS1=%s;\n", shellEscape(developSettings.bashPrompt));
if (developSettings.bashPromptSuffix != "")
@@ -432,8 +480,6 @@ struct CmdDevelop : Common, MixEnvironment
writeFull(rcFileFd.get(), script);
- stopProgressBar();
-
setEnviron();
// prevent garbage collection until shell exits
setenv("NIX_GCROOT", gcroot.data(), 1);
@@ -443,16 +489,20 @@ struct CmdDevelop : Common, MixEnvironment
try {
auto state = getEvalState();
+ auto nixpkgsLockFlags = lockFlags;
+ nixpkgsLockFlags.inputOverrides = {};
+ nixpkgsLockFlags.inputUpdates = {};
+
auto bashInstallable = std::make_shared<InstallableFlake>(
this,
state,
installable->nixpkgsFlakeRef(),
Strings{"bashInteractive"},
Strings{"legacyPackages." + settings.thisSystem.get() + "."},
- lockFlags);
+ nixpkgsLockFlags);
- shell = state->store->printStorePath(
- toStorePath(state->store, Realise::Outputs, OperateOn::Output, bashInstallable)) + "/bin/bash";
+ shell = store->printStorePath(
+ toStorePath(getEvalStore(), store, Realise::Outputs, OperateOn::Output, bashInstallable)) + "/bin/bash";
} catch (Error &) {
ignoreException();
}
@@ -462,15 +512,25 @@ struct CmdDevelop : Common, MixEnvironment
auto args = phase || !command.empty() ? Strings{std::string(baseNameOf(shell)), rcFilePath}
: Strings{std::string(baseNameOf(shell)), "--rcfile", rcFilePath};
- restoreProcessContext();
-
- execvp(shell.c_str(), stringsToCharPtrs(args).data());
+    // Need to chdir, since phases assume they are run in the flake directory
+ if (phase) {
+ // chdir if installable is a flake of type git+file or path
+ auto installableFlake = std::dynamic_pointer_cast<InstallableFlake>(installable);
+ if (installableFlake) {
+ auto sourcePath = installableFlake->getLockedFlake()->flake.resolvedRef.input.getSourcePath();
+ if (sourcePath) {
+ if (chdir(sourcePath->c_str()) == -1) {
+ throw SysError("chdir to '%s' failed", *sourcePath);
+ }
+ }
+ }
+ }
- throw SysError("executing shell '%s'", shell);
+ runProgramInStore(store, shell, args);
}
};
-struct CmdPrintDevEnv : Common
+struct CmdPrintDevEnv : Common, MixJSON
{
std::string description() override
{
@@ -492,7 +552,10 @@ struct CmdPrintDevEnv : Common
stopProgressBar();
- std::cout << makeRcScript(store, buildEnvironment);
+ logger->writeToStdout(
+ json
+ ? buildEnvironment.toJSON()
+ : makeRcScript(store, buildEnvironment));
}
};
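With this rewrite the fragile regex parser is gone: `nix develop` and get-env.sh now exchange a JSON document whose schema is fixed by fromJSON()/toJSON() above. A hand-written instance of that schema with hypothetical values, built with the same nlohmann::json API the code uses (it round-trips through fromJSON/toJSON, which the assert in toJSON() verifies):

```c++
#include <nlohmann/json.hpp>

// A hypothetical build environment in the schema fromJSON() expects.
nlohmann::json exampleEnv()
{
    return {
        {"variables", {
            {"name",    {{"type", "var"},      {"value", "hello-2.10"}}},
            {"PATH",    {{"type", "exported"}, {"value", "/hypothetical/bin"}}},
            {"outputs", {{"type", "array"},    {"value", {"out", "dev"}}}},
            {"checks",  {{"type", "associative"},
                         {"value", {{"out", "hypothetical"}}}}},
        }},
        {"bashFunctions", {
            {"genericBuild", "    echo hypothetical body\n"},
        }},
    };
}
```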
diff --git a/src/nix/develop.md b/src/nix/develop.md
index e71d9f8aa..1f214966a 100644
--- a/src/nix/develop.md
+++ b/src/nix/develop.md
@@ -29,6 +29,7 @@ R""(
* Run a particular build phase directly:
```console
+ # nix develop --unpack
# nix develop --configure
# nix develop --build
# nix develop --check
@@ -84,11 +85,20 @@ the flake's `nixConfig` attribute.
# Flake output attributes
-If no flake output attribute is given, `nix run` tries the following
+If no flake output attribute is given, `nix develop` tries the following
flake output attributes:
* `devShell.<system>`
* `defaultPackage.<system>`
+If a flake output *name* is given, `nix develop` tries the following flake
+output attributes:
+
+* `devShells.<system>.<name>`
+
+* `packages.<system>.<name>`
+
+* `legacyPackages.<system>.<name>`
+
)""
diff --git a/src/nix/diff-closures.cc b/src/nix/diff-closures.cc
index 0c7d531c1..734c41e0e 100644
--- a/src/nix/diff-closures.cc
+++ b/src/nix/diff-closures.cc
@@ -131,9 +131,9 @@ struct CmdDiffClosures : SourceExprCommand
void run(ref<Store> store) override
{
auto before = parseInstallable(store, _before);
- auto beforePath = toStorePath(store, Realise::Outputs, operateOn, before);
+ auto beforePath = toStorePath(getEvalStore(), store, Realise::Outputs, operateOn, before);
auto after = parseInstallable(store, _after);
- auto afterPath = toStorePath(store, Realise::Outputs, operateOn, after);
+ auto afterPath = toStorePath(getEvalStore(), store, Realise::Outputs, operateOn, after);
printClosureDiff(store, beforePath, afterPath, "");
}
};
diff --git a/src/nix/edit.cc b/src/nix/edit.cc
index b26417b18..fc48db0d7 100644
--- a/src/nix/edit.cc
+++ b/src/nix/edit.cc
@@ -31,7 +31,7 @@ struct CmdEdit : InstallableCommand
auto [v, pos] = installable->toValue(*state);
try {
- pos = findDerivationFilename(*state, *v, installable->what());
+ pos = findPackageFilename(*state, *v, installable->what());
} catch (NoPositionInfo &) {
}
diff --git a/src/nix/eval.cc b/src/nix/eval.cc
index 65d61e005..c7517cf79 100644
--- a/src/nix/eval.cc
+++ b/src/nix/eval.cc
@@ -112,7 +112,7 @@ struct CmdEval : MixJSON, InstallableCommand
else if (json) {
JSONPlaceholder jsonOut(std::cout);
- printValueAsJSON(*state, true, *v, jsonOut, context);
+ printValueAsJSON(*state, true, *v, pos, jsonOut, context);
}
else {
diff --git a/src/nix/flake-check.md b/src/nix/flake-check.md
index dc079ba0c..07031c909 100644
--- a/src/nix/flake-check.md
+++ b/src/nix/flake-check.md
@@ -22,42 +22,47 @@ This command verifies that the flake specified by flake reference
that the derivations specified by the flake's `checks` output can be
built successfully.
+If the `keep-going` option is set to `true`, Nix will keep evaluating as much
+as it can and report the errors as it encounters them. Otherwise it will stop
+at the first error.
+
# Evaluation checks
-This following flake output attributes must be derivations:
+The following flake output attributes must be derivations:
* `checks.`*system*`.`*name*
-* `defaultPackage.`*system*`
-* `devShell.`*system*`
-* `nixosConfigurations.`*name*`.config.system.build.toplevel
+* `defaultPackage.`*system*
+* `devShell.`*system*
+* `devShells.`*system*`.`*name*
+* `nixosConfigurations.`*name*`.config.system.build.toplevel`
* `packages.`*system*`.`*name*
The following flake output attributes must be [app
definitions](./nix3-run.md):
* `apps.`*system*`.`*name*
-* `defaultApp.`*system*`
+* `defaultApp.`*system*
The following flake output attributes must be [template
definitions](./nix3-flake-init.md):
* `defaultTemplate`
-* `templates`.`*name*
+* `templates.`*name*
The following flake output attributes must be *Nixpkgs overlays*:
* `overlay`
-* `overlays`.`*name*
+* `overlays.`*name*
The following flake output attributes must be *NixOS modules*:
* `nixosModule`
-* `nixosModules`.`*name*
+* `nixosModules.`*name*
The following flake output attributes must be
[bundlers](./nix3-bundle.md):
-* `bundlers`.`*name*
+* `bundlers.`*name*
* `defaultBundler`
In addition, the `hydraJobs` output is evaluated in the same way as
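The `keep-going` behaviour documented above is reachable through the usual flag form of the setting; an illustrative invocation:

```console
# nix flake check --keep-going
```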
diff --git a/src/nix/flake-show.md b/src/nix/flake-show.md
index 1a42c44a0..e484cf47e 100644
--- a/src/nix/flake-show.md
+++ b/src/nix/flake-show.md
@@ -35,4 +35,7 @@ specified by flake reference *flake-url*. These are the top-level
attributes in the `outputs` of the flake, as well as lower-level
attributes for some standard outputs (e.g. `packages` or `checks`).
+With `--json`, the output is in a JSON representation suitable for automatic
+processing by other tools.
+
)""
diff --git a/src/nix/flake.cc b/src/nix/flake.cc
index 62a413e27..97f4d911c 100644
--- a/src/nix/flake.cc
+++ b/src/nix/flake.cc
@@ -84,6 +84,7 @@ struct CmdFlakeUpdate : FlakeCommand
lockFlags.recreateLockFile = true;
lockFlags.writeLockFile = true;
+ lockFlags.applyNixConfig = true;
lockFlake();
}
@@ -114,6 +115,7 @@ struct CmdFlakeLock : FlakeCommand
settings.tarballTtl = 0;
lockFlags.writeLockFile = true;
+ lockFlags.applyNixConfig = true;
lockFlake();
}
@@ -129,8 +131,18 @@ static void enumerateOutputs(EvalState & state, Value & vFlake,
state.forceAttrs(*aOutputs->value);
- for (auto & attr : *aOutputs->value->attrs)
- callback(attr.name, *attr.value, *attr.pos);
+ auto sHydraJobs = state.symbols.create("hydraJobs");
+
+ /* Hack: ensure that hydraJobs is evaluated before anything
+ else. This way we can disable IFD for hydraJobs and then enable
+ it for other outputs. */
+ if (auto attr = aOutputs->value->attrs->get(sHydraJobs))
+ callback(attr->name, *attr->value, *attr->pos);
+
+ for (auto & attr : *aOutputs->value->attrs) {
+ if (attr.name != sHydraJobs)
+ callback(attr.name, *attr.value, *attr.pos);
+ }
}
struct CmdFlakeMetadata : FlakeCommand, MixJSON
@@ -240,6 +252,14 @@ struct CmdFlakeInfo : CmdFlakeMetadata
}
};
+static bool argHasName(std::string_view arg, std::string_view expected)
+{
+ return
+ arg == expected
+ || arg == "_"
+ || (hasPrefix(arg, "_") && arg.substr(1) == expected);
+}
+
struct CmdFlakeCheck : FlakeCommand
{
bool build = true;
@@ -267,30 +287,50 @@ struct CmdFlakeCheck : FlakeCommand
void run(nix::ref<nix::Store> store) override
{
- settings.readOnlyMode = !build;
+ if (!build) {
+ settings.readOnlyMode = true;
+ evalSettings.enableImportFromDerivation.setDefault(false);
+ }
auto state = getEvalState();
+
+ lockFlags.applyNixConfig = true;
auto flake = lockFlake();
+ bool hasErrors = false;
+ auto reportError = [&](const Error & e) {
+ try {
+ throw e;
+ } catch (Error & e) {
+ if (settings.keepGoing) {
+ ignoreException();
+ hasErrors = true;
+ }
+ else
+ throw;
+ }
+ };
+
// FIXME: rewrite to use EvalCache.
auto checkSystemName = [&](const std::string & system, const Pos & pos) {
// FIXME: what's the format of "system"?
if (system.find('-') == std::string::npos)
- throw Error("'%s' is not a valid system type, at %s", system, pos);
+ reportError(Error("'%s' is not a valid system type, at %s", system, pos));
};
- auto checkDerivation = [&](const std::string & attrPath, Value & v, const Pos & pos) {
+ auto checkDerivation = [&](const std::string & attrPath, Value & v, const Pos & pos) -> std::optional<StorePath> {
try {
auto drvInfo = getDerivation(*state, v, false);
if (!drvInfo)
throw Error("flake attribute '%s' is not a derivation", attrPath);
// FIXME: check meta attributes
- return store->parseStorePath(drvInfo->queryDrvPath());
+ return std::make_optional(store->parseStorePath(drvInfo->queryDrvPath()));
} catch (Error & e) {
e.addTrace(pos, hintfmt("while checking the derivation '%s'", attrPath));
- throw;
+ reportError(e);
}
+ return std::nullopt;
};
std::vector<DerivedPath> drvPaths;
@@ -307,23 +347,27 @@ struct CmdFlakeCheck : FlakeCommand
#endif
} catch (Error & e) {
e.addTrace(pos, hintfmt("while checking the app definition '%s'", attrPath));
- throw;
+ reportError(e);
}
};
auto checkOverlay = [&](const std::string & attrPath, Value & v, const Pos & pos) {
try {
state->forceValue(v, pos);
- if (!v.isLambda() || v.lambda.fun->matchAttrs || std::string(v.lambda.fun->arg) != "final")
+ if (!v.isLambda()
+ || v.lambda.fun->hasFormals()
+ || !argHasName(v.lambda.fun->arg, "final"))
throw Error("overlay does not take an argument named 'final'");
auto body = dynamic_cast<ExprLambda *>(v.lambda.fun->body);
- if (!body || body->matchAttrs || std::string(body->arg) != "prev")
+ if (!body
+ || body->hasFormals()
+ || !argHasName(body->arg, "prev"))
throw Error("overlay does not take an argument named 'prev'");
// FIXME: if we have a 'nixpkgs' input, use it to
// evaluate the overlay.
} catch (Error & e) {
e.addTrace(pos, hintfmt("while checking the overlay '%s'", attrPath));
- throw;
+ reportError(e);
}
};
@@ -331,7 +375,7 @@ struct CmdFlakeCheck : FlakeCommand
try {
state->forceValue(v, pos);
if (v.isLambda()) {
- if (!v.lambda.fun->matchAttrs || !v.lambda.fun->formals->ellipsis)
+ if (!v.lambda.fun->hasFormals() || !v.lambda.fun->formals->ellipsis)
throw Error("module must match an open attribute set ('{ config, ... }')");
} else if (v.type() == nAttrs) {
for (auto & attr : *v.attrs)
@@ -347,7 +391,7 @@ struct CmdFlakeCheck : FlakeCommand
// check the module.
} catch (Error & e) {
e.addTrace(pos, hintfmt("while checking the NixOS module '%s'", attrPath));
- throw;
+ reportError(e);
}
};
@@ -362,14 +406,18 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *v.attrs) {
state->forceAttrs(*attr.value, *attr.pos);
- if (!state->isDerivation(*attr.value))
- checkHydraJobs(attrPath + "." + (std::string) attr.name,
- *attr.value, *attr.pos);
+ auto attrPath2 = attrPath + "." + (std::string) attr.name;
+ if (state->isDerivation(*attr.value)) {
+ Activity act(*logger, lvlChatty, actUnknown,
+ fmt("checking Hydra job '%s'", attrPath2));
+ checkDerivation(attrPath2, *attr.value, *attr.pos);
+ } else
+ checkHydraJobs(attrPath2, *attr.value, *attr.pos);
}
} catch (Error & e) {
e.addTrace(pos, hintfmt("while checking the Hydra jobset '%s'", attrPath));
- throw;
+ reportError(e);
}
};
@@ -384,7 +432,7 @@ struct CmdFlakeCheck : FlakeCommand
throw Error("attribute 'config.system.build.toplevel' is not a derivation");
} catch (Error & e) {
e.addTrace(pos, hintfmt("while checking the NixOS configuration '%s'", attrPath));
- throw;
+ reportError(e);
}
};
@@ -418,7 +466,7 @@ struct CmdFlakeCheck : FlakeCommand
}
} catch (Error & e) {
e.addTrace(pos, hintfmt("while checking the template '%s'", attrPath));
- throw;
+ reportError(e);
}
};
@@ -428,12 +476,12 @@ struct CmdFlakeCheck : FlakeCommand
if (!v.isLambda())
throw Error("bundler must be a function");
if (!v.lambda.fun->formals ||
- v.lambda.fun->formals->argNames.find(state->symbols.create("program")) == v.lambda.fun->formals->argNames.end() ||
- v.lambda.fun->formals->argNames.find(state->symbols.create("system")) == v.lambda.fun->formals->argNames.end())
+ !v.lambda.fun->formals->argNames.count(state->symbols.create("program")) ||
+ !v.lambda.fun->formals->argNames.count(state->symbols.create("system")))
throw Error("bundler must take formal arguments 'program' and 'system'");
} catch (Error & e) {
e.addTrace(pos, hintfmt("while checking the template '%s'", attrPath));
- throw;
+ reportError(e);
}
};
@@ -450,6 +498,8 @@ struct CmdFlakeCheck : FlakeCommand
fmt("checking flake output '%s'", name));
try {
+ evalSettings.enableImportFromDerivation.setDefault(name != "hydraJobs");
+
state->forceValue(vOutput, pos);
if (name == "checks") {
@@ -461,13 +511,13 @@ struct CmdFlakeCheck : FlakeCommand
auto drvPath = checkDerivation(
fmt("%s.%s.%s", name, attr.name, attr2.name),
*attr2.value, *attr2.pos);
- if ((std::string) attr.name == settings.thisSystem.get())
- drvPaths.push_back(DerivedPath::Built{drvPath});
+ if (drvPath && (std::string) attr.name == settings.thisSystem.get())
+ drvPaths.push_back(DerivedPath::Built{*drvPath});
}
}
}
- else if (name == "packages") {
+ else if (name == "packages" || name == "devShells") {
state->forceAttrs(vOutput, pos);
for (auto & attr : *vOutput.attrs) {
checkSystemName(attr.name, *attr.pos);
@@ -574,7 +624,7 @@ struct CmdFlakeCheck : FlakeCommand
} catch (Error & e) {
e.addTrace(pos, hintfmt("while checking flake output '%s'", name));
- throw;
+ reportError(e);
}
});
}
@@ -583,6 +633,8 @@ struct CmdFlakeCheck : FlakeCommand
Activity act(*logger, lvlInfo, actUnknown, "running flake checks");
store->buildPaths(drvPaths);
}
+ if (hasErrors)
+ throw Error("some errors were encountered during the evaluation");
}
};
@@ -820,12 +872,12 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun
if (!dryRun && !dstUri.empty()) {
ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
- copyPaths(store, dstStore, sources);
+ copyPaths(*store, *dstStore, sources);
}
}
};
-struct CmdFlakeShow : FlakeCommand
+struct CmdFlakeShow : FlakeCommand, MixJSON
{
bool showLegacy = false;
@@ -852,51 +904,69 @@ struct CmdFlakeShow : FlakeCommand
void run(nix::ref<nix::Store> store) override
{
+ evalSettings.enableImportFromDerivation.setDefault(false);
+
auto state = getEvalState();
auto flake = std::make_shared<LockedFlake>(lockFlake());
- std::function<void(eval_cache::AttrCursor & visitor, const std::vector<Symbol> & attrPath, const std::string & headerPrefix, const std::string & nextPrefix)> visit;
-
- visit = [&](eval_cache::AttrCursor & visitor, const std::vector<Symbol> & attrPath, const std::string & headerPrefix, const std::string & nextPrefix)
+ std::function<nlohmann::json(
+ eval_cache::AttrCursor & visitor,
+ const std::vector<Symbol> & attrPath,
+ const std::string & headerPrefix,
+ const std::string & nextPrefix)> visit;
+
+ visit = [&](
+ eval_cache::AttrCursor & visitor,
+ const std::vector<Symbol> & attrPath,
+ const std::string & headerPrefix,
+ const std::string & nextPrefix)
+ -> nlohmann::json
{
+ auto j = nlohmann::json::object();
+
Activity act(*logger, lvlInfo, actUnknown,
fmt("evaluating '%s'", concatStringsSep(".", attrPath)));
try {
auto recurse = [&]()
{
- logger->cout("%s", headerPrefix);
+ if (!json)
+ logger->cout("%s", headerPrefix);
auto attrs = visitor.getAttrs();
for (const auto & [i, attr] : enumerate(attrs)) {
bool last = i + 1 == attrs.size();
auto visitor2 = visitor.getAttr(attr);
auto attrPath2(attrPath);
attrPath2.push_back(attr);
- visit(*visitor2, attrPath2,
+ auto j2 = visit(*visitor2, attrPath2,
fmt(ANSI_GREEN "%s%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, nextPrefix, last ? treeLast : treeConn, attr),
nextPrefix + (last ? treeNull : treeLine));
+ if (json) j.emplace(attr, std::move(j2));
}
};
auto showDerivation = [&]()
{
auto name = visitor.getAttr(state->sName)->getString();
-
- /*
- std::string description;
-
- if (auto aMeta = visitor.maybeGetAttr("meta")) {
- if (auto aDescription = aMeta->maybeGetAttr("description"))
- description = aDescription->getString();
+ if (json) {
+ std::optional<std::string> description;
+ if (auto aMeta = visitor.maybeGetAttr("meta")) {
+ if (auto aDescription = aMeta->maybeGetAttr("description"))
+ description = aDescription->getString();
+ }
+ j.emplace("type", "derivation");
+ j.emplace("name", name);
+ if (description)
+ j.emplace("description", *description);
+ } else {
+ logger->cout("%s: %s '%s'",
+ headerPrefix,
+ attrPath.size() == 2 && attrPath[0] == "devShell" ? "development environment" :
+ attrPath.size() >= 2 && attrPath[0] == "devShells" ? "development environment" :
+ attrPath.size() == 3 && attrPath[0] == "checks" ? "derivation" :
+ attrPath.size() >= 1 && attrPath[0] == "hydraJobs" ? "derivation" :
+ "package",
+ name);
}
- */
-
- logger->cout("%s: %s '%s'",
- headerPrefix,
- attrPath.size() == 2 && attrPath[0] == "devShell" ? "development environment" :
- attrPath.size() == 3 && attrPath[0] == "checks" ? "derivation" :
- attrPath.size() >= 1 && attrPath[0] == "hydraJobs" ? "derivation" :
- "package",
- name);
};
if (attrPath.size() == 0
@@ -911,6 +981,7 @@ struct CmdFlakeShow : FlakeCommand
|| ((attrPath.size() == 1 || attrPath.size() == 2)
&& (attrPath[0] == "checks"
|| attrPath[0] == "packages"
+ || attrPath[0] == "devShells"
|| attrPath[0] == "apps"))
)
{
@@ -919,7 +990,7 @@ struct CmdFlakeShow : FlakeCommand
else if (
(attrPath.size() == 2 && (attrPath[0] == "defaultPackage" || attrPath[0] == "devShell"))
- || (attrPath.size() == 3 && (attrPath[0] == "checks" || attrPath[0] == "packages"))
+ || (attrPath.size() == 3 && (attrPath[0] == "checks" || attrPath[0] == "packages" || attrPath[0] == "devShells"))
)
{
if (visitor.isDerivation())
@@ -939,7 +1010,7 @@ struct CmdFlakeShow : FlakeCommand
if (attrPath.size() == 1)
recurse();
else if (!showLegacy)
- logger->cout("%s: " ANSI_YELLOW "omitted" ANSI_NORMAL " (use '--legacy' to show)", headerPrefix);
+ logger->warn(fmt("%s: " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--legacy' to show)", headerPrefix));
else {
if (visitor.isDerivation())
showDerivation();
@@ -956,7 +1027,11 @@ struct CmdFlakeShow : FlakeCommand
auto aType = visitor.maybeGetAttr("type");
if (!aType || aType->getString() != "app")
throw EvalError("not an app definition");
- logger->cout("%s: app", headerPrefix);
+ if (json) {
+ j.emplace("type", "app");
+ } else {
+ logger->cout("%s: app", headerPrefix);
+ }
}
else if (
@@ -964,27 +1039,41 @@ struct CmdFlakeShow : FlakeCommand
(attrPath.size() == 2 && attrPath[0] == "templates"))
{
auto description = visitor.getAttr("description")->getString();
- logger->cout("%s: template: " ANSI_BOLD "%s" ANSI_NORMAL, headerPrefix, description);
+ if (json) {
+ j.emplace("type", "template");
+ j.emplace("description", description);
+ } else {
+ logger->cout("%s: template: " ANSI_BOLD "%s" ANSI_NORMAL, headerPrefix, description);
+ }
}
else {
- logger->cout("%s: %s",
- headerPrefix,
+ auto [type, description] =
(attrPath.size() == 1 && attrPath[0] == "overlay")
- || (attrPath.size() == 2 && attrPath[0] == "overlays") ? "Nixpkgs overlay" :
- attrPath.size() == 2 && attrPath[0] == "nixosConfigurations" ? "NixOS configuration" :
- attrPath.size() == 2 && attrPath[0] == "nixosModules" ? "NixOS module" :
- ANSI_YELLOW "unknown" ANSI_NORMAL);
+ || (attrPath.size() == 2 && attrPath[0] == "overlays") ? std::make_pair("nixpkgs-overlay", "Nixpkgs overlay") :
+ attrPath.size() == 2 && attrPath[0] == "nixosConfigurations" ? std::make_pair("nixos-configuration", "NixOS configuration") :
+ (attrPath.size() == 1 && attrPath[0] == "nixosModule")
+ || (attrPath.size() == 2 && attrPath[0] == "nixosModules") ? std::make_pair("nixos-module", "NixOS module") :
+ std::make_pair("unknown", "unknown");
+ if (json) {
+ j.emplace("type", type);
+ } else {
+ logger->cout("%s: " ANSI_WARNING "%s" ANSI_NORMAL, headerPrefix, description);
+ }
}
} catch (EvalError & e) {
if (!(attrPath.size() > 0 && attrPath[0] == "legacyPackages"))
throw;
}
+
+ return j;
};
auto cache = openEvalCache(*state, flake);
- visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), "");
+ auto j = visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), "");
+ if (json)
+ logger->cout("%s", j.dump());
}
};
@@ -1062,7 +1151,7 @@ struct CmdFlake : NixMultiCommand
{
if (!command)
throw UsageError("'nix flake' requires a sub-command.");
- settings.requireExperimentalFeature("flakes");
+ settings.requireExperimentalFeature(Xp::Flakes);
command->second->prepare();
command->second->run();
}
diff --git a/src/nix/flake.md b/src/nix/flake.md
index 9e936a049..3b5812a0a 100644
--- a/src/nix/flake.md
+++ b/src/nix/flake.md
@@ -186,8 +186,8 @@ Currently the `type` attribute can be one of the following:
attribute `url`.
In URL form, the schema must be `http://`, `https://` or `file://`
- URLs and the extension must be `.zip`, `.tar`, `.tar.gz`, `.tar.xz`
- or `.tar.bz2`.
+ URLs and the extension must be `.zip`, `.tar`, `.tar.gz`, `.tar.xz`,
+ `.tar.bz2` or `.tar.zst`.
* `github`: A more efficient way to fetch repositories from
GitHub. The following attributes are required:
@@ -225,7 +225,7 @@ Currently the `type` attribute can be one of the following:
[flake:]<flake-id>(/<rev-or-ref>(/rev)?)?
```
- These perform a lookup of `<flake-id>` in the flake registry. or
+ These perform a lookup of `<flake-id>` in the flake registry. For
example, `nixpkgs` and `nixpkgs/release-20.09` are indirect flake
references. The specified `rev` and/or `ref` are merged with the
entry in the registry; see [nix registry](./nix3-registry.md) for
diff --git a/src/nix/get-env.sh b/src/nix/get-env.sh
index 091c0f573..42c806450 100644
--- a/src/nix/get-env.sh
+++ b/src/nix/get-env.sh
@@ -8,12 +8,123 @@ if [[ -n $stdenv ]]; then
source $stdenv/setup
fi
-for __output in $outputs; do
+# Better to use compgen, but stdenv bash doesn't have it.
+__vars="$(declare -p)"
+__functions="$(declare -F)"
+
+__dumpEnv() {
+ printf '{\n'
+
+ printf ' "bashFunctions": {\n'
+ local __first=1
+ while read __line; do
+ if ! [[ $__line =~ ^declare\ -f\ (.*) ]]; then continue; fi
+ __fun_name="${BASH_REMATCH[1]}"
+ __fun_body="$(type $__fun_name)"
+ if [[ $__fun_body =~ \{(.*)\} ]]; then
+ if [[ -z $__first ]]; then printf ',\n'; else __first=; fi
+ __fun_body="${BASH_REMATCH[1]}"
+ printf " "
+ __escapeString "$__fun_name"
+ printf ':'
+ __escapeString "$__fun_body"
+ else
+ printf "Cannot parse definition of function '%s'.\n" "$__fun_name" >&2
+ return 1
+ fi
+ done < <(printf "%s\n" "$__functions")
+ printf '\n },\n'
+
+ printf ' "variables": {\n'
+ local __first=1
+ while read __line; do
+ if ! [[ $__line =~ ^declare\ (-[^ ])\ ([^=]*) ]]; then continue; fi
+ local type="${BASH_REMATCH[1]}"
+ local __var_name="${BASH_REMATCH[2]}"
+
+ if [[ $__var_name =~ ^BASH_ || \
+ $__var_name = _ || \
+ $__var_name = DIRSTACK || \
+ $__var_name = EUID || \
+ $__var_name = FUNCNAME || \
+ $__var_name = HISTCMD || \
+ $__var_name = HOSTNAME || \
+ $__var_name = GROUPS || \
+ $__var_name = PIPESTATUS || \
+ $__var_name = PWD || \
+ $__var_name = RANDOM || \
+ $__var_name = SHLVL || \
+ $__var_name = SECONDS \
+ ]]; then continue; fi
+
+ if [[ -z $__first ]]; then printf ',\n'; else __first=; fi
+
+ printf " "
+ __escapeString "$__var_name"
+ printf ': {'
+
+ # FIXME: handle -i, -r, -n.
+ if [[ $type == -x ]]; then
+ printf '"type": "exported", "value": '
+ __escapeString "${!__var_name}"
+ elif [[ $type == -- ]]; then
+ printf '"type": "var", "value": '
+ __escapeString "${!__var_name}"
+ elif [[ $type == -a ]]; then
+ printf '"type": "array", "value": ['
+ local __first2=1
+ __var_name="$__var_name[@]"
+ for __i in "${!__var_name}"; do
+ if [[ -z $__first2 ]]; then printf ', '; else __first2=; fi
+ __escapeString "$__i"
+ printf ' '
+ done
+ printf ']'
+ elif [[ $type == -A ]]; then
+ printf '"type": "associative", "value": {\n'
+ local __first2=1
+ declare -n __var_name2="$__var_name"
+ for __i in "${!__var_name2[@]}"; do
+ if [[ -z $__first2 ]]; then printf ',\n'; else __first2=; fi
+ printf " "
+ __escapeString "$__i"
+ printf ": "
+ __escapeString "${__var_name2[$__i]}"
+ done
+ printf '\n }'
+ else
+ printf '"type": "unknown"'
+ fi
+
+ printf "}"
+ done < <(printf "%s\n" "$__vars")
+ printf '\n }\n}'
+}
+
+__escapeString() {
+ local __s="$1"
+ __s="${__s//\\/\\\\}"
+ __s="${__s//\"/\\\"}"
+ __s="${__s//$'\n'/\\n}"
+ __s="${__s//$'\r'/\\r}"
+ __s="${__s//$'\t'/\\t}"
+ printf '"%s"' "$__s"
+}
+
+# In case of `__structuredAttrs = true;` the list of outputs is an associative
+# array with a format like `outname => /nix/store/hash-drvname-outname`, so `__olist`
+# must contain the array's keys (hence `${!...[@]}`) in this case.
+if [ -e .attrs.sh ]; then
+ __olist="${!outputs[@]}"
+else
+ __olist=$outputs
+fi
+
+for __output in $__olist; do
if [[ -z $__done ]]; then
- export > ${!__output}
- set >> ${!__output}
+ __dumpEnv > ${!__output}
__done=1
else
- echo -n >> ${!__output}
+ echo -n >> "${!__output}"
fi
done
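The `__escapeString` helper above implements just the escapes needed for the characters that commonly appear in build environments. As a reference point, here is a rough C++ equivalent of the same transformation (an illustrative sketch only, not part of this patch):

```cpp
#include <string>

// Minimal JSON string escaping, mirroring __escapeString above: backslash,
// double quote, newline, carriage return and tab. (Strict JSON also requires
// escaping the remaining control characters below U+0020; the script assumes
// they do not occur in build environments.)
std::string escapeJsonString(const std::string & s)
{
    std::string out = "\"";
    for (char c : s) {
        switch (c) {
            case '\\': out += "\\\\"; break;
            case '"':  out += "\\\""; break;
            case '\n': out += "\\n";  break;
            case '\r': out += "\\r";  break;
            case '\t': out += "\\t";  break;
            default:   out += c;
        }
    }
    return out + "\"";
}
```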
diff --git a/src/nix/local.mk b/src/nix/local.mk
index 83b6dd08b..e4ec7634d 100644
--- a/src/nix/local.mk
+++ b/src/nix/local.mk
@@ -14,7 +14,7 @@ nix_SOURCES := \
$(wildcard src/nix-instantiate/*.cc) \
$(wildcard src/nix-store/*.cc) \
-nix_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/libexpr -I src/libmain -I src/libcmd
+nix_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/libexpr -I src/libmain -I src/libcmd -I doc/manual
nix_LIBS = libexpr libmain libfetchers libstore libutil libcmd
@@ -30,3 +30,5 @@ src/nix-env/user-env.cc: src/nix-env/buildenv.nix.gen.hh
src/nix/develop.cc: src/nix/get-env.sh.gen.hh
src/nix-channel/nix-channel.cc: src/nix-channel/unpack-channel.nix.gen.hh
+
+src/nix/main.cc: doc/manual/generate-manpage.nix.gen.hh doc/manual/utils.nix.gen.hh
diff --git a/src/nix/log.cc b/src/nix/log.cc
index 638bb5073..fd3c1d787 100644
--- a/src/nix/log.cc
+++ b/src/nix/log.cc
@@ -30,15 +30,15 @@ struct CmdLog : InstallableCommand
subs.push_front(store);
- auto b = installable->toDerivedPathWithHints();
+ auto b = installable->toDerivedPath();
RunPager pager;
for (auto & sub : subs) {
auto log = std::visit(overloaded {
- [&](DerivedPathWithHints::Opaque bo) {
+ [&](const DerivedPath::Opaque & bo) {
return sub->getBuildLog(bo.path);
},
- [&](DerivedPathWithHints::Built bfd) {
+ [&](const DerivedPath::Built & bfd) {
return sub->getBuildLog(bfd.drvPath);
},
}, b.raw());
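The `overloaded { ... }` visitor passed to `std::visit` here (and in several hunks below) is the standard C++17 idiom for assembling a visitor from lambdas. A self-contained sketch of the helper, for readers unfamiliar with it:

```cpp
#include <iostream>
#include <string>
#include <variant>

// Inherit operator() from every lambda; the deduction guide lets
// `overloaded { l1, l2 }` work without spelling out template arguments.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

int main()
{
    std::variant<int, std::string> v = std::string("drvPath");
    std::visit(overloaded {
        [](int i) { std::cout << "int: " << i << "\n"; },
        [](const std::string & s) { std::cout << "string: " << s << "\n"; },
    }, v);
}
```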
diff --git a/src/nix/main.cc b/src/nix/main.cc
index 008482be3..60b0aa410 100644
--- a/src/nix/main.cc
+++ b/src/nix/main.cc
@@ -10,6 +10,7 @@
#include "filetransfer.hh"
#include "finally.hh"
#include "loggers.hh"
+#include "markdown.hh"
#include <sys/types.h>
#include <sys/socket.h>
@@ -163,9 +164,46 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs
}
};
-static void showHelp(std::vector<std::string> subcommand)
+/* Render the help for the specified subcommand to stdout using
+ lowdown. */
+static void showHelp(std::vector<std::string> subcommand, MultiCommand & toplevel)
{
- showManPage(subcommand.empty() ? "nix" : fmt("nix3-%s", concatStringsSep("-", subcommand)));
+ auto mdName = subcommand.empty() ? "nix" : fmt("nix3-%s", concatStringsSep("-", subcommand));
+
+ evalSettings.restrictEval = false;
+ evalSettings.pureEval = false;
+ EvalState state({}, openStore("dummy://"));
+
+ auto vGenerateManpage = state.allocValue();
+ state.eval(state.parseExprFromString(
+ #include "generate-manpage.nix.gen.hh"
+ , "/"), *vGenerateManpage);
+
+ auto vUtils = state.allocValue();
+ state.cacheFile(
+ "/utils.nix", "/utils.nix",
+ state.parseExprFromString(
+ #include "utils.nix.gen.hh"
+ , "/"),
+ *vUtils);
+
+ auto vArgs = state.allocValue();
+ state.mkAttrs(*vArgs, 16);
+ auto vJson = state.allocAttr(*vArgs, state.symbols.create("command"));
+ mkString(*vJson, toplevel.toJSON().dump());
+ vArgs->attrs->sort();
+
+ auto vRes = state.allocValue();
+ state.callFunction(*vGenerateManpage, *vArgs, *vRes, noPos);
+
+ auto attr = vRes->attrs->get(state.symbols.create(mdName + ".md"));
+ if (!attr)
+ throw UsageError("Nix has no subcommand '%s'", concatStringsSep("", subcommand));
+
+ auto markdown = state.forceString(*attr->value);
+
+ RunPager pager;
+ std::cout << renderMarkdownToTerminal(markdown) << "\n";
}
struct CmdHelp : Command
@@ -194,7 +232,10 @@ struct CmdHelp : Command
void run() override
{
- showHelp(subcommand);
+ assert(parent);
+ MultiCommand * toplevel = parent;
+ while (toplevel->parent) toplevel = toplevel->parent;
+ showHelp(subcommand, *toplevel);
}
};
@@ -214,6 +255,16 @@ void mainWrapped(int argc, char * * argv)
initNix();
initGC();
+ #if __linux__
+ if (getuid() == 0) {
+ try {
+ saveMountNamespace();
+ if (unshare(CLONE_NEWNS) == -1)
+ throw SysError("setting up a private mount namespace");
+ } catch (Error & e) { }
+ }
+ #endif
+
programPath = argv[0];
auto programName = std::string(baseNameOf(programPath));
@@ -277,7 +328,7 @@ void mainWrapped(int argc, char * * argv)
} else
break;
}
- showHelp(subcommand);
+ showHelp(subcommand, args);
return;
} catch (UsageError &) {
if (!completions) throw;
@@ -296,7 +347,7 @@ void mainWrapped(int argc, char * * argv)
if (args.command->first != "repl"
&& args.command->first != "doctor"
&& args.command->first != "upgrade-nix")
- settings.requireExperimentalFeature("nix-command");
+ settings.requireExperimentalFeature(Xp::NixCommand);
if (args.useNet && !haveInternet()) {
warn("you don't have Internet access; disabling some network-dependent features");
diff --git a/src/nix/make-content-addressable.cc b/src/nix/make-content-addressable.cc
index f5bdc7e65..12f303a10 100644
--- a/src/nix/make-content-addressable.cc
+++ b/src/nix/make-content-addressable.cc
@@ -25,7 +25,7 @@ struct CmdMakeContentAddressable : StorePathsCommand, MixJSON
;
}
- void run(ref<Store> store, StorePaths storePaths) override
+ void run(ref<Store> store, StorePaths && storePaths) override
{
auto paths = store->topoSortPaths(StorePathSet(storePaths.begin(), storePaths.end()));
diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc
index 518cd5568..3743d7504 100644
--- a/src/nix/path-info.cc
+++ b/src/nix/path-info.cc
@@ -79,7 +79,7 @@ struct CmdPathInfo : StorePathsCommand, MixJSON
std::cout << fmt("\t%6.1f%c", res, idents.at(power));
}
- void run(ref<Store> store, StorePaths storePaths) override
+ void run(ref<Store> store, StorePaths && storePaths) override
{
size_t pathLen = 0;
for (auto & storePath : storePaths)
diff --git a/src/nix/path-info.md b/src/nix/path-info.md
index 76a83e39d..7a1714ba4 100644
--- a/src/nix/path-info.md
+++ b/src/nix/path-info.md
@@ -82,7 +82,7 @@ This command shows information about the store paths produced by
By default, this command only prints the store paths. You can get
additional information by passing flags such as `--closure-size`,
---size`, `--sigs` or `--json`.
+`--size`, `--sigs` or `--json`.
> **Warning**
>
diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc
index b7da3ea5a..768d37595 100644
--- a/src/nix/prefetch.cc
+++ b/src/nix/prefetch.cc
@@ -199,26 +199,24 @@ static int main_nix_prefetch_url(int argc, char * * argv)
state->forceAttrs(v);
/* Extract the URL. */
- auto attr = v.attrs->find(state->symbols.create("urls"));
- if (attr == v.attrs->end())
- throw Error("attribute set does not contain a 'urls' attribute");
- state->forceList(*attr->value);
- if (attr->value->listSize() < 1)
+ auto & attr = v.attrs->need(state->symbols.create("urls"));
+ state->forceList(*attr.value);
+ if (attr.value->listSize() < 1)
throw Error("'urls' list is empty");
- url = state->forceString(*attr->value->listElems()[0]);
+ url = state->forceString(*attr.value->listElems()[0]);
/* Extract the hash mode. */
- attr = v.attrs->find(state->symbols.create("outputHashMode"));
- if (attr == v.attrs->end())
+ auto attr2 = v.attrs->get(state->symbols.create("outputHashMode"));
+ if (!attr2)
printInfo("warning: this does not look like a fetchurl call");
else
- unpack = state->forceString(*attr->value) == "recursive";
+ unpack = state->forceString(*attr2->value) == "recursive";
/* Extract the name. */
if (!name) {
- attr = v.attrs->find(state->symbols.create("name"));
- if (attr != v.attrs->end())
- name = state->forceString(*attr->value);
+ auto attr3 = v.attrs->get(state->symbols.create("name"));
+ if (attr3)
+ name = state->forceString(*attr3->value);
}
}
@@ -283,8 +281,6 @@ struct CmdStorePrefetchFile : StoreCommand, MixJSON
expectArg("url", &url);
}
- Category category() override { return catUtility; }
-
std::string description() override
{
return "download a file into the Nix store";
diff --git a/src/nix/print-dev-env.md b/src/nix/print-dev-env.md
index b80252acf..2aad491de 100644
--- a/src/nix/print-dev-env.md
+++ b/src/nix/print-dev-env.md
@@ -8,12 +8,43 @@ R""(
# . <(nix print-dev-env nixpkgs#hello)
```
+* Get the build environment in JSON format:
+
+ ```console
+ # nix print-dev-env nixpkgs#hello --json
+ ```
+
+ The output will look like this:
+
+ ```json
+ {
+ "bashFunctions": {
+ "buildPhase": " \n runHook preBuild;\n...",
+ ...
+ },
+ "variables": {
+ "src": {
+ "type": "exported",
+ "value": "/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz"
+ },
+ "postUnpackHooks": {
+ "type": "array",
+ "value": ["_updateSourceDateEpochFromSourceRoot"]
+ },
+ ...
+ }
+ }
+ ```
+
# Description
-This command prints a shell script that can be sourced by `b`ash and
-that sets the environment variables and shell functions defined by the
-build process of *installable*. This allows you to get a similar build
+This command prints a shell script that can be sourced by `bash` and
+that sets the variables and shell functions defined by the build
+process of *installable*. This allows you to get a similar build
environment in your current shell rather than in a subshell (as with
`nix develop`).
+With `--json`, the output is a JSON serialisation of the variables and
+functions defined by the build process.
+
)""
diff --git a/src/nix/profile-history.md b/src/nix/profile-history.md
index d0fe40c82..f0bfe5037 100644
--- a/src/nix/profile-history.md
+++ b/src/nix/profile-history.md
@@ -6,10 +6,10 @@ R""(
```console
# nix profile history
- Version 508 -> 509:
+ Version 508 (2020-04-10):
flake:nixpkgs#legacyPackages.x86_64-linux.awscli: ∅ -> 1.17.13
- Version 509 -> 510:
+ Version 509 (2020-05-16) <- 508:
flake:nixpkgs#legacyPackages.x86_64-linux.awscli: 1.17.13 -> 1.18.211
```
diff --git a/src/nix/profile-remove.md b/src/nix/profile-remove.md
index dcf825da9..ba85441d8 100644
--- a/src/nix/profile-remove.md
+++ b/src/nix/profile-remove.md
@@ -15,6 +15,7 @@ R""(
```
* Remove all packages:
+
```console
# nix profile remove '.*'
```
diff --git a/src/nix/profile-rollback.md b/src/nix/profile-rollback.md
new file mode 100644
index 000000000..6bb75aa5e
--- /dev/null
+++ b/src/nix/profile-rollback.md
@@ -0,0 +1,26 @@
+R""(
+
+# Examples
+
+* Roll back your default profile to the previous version:
+
+ ```console
+ # nix profile rollback
+ switching profile from version 519 to 518
+ ```
+
+* Switch your default profile to version 510:
+
+ ```console
+ # nix profile rollback --to 510
+ switching profile from version 518 to 510
+ ```
+
+# Description
+
+This command switches a profile to the most recent version older
+than the currently active version, or if `--to` *N* is given, to
+version *N* of the profile. To see the available versions of a
+profile, use `nix profile history`.
+
+)""
diff --git a/src/nix/profile-upgrade.md b/src/nix/profile-upgrade.md
index 2bd5d256d..e06e74abe 100644
--- a/src/nix/profile-upgrade.md
+++ b/src/nix/profile-upgrade.md
@@ -18,7 +18,7 @@ R""(
* Upgrade a specific profile element by number:
```console
- # nix profile info
+ # nix profile list
0 flake:nixpkgs#legacyPackages.x86_64-linux.spotify …
# nix profile upgrade 0
diff --git a/src/nix/profile-wipe-history.md b/src/nix/profile-wipe-history.md
new file mode 100644
index 000000000..b4b262864
--- /dev/null
+++ b/src/nix/profile-wipe-history.md
@@ -0,0 +1,20 @@
+R""(
+
+# Examples
+
+* Delete all versions of the default profile older than 100 days:
+
+ ```console
+ # nix profile wipe-history --profile /tmp/profile --older-than 100d
+ removing profile version 515
+ removing profile version 514
+ ```
+
+# Description
+
+This command deletes non-current versions of a profile, making it
+impossible to roll back to these versions. By default, all non-current
+versions are deleted. With `--older-than` *N*`d`, all non-current
+versions older than *N* days are deleted.
+
+)""
diff --git a/src/nix/profile.cc b/src/nix/profile.cc
index 667904cd2..96a20f673 100644
--- a/src/nix/profile.cc
+++ b/src/nix/profile.cc
@@ -12,6 +12,7 @@
#include <nlohmann/json.hpp>
#include <regex>
+#include <iomanip>
using namespace nix;
@@ -97,10 +98,8 @@ struct ProfileManifest
else if (pathExists(profile + "/manifest.nix")) {
// FIXME: needed because of pure mode; ugly.
- if (state.allowedPaths) {
- state.allowedPaths->insert(state.store->followLinksToStore(profile));
- state.allowedPaths->insert(state.store->followLinksToStore(profile + "/manifest.nix"));
- }
+ state.allowPath(state.store->followLinksToStore(profile));
+ state.allowPath(state.store->followLinksToStore(profile + "/manifest.nix"));
auto drvInfos = queryInstalled(state, state.store->followLinksToStore(profile));
@@ -253,17 +252,17 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
manifest.elements.emplace_back(std::move(element));
} else {
- auto buildables = build(store, Realise::Outputs, {installable}, bmNormal);
+ auto buildables = build(getEvalStore(), store, Realise::Outputs, {installable}, bmNormal);
for (auto & buildable : buildables) {
ProfileElement element;
std::visit(overloaded {
- [&](DerivedPathWithHints::Opaque bo) {
+ [&](const BuiltPath::Opaque & bo) {
pathsToBuild.push_back(bo);
element.storePaths.insert(bo.path);
},
- [&](DerivedPathWithHints::Built bfd) {
+ [&](const BuiltPath::Built & bfd) {
// TODO: Why are we querying if we know the output
// names already? Is it just to figure out what the
// default one is?
@@ -426,7 +425,7 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf
attrPath,
};
- pathsToBuild.push_back(DerivedPath::Built{drv.drvPath, {"out"}}); // FIXME
+ pathsToBuild.push_back(DerivedPath::Built{drv.drvPath, {drv.outputName}});
}
}
@@ -528,10 +527,11 @@ struct CmdProfileHistory : virtual StoreCommand, EvalCommand, MixDefaultProfile
if (!first) std::cout << "\n";
first = false;
- if (prevGen)
- std::cout << fmt("Version %d -> %d:\n", prevGen->first.number, gen.number);
- else
- std::cout << fmt("Version %d:\n", gen.number);
+ std::cout << fmt("Version %s%d" ANSI_NORMAL " (%s)%s:\n",
+ gen.number == curGen ? ANSI_GREEN : ANSI_BOLD,
+ gen.number,
+ std::put_time(std::gmtime(&gen.creationTime), "%Y-%m-%d"),
+ prevGen ? fmt(" <- %d", prevGen->first.number) : "");
ProfileManifest::printDiff(
prevGen ? prevGen->second : ProfileManifest(),
@@ -543,6 +543,76 @@ struct CmdProfileHistory : virtual StoreCommand, EvalCommand, MixDefaultProfile
}
};
+struct CmdProfileRollback : virtual StoreCommand, MixDefaultProfile, MixDryRun
+{
+ std::optional<GenerationNumber> version;
+
+ CmdProfileRollback()
+ {
+ addFlag({
+ .longName = "to",
+ .description = "The profile version to roll back to.",
+ .labels = {"version"},
+ .handler = {&version},
+ });
+ }
+
+ std::string description() override
+ {
+ return "roll back to the previous version or a specified version of a profile";
+ }
+
+ std::string doc() override
+ {
+ return
+ #include "profile-rollback.md"
+ ;
+ }
+
+ void run(ref<Store> store) override
+ {
+ switchGeneration(*profile, version, dryRun);
+ }
+};
+
+struct CmdProfileWipeHistory : virtual StoreCommand, MixDefaultProfile, MixDryRun
+{
+ std::optional<std::string> minAge;
+
+ CmdProfileWipeHistory()
+ {
+ addFlag({
+ .longName = "older-than",
+ .description =
+ "Delete versions older than the specified age. *age* "
+ "must be in the format *N*`d`, where *N* denotes a number "
+ "of days.",
+ .labels = {"age"},
+ .handler = {&minAge},
+ });
+ }
+
+ std::string description() override
+ {
+ return "delete non-current versions of a profile";
+ }
+
+ std::string doc() override
+ {
+ return
+ #include "profile-wipe-history.md"
+ ;
+ }
+
+ void run(ref<Store> store) override
+ {
+ if (minAge)
+ deleteGenerationsOlderThan(*profile, *minAge, dryRun);
+ else
+ deleteOldGenerations(*profile, dryRun);
+ }
+};
+
struct CmdProfile : NixMultiCommand
{
CmdProfile()
@@ -553,6 +623,8 @@ struct CmdProfile : NixMultiCommand
{"list", []() { return make_ref<CmdProfileList>(); }},
{"diff-closures", []() { return make_ref<CmdProfileDiffClosures>(); }},
{"history", []() { return make_ref<CmdProfileHistory>(); }},
+ {"rollback", []() { return make_ref<CmdProfileRollback>(); }},
+ {"wipe-history", []() { return make_ref<CmdProfileWipeHistory>(); }},
})
{ }
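The new version header in `CmdProfileHistory` formats each generation's creation time with `std::put_time` over `std::gmtime`. A standalone check of that formatting, using a hypothetical timestamp:

```cpp
#include <ctime>
#include <iomanip>
#include <iostream>

int main()
{
    std::time_t creationTime = 1589587200; // hypothetical example value
    // Same format string as the patch; prints "2020-05-16" (UTC).
    std::cout << std::put_time(std::gmtime(&creationTime), "%Y-%m-%d") << "\n";
}
```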
diff --git a/src/nix/realisation.cc b/src/nix/realisation.cc
index 9ee9ccb91..c9a7157cd 100644
--- a/src/nix/realisation.cc
+++ b/src/nix/realisation.cc
@@ -28,7 +28,7 @@ struct CmdRealisation : virtual NixMultiCommand
static auto rCmdRealisation = registerCommand<CmdRealisation>("realisation");
-struct CmdRealisationInfo : RealisedPathsCommand, MixJSON
+struct CmdRealisationInfo : BuiltPathsCommand, MixJSON
{
std::string description() override
{
@@ -44,12 +44,19 @@ struct CmdRealisationInfo : RealisedPathsCommand, MixJSON
Category category() override { return catSecondary; }
- void run(ref<Store> store, std::vector<RealisedPath> paths) override
+ void run(ref<Store> store, BuiltPaths && paths) override
{
- settings.requireExperimentalFeature("ca-derivations");
+ settings.requireExperimentalFeature(Xp::CaDerivations);
+ RealisedPath::Set realisations;
+
+ for (auto & builtPath : paths) {
+ auto theseRealisations = builtPath.toRealisedPaths(*store);
+ realisations.insert(theseRealisations.begin(), theseRealisations.end());
+ }
+
if (json) {
nlohmann::json res = nlohmann::json::array();
- for (auto & path : paths) {
+ for (auto & path : realisations) {
nlohmann::json currentPath;
if (auto realisation = std::get_if<Realisation>(&path.raw))
currentPath = realisation->toJSON();
@@ -61,7 +68,7 @@ struct CmdRealisationInfo : RealisedPathsCommand, MixJSON
std::cout << res.dump();
}
else {
- for (auto & path : paths) {
+ for (auto & path : realisations) {
if (auto realisation = std::get_if<Realisation>(&path.raw)) {
std::cout <<
realisation->id.to_string() << " " <<
diff --git a/src/nix/registry-add.md b/src/nix/registry-add.md
index 80a31996a..a947fa0b3 100644
--- a/src/nix/registry-add.md
+++ b/src/nix/registry-add.md
@@ -21,6 +21,13 @@ R""(
# nix registry add nixpkgs/nixos-20.03 ~/Dev/nixpkgs
```
+* Add `nixpkgs` pointing to `github:nixos/nixpkgs` to your custom flake
+ registry:
+
+ ```console
+ # nix registry add --registry ./custom-flake-registry.json nixpkgs github:nixos/nixpkgs
+ ```
+
# Description
This command adds an entry to the user registry that maps flake
diff --git a/src/nix/registry-pin.md b/src/nix/registry-pin.md
index 6e97e003e..ebc0e3eff 100644
--- a/src/nix/registry-pin.md
+++ b/src/nix/registry-pin.md
@@ -24,6 +24,13 @@ R""(
```
+* Pin `nixpkgs` in a custom registry to its most recent Git revision:
+
+ ```console
+ # nix registry pin --registry ./custom-flake-registry.json nixpkgs
+ ```
+
# Description
This command adds an entry to the user registry that maps flake
diff --git a/src/nix/registry-remove.md b/src/nix/registry-remove.md
index 4c0eb4947..eecd4c6e7 100644
--- a/src/nix/registry-remove.md
+++ b/src/nix/registry-remove.md
@@ -8,6 +8,12 @@ R""(
# nix registry remove nixpkgs
```
+* Remove the entry `nixpkgs` from a custom registry:
+
+ ```console
+ # nix registry remove --registry ./custom-flake-registry.json nixpkgs
+ ```
+
# Description
This command removes from the user registry any entry for flake
diff --git a/src/nix/registry.cc b/src/nix/registry.cc
index f9719600f..c496f94f8 100644
--- a/src/nix/registry.cc
+++ b/src/nix/registry.cc
@@ -10,6 +10,46 @@
using namespace nix;
using namespace nix::flake;
+
+class RegistryCommand : virtual Args
+{
+ std::string registry_path;
+
+ std::shared_ptr<fetchers::Registry> registry;
+
+public:
+
+ RegistryCommand()
+ {
+ addFlag({
+ .longName = "registry",
+ .description = "The registry to operate on.",
+ .labels = {"registry"},
+ .handler = {&registry_path},
+ });
+ }
+
+ std::shared_ptr<fetchers::Registry> getRegistry()
+ {
+ if (registry) return registry;
+ if (registry_path.empty()) {
+ registry = fetchers::getUserRegistry();
+ } else {
+ registry = fetchers::getCustomRegistry(registry_path);
+ }
+ return registry;
+ }
+
+ Path getRegistryPath()
+ {
+ if (registry_path.empty()) {
+ return fetchers::getUserRegistryPath();
+ } else {
+ return registry_path;
+ }
+ }
+};
+
struct CmdRegistryList : StoreCommand
{
std::string description() override
@@ -45,7 +85,7 @@ struct CmdRegistryList : StoreCommand
}
};
-struct CmdRegistryAdd : MixEvalArgs, Command
+struct CmdRegistryAdd : MixEvalArgs, Command, RegistryCommand
{
std::string fromUrl, toUrl;
@@ -71,16 +111,16 @@ struct CmdRegistryAdd : MixEvalArgs, Command
{
auto fromRef = parseFlakeRef(fromUrl);
auto toRef = parseFlakeRef(toUrl);
+ auto registry = getRegistry();
fetchers::Attrs extraAttrs;
if (toRef.subdir != "") extraAttrs["dir"] = toRef.subdir;
- auto userRegistry = fetchers::getUserRegistry();
- userRegistry->remove(fromRef.input);
- userRegistry->add(fromRef.input, toRef.input, extraAttrs);
- userRegistry->write(fetchers::getUserRegistryPath());
+ registry->remove(fromRef.input);
+ registry->add(fromRef.input, toRef.input, extraAttrs);
+ registry->write(getRegistryPath());
}
};
-struct CmdRegistryRemove : virtual Args, MixEvalArgs, Command
+struct CmdRegistryRemove : RegistryCommand, Command
{
std::string url;
@@ -103,19 +143,21 @@ struct CmdRegistryRemove : virtual Args, MixEvalArgs, Command
void run() override
{
- auto userRegistry = fetchers::getUserRegistry();
- userRegistry->remove(parseFlakeRef(url).input);
- userRegistry->write(fetchers::getUserRegistryPath());
+ auto registry = getRegistry();
+ registry->remove(parseFlakeRef(url).input);
+ registry->write(getRegistryPath());
}
};
-struct CmdRegistryPin : virtual Args, EvalCommand
+struct CmdRegistryPin : RegistryCommand, EvalCommand
{
std::string url;
+ std::string locked;
+
std::string description() override
{
- return "pin a flake to its current version in user flake registry";
+ return "pin a flake to its current version or to the current version of a flake URL";
}
std::string doc() override
@@ -128,18 +170,31 @@ struct CmdRegistryPin : virtual Args, EvalCommand
CmdRegistryPin()
{
expectArg("url", &url);
+
+ expectArgs({
+ .label = "locked",
+ .optional = true,
+ .handler = {&locked},
+ .completer = {[&](size_t, std::string_view prefix) {
+ completeFlakeRef(getStore(), prefix);
+ }}
+ });
}
void run(nix::ref<nix::Store> store) override
{
+ if (locked.empty()) {
+ locked = url;
+ }
+ auto registry = getRegistry();
auto ref = parseFlakeRef(url);
- auto userRegistry = fetchers::getUserRegistry();
- userRegistry->remove(ref.input);
- auto [tree, resolved] = ref.resolve(store).input.fetch(store);
+ auto locked_ref = parseFlakeRef(locked);
+ registry->remove(ref.input);
+ auto [tree, resolved] = locked_ref.resolve(store).input.fetch(store);
fetchers::Attrs extraAttrs;
if (ref.subdir != "") extraAttrs["dir"] = ref.subdir;
- userRegistry->add(ref.input, resolved, extraAttrs);
- userRegistry->write(fetchers::getUserRegistryPath());
+ registry->add(ref.input, resolved, extraAttrs);
+ registry->write(getRegistryPath());
}
};
@@ -171,6 +226,7 @@ struct CmdRegistry : virtual NixMultiCommand
void run() override
{
+ settings.requireExperimentalFeature(Xp::Flakes);
if (!command)
throw UsageError("'nix registry' requires a sub-command.");
command->second->prepare();
diff --git a/src/nix/registry.md b/src/nix/registry.md
index 557e5795b..a1674bd2e 100644
--- a/src/nix/registry.md
+++ b/src/nix/registry.md
@@ -41,7 +41,7 @@ A registry is a JSON file with the following format:
```json
{
"version": 2,
- [
+ "flakes": [
{
"from": {
"type": "indirect",
diff --git a/src/nix/run.cc b/src/nix/run.cc
index b5d8ab38a..b01fdebaa 100644
--- a/src/nix/run.cc
+++ b/src/nix/run.cc
@@ -1,3 +1,4 @@
+#include "run.hh"
#include "command.hh"
#include "common-args.hh"
#include "shared.hh"
@@ -20,45 +21,43 @@ using namespace nix;
std::string chrootHelperName = "__run_in_chroot";
-struct RunCommon : virtual Command
-{
+namespace nix {
- using Command::run;
+void runProgramInStore(ref<Store> store,
+ const std::string & program,
+ const Strings & args)
+{
+ stopProgressBar();
- void runProgram(ref<Store> store,
- const std::string & program,
- const Strings & args)
- {
- stopProgressBar();
+ restoreProcessContext();
- restoreProcessContext();
+ /* If this is a diverted store (i.e. its "logical" location
+ (typically /nix/store) differs from its "physical" location
+ (e.g. /home/eelco/nix/store), then run the command in a
+ chroot. For non-root users, this requires running it in new
+ mount and user namespaces. Unfortunately,
+ unshare(CLONE_NEWUSER) doesn't work in a multithreaded program
+ (which "nix" is), so we exec() a single-threaded helper program
+ (chrootHelper() below) to do the work. */
+ auto store2 = store.dynamic_pointer_cast<LocalStore>();
- /* If this is a diverted store (i.e. its "logical" location
- (typically /nix/store) differs from its "physical" location
- (e.g. /home/eelco/nix/store), then run the command in a
- chroot. For non-root users, this requires running it in new
- mount and user namespaces. Unfortunately,
- unshare(CLONE_NEWUSER) doesn't work in a multithreaded
- program (which "nix" is), so we exec() a single-threaded
- helper program (chrootHelper() below) to do the work. */
- auto store2 = store.dynamic_pointer_cast<LocalStore>();
+ if (store2 && store->storeDir != store2->getRealStoreDir()) {
+ Strings helperArgs = { chrootHelperName, store->storeDir, store2->getRealStoreDir(), program };
+ for (auto & arg : args) helperArgs.push_back(arg);
- if (store2 && store->storeDir != store2->realStoreDir) {
- Strings helperArgs = { chrootHelperName, store->storeDir, store2->realStoreDir, program };
- for (auto & arg : args) helperArgs.push_back(arg);
+ execv(readLink("/proc/self/exe").c_str(), stringsToCharPtrs(helperArgs).data());
- execv(readLink("/proc/self/exe").c_str(), stringsToCharPtrs(helperArgs).data());
+ throw SysError("could not execute chroot helper");
+ }
- throw SysError("could not execute chroot helper");
- }
+ execvp(program.c_str(), stringsToCharPtrs(args).data());
- execvp(program.c_str(), stringsToCharPtrs(args).data());
+ throw SysError("unable to execute '%s'", program);
+}
- throw SysError("unable to execute '%s'", program);
- }
-};
+}
-struct CmdShell : InstallablesCommand, RunCommon, MixEnvironment
+struct CmdShell : InstallablesCommand, MixEnvironment
{
using InstallablesCommand::run;
@@ -93,7 +92,7 @@ struct CmdShell : InstallablesCommand, RunCommon, MixEnvironment
void run(ref<Store> store) override
{
- auto outPaths = toStorePaths(store, Realise::Outputs, OperateOn::Output, installables);
+ auto outPaths = toStorePaths(getEvalStore(), store, Realise::Outputs, OperateOn::Output, installables);
auto accessor = store->getFSAccessor();
@@ -125,13 +124,13 @@ struct CmdShell : InstallablesCommand, RunCommon, MixEnvironment
Strings args;
for (auto & arg : command) args.push_back(arg);
- runProgram(store, *command.begin(), args);
+ runProgramInStore(store, *command.begin(), args);
}
};
static auto rCmdShell = registerCommand<CmdShell>("shell");
-struct CmdRun : InstallableCommand, RunCommon
+struct CmdRun : InstallableCommand
{
using InstallableCommand::run;
@@ -168,7 +167,7 @@ struct CmdRun : InstallableCommand, RunCommon
Strings getDefaultFlakeAttrPathPrefixes() override
{
- Strings res{"apps." + settings.thisSystem.get() + ".", "packages"};
+ Strings res{"apps." + settings.thisSystem.get() + "."};
for (auto & s : SourceExprCommand::getDefaultFlakeAttrPathPrefixes())
res.push_back(s);
return res;
@@ -178,14 +177,12 @@ struct CmdRun : InstallableCommand, RunCommon
{
auto state = getEvalState();
- auto app = installable->toApp(*state);
-
- state->store->buildPaths(toDerivedPaths(app.context));
+ auto app = installable->toApp(*state).resolve(getEvalStore(), store);
Strings allArgs{app.program};
for (auto & i : args) allArgs.push_back(i);
- runProgram(store, app.program, allArgs);
+ runProgramInStore(store, app.program, allArgs);
}
};
diff --git a/src/nix/run.hh b/src/nix/run.hh
new file mode 100644
index 000000000..6180a87dd
--- /dev/null
+++ b/src/nix/run.hh
@@ -0,0 +1,11 @@
+#pragma once
+
+#include "store-api.hh"
+
+namespace nix {
+
+void runProgramInStore(ref<Store> store,
+ const std::string & program,
+ const Strings & args);
+
+}
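A hypothetical call site for the new free function, matching the convention in `CmdShell` above where `args` starts with the program itself (argv[0]):

```cpp
// Sketch only: assumes an open `ref<Store> store`, as in the commands above.
Strings args{"hello", "--greeting", "hi"};  // argv[0] first, then arguments
runProgramInStore(store, "hello", args);    // execs; only returns by throwing
```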
diff --git a/src/nix/search.cc b/src/nix/search.cc
index c52a48d4e..0d8fdd5c2 100644
--- a/src/nix/search.cc
+++ b/src/nix/search.cc
@@ -62,6 +62,7 @@ struct CmdSearch : InstallableCommand, MixJSON
void run(ref<Store> store) override
{
settings.readOnlyMode = true;
+ evalSettings.enableImportFromDerivation.setDefault(false);
// Empty search string should match all packages
// Use "^" here instead of ".*" due to differences in resulting highlighting
diff --git a/src/nix/show-config.cc b/src/nix/show-config.cc
index 91721219b..29944e748 100644
--- a/src/nix/show-config.cc
+++ b/src/nix/show-config.cc
@@ -22,10 +22,7 @@ struct CmdShowConfig : Command, MixJSON
// FIXME: use appropriate JSON types (bool, ints, etc).
logger->cout("%s", globalConfig.toJSON().dump());
} else {
- std::map<std::string, Config::SettingInfo> settings;
- globalConfig.getSettings(settings);
- for (auto & s : settings)
- logger->cout("%s = %s", s.first, s.second.value);
+ logger->cout("%s", globalConfig.toKeyValue());
}
}
};
diff --git a/src/nix/show-derivation.cc b/src/nix/show-derivation.cc
index 2588a011d..c614be68d 100644
--- a/src/nix/show-derivation.cc
+++ b/src/nix/show-derivation.cc
@@ -65,18 +65,18 @@ struct CmdShowDerivation : InstallablesCommand
auto & outputName = _outputName; // work around clang bug
auto outputObj { outputsObj.object(outputName) };
std::visit(overloaded {
- [&](DerivationOutputInputAddressed doi) {
+ [&](const DerivationOutputInputAddressed & doi) {
outputObj.attr("path", store->printStorePath(doi.path));
},
- [&](DerivationOutputCAFixed dof) {
+ [&](const DerivationOutputCAFixed & dof) {
outputObj.attr("path", store->printStorePath(dof.path(*store, drv.name, outputName)));
outputObj.attr("hashAlgo", dof.hash.printMethodAlgo());
outputObj.attr("hash", dof.hash.hash.to_string(Base16, false));
},
- [&](DerivationOutputCAFloating dof) {
+ [&](const DerivationOutputCAFloating & dof) {
outputObj.attr("hashAlgo", makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
},
- [&](DerivationOutputDeferred) {},
+ [&](const DerivationOutputDeferred &) {},
}, output.output);
}
}
diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc
index c64b472b6..3d659d6d2 100644
--- a/src/nix/sigs.cc
+++ b/src/nix/sigs.cc
@@ -27,7 +27,7 @@ struct CmdCopySigs : StorePathsCommand
return "copy store path signatures from substituters";
}
- void run(ref<Store> store, StorePaths storePaths) override
+ void run(ref<Store> store, StorePaths && storePaths) override
{
if (substituterUris.empty())
throw UsageError("you must specify at least one substituter using '-s'");
@@ -113,7 +113,7 @@ struct CmdSign : StorePathsCommand
return "sign store paths";
}
- void run(ref<Store> store, StorePaths storePaths) override
+ void run(ref<Store> store, StorePaths && storePaths) override
{
if (secretKeyFile.empty())
throw UsageError("you must specify a secret key file using '-k'");
@@ -218,8 +218,7 @@ struct CmdKey : NixMultiCommand
void run() override
{
if (!command)
- throw UsageError("'nix flake' requires a sub-command.");
- settings.requireExperimentalFeature("flakes");
+ throw UsageError("'nix key' requires a sub-command.");
command->second->prepare();
command->second->run();
}
diff --git a/src/nix/store-delete.cc b/src/nix/store-delete.cc
index 10245978e..e4a3cb554 100644
--- a/src/nix/store-delete.cc
+++ b/src/nix/store-delete.cc
@@ -30,7 +30,7 @@ struct CmdStoreDelete : StorePathsCommand
;
}
- void run(ref<Store> store, std::vector<StorePath> storePaths) override
+ void run(ref<Store> store, std::vector<StorePath> && storePaths) override
{
for (auto & path : storePaths)
options.pathsToDelete.insert(path);
diff --git a/src/nix/store-repair.cc b/src/nix/store-repair.cc
index 1c7a4392e..8fcb3639a 100644
--- a/src/nix/store-repair.cc
+++ b/src/nix/store-repair.cc
@@ -17,7 +17,7 @@ struct CmdStoreRepair : StorePathsCommand
;
}
- void run(ref<Store> store, std::vector<StorePath> storePaths) override
+ void run(ref<Store> store, std::vector<StorePath> && storePaths) override
{
for (auto & path : storePaths)
store->repairPath(path);
diff --git a/src/nix/verify.cc b/src/nix/verify.cc
index 1721c7f16..e92df1303 100644
--- a/src/nix/verify.cc
+++ b/src/nix/verify.cc
@@ -59,7 +59,7 @@ struct CmdVerify : StorePathsCommand
;
}
- void run(ref<Store> store, StorePaths storePaths) override
+ void run(ref<Store> store, StorePaths && storePaths) override
{
std::vector<ref<Store>> substituters;
for (auto & s : substituterUris)
@@ -97,15 +97,11 @@ struct CmdVerify : StorePathsCommand
if (!noContents) {
- std::unique_ptr<AbstractHashSink> hashSink;
- if (!info->ca)
- hashSink = std::make_unique<HashSink>(info->narHash.type);
- else
- hashSink = std::make_unique<HashModuloSink>(info->narHash.type, std::string(info->path.hashPart()));
+ auto hashSink = HashSink(info->narHash.type);
- store->narFromPath(info->path, *hashSink);
+ store->narFromPath(info->path, hashSink);
- auto hash = hashSink->finish();
+ auto hash = hashSink.finish();
if (hash.first != info->narHash) {
corrupted++;
diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc
index 7a4ca5172..2f6b361bb 100644
--- a/src/nix/why-depends.cc
+++ b/src/nix/why-depends.cc
@@ -62,9 +62,9 @@ struct CmdWhyDepends : SourceExprCommand
void run(ref<Store> store) override
{
auto package = parseInstallable(store, _package);
- auto packagePath = toStorePath(store, Realise::Outputs, operateOn, package);
+ auto packagePath = toStorePath(getEvalStore(), store, Realise::Outputs, operateOn, package);
auto dependency = parseInstallable(store, _dependency);
- auto dependencyPath = toStorePath(store, Realise::Derivation, operateOn, dependency);
+ auto dependencyPath = toStorePath(getEvalStore(), store, Realise::Derivation, operateOn, dependency);
auto dependencyPathHash = dependencyPath.hashPart();
StorePathSet closure;
diff --git a/src/nlohmann/json.hpp b/src/nlohmann/json.hpp
index c9af0bed3..a70aaf8cb 100644
--- a/src/nlohmann/json.hpp
+++ b/src/nlohmann/json.hpp
@@ -1,12 +1,12 @@
/*
__ _____ _____ _____
__| | __| | | | JSON for Modern C++
-| | |__ | | | | | | version 3.5.0
+| | |__ | | | | | | version 3.9.1
|_____|_____|_____|_|___| https://github.com/nlohmann/json
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
SPDX-License-Identifier: MIT
-Copyright (c) 2013-2018 Niels Lohmann <http://nlohmann.me>.
+Copyright (c) 2013-2019 Niels Lohmann <http://nlohmann.me>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -27,92 +27,1997 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
-#ifndef NLOHMANN_JSON_HPP
-#define NLOHMANN_JSON_HPP
+#ifndef INCLUDE_NLOHMANN_JSON_HPP_
+#define INCLUDE_NLOHMANN_JSON_HPP_
#define NLOHMANN_JSON_VERSION_MAJOR 3
-#define NLOHMANN_JSON_VERSION_MINOR 5
-#define NLOHMANN_JSON_VERSION_PATCH 0
+#define NLOHMANN_JSON_VERSION_MINOR 9
+#define NLOHMANN_JSON_VERSION_PATCH 1
#include <algorithm> // all_of, find, for_each
-#include <cassert> // assert
-#include <ciso646> // and, not, or
#include <cstddef> // nullptr_t, ptrdiff_t, size_t
#include <functional> // hash, less
#include <initializer_list> // initializer_list
#include <iosfwd> // istream, ostream
#include <iterator> // random_access_iterator_tag
+#include <memory> // unique_ptr
#include <numeric> // accumulate
#include <string> // string, stoi, to_string
#include <utility> // declval, forward, move, pair, swap
+#include <vector> // vector
-// #include <nlohmann/json_fwd.hpp>
-#ifndef NLOHMANN_JSON_FWD_HPP
-#define NLOHMANN_JSON_FWD_HPP
+// #include <nlohmann/adl_serializer.hpp>
-#include <cstdint> // int64_t, uint64_t
+
+#include <utility>
+
+// #include <nlohmann/detail/conversions/from_json.hpp>
+
+
+#include <algorithm> // transform
+#include <array> // array
+#include <forward_list> // forward_list
+#include <iterator> // inserter, front_inserter, end
#include <map> // map
-#include <memory> // allocator
#include <string> // string
-#include <vector> // vector
+#include <tuple> // tuple, make_tuple
+#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
+#include <unordered_map> // unordered_map
+#include <utility> // pair, declval
+#include <valarray> // valarray
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+
+#include <exception> // exception
+#include <stdexcept> // runtime_error
+#include <string> // to_string
+
+// #include <nlohmann/detail/input/position_t.hpp>
+
+
+#include <cstddef> // size_t
-/*!
-@brief namespace for Niels Lohmann
-@see https://github.com/nlohmann
-@since version 1.0.0
-*/
namespace nlohmann
{
-/*!
-@brief default JSONSerializer template argument
+namespace detail
+{
+/// struct to capture the start position of the current token
+struct position_t
+{
+ /// the total number of characters read
+ std::size_t chars_read_total = 0;
+ /// the number of characters read in the current line
+ std::size_t chars_read_current_line = 0;
+ /// the number of lines read
+ std::size_t lines_read = 0;
-This serializer ignores the template arguments and uses ADL
-([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl))
-for serialization.
-*/
-template<typename T = void, typename SFINAE = void>
-struct adl_serializer;
+ /// conversion to size_t to preserve SAX interface
+ constexpr operator size_t() const
+ {
+ return chars_read_total;
+ }
+};
-template<template<typename U, typename V, typename... Args> class ObjectType =
- std::map,
- template<typename U, typename... Args> class ArrayType = std::vector,
- class StringType = std::string, class BooleanType = bool,
- class NumberIntegerType = std::int64_t,
- class NumberUnsignedType = std::uint64_t,
- class NumberFloatType = double,
- template<typename U> class AllocatorType = std::allocator,
- template<typename T, typename SFINAE = void> class JSONSerializer =
- adl_serializer>
-class basic_json;
+} // namespace detail
+} // namespace nlohmann
-/*!
-@brief JSON Pointer
+// #include <nlohmann/detail/macro_scope.hpp>
-A JSON pointer defines a string syntax for identifying a specific value
-within a JSON document. It can be used with functions `at` and
-`operator[]`. Furthermore, JSON pointers are the base for JSON patches.
-@sa [RFC 6901](https://tools.ietf.org/html/rfc6901)
+#include <utility> // pair
+// #include <nlohmann/thirdparty/hedley/hedley.hpp>
+/* Hedley - https://nemequ.github.io/hedley
+ * Created by Evan Nemerson <evan@nemerson.com>
+ *
+ * To the extent possible under law, the author(s) have dedicated all
+ * copyright and related and neighboring rights to this software to
+ * the public domain worldwide. This software is distributed without
+ * any warranty.
+ *
+ * For details, see <http://creativecommons.org/publicdomain/zero/1.0/>.
+ * SPDX-License-Identifier: CC0-1.0
+ */
+
+#if !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < 13)
+#if defined(JSON_HEDLEY_VERSION)
+ #undef JSON_HEDLEY_VERSION
+#endif
+#define JSON_HEDLEY_VERSION 13
-@since version 2.0.0
-*/
-template<typename BasicJsonType>
-class json_pointer;
+#if defined(JSON_HEDLEY_STRINGIFY_EX)
+ #undef JSON_HEDLEY_STRINGIFY_EX
+#endif
+#define JSON_HEDLEY_STRINGIFY_EX(x) #x
-/*!
-@brief default JSON class
+#if defined(JSON_HEDLEY_STRINGIFY)
+ #undef JSON_HEDLEY_STRINGIFY
+#endif
+#define JSON_HEDLEY_STRINGIFY(x) JSON_HEDLEY_STRINGIFY_EX(x)
-This type is the default specialization of the @ref basic_json class which
-uses the standard template types.
+#if defined(JSON_HEDLEY_CONCAT_EX)
+ #undef JSON_HEDLEY_CONCAT_EX
+#endif
+#define JSON_HEDLEY_CONCAT_EX(a,b) a##b
-@since version 1.0.0
-*/
-using json = basic_json<>;
-} // namespace nlohmann
+#if defined(JSON_HEDLEY_CONCAT)
+ #undef JSON_HEDLEY_CONCAT
+#endif
+#define JSON_HEDLEY_CONCAT(a,b) JSON_HEDLEY_CONCAT_EX(a,b)
+#if defined(JSON_HEDLEY_CONCAT3_EX)
+ #undef JSON_HEDLEY_CONCAT3_EX
#endif
+#define JSON_HEDLEY_CONCAT3_EX(a,b,c) a##b##c
-// #include <nlohmann/detail/macro_scope.hpp>
+#if defined(JSON_HEDLEY_CONCAT3)
+ #undef JSON_HEDLEY_CONCAT3
+#endif
+#define JSON_HEDLEY_CONCAT3(a,b,c) JSON_HEDLEY_CONCAT3_EX(a,b,c)
+
+#if defined(JSON_HEDLEY_VERSION_ENCODE)
+ #undef JSON_HEDLEY_VERSION_ENCODE
+#endif
+#define JSON_HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision))
+
+#if defined(JSON_HEDLEY_VERSION_DECODE_MAJOR)
+ #undef JSON_HEDLEY_VERSION_DECODE_MAJOR
+#endif
+#define JSON_HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000)
+
+#if defined(JSON_HEDLEY_VERSION_DECODE_MINOR)
+ #undef JSON_HEDLEY_VERSION_DECODE_MINOR
+#endif
+#define JSON_HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000)
+
+#if defined(JSON_HEDLEY_VERSION_DECODE_REVISION)
+ #undef JSON_HEDLEY_VERSION_DECODE_REVISION
+#endif
+#define JSON_HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000)
+
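These macros pack a three-part version into one integer as major * 1000000 + minor * 1000 + revision, so the three decoders are plain division and modulus. A worked example:

```cpp
// 3.9.1 encodes to 3009001 and decodes back componentwise.
static_assert(JSON_HEDLEY_VERSION_ENCODE(3, 9, 1) == 3009001, "");
static_assert(JSON_HEDLEY_VERSION_DECODE_MAJOR(3009001) == 3, "");
static_assert(JSON_HEDLEY_VERSION_DECODE_MINOR(3009001) == 9, "");
static_assert(JSON_HEDLEY_VERSION_DECODE_REVISION(3009001) == 1, "");
```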
+#if defined(JSON_HEDLEY_GNUC_VERSION)
+ #undef JSON_HEDLEY_GNUC_VERSION
+#endif
+#if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__)
+ #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
+#elif defined(__GNUC__)
+ #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_VERSION_CHECK)
+ #undef JSON_HEDLEY_GNUC_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_GNUC_VERSION)
+ #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GNUC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_MSVC_VERSION)
+ #undef JSON_HEDLEY_MSVC_VERSION
+#endif
+#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000)
+ #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100)
+#elif defined(_MSC_FULL_VER)
+ #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10)
+#elif defined(_MSC_VER)
+ #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0)
+#endif
+
+#if defined(JSON_HEDLEY_MSVC_VERSION_CHECK)
+ #undef JSON_HEDLEY_MSVC_VERSION_CHECK
+#endif
+#if !defined(_MSC_VER)
+ #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0)
+#elif defined(_MSC_VER) && (_MSC_VER >= 1400)
+ #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch)))
+#elif defined(_MSC_VER) && (_MSC_VER >= 1200)
+ #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch)))
+#else
+ #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor)))
+#endif
+
+#if defined(JSON_HEDLEY_INTEL_VERSION)
+ #undef JSON_HEDLEY_INTEL_VERSION
+#endif
+#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE)
+ #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE)
+#elif defined(__INTEL_COMPILER)
+ #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0)
+#endif
+
+#if defined(JSON_HEDLEY_INTEL_VERSION_CHECK)
+ #undef JSON_HEDLEY_INTEL_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_INTEL_VERSION)
+ #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_PGI_VERSION)
+ #undef JSON_HEDLEY_PGI_VERSION
+#endif
+#if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__)
+ #define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__)
+#endif
+
+#if defined(JSON_HEDLEY_PGI_VERSION_CHECK)
+ #undef JSON_HEDLEY_PGI_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_PGI_VERSION)
+ #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_SUNPRO_VERSION)
+ #undef JSON_HEDLEY_SUNPRO_VERSION
+#endif
+#if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000)
+ #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10)
+#elif defined(__SUNPRO_C)
+ #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf)
+#elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000)
+ #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10)
+#elif defined(__SUNPRO_CC)
+ #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf)
+#endif
+
+#if defined(JSON_HEDLEY_SUNPRO_VERSION_CHECK)
+ #undef JSON_HEDLEY_SUNPRO_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_SUNPRO_VERSION)
+ #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_SUNPRO_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION)
+ #undef JSON_HEDLEY_EMSCRIPTEN_VERSION
+#endif
+#if defined(__EMSCRIPTEN__)
+ #define JSON_HEDLEY_EMSCRIPTEN_VERSION JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__)
+#endif
+
+#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK)
+ #undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION)
+ #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_EMSCRIPTEN_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_ARM_VERSION)
+ #undef JSON_HEDLEY_ARM_VERSION
+#endif
+#if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION)
+ #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100)
+#elif defined(__CC_ARM) && defined(__ARMCC_VERSION)
+ #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100)
+#endif
+
+#if defined(JSON_HEDLEY_ARM_VERSION_CHECK)
+ #undef JSON_HEDLEY_ARM_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_ARM_VERSION)
+ #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_IBM_VERSION)
+ #undef JSON_HEDLEY_IBM_VERSION
+#endif
+#if defined(__ibmxl__)
+ #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__)
+#elif defined(__xlC__) && defined(__xlC_ver__)
+ #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff)
+#elif defined(__xlC__)
+ #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0)
+#endif
+
+#if defined(JSON_HEDLEY_IBM_VERSION_CHECK)
+ #undef JSON_HEDLEY_IBM_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_IBM_VERSION)
+ #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_VERSION)
+ #undef JSON_HEDLEY_TI_VERSION
+#endif
+#if \
+ defined(__TI_COMPILER_VERSION__) && \
+ ( \
+ defined(__TMS470__) || defined(__TI_ARM__) || \
+ defined(__MSP430__) || \
+ defined(__TMS320C2000__) \
+ )
+#if (__TI_COMPILER_VERSION__ >= 16000000)
+ #define JSON_HEDLEY_TI_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+#endif
+
+#if defined(JSON_HEDLEY_TI_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_VERSION)
+ #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL2000_VERSION)
+ #undef JSON_HEDLEY_TI_CL2000_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C2000__)
+ #define JSON_HEDLEY_TI_CL2000_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL2000_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_CL2000_VERSION)
+ #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL2000_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL430_VERSION)
+ #undef JSON_HEDLEY_TI_CL430_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && defined(__MSP430__)
+ #define JSON_HEDLEY_TI_CL430_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL430_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_CL430_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_CL430_VERSION)
+ #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL430_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_ARMCL_VERSION)
+ #undef JSON_HEDLEY_TI_ARMCL_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && (defined(__TMS470__) || defined(__TI_ARM__))
+ #define JSON_HEDLEY_TI_ARMCL_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_ARMCL_VERSION)
+ #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_ARMCL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL6X_VERSION)
+ #undef JSON_HEDLEY_TI_CL6X_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C6X__)
+ #define JSON_HEDLEY_TI_CL6X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL6X_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_CL6X_VERSION)
+ #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL6X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL7X_VERSION)
+ #undef JSON_HEDLEY_TI_CL7X_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && defined(__C7000__)
+ #define JSON_HEDLEY_TI_CL7X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_CL7X_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_CL7X_VERSION)
+ #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL7X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TI_CLPRU_VERSION)
+ #undef JSON_HEDLEY_TI_CLPRU_VERSION
+#endif
+#if defined(__TI_COMPILER_VERSION__) && defined(__PRU__)
+ #define JSON_HEDLEY_TI_CLPRU_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000))
+#endif
+
+#if defined(JSON_HEDLEY_TI_CLPRU_VERSION_CHECK)
+ #undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TI_CLPRU_VERSION)
+ #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CLPRU_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_CRAY_VERSION)
+ #undef JSON_HEDLEY_CRAY_VERSION
+#endif
+#if defined(_CRAYC)
+ #if defined(_RELEASE_PATCHLEVEL)
+ #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL)
+ #else
+ #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0)
+ #endif
+#endif
+
+#if defined(JSON_HEDLEY_CRAY_VERSION_CHECK)
+ #undef JSON_HEDLEY_CRAY_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_CRAY_VERSION)
+ #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_IAR_VERSION)
+ #undef JSON_HEDLEY_IAR_VERSION
+#endif
+#if defined(__IAR_SYSTEMS_ICC__)
+ #if __VER__ > 1000
+ #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000))
+ #else
+ #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(__VER__ / 100, __VER__ % 100, 0)
+ #endif
+#endif
+
+#if defined(JSON_HEDLEY_IAR_VERSION_CHECK)
+ #undef JSON_HEDLEY_IAR_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_IAR_VERSION)
+ #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_TINYC_VERSION)
+ #undef JSON_HEDLEY_TINYC_VERSION
+#endif
+#if defined(__TINYC__)
+ #define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100)
+#endif
+
+#if defined(JSON_HEDLEY_TINYC_VERSION_CHECK)
+ #undef JSON_HEDLEY_TINYC_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_TINYC_VERSION)
+ #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_DMC_VERSION)
+ #undef JSON_HEDLEY_DMC_VERSION
+#endif
+#if defined(__DMC__)
+ #define JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf)
+#endif
+
+#if defined(JSON_HEDLEY_DMC_VERSION_CHECK)
+ #undef JSON_HEDLEY_DMC_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_DMC_VERSION)
+ #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_COMPCERT_VERSION)
+ #undef JSON_HEDLEY_COMPCERT_VERSION
+#endif
+#if defined(__COMPCERT_VERSION__)
+ #define JSON_HEDLEY_COMPCERT_VERSION JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100)
+#endif
+
+#if defined(JSON_HEDLEY_COMPCERT_VERSION_CHECK)
+ #undef JSON_HEDLEY_COMPCERT_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_COMPCERT_VERSION)
+ #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_PELLES_VERSION)
+ #undef JSON_HEDLEY_PELLES_VERSION
+#endif
+#if defined(__POCC__)
+ #define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0)
+#endif
+
+#if defined(JSON_HEDLEY_PELLES_VERSION_CHECK)
+ #undef JSON_HEDLEY_PELLES_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_PELLES_VERSION)
+ #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_VERSION)
+ #undef JSON_HEDLEY_GCC_VERSION
+#endif
+#if \
+ defined(JSON_HEDLEY_GNUC_VERSION) && \
+ !defined(__clang__) && \
+ !defined(JSON_HEDLEY_INTEL_VERSION) && \
+ !defined(JSON_HEDLEY_PGI_VERSION) && \
+ !defined(JSON_HEDLEY_ARM_VERSION) && \
+ !defined(JSON_HEDLEY_TI_VERSION) && \
+ !defined(JSON_HEDLEY_TI_ARMCL_VERSION) && \
+ !defined(JSON_HEDLEY_TI_CL430_VERSION) && \
+ !defined(JSON_HEDLEY_TI_CL2000_VERSION) && \
+ !defined(JSON_HEDLEY_TI_CL6X_VERSION) && \
+ !defined(JSON_HEDLEY_TI_CL7X_VERSION) && \
+ !defined(JSON_HEDLEY_TI_CLPRU_VERSION) && \
+ !defined(__COMPCERT__)
+ #define JSON_HEDLEY_GCC_VERSION JSON_HEDLEY_GNUC_VERSION
+#endif
+
+#if defined(JSON_HEDLEY_GCC_VERSION_CHECK)
+ #undef JSON_HEDLEY_GCC_VERSION_CHECK
+#endif
+#if defined(JSON_HEDLEY_GCC_VERSION)
+ #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch))
+#else
+ #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0)
+#endif
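+
+/* Note that JSON_HEDLEY_GNUC_VERSION is defined for every compiler that
+   masquerades as GCC by defining __GNUC__ (clang, ICC, PGI, the TI
+   toolchains, ...); JSON_HEDLEY_GCC_VERSION is therefore only defined once
+   all of those impersonators have been excluded above, so the GCC_* macros
+   target GCC itself while the GNUC_* macros target anything GNU-compatible. */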
+
+#if defined(JSON_HEDLEY_HAS_ATTRIBUTE)
+ #undef JSON_HEDLEY_HAS_ATTRIBUTE
+#endif
+#if defined(__has_attribute)
+ #define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute)
+#else
+ #define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_ATTRIBUTE)
+ #undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE
+#endif
+#if defined(__has_attribute)
+ #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) __has_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_ATTRIBUTE)
+ #undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE
+#endif
+#if defined(__has_attribute)
+ #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) __has_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
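+
+/* The same three-tier pattern recurs below for __has_builtin, __has_feature,
+   __has_extension, __has_declspec_attribute and __has_warning: the plain
+   HAS_* form degrades to 0 when the probe macro is unavailable, while the
+   GNUC_HAS_* / GCC_HAS_* forms accept a fallback version triple, so e.g.
+   JSON_HEDLEY_GCC_HAS_ATTRIBUTE(warn_unused_result,3,4,0) still reports
+   support on a GCC old enough to predate __has_attribute. */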
+
+#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE)
+ #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE
+#endif
+#if \
+ defined(__has_cpp_attribute) && \
+ defined(__cplusplus) && \
+ (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0))
+ #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute)
+#else
+ #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS)
+ #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS
+#endif
+#if !defined(__cplusplus) || !defined(__has_cpp_attribute)
+ #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0)
+#elif \
+ !defined(JSON_HEDLEY_PGI_VERSION) && \
+ !defined(JSON_HEDLEY_IAR_VERSION) && \
+ (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \
+ (!defined(JSON_HEDLEY_MSVC_VERSION) || JSON_HEDLEY_MSVC_VERSION_CHECK(19,20,0))
+ #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(ns::attribute)
+#else
+ #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE)
+ #undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE
+#endif
+#if defined(__has_cpp_attribute) && defined(__cplusplus)
+ #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE)
+ #undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE
+#endif
+#if defined(__has_cpp_attribute) && defined(__cplusplus)
+ #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_BUILTIN)
+ #undef JSON_HEDLEY_HAS_BUILTIN
+#endif
+#if defined(__has_builtin)
+ #define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin)
+#else
+ #define JSON_HEDLEY_HAS_BUILTIN(builtin) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_BUILTIN)
+ #undef JSON_HEDLEY_GNUC_HAS_BUILTIN
+#endif
+#if defined(__has_builtin)
+ #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_BUILTIN)
+ #undef JSON_HEDLEY_GCC_HAS_BUILTIN
+#endif
+#if defined(__has_builtin)
+ #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin)
+#else
+ #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_FEATURE)
+ #undef JSON_HEDLEY_HAS_FEATURE
+#endif
+#if defined(__has_feature)
+ #define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature)
+#else
+ #define JSON_HEDLEY_HAS_FEATURE(feature) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_FEATURE)
+ #undef JSON_HEDLEY_GNUC_HAS_FEATURE
+#endif
+#if defined(__has_feature)
+ #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_FEATURE)
+ #undef JSON_HEDLEY_GCC_HAS_FEATURE
+#endif
+#if defined(__has_feature)
+ #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature)
+#else
+ #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_EXTENSION)
+ #undef JSON_HEDLEY_HAS_EXTENSION
+#endif
+#if defined(__has_extension)
+ #define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension)
+#else
+ #define JSON_HEDLEY_HAS_EXTENSION(extension) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_EXTENSION)
+ #undef JSON_HEDLEY_GNUC_HAS_EXTENSION
+#endif
+#if defined(__has_extension)
+ #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_EXTENSION)
+ #undef JSON_HEDLEY_GCC_HAS_EXTENSION
+#endif
+#if defined(__has_extension)
+ #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension)
+#else
+ #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE)
+ #undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE
+#endif
+#if defined(__has_declspec_attribute)
+ #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute)
+#else
+ #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE)
+ #undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE
+#endif
+#if defined(__has_declspec_attribute)
+ #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE)
+ #undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE
+#endif
+#if defined(__has_declspec_attribute)
+ #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute)
+#else
+ #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_HAS_WARNING)
+ #undef JSON_HEDLEY_HAS_WARNING
+#endif
+#if defined(__has_warning)
+ #define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning)
+#else
+ #define JSON_HEDLEY_HAS_WARNING(warning) (0)
+#endif
+
+#if defined(JSON_HEDLEY_GNUC_HAS_WARNING)
+ #undef JSON_HEDLEY_GNUC_HAS_WARNING
+#endif
+#if defined(__has_warning)
+ #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning)
+#else
+ #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_GCC_HAS_WARNING)
+ #undef JSON_HEDLEY_GCC_HAS_WARNING
+#endif
+#if defined(__has_warning)
+ #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning)
+#else
+ #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+/* JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ is for
+ HEDLEY INTERNAL USE ONLY. API subject to change without notice. */
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
+ #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_
+#endif
+#if defined(__cplusplus)
+# if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat")
+# if JSON_HEDLEY_HAS_WARNING("-Wc++17-extensions")
+# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
+ _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \
+ xpr \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# else
+# define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
+ xpr \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# endif
+# endif
+#endif
+#if !defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(x) x
+#endif
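+
+/* The wrapper above lets later macros expand to C++11-and-newer tokens such
+   as [[deprecated]], [[nodiscard]] or nullptr without tripping clang's
+   -Wc++98-compat / -Wc++17-extensions in pedantic builds; on all other
+   compilers it passes its argument through unchanged. */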
+
+#if defined(JSON_HEDLEY_CONST_CAST)
+ #undef JSON_HEDLEY_CONST_CAST
+#endif
+#if defined(__cplusplus)
+# define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast<T>(expr))
+#elif \
+ JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+# define JSON_HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
+ ((T) (expr)); \
+ JSON_HEDLEY_DIAGNOSTIC_POP \
+ }))
+#else
+# define JSON_HEDLEY_CONST_CAST(T, expr) ((T) (expr))
+#endif
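+
+/* In the GNU C branch above, the statement expression (__extension__ ({...}))
+   scopes the -Wcast-qual suppression to this one cast.  A sketch of intended
+   use (identifiers hypothetical):
+
+       char *p = JSON_HEDLEY_CONST_CAST(char *, some_const_string);
+
+   which becomes const_cast<char *>(...) in C++ and a plain cast in C. */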
+
+#if defined(JSON_HEDLEY_REINTERPRET_CAST)
+ #undef JSON_HEDLEY_REINTERPRET_CAST
+#endif
+#if defined(__cplusplus)
+ #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast<T>(expr))
+#else
+ #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) ((T) (expr))
+#endif
+
+#if defined(JSON_HEDLEY_STATIC_CAST)
+ #undef JSON_HEDLEY_STATIC_CAST
+#endif
+#if defined(__cplusplus)
+ #define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast<T>(expr))
+#else
+ #define JSON_HEDLEY_STATIC_CAST(T, expr) ((T) (expr))
+#endif
+
+#if defined(JSON_HEDLEY_CPP_CAST)
+ #undef JSON_HEDLEY_CPP_CAST
+#endif
+#if defined(__cplusplus)
+# if JSON_HEDLEY_HAS_WARNING("-Wold-style-cast")
+# define JSON_HEDLEY_CPP_CAST(T, expr) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("clang diagnostic ignored \"-Wold-style-cast\"") \
+ ((T) (expr)) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# elif JSON_HEDLEY_IAR_VERSION_CHECK(8,3,0)
+# define JSON_HEDLEY_CPP_CAST(T, expr) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("diag_suppress=Pe137") \
+ ((T) (expr)) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# else
+# define JSON_HEDLEY_CPP_CAST(T, expr) ((T) (expr))
+# endif
+#else
+# define JSON_HEDLEY_CPP_CAST(T, expr) (expr)
+#endif
+
+#if \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \
+ defined(__clang__) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,0,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
+ JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \
+ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \
+ (JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR))
+ #define JSON_HEDLEY_PRAGMA(value) _Pragma(#value)
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
+ #define JSON_HEDLEY_PRAGMA(value) __pragma(value)
+#else
+ #define JSON_HEDLEY_PRAGMA(value)
+#endif
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_PUSH)
+ #undef JSON_HEDLEY_DIAGNOSTIC_PUSH
+#endif
+#if defined(JSON_HEDLEY_DIAGNOSTIC_POP)
+ #undef JSON_HEDLEY_DIAGNOSTIC_POP
+#endif
+#if defined(__clang__)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop")
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)")
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push))
+ #define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop))
+#elif JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop")
+#elif \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,4,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop")
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)")
+ #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)")
+#else
+ #define JSON_HEDLEY_DIAGNOSTIC_PUSH
+ #define JSON_HEDLEY_DIAGNOSTIC_POP
+#endif
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED)
+ #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wdeprecated-declarations")
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"")
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)")
+#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444")
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996))
+#elif \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718")
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)")
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215")
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)")
+#else
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
+#endif
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS)
+ #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas")
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"")
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)")
+#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675")
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068))
+#elif \
+ JSON_HEDLEY_TI_VERSION_CHECK(16,9,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161")
+#else
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS
+#endif
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES)
+ #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wunknown-attributes")
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("clang diagnostic ignored \"-Wunknown-attributes\"")
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("warning(disable:1292)")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:5030))
+#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097")
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("error_messages(off,attrskipunsup)")
+#elif \
+ JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1173")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress=Pe1097")
+#else
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES
+#endif
+
+#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL)
+ #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wcast-qual")
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"")
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)")
+#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0)
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"")
+#else
+ #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL
+#endif
+
+#if defined(JSON_HEDLEY_DEPRECATED)
+ #undef JSON_HEDLEY_DEPRECATED
+#endif
+#if defined(JSON_HEDLEY_DEPRECATED_FOR)
+ #undef JSON_HEDLEY_DEPRECATED_FOR
+#endif
+#if JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0)
+ #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " # since))
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement))
+#elif defined(__cplusplus) && (__cplusplus >= 201402L)
+ #define JSON_HEDLEY_DEPRECATED(since) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since)]])
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since "; use " #replacement)]])
+#elif \
+ JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(18,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0)
+ #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since)))
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement)))
+#elif \
+ JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__))
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__))
+#elif \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \
+ JSON_HEDLEY_PELLES_VERSION_CHECK(6,50,0)
+ #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated)
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated)
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_DEPRECATED(since) _Pragma("deprecated")
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated")
+#else
+ #define JSON_HEDLEY_DEPRECATED(since)
+ #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement)
+#endif
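+
+/* A usage sketch (function names hypothetical):
+
+       JSON_HEDLEY_DEPRECATED_FOR(3.0, new_parse)
+       int old_parse(const char *s);
+
+   On compilers with message-bearing attributes each call site then warns
+   "Since 3.0; use new_parse"; elsewhere this degrades to an unadorned
+   deprecation warning, or to nothing at all. */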
+
+#if defined(JSON_HEDLEY_UNAVAILABLE)
+ #undef JSON_HEDLEY_UNAVAILABLE
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(warning) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since)))
+#else
+ #define JSON_HEDLEY_UNAVAILABLE(available_since)
+#endif
+
+#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT)
+ #undef JSON_HEDLEY_WARN_UNUSED_RESULT
+#endif
+#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT_MSG)
+ #undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG
+#endif
+#if (JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) >= 201907L)
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]])
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard(msg)]])
+#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard)
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]])
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]])
+#elif \
+ JSON_HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
+ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__))
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) __attribute__((__warn_unused_result__))
+#elif defined(_Check_return_) /* SAL */
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT _Check_return_
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) _Check_return_
+#else
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT
+ #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg)
+#endif
+
+#if defined(JSON_HEDLEY_SENTINEL)
+ #undef JSON_HEDLEY_SENTINEL
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(sentinel) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0)
+ #define JSON_HEDLEY_SENTINEL(position) __attribute__((__sentinel__(position)))
+#else
+ #define JSON_HEDLEY_SENTINEL(position)
+#endif
+
+#if defined(JSON_HEDLEY_NO_RETURN)
+ #undef JSON_HEDLEY_NO_RETURN
+#endif
+#if JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_NO_RETURN __noreturn
+#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__))
+#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+ #define JSON_HEDLEY_NO_RETURN _Noreturn
+#elif defined(__cplusplus) && (__cplusplus >= 201103L)
+ #define JSON_HEDLEY_NO_RETURN JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[noreturn]])
+#elif \
+ JSON_HEDLEY_HAS_ATTRIBUTE(noreturn) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,2,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__))
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
+ #define JSON_HEDLEY_NO_RETURN _Pragma("does_not_return")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0)
+ #define JSON_HEDLEY_NO_RETURN __declspec(noreturn)
+#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus)
+ #define JSON_HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;")
+#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0)
+ #define JSON_HEDLEY_NO_RETURN __attribute((noreturn))
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0)
+ #define JSON_HEDLEY_NO_RETURN __declspec(noreturn)
+#else
+ #define JSON_HEDLEY_NO_RETURN
+#endif
+
+#if defined(JSON_HEDLEY_NO_ESCAPE)
+ #undef JSON_HEDLEY_NO_ESCAPE
+#endif
+#if JSON_HEDLEY_HAS_ATTRIBUTE(noescape)
+ #define JSON_HEDLEY_NO_ESCAPE __attribute__((__noescape__))
+#else
+ #define JSON_HEDLEY_NO_ESCAPE
+#endif
+
+#if defined(JSON_HEDLEY_UNREACHABLE)
+ #undef JSON_HEDLEY_UNREACHABLE
+#endif
+#if defined(JSON_HEDLEY_UNREACHABLE_RETURN)
+ #undef JSON_HEDLEY_UNREACHABLE_RETURN
+#endif
+#if defined(JSON_HEDLEY_ASSUME)
+ #undef JSON_HEDLEY_ASSUME
+#endif
+#if \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_ASSUME(expr) __assume(expr)
+#elif JSON_HEDLEY_HAS_BUILTIN(__builtin_assume)
+ #define JSON_HEDLEY_ASSUME(expr) __builtin_assume(expr)
+#elif \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0)
+ #if defined(__cplusplus)
+ #define JSON_HEDLEY_ASSUME(expr) std::_nassert(expr)
+ #else
+ #define JSON_HEDLEY_ASSUME(expr) _nassert(expr)
+ #endif
+#endif
+#if \
+ (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(JSON_HEDLEY_ARM_VERSION))) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(18,10,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,5)
+ #define JSON_HEDLEY_UNREACHABLE() __builtin_unreachable()
+#elif defined(JSON_HEDLEY_ASSUME)
+ #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0)
+#endif
+#if !defined(JSON_HEDLEY_ASSUME)
+ #if defined(JSON_HEDLEY_UNREACHABLE)
+ #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, ((expr) ? 1 : (JSON_HEDLEY_UNREACHABLE(), 1)))
+ #else
+ #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, expr)
+ #endif
+#endif
+#if defined(JSON_HEDLEY_UNREACHABLE)
+ #if \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0)
+ #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (JSON_HEDLEY_STATIC_CAST(void, JSON_HEDLEY_ASSUME(0)), (value))
+ #else
+ #define JSON_HEDLEY_UNREACHABLE_RETURN(value) JSON_HEDLEY_UNREACHABLE()
+ #endif
+#else
+ #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (value)
+#endif
+#if !defined(JSON_HEDLEY_UNREACHABLE)
+ #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0)
+#endif
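+
+/* ASSUME and UNREACHABLE bootstrap each other so that having either
+   primitive suffices: with only __builtin_unreachable(), ASSUME(expr)
+   becomes "if expr is false, control flows into unreachable code"; with
+   only __assume()/_nassert(), UNREACHABLE() becomes ASSUME(0).  If neither
+   exists, ASSUME(expr) merely evaluates and discards expr, and
+   UNREACHABLE_RETURN(value) falls back to actually returning the value. */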
+
+JSON_HEDLEY_DIAGNOSTIC_PUSH
+#if JSON_HEDLEY_HAS_WARNING("-Wpedantic")
+ #pragma clang diagnostic ignored "-Wpedantic"
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") && defined(__cplusplus)
+ #pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
+#endif
+#if JSON_HEDLEY_GCC_HAS_WARNING("-Wvariadic-macros",4,0,0)
+ #if defined(__clang__)
+ #pragma clang diagnostic ignored "-Wvariadic-macros"
+ #elif defined(JSON_HEDLEY_GCC_VERSION)
+ #pragma GCC diagnostic ignored "-Wvariadic-macros"
+ #endif
+#endif
+#if defined(JSON_HEDLEY_NON_NULL)
+ #undef JSON_HEDLEY_NON_NULL
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(nonnull) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0)
+ #define JSON_HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__)))
+#else
+ #define JSON_HEDLEY_NON_NULL(...)
+#endif
+JSON_HEDLEY_DIAGNOSTIC_POP
+
+#if defined(JSON_HEDLEY_PRINTF_FORMAT)
+ #undef JSON_HEDLEY_PRINTF_FORMAT
+#endif
+#if defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && !defined(__USE_MINGW_ANSI_STDIO)
+ #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(ms_printf, string_idx, first_to_check)))
+#elif defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && defined(__USE_MINGW_ANSI_STDIO)
+ #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(gnu_printf, string_idx, first_to_check)))
+#elif \
+ JSON_HEDLEY_HAS_ATTRIBUTE(format) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(__printf__, string_idx, first_to_check)))
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(6,0,0)
+ #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __declspec(vaformat(printf,string_idx,first_to_check))
+#else
+ #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check)
+#endif
+
+#if defined(JSON_HEDLEY_CONSTEXPR)
+ #undef JSON_HEDLEY_CONSTEXPR
+#endif
+#if defined(__cplusplus)
+ #if __cplusplus >= 201103L
+ #define JSON_HEDLEY_CONSTEXPR JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(constexpr)
+ #endif
+#endif
+#if !defined(JSON_HEDLEY_CONSTEXPR)
+ #define JSON_HEDLEY_CONSTEXPR
+#endif
+
+#if defined(JSON_HEDLEY_PREDICT)
+ #undef JSON_HEDLEY_PREDICT
+#endif
+#if defined(JSON_HEDLEY_LIKELY)
+ #undef JSON_HEDLEY_LIKELY
+#endif
+#if defined(JSON_HEDLEY_UNLIKELY)
+ #undef JSON_HEDLEY_UNLIKELY
+#endif
+#if defined(JSON_HEDLEY_UNPREDICTABLE)
+ #undef JSON_HEDLEY_UNPREDICTABLE
+#endif
+#if JSON_HEDLEY_HAS_BUILTIN(__builtin_unpredictable)
+ #define JSON_HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable((expr))
+#endif
+#if \
+ JSON_HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(9,0,0)
+# define JSON_HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability( (expr), (value), (probability))
+# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1 , (probability))
+# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0 , (probability))
+# define JSON_HEDLEY_LIKELY(expr) __builtin_expect (!!(expr), 1 )
+# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect (!!(expr), 0 )
+#elif \
+ JSON_HEDLEY_HAS_BUILTIN(__builtin_expect) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
+ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,27) || \
+ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0)
+# define JSON_HEDLEY_PREDICT(expr, expected, probability) \
+ (((probability) >= 0.9) ? __builtin_expect((expr), (expected)) : (JSON_HEDLEY_STATIC_CAST(void, expected), (expr)))
+# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) \
+ (__extension__ ({ \
+ double hedley_probability_ = (probability); \
+ ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \
+ }))
+# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) \
+ (__extension__ ({ \
+ double hedley_probability_ = (probability); \
+ ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 1) : !!(expr))); \
+ }))
+# define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1)
+# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0)
+#else
+# define JSON_HEDLEY_PREDICT(expr, expected, probability) (JSON_HEDLEY_STATIC_CAST(void, expected), (expr))
+# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr))
+# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr))
+# define JSON_HEDLEY_LIKELY(expr) (!!(expr))
+# define JSON_HEDLEY_UNLIKELY(expr) (!!(expr))
+#endif
+#if !defined(JSON_HEDLEY_UNPREDICTABLE)
+ #define JSON_HEDLEY_UNPREDICTABLE(expr) JSON_HEDLEY_PREDICT(expr, 1, 0.5)
+#endif
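+
+/* When only the two-argument __builtin_expect is available, the PREDICT
+   family approximates the probability argument: >= 0.9 hints the stated
+   outcome, <= 0.1 (in the TRUE/FALSE forms) hints its opposite, and
+   anything in between compiles down to the bare expression.  Unused
+   operands are still evaluated (via the cast to void or a local double),
+   so arguments with side effects behave identically on every compiler. */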
+
+#if defined(JSON_HEDLEY_MALLOC)
+ #undef JSON_HEDLEY_MALLOC
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(malloc) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_MALLOC __attribute__((__malloc__))
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
+ #define JSON_HEDLEY_MALLOC _Pragma("returns_new_memory")
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(14, 0, 0)
+ #define JSON_HEDLEY_MALLOC __declspec(restrict)
+#else
+ #define JSON_HEDLEY_MALLOC
+#endif
+
+#if defined(JSON_HEDLEY_PURE)
+ #undef JSON_HEDLEY_PURE
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(pure) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(2,96,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+# define JSON_HEDLEY_PURE __attribute__((__pure__))
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
+# define JSON_HEDLEY_PURE _Pragma("does_not_write_global_data")
+#elif defined(__cplusplus) && \
+ ( \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) \
+ )
+# define JSON_HEDLEY_PURE _Pragma("FUNC_IS_PURE;")
+#else
+# define JSON_HEDLEY_PURE
+#endif
+
+#if defined(JSON_HEDLEY_CONST)
+ #undef JSON_HEDLEY_CONST
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(const) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(2,5,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0)
+ #define JSON_HEDLEY_CONST __attribute__((__const__))
+#elif \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0)
+ #define JSON_HEDLEY_CONST _Pragma("no_side_effect")
+#else
+ #define JSON_HEDLEY_CONST JSON_HEDLEY_PURE
+#endif
+
+#if defined(JSON_HEDLEY_RESTRICT)
+ #undef JSON_HEDLEY_RESTRICT
+#endif
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus)
+ #define JSON_HEDLEY_RESTRICT restrict
+#elif \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,4) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)) || \
+ JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \
+ defined(__clang__)
+ #define JSON_HEDLEY_RESTRICT __restrict
+#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,3,0) && !defined(__cplusplus)
+ #define JSON_HEDLEY_RESTRICT _Restrict
+#else
+ #define JSON_HEDLEY_RESTRICT
+#endif
+
+#if defined(JSON_HEDLEY_INLINE)
+ #undef JSON_HEDLEY_INLINE
+#endif
+#if \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \
+ (defined(__cplusplus) && (__cplusplus >= 199711L))
+ #define JSON_HEDLEY_INLINE inline
+#elif \
+ defined(JSON_HEDLEY_GCC_VERSION) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(6,2,0)
+ #define JSON_HEDLEY_INLINE __inline__
+#elif \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,1,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_INLINE __inline
+#else
+ #define JSON_HEDLEY_INLINE
+#endif
+
+#if defined(JSON_HEDLEY_ALWAYS_INLINE)
+ #undef JSON_HEDLEY_ALWAYS_INLINE
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(always_inline) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+# define JSON_HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) JSON_HEDLEY_INLINE
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0)
+# define JSON_HEDLEY_ALWAYS_INLINE __forceinline
+#elif defined(__cplusplus) && \
+ ( \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) \
+ )
+# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("inline=forced")
+#else
+# define JSON_HEDLEY_ALWAYS_INLINE JSON_HEDLEY_INLINE
+#endif
+
+#if defined(JSON_HEDLEY_NEVER_INLINE)
+ #undef JSON_HEDLEY_NEVER_INLINE
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \
+ JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \
+ (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \
+ (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \
+ (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \
+ JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \
+ JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0)
+ #define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__))
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0)
+ #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline)
+#elif JSON_HEDLEY_PGI_VERSION_CHECK(10,2,0)
+ #define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline")
+#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus)
+ #define JSON_HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;")
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+ #define JSON_HEDLEY_NEVER_INLINE _Pragma("inline=never")
+#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0)
+ #define JSON_HEDLEY_NEVER_INLINE __attribute((noinline))
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0)
+ #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline)
+#else
+ #define JSON_HEDLEY_NEVER_INLINE
+#endif
+
+#if defined(JSON_HEDLEY_PRIVATE)
+ #undef JSON_HEDLEY_PRIVATE
+#endif
+#if defined(JSON_HEDLEY_PUBLIC)
+ #undef JSON_HEDLEY_PUBLIC
+#endif
+#if defined(JSON_HEDLEY_IMPORT)
+ #undef JSON_HEDLEY_IMPORT
+#endif
+#if defined(_WIN32) || defined(__CYGWIN__)
+# define JSON_HEDLEY_PRIVATE
+# define JSON_HEDLEY_PUBLIC __declspec(dllexport)
+# define JSON_HEDLEY_IMPORT __declspec(dllimport)
+#else
+# if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(visibility) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
+ JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
+ ( \
+ defined(__TI_EABI__) && \
+ ( \
+ (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) \
+ ) \
+ )
+# define JSON_HEDLEY_PRIVATE __attribute__((__visibility__("hidden")))
+# define JSON_HEDLEY_PUBLIC __attribute__((__visibility__("default")))
+# else
+# define JSON_HEDLEY_PRIVATE
+# define JSON_HEDLEY_PUBLIC
+# endif
+# define JSON_HEDLEY_IMPORT extern
+#endif
+
+#if defined(JSON_HEDLEY_NO_THROW)
+ #undef JSON_HEDLEY_NO_THROW
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(nothrow) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+ #define JSON_HEDLEY_NO_THROW __attribute__((__nothrow__))
+#elif \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(13,1,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0)
+ #define JSON_HEDLEY_NO_THROW __declspec(nothrow)
+#else
+ #define JSON_HEDLEY_NO_THROW
+#endif
+
+#if defined(JSON_HEDLEY_FALL_THROUGH)
+ #undef JSON_HEDLEY_FALL_THROUGH
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(fallthrough) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(7,0,0)
+ #define JSON_HEDLEY_FALL_THROUGH __attribute__((__fallthrough__))
+#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(clang,fallthrough)
+ #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[clang::fallthrough]])
+#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough)
+ #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[fallthrough]])
+#elif defined(__fallthrough) /* SAL */
+ #define JSON_HEDLEY_FALL_THROUGH __fallthrough
+#else
+ #define JSON_HEDLEY_FALL_THROUGH
+#endif
+
+#if defined(JSON_HEDLEY_RETURNS_NON_NULL)
+ #undef JSON_HEDLEY_RETURNS_NON_NULL
+#endif
+#if \
+ JSON_HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0)
+ #define JSON_HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__))
+#elif defined(_Ret_notnull_) /* SAL */
+ #define JSON_HEDLEY_RETURNS_NON_NULL _Ret_notnull_
+#else
+ #define JSON_HEDLEY_RETURNS_NON_NULL
+#endif
+
+#if defined(JSON_HEDLEY_ARRAY_PARAM)
+ #undef JSON_HEDLEY_ARRAY_PARAM
+#endif
+#if \
+ defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \
+ !defined(__STDC_NO_VLA__) && \
+ !defined(__cplusplus) && \
+ !defined(JSON_HEDLEY_PGI_VERSION) && \
+ !defined(JSON_HEDLEY_TINYC_VERSION)
+ #define JSON_HEDLEY_ARRAY_PARAM(name) (name)
+#else
+ #define JSON_HEDLEY_ARRAY_PARAM(name)
+#endif
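+
+/* JSON_HEDLEY_ARRAY_PARAM documents an expected array-parameter length in a
+   way that survives C++ and pre-C99 compilers.  A hypothetical signature:
+
+       void sum(size_t n, const int values[JSON_HEDLEY_ARRAY_PARAM(n)]);
+
+   expands to "const int values[(n)]" under C99 with VLA support and to
+   "const int values[]" everywhere else. */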
+
+#if defined(JSON_HEDLEY_IS_CONSTANT)
+ #undef JSON_HEDLEY_IS_CONSTANT
+#endif
+#if defined(JSON_HEDLEY_REQUIRE_CONSTEXPR)
+ #undef JSON_HEDLEY_REQUIRE_CONSTEXPR
+#endif
+/* JSON_HEDLEY_IS_CONSTEXPR_ is for
+ HEDLEY INTERNAL USE ONLY. API subject to change without notice. */
+#if defined(JSON_HEDLEY_IS_CONSTEXPR_)
+ #undef JSON_HEDLEY_IS_CONSTEXPR_
+#endif
+#if \
+ JSON_HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
+ JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \
+ (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) && !defined(__cplusplus)) || \
+ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0)
+ #define JSON_HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr)
+#endif
+#if !defined(__cplusplus)
+# if \
+ JSON_HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \
+ JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \
+ JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,24)
+#if defined(__INTPTR_TYPE__)
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0)), int*)
+#else
+ #include <stdint.h>
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0)), int*)
+#endif
+# elif \
+ ( \
+ defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \
+ !defined(JSON_HEDLEY_SUNPRO_VERSION) && \
+ !defined(JSON_HEDLEY_PGI_VERSION) && \
+ !defined(JSON_HEDLEY_IAR_VERSION)) || \
+ JSON_HEDLEY_HAS_EXTENSION(c_generic_selections) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \
+ JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \
+ JSON_HEDLEY_ARM_VERSION_CHECK(5,3,0)
+#if defined(__INTPTR_TYPE__)
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0)
+#else
+ #include <stdint.h>
+ #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0)
+#endif
+# elif \
+ defined(JSON_HEDLEY_GCC_VERSION) || \
+ defined(JSON_HEDLEY_INTEL_VERSION) || \
+ defined(JSON_HEDLEY_TINYC_VERSION) || \
+ defined(JSON_HEDLEY_TI_ARMCL_VERSION) || \
+ JSON_HEDLEY_TI_CL430_VERSION_CHECK(18,12,0) || \
+ defined(JSON_HEDLEY_TI_CL2000_VERSION) || \
+ defined(JSON_HEDLEY_TI_CL6X_VERSION) || \
+ defined(JSON_HEDLEY_TI_CL7X_VERSION) || \
+ defined(JSON_HEDLEY_TI_CLPRU_VERSION) || \
+ defined(__clang__)
+# define JSON_HEDLEY_IS_CONSTEXPR_(expr) ( \
+ sizeof(void) != \
+ sizeof(*( \
+ 1 ? \
+ ((void*) ((expr) * 0L)) : \
+ ((struct { char v[sizeof(void) * 2]; } *) 1) \
+ ) \
+ ) \
+ )
+# endif
+#endif
+#if defined(JSON_HEDLEY_IS_CONSTEXPR_)
+ #if !defined(JSON_HEDLEY_IS_CONSTANT)
+ #define JSON_HEDLEY_IS_CONSTANT(expr) JSON_HEDLEY_IS_CONSTEXPR_(expr)
+ #endif
+ #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (JSON_HEDLEY_IS_CONSTEXPR_(expr) ? (expr) : (-1))
+#else
+ #if !defined(JSON_HEDLEY_IS_CONSTANT)
+ #define JSON_HEDLEY_IS_CONSTANT(expr) (0)
+ #endif
+ #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (expr)
+#endif
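+/* Illustrative usage (editor's sketch): the fallback definition is always 0,
+   so the result may only guide optimizations, never correctness:
+
+       int fast = JSON_HEDLEY_IS_CONSTANT(3 + 4);
+       int slow = JSON_HEDLEY_IS_CONSTANT(rand());
+
+   On compilers with __builtin_constant_p or _Generic support, fast is non-zero
+   and slow is 0; on the fallback path both are 0. */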
+
+#if defined(JSON_HEDLEY_BEGIN_C_DECLS)
+ #undef JSON_HEDLEY_BEGIN_C_DECLS
+#endif
+#if defined(JSON_HEDLEY_END_C_DECLS)
+ #undef JSON_HEDLEY_END_C_DECLS
+#endif
+#if defined(JSON_HEDLEY_C_DECL)
+ #undef JSON_HEDLEY_C_DECL
+#endif
+#if defined(__cplusplus)
+ #define JSON_HEDLEY_BEGIN_C_DECLS extern "C" {
+ #define JSON_HEDLEY_END_C_DECLS }
+ #define JSON_HEDLEY_C_DECL extern "C"
+#else
+ #define JSON_HEDLEY_BEGIN_C_DECLS
+ #define JSON_HEDLEY_END_C_DECLS
+ #define JSON_HEDLEY_C_DECL
+#endif
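+/* Illustrative usage (editor's sketch, with a hypothetical declaration):
+
+       JSON_HEDLEY_BEGIN_C_DECLS
+       void my_c_api(int value);
+       JSON_HEDLEY_END_C_DECLS
+
+   In C++ this wraps the declaration in extern "C" { }; in C it expands to
+   nothing, so shared headers need no manual language guards. */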
+
+#if defined(JSON_HEDLEY_STATIC_ASSERT)
+ #undef JSON_HEDLEY_STATIC_ASSERT
+#endif
+#if \
+ !defined(__cplusplus) && ( \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \
+ JSON_HEDLEY_HAS_FEATURE(c_static_assert) || \
+ JSON_HEDLEY_GCC_VERSION_CHECK(6,0,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \
+ defined(_Static_assert) \
+ )
+# define JSON_HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message)
+#elif \
+ (defined(__cplusplus) && (__cplusplus >= 201103L)) || \
+ JSON_HEDLEY_MSVC_VERSION_CHECK(16,0,0)
+# define JSON_HEDLEY_STATIC_ASSERT(expr, message) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message))
+#else
+# define JSON_HEDLEY_STATIC_ASSERT(expr, message)
+#endif
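+/* Illustrative usage (editor's sketch): one spelling covers C11, C++11, and
+   newer MSVC; on older toolchains the check silently expands to nothing, so it
+   must not be the sole enforcement of an invariant:
+
+       JSON_HEDLEY_STATIC_ASSERT(sizeof(long long) >= 8, "need a 64-bit long long");
+*/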
+
+#if defined(JSON_HEDLEY_NULL)
+ #undef JSON_HEDLEY_NULL
+#endif
+#if defined(__cplusplus)
+ #if __cplusplus >= 201103L
+ #define JSON_HEDLEY_NULL JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(nullptr)
+ #elif defined(NULL)
+ #define JSON_HEDLEY_NULL NULL
+ #else
+ #define JSON_HEDLEY_NULL JSON_HEDLEY_STATIC_CAST(void*, 0)
+ #endif
+#elif defined(NULL)
+ #define JSON_HEDLEY_NULL NULL
+#else
+ #define JSON_HEDLEY_NULL ((void*) 0)
+#endif
+
+#if defined(JSON_HEDLEY_MESSAGE)
+ #undef JSON_HEDLEY_MESSAGE
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas")
+# define JSON_HEDLEY_MESSAGE(msg) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \
+ JSON_HEDLEY_PRAGMA(message msg) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+#elif \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message msg)
+#elif JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0)
+# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(_CRI message msg)
+#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0)
+# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg))
+#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,0,0)
+# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg))
+#else
+# define JSON_HEDLEY_MESSAGE(msg)
+#endif
+
+#if defined(JSON_HEDLEY_WARNING)
+ #undef JSON_HEDLEY_WARNING
+#endif
+#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas")
+# define JSON_HEDLEY_WARNING(msg) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \
+ JSON_HEDLEY_PRAGMA(clang warning msg) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+#elif \
+ JSON_HEDLEY_GCC_VERSION_CHECK(4,8,0) || \
+ JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \
+ JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
+# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(GCC warning msg)
+#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0)
+# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(message(msg))
+#else
+# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_MESSAGE(msg)
+#endif
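+/* Illustrative usage (editor's sketch): both macros take a string literal and
+   emit a diagnostic at the point of expansion where supported, expanding to
+   nothing elsewhere:
+
+       JSON_HEDLEY_MESSAGE("using the bundled Hedley macros")
+       JSON_HEDLEY_WARNING("this code path is untested")
+*/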
+
+#if defined(JSON_HEDLEY_REQUIRE)
+ #undef JSON_HEDLEY_REQUIRE
+#endif
+#if defined(JSON_HEDLEY_REQUIRE_MSG)
+ #undef JSON_HEDLEY_REQUIRE_MSG
+#endif
+#if JSON_HEDLEY_HAS_ATTRIBUTE(diagnose_if)
+# if JSON_HEDLEY_HAS_WARNING("-Wgcc-compat")
+# define JSON_HEDLEY_REQUIRE(expr) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \
+ __attribute__((diagnose_if(!(expr), #expr, "error"))) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \
+ __attribute__((diagnose_if(!(expr), msg, "error"))) \
+ JSON_HEDLEY_DIAGNOSTIC_POP
+# else
+# define JSON_HEDLEY_REQUIRE(expr) __attribute__((diagnose_if(!(expr), #expr, "error")))
+# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) __attribute__((diagnose_if(!(expr), msg, "error")))
+# endif
+#else
+# define JSON_HEDLEY_REQUIRE(expr)
+# define JSON_HEDLEY_REQUIRE_MSG(expr,msg)
+#endif
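+/* Illustrative usage (editor's sketch, with a hypothetical function): on clang
+   with diagnose_if, a call whose constant argument violates the predicate is
+   rejected at compile time; on other compilers the annotation vanishes:
+
+       int take_positive(int x) JSON_HEDLEY_REQUIRE_MSG(x > 0, "x must be positive");
+*/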
+
+#if defined(JSON_HEDLEY_FLAGS)
+ #undef JSON_HEDLEY_FLAGS
+#endif
+#if JSON_HEDLEY_HAS_ATTRIBUTE(flag_enum)
+ #define JSON_HEDLEY_FLAGS __attribute__((__flag_enum__))
+#endif
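+/* Illustrative usage (editor's sketch): marking a bitmask enum suppresses
+   clang's warnings about combined values that match no single enumerator:
+
+       enum JSON_HEDLEY_FLAGS mode_bits { mode_read = 1, mode_write = 2 };
+*/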
+
+#if defined(JSON_HEDLEY_FLAGS_CAST)
+ #undef JSON_HEDLEY_FLAGS_CAST
+#endif
+#if JSON_HEDLEY_INTEL_VERSION_CHECK(19,0,0)
+# define JSON_HEDLEY_FLAGS_CAST(T, expr) (__extension__ ({ \
+ JSON_HEDLEY_DIAGNOSTIC_PUSH \
+ _Pragma("warning(disable:188)") \
+ ((T) (expr)); \
+ JSON_HEDLEY_DIAGNOSTIC_POP \
+ }))
+#else
+# define JSON_HEDLEY_FLAGS_CAST(T, expr) JSON_HEDLEY_STATIC_CAST(T, expr)
+#endif
+
+#if defined(JSON_HEDLEY_EMPTY_BASES)
+ #undef JSON_HEDLEY_EMPTY_BASES
+#endif
+#if JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,23918) && !JSON_HEDLEY_MSVC_VERSION_CHECK(20,0,0)
+ #define JSON_HEDLEY_EMPTY_BASES __declspec(empty_bases)
+#else
+ #define JSON_HEDLEY_EMPTY_BASES
+#endif
+
+/* Remaining macros are deprecated. */
+
+#if defined(JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK)
+ #undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK
+#endif
+#if defined(__clang__)
+ #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) (0)
+#else
+ #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch)
+#endif
+
+#if defined(JSON_HEDLEY_CLANG_HAS_ATTRIBUTE)
+ #undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_ATTRIBUTE(attribute)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE)
+ #undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_BUILTIN)
+ #undef JSON_HEDLEY_CLANG_HAS_BUILTIN
+#endif
+#define JSON_HEDLEY_CLANG_HAS_BUILTIN(builtin) JSON_HEDLEY_HAS_BUILTIN(builtin)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_FEATURE)
+ #undef JSON_HEDLEY_CLANG_HAS_FEATURE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_FEATURE(feature) JSON_HEDLEY_HAS_FEATURE(feature)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_EXTENSION)
+ #undef JSON_HEDLEY_CLANG_HAS_EXTENSION
+#endif
+#define JSON_HEDLEY_CLANG_HAS_EXTENSION(extension) JSON_HEDLEY_HAS_EXTENSION(extension)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE)
+    #undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE
+#endif
+#define JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute)
+
+#if defined(JSON_HEDLEY_CLANG_HAS_WARNING)
+ #undef JSON_HEDLEY_CLANG_HAS_WARNING
+#endif
+#define JSON_HEDLEY_CLANG_HAS_WARNING(warning) JSON_HEDLEY_HAS_WARNING(warning)
+
+#endif /* !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < X) */
// This file contains all internal macro definitions
@@ -131,6 +2036,18 @@ using json = basic_json<>;
#endif
#endif
+// C++ language standard detection
+#if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
+ #define JSON_HAS_CPP_20
+ #define JSON_HAS_CPP_17
+ #define JSON_HAS_CPP_14
+#elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464
+ #define JSON_HAS_CPP_17
+ #define JSON_HAS_CPP_14
+#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1)
+ #define JSON_HAS_CPP_14
+#endif
+
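+// Illustrative usage (editor's sketch): downstream code can branch on the
+// detected standard instead of repeating the version checks:
+//
+//     #ifdef JSON_HAS_CPP_17
+//     #include <string_view>
+//     #endif
+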
// disable float-equal warnings on GCC/clang
#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
#pragma GCC diagnostic push
@@ -143,15 +2060,6 @@ using json = basic_json<>;
#pragma GCC diagnostic ignored "-Wdocumentation"
#endif
-// allow for portable deprecation warnings
-#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
- #define JSON_DEPRECATED __attribute__((deprecated))
-#elif defined(_MSC_VER)
- #define JSON_DEPRECATED __declspec(deprecated)
-#else
- #define JSON_DEPRECATED
-#endif
-
// allow to disable exceptions
#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION)
#define JSON_THROW(exception) throw exception
@@ -159,6 +2067,7 @@ using json = basic_json<>;
#define JSON_CATCH(exception) catch(exception)
#define JSON_INTERNAL_CATCH(exception) catch(exception)
#else
+ #include <cstdlib>
#define JSON_THROW(exception) std::abort()
#define JSON_TRY if(true)
#define JSON_CATCH(exception) if(false)
@@ -185,21 +2094,10 @@ using json = basic_json<>;
#define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER
#endif
-// manual branch prediction
-#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
- #define JSON_LIKELY(x) __builtin_expect(!!(x), 1)
- #define JSON_UNLIKELY(x) __builtin_expect(!!(x), 0)
-#else
- #define JSON_LIKELY(x) x
- #define JSON_UNLIKELY(x) x
-#endif
-
-// C++ language standard detection
-#if (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464
- #define JSON_HAS_CPP_17
- #define JSON_HAS_CPP_14
-#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1)
- #define JSON_HAS_CPP_14
+// allow to override assert
+#if !defined(JSON_ASSERT)
+ #include <cassert> // assert
+ #define JSON_ASSERT(x) assert(x)
#endif
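+
+// Illustrative usage (editor's sketch): a consumer can override the assertion
+// handler by defining JSON_ASSERT before this header is included;
+// my_project_check below is a hypothetical project function:
+//
+//     #define JSON_ASSERT(x) my_project_check(x)
+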
/*!
@@ -207,30 +2105,30 @@ using json = basic_json<>;
@def NLOHMANN_JSON_SERIALIZE_ENUM
@since version 3.4.0
*/
-#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \
- template<typename BasicJsonType> \
- inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \
- { \
- static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
- static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
- auto it = std::find_if(std::begin(m), std::end(m), \
- [e](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
- { \
- return ej_pair.first == e; \
- }); \
- j = ((it != std::end(m)) ? it : std::begin(m))->second; \
- } \
- template<typename BasicJsonType> \
- inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \
- { \
- static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
- static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
- auto it = std::find_if(std::begin(m), std::end(m), \
- [j](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
- { \
- return ej_pair.second == j; \
- }); \
- e = ((it != std::end(m)) ? it : std::begin(m))->first; \
+#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \
+ template<typename BasicJsonType> \
+ inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \
+ { \
+ static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
+ static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
+ auto it = std::find_if(std::begin(m), std::end(m), \
+ [e](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
+ { \
+ return ej_pair.first == e; \
+ }); \
+ j = ((it != std::end(m)) ? it : std::begin(m))->second; \
+ } \
+ template<typename BasicJsonType> \
+ inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \
+ { \
+ static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \
+ static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \
+ auto it = std::find_if(std::begin(m), std::end(m), \
+ [&j](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
+ { \
+ return ej_pair.second == j; \
+ }); \
+ e = ((it != std::end(m)) ? it : std::begin(m))->first; \
}
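+
+// Illustrative usage (editor's sketch, with a hypothetical enum): the first
+// pair also serves as the default for unknown values, so choose it deliberately:
+//
+//     enum class color { unknown, red, green };
+//     NLOHMANN_JSON_SERIALIZE_ENUM(color, {
+//         {color::unknown, nullptr},
+//         {color::red, "red"},
+//         {color::green, "green"},
+//     })
+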
// Ugly macros to avoid uglier copy-paste when specializing basic_json. They
@@ -242,17 +2140,533 @@ using json = basic_json<>;
class StringType, class BooleanType, class NumberIntegerType, \
class NumberUnsignedType, class NumberFloatType, \
template<typename> class AllocatorType, \
- template<typename, typename = void> class JSONSerializer>
+ template<typename, typename = void> class JSONSerializer, \
+ class BinaryType>
#define NLOHMANN_BASIC_JSON_TPL \
basic_json<ObjectType, ArrayType, StringType, BooleanType, \
NumberIntegerType, NumberUnsignedType, NumberFloatType, \
- AllocatorType, JSONSerializer>
+ AllocatorType, JSONSerializer, BinaryType>
+
+// Macros to simplify conversion from/to types
+
+#define NLOHMANN_JSON_EXPAND( x ) x
+#define NLOHMANN_JSON_GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, NAME,...) NAME
+#define NLOHMANN_JSON_PASTE(...) NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_GET_MACRO(__VA_ARGS__, \
+ NLOHMANN_JSON_PASTE64, \
+ NLOHMANN_JSON_PASTE63, \
+ NLOHMANN_JSON_PASTE62, \
+ NLOHMANN_JSON_PASTE61, \
+ NLOHMANN_JSON_PASTE60, \
+ NLOHMANN_JSON_PASTE59, \
+ NLOHMANN_JSON_PASTE58, \
+ NLOHMANN_JSON_PASTE57, \
+ NLOHMANN_JSON_PASTE56, \
+ NLOHMANN_JSON_PASTE55, \
+ NLOHMANN_JSON_PASTE54, \
+ NLOHMANN_JSON_PASTE53, \
+ NLOHMANN_JSON_PASTE52, \
+ NLOHMANN_JSON_PASTE51, \
+ NLOHMANN_JSON_PASTE50, \
+ NLOHMANN_JSON_PASTE49, \
+ NLOHMANN_JSON_PASTE48, \
+ NLOHMANN_JSON_PASTE47, \
+ NLOHMANN_JSON_PASTE46, \
+ NLOHMANN_JSON_PASTE45, \
+ NLOHMANN_JSON_PASTE44, \
+ NLOHMANN_JSON_PASTE43, \
+ NLOHMANN_JSON_PASTE42, \
+ NLOHMANN_JSON_PASTE41, \
+ NLOHMANN_JSON_PASTE40, \
+ NLOHMANN_JSON_PASTE39, \
+ NLOHMANN_JSON_PASTE38, \
+ NLOHMANN_JSON_PASTE37, \
+ NLOHMANN_JSON_PASTE36, \
+ NLOHMANN_JSON_PASTE35, \
+ NLOHMANN_JSON_PASTE34, \
+ NLOHMANN_JSON_PASTE33, \
+ NLOHMANN_JSON_PASTE32, \
+ NLOHMANN_JSON_PASTE31, \
+ NLOHMANN_JSON_PASTE30, \
+ NLOHMANN_JSON_PASTE29, \
+ NLOHMANN_JSON_PASTE28, \
+ NLOHMANN_JSON_PASTE27, \
+ NLOHMANN_JSON_PASTE26, \
+ NLOHMANN_JSON_PASTE25, \
+ NLOHMANN_JSON_PASTE24, \
+ NLOHMANN_JSON_PASTE23, \
+ NLOHMANN_JSON_PASTE22, \
+ NLOHMANN_JSON_PASTE21, \
+ NLOHMANN_JSON_PASTE20, \
+ NLOHMANN_JSON_PASTE19, \
+ NLOHMANN_JSON_PASTE18, \
+ NLOHMANN_JSON_PASTE17, \
+ NLOHMANN_JSON_PASTE16, \
+ NLOHMANN_JSON_PASTE15, \
+ NLOHMANN_JSON_PASTE14, \
+ NLOHMANN_JSON_PASTE13, \
+ NLOHMANN_JSON_PASTE12, \
+ NLOHMANN_JSON_PASTE11, \
+ NLOHMANN_JSON_PASTE10, \
+ NLOHMANN_JSON_PASTE9, \
+ NLOHMANN_JSON_PASTE8, \
+ NLOHMANN_JSON_PASTE7, \
+ NLOHMANN_JSON_PASTE6, \
+ NLOHMANN_JSON_PASTE5, \
+ NLOHMANN_JSON_PASTE4, \
+ NLOHMANN_JSON_PASTE3, \
+ NLOHMANN_JSON_PASTE2, \
+ NLOHMANN_JSON_PASTE1)(__VA_ARGS__))
+#define NLOHMANN_JSON_PASTE2(func, v1) func(v1)
+#define NLOHMANN_JSON_PASTE3(func, v1, v2) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE2(func, v2)
+#define NLOHMANN_JSON_PASTE4(func, v1, v2, v3) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE3(func, v2, v3)
+#define NLOHMANN_JSON_PASTE5(func, v1, v2, v3, v4) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE4(func, v2, v3, v4)
+#define NLOHMANN_JSON_PASTE6(func, v1, v2, v3, v4, v5) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE5(func, v2, v3, v4, v5)
+#define NLOHMANN_JSON_PASTE7(func, v1, v2, v3, v4, v5, v6) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE6(func, v2, v3, v4, v5, v6)
+#define NLOHMANN_JSON_PASTE8(func, v1, v2, v3, v4, v5, v6, v7) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE7(func, v2, v3, v4, v5, v6, v7)
+#define NLOHMANN_JSON_PASTE9(func, v1, v2, v3, v4, v5, v6, v7, v8) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE8(func, v2, v3, v4, v5, v6, v7, v8)
+#define NLOHMANN_JSON_PASTE10(func, v1, v2, v3, v4, v5, v6, v7, v8, v9) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE9(func, v2, v3, v4, v5, v6, v7, v8, v9)
+#define NLOHMANN_JSON_PASTE11(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE10(func, v2, v3, v4, v5, v6, v7, v8, v9, v10)
+#define NLOHMANN_JSON_PASTE12(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE11(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11)
+#define NLOHMANN_JSON_PASTE13(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE12(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12)
+#define NLOHMANN_JSON_PASTE14(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE13(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13)
+#define NLOHMANN_JSON_PASTE15(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE14(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14)
+#define NLOHMANN_JSON_PASTE16(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE15(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15)
+#define NLOHMANN_JSON_PASTE17(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE16(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16)
+#define NLOHMANN_JSON_PASTE18(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE17(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17)
+#define NLOHMANN_JSON_PASTE19(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE18(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18)
+#define NLOHMANN_JSON_PASTE20(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE19(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19)
+#define NLOHMANN_JSON_PASTE21(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE20(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20)
+#define NLOHMANN_JSON_PASTE22(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE21(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21)
+#define NLOHMANN_JSON_PASTE23(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE22(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22)
+#define NLOHMANN_JSON_PASTE24(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE23(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23)
+#define NLOHMANN_JSON_PASTE25(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE24(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24)
+#define NLOHMANN_JSON_PASTE26(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE25(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25)
+#define NLOHMANN_JSON_PASTE27(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE26(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26)
+#define NLOHMANN_JSON_PASTE28(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE27(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27)
+#define NLOHMANN_JSON_PASTE29(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE28(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28)
+#define NLOHMANN_JSON_PASTE30(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE29(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29)
+#define NLOHMANN_JSON_PASTE31(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE30(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30)
+#define NLOHMANN_JSON_PASTE32(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE31(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31)
+#define NLOHMANN_JSON_PASTE33(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE32(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32)
+#define NLOHMANN_JSON_PASTE34(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE33(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33)
+#define NLOHMANN_JSON_PASTE35(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE34(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34)
+#define NLOHMANN_JSON_PASTE36(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE35(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35)
+#define NLOHMANN_JSON_PASTE37(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE36(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36)
+#define NLOHMANN_JSON_PASTE38(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE37(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37)
+#define NLOHMANN_JSON_PASTE39(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE38(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38)
+#define NLOHMANN_JSON_PASTE40(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE39(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39)
+#define NLOHMANN_JSON_PASTE41(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE40(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40)
+#define NLOHMANN_JSON_PASTE42(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE41(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41)
+#define NLOHMANN_JSON_PASTE43(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE42(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42)
+#define NLOHMANN_JSON_PASTE44(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE43(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43)
+#define NLOHMANN_JSON_PASTE45(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE44(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44)
+#define NLOHMANN_JSON_PASTE46(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE45(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45)
+#define NLOHMANN_JSON_PASTE47(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE46(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46)
+#define NLOHMANN_JSON_PASTE48(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE47(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47)
+#define NLOHMANN_JSON_PASTE49(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE48(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48)
+#define NLOHMANN_JSON_PASTE50(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE49(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49)
+#define NLOHMANN_JSON_PASTE51(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE50(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50)
+#define NLOHMANN_JSON_PASTE52(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE51(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51)
+#define NLOHMANN_JSON_PASTE53(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE52(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52)
+#define NLOHMANN_JSON_PASTE54(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE53(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53)
+#define NLOHMANN_JSON_PASTE55(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE54(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54)
+#define NLOHMANN_JSON_PASTE56(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE55(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55)
+#define NLOHMANN_JSON_PASTE57(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE56(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56)
+#define NLOHMANN_JSON_PASTE58(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE57(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57)
+#define NLOHMANN_JSON_PASTE59(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE58(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58)
+#define NLOHMANN_JSON_PASTE60(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE59(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59)
+#define NLOHMANN_JSON_PASTE61(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE60(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60)
+#define NLOHMANN_JSON_PASTE62(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE61(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61)
+#define NLOHMANN_JSON_PASTE63(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE62(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62)
+#define NLOHMANN_JSON_PASTE64(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE63(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63)
+
+#define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1;
+#define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1);
+
+/*!
+@brief macro
+@def NLOHMANN_DEFINE_TYPE_INTRUSIVE
+@since version 3.9.0
+*/
+#define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) \
+ friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
+ friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) }
+
+/*!
+@brief macro
+@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE
+@since version 3.9.0
+*/
+#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) \
+ inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
+ inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) }
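+
+// Illustrative usage (editor's sketch, with a hypothetical struct; the macro
+// must live in the same namespace as the type so ADL can find the functions):
+//
+//     namespace ns {
+//         struct person { std::string name; int age; };
+//         NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(person, name, age)
+//     }
+//
+// after which nlohmann::json j = ns::person{"ada", 36}; and
+// j.get<ns::person>() round-trip the type.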
+
+#ifndef JSON_USE_IMPLICIT_CONVERSIONS
+ #define JSON_USE_IMPLICIT_CONVERSIONS 1
+#endif
+
+#if JSON_USE_IMPLICIT_CONVERSIONS
+ #define JSON_EXPLICIT
+#else
+ #define JSON_EXPLICIT explicit
+#endif
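+
+// Illustrative usage (editor's sketch): building with
+// JSON_USE_IMPLICIT_CONVERSIONS defined to 0 makes value conversions explicit:
+//
+//     std::string s1 = j;                      // no longer compiles
+//     std::string s2 = j.get<std::string>();   // still fine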
+
+
+namespace nlohmann
+{
+namespace detail
+{
+////////////////
+// exceptions //
+////////////////
+
+/*!
+@brief general exception of the @ref basic_json class
+
+This class is an extension of `std::exception` objects with a member @a id for
+exception ids. It is used as the base class for all exceptions thrown by the
+@ref basic_json class. This class can hence be used as a "wildcard" to catch
+exceptions.
+
+Subclasses:
+- @ref parse_error for exceptions indicating a parse error
+- @ref invalid_iterator for exceptions indicating errors with iterators
+- @ref type_error for exceptions indicating that a member function was
+  executed with a wrong type
+- @ref out_of_range for exceptions indicating access out of the defined range
+- @ref other_error for exceptions indicating other library errors
+
+@internal
+@note To have nothrow-copy-constructible exceptions, we internally use
+ `std::runtime_error` which can cope with arbitrary-length error messages.
+ Intermediate strings are built with static functions and then passed to
+ the actual constructor.
+@endinternal
+
+@liveexample{The following code shows how arbitrary library exceptions can be
+caught.,exception}
+
+@since version 3.0.0
+*/
+class exception : public std::exception
+{
+ public:
+ /// returns the explanatory string
+ JSON_HEDLEY_RETURNS_NON_NULL
+ const char* what() const noexcept override
+ {
+ return m.what();
+ }
+
+ /// the id of the exception
+ const int id;
+
+ protected:
+ JSON_HEDLEY_NON_NULL(3)
+ exception(int id_, const char* what_arg) : id(id_), m(what_arg) {}
+
+ static std::string name(const std::string& ename, int id_)
+ {
+ return "[json.exception." + ename + "." + std::to_string(id_) + "] ";
+ }
+
+ private:
+ /// an exception object as storage for error messages
+ std::runtime_error m;
+};
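+
+// Illustrative usage (editor's sketch): since every library error derives from
+// this class, a single handler can catch them all:
+//
+//     try {
+//         auto j = nlohmann::json::parse("[1,2");
+//     } catch (const nlohmann::json::exception& e) {
+//         std::cerr << e.what() << " (id " << e.id << ")\n";
+//     }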
+
+/*!
+@brief exception indicating a parse error
+
+This exception is thrown by the library when a parse error occurs. Parse errors
+can occur during the deserialization of JSON text, CBOR, and MessagePack, as
+well as when applying a JSON Patch.
+
+Member @a byte holds the byte index of the last read character in the input
+file.
+
+Exceptions have ids 1xx.
+
+name / id | example message | description
+------------------------------ | --------------- | -------------------------
+json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position.
+json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. Code points above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point.
+json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid.
+json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects.
+json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors.
+json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`.
+json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character.
+json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences.
+json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number.
+json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read.
+json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read.
+json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read.
+json.exception.parse_error.114 | parse error: Unsupported BSON record type 0x0F | The parsing of the corresponding BSON record type is not implemented (yet).
+json.exception.parse_error.115 | parse error at byte 5: syntax error while parsing UBJSON high-precision number: invalid number text: 1A | A UBJSON high-precision number could not be parsed.
+
+@note For an input with n bytes, 1 is the index of the first character and n+1
+ is the index of the terminating null byte or the end of file. This also
+ holds true when reading a byte vector (CBOR or MessagePack).
+
+@liveexample{The following code shows how a `parse_error` exception can be
+caught.,parse_error}
+
+@sa - @ref exception for the base class of the library exceptions
+@sa - @ref invalid_iterator for exceptions indicating errors with iterators
+@sa - @ref type_error for exceptions indicating that a member function was
+    executed with a wrong type
+@sa - @ref out_of_range for exceptions indicating access out of the defined range
+@sa - @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class parse_error : public exception
+{
+ public:
+ /*!
+ @brief create a parse error exception
+ @param[in] id_ the id of the exception
+ @param[in] pos the position where the error occurred (or with
+ chars_read_total=0 if the position cannot be
+ determined)
+ @param[in] what_arg the explanatory string
+ @return parse_error object
+ */
+ static parse_error create(int id_, const position_t& pos, const std::string& what_arg)
+ {
+ std::string w = exception::name("parse_error", id_) + "parse error" +
+ position_string(pos) + ": " + what_arg;
+ return parse_error(id_, pos.chars_read_total, w.c_str());
+ }
+
+ static parse_error create(int id_, std::size_t byte_, const std::string& what_arg)
+ {
+ std::string w = exception::name("parse_error", id_) + "parse error" +
+ (byte_ != 0 ? (" at byte " + std::to_string(byte_)) : "") +
+ ": " + what_arg;
+ return parse_error(id_, byte_, w.c_str());
+ }
+
+ /*!
+ @brief byte index of the parse error
+
+ The byte index of the last read character in the input file.
+
+ @note For an input with n bytes, 1 is the index of the first character and
+ n+1 is the index of the terminating null byte or the end of file.
+ This also holds true when reading a byte vector (CBOR or MessagePack).
+ */
+ const std::size_t byte;
+
+ private:
+ parse_error(int id_, std::size_t byte_, const char* what_arg)
+ : exception(id_, what_arg), byte(byte_) {}
+
+ static std::string position_string(const position_t& pos)
+ {
+ return " at line " + std::to_string(pos.lines_read + 1) +
+ ", column " + std::to_string(pos.chars_read_current_line);
+ }
+};
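+
+// Illustrative usage (editor's sketch): the byte member pinpoints where the
+// input became invalid:
+//
+//     try {
+//         nlohmann::json::parse("{\"key\": }");
+//     } catch (const nlohmann::json::parse_error& e) {
+//         std::cerr << "parse error at byte " << e.byte << ": " << e.what() << '\n';
+//     }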
+
+/*!
+@brief exception indicating errors with iterators
+
+This exception is thrown if iterators passed to a library function do not match
+the expected semantics.
+
+Exceptions have ids 2xx.
+
+name / id | example message | description
+----------------------------------- | --------------- | -------------------------
+json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid.
+json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion.
+json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from.
+json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid.
+json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid.
+json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence do not define a valid range.
+json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key.
+json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered.
+json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered.
+json.exception.invalid_iterator.210 | iterators do not fit | The iterator range passed to the insert function are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid.
+json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to.
+json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container.
+json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered.
+json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different from @ref begin().
+
+@liveexample{The following code shows how an `invalid_iterator` exception can be
+caught.,invalid_iterator}
+
+@sa - @ref exception for the base class of the library exceptions
+@sa - @ref parse_error for exceptions indicating a parse error
+@sa - @ref type_error for exceptions indicating that a member function was
+    executed with a wrong type
+@sa - @ref out_of_range for exceptions indicating access out of the defined range
+@sa - @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class invalid_iterator : public exception
+{
+ public:
+ static invalid_iterator create(int id_, const std::string& what_arg)
+ {
+ std::string w = exception::name("invalid_iterator", id_) + what_arg;
+ return invalid_iterator(id_, w.c_str());
+ }
+
+ private:
+ JSON_HEDLEY_NON_NULL(3)
+ invalid_iterator(int id_, const char* what_arg)
+ : exception(id_, what_arg) {}
+};
+
+/*!
+@brief exception indicating that a member function was executed with a wrong type
+
+This exception is thrown in case of a type error; that is, a library function is
+executed on a JSON value whose type does not match the expected semantics.
+
+Exceptions have ids 3xx.
+
+name / id | example message | description
+----------------------------- | --------------- | -------------------------
+json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead.
+json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible with the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types.
+json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t &.
+json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types.
+json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types.
+json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types.
+json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types.
+json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types.
+json.exception.type_error.309 | cannot use insert() with string | The @ref insert() member functions can only be executed for certain JSON types.
+json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types.
+json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types.
+json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types.
+json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined.
+json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers.
+json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive.
+json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded.
+json.exception.type_error.317 | JSON value cannot be serialized to requested format | The dynamic type of the object cannot be represented in the requested serialization format (e.g. a raw `true` or `null` JSON object cannot be serialized to BSON).
+
+@liveexample{The following code shows how a `type_error` exception can be
+caught.,type_error}
+
+@sa - @ref exception for the base class of the library exceptions
+@sa - @ref parse_error for exceptions indicating a parse error
+@sa - @ref invalid_iterator for exceptions indicating errors with iterators
+@sa - @ref out_of_range for exceptions indicating access out of the defined range
+@sa - @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class type_error : public exception
+{
+ public:
+ static type_error create(int id_, const std::string& what_arg)
+ {
+ std::string w = exception::name("type_error", id_) + what_arg;
+ return type_error(id_, w.c_str());
+ }
+
+ private:
+ JSON_HEDLEY_NON_NULL(3)
+ type_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
+};
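Editorial aside (not part of the diff): a minimal sketch of how a `type_error` surfaces in user code, assuming the vendored single-header `nlohmann/json.hpp` is on the include path.

```cpp
#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json j = nlohmann::json::array({1, 2, 3});
    try
    {
        // converting an array to std::string trips type_error.302
        auto s = j.get<std::string>();
    }
    catch (const nlohmann::json::type_error& e)
    {
        // e.what() starts with "[json.exception.type_error.302] ..."
        std::cout << e.what() << " (id " << e.id << ")\n";
    }
}
```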
+
+/*!
+@brief exception indicating access out of the defined range
+
+This exception is thrown in case a library function is called on an input
+parameter that exceeds the expected range, for instance in case of array
+indices or nonexistent object keys.
+
+Exceptions have ids 4xx.
+
+name / id | example message | description
+------------------------------- | --------------- | -------------------------
+json.exception.out_of_range.401 | array index 3 is out of range | The provided array index @a i is larger than @a size-1.
+json.exception.out_of_range.402 | array index '-' (3) is out of range | The special array index `-` in a JSON Pointer never describes a valid element of the array, but the index past the end. That is, it can only be used to add elements at this position, but not to read it.
+json.exception.out_of_range.403 | key 'foo' not found | The provided key was not found in the JSON object.
+json.exception.out_of_range.404 | unresolved reference token 'foo' | A reference token in a JSON Pointer could not be resolved.
+json.exception.out_of_range.405 | JSON pointer has no parent | The JSON Patch operations 'remove' and 'add' cannot be applied to the root element of the JSON value.
+json.exception.out_of_range.406 | number overflow parsing '10E1000' | A parsed number could not be stored without changing it to NaN or INF.
+json.exception.out_of_range.407 | number overflow serializing '9223372036854775808' | UBJSON and BSON only support integer numbers up to 9223372036854775807. (until version 3.8.0)
+json.exception.out_of_range.408 | excessive array size: 8658170730974374167 | The size (following `#`) of a UBJSON array or object exceeds the maximal capacity.
+json.exception.out_of_range.409 | BSON key cannot contain code point U+0000 (at byte 2) | Key identifiers to be serialized to BSON cannot contain code point U+0000, since the key is stored as a null-terminated C string.
+
+@liveexample{The following code shows how an `out_of_range` exception can be
+caught.,out_of_range}
+
+@sa - @ref exception for the base class of the library exceptions
+@sa - @ref parse_error for exceptions indicating a parse error
+@sa - @ref invalid_iterator for exceptions indicating errors with iterators
+@sa - @ref type_error for exceptions indicating executing a member function with
+ a wrong type
+@sa - @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class out_of_range : public exception
+{
+ public:
+ static out_of_range create(int id_, const std::string& what_arg)
+ {
+ std::string w = exception::name("out_of_range", id_) + what_arg;
+ return out_of_range(id_, w.c_str());
+ }
+
+ private:
+ JSON_HEDLEY_NON_NULL(3)
+ out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {}
+};
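Again as an editorial illustration (not part of the diff): the most common way to hit this class is `at()` with a missing key, assuming the same vendored header.

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json j = {{"foo", 1}};
    try
    {
        j.at("bar");  // missing key: raises out_of_range.403
    }
    catch (const nlohmann::json::out_of_range& e)
    {
        // "[json.exception.out_of_range.403] key 'bar' not found"
        std::cout << e.what() << '\n';
    }
}
```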
+
+/*!
+@brief exception indicating other library errors
+
+This exception is thrown in case of errors that cannot be classified with the
+other exception types.
+
+Exceptions have ids 5xx.
+
+name / id | example message | description
+------------------------------ | --------------- | -------------------------
+json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed.
+
+@sa - @ref exception for the base class of the library exceptions
+@sa - @ref parse_error for exceptions indicating a parse error
+@sa - @ref invalid_iterator for exceptions indicating errors with iterators
+@sa - @ref type_error for exceptions indicating executing a member function with
+ a wrong type
+@sa - @ref out_of_range for exceptions indicating access out of the defined range
+
+@liveexample{The following code shows how an `other_error` exception can be
+caught.,other_error}
+
+@since version 3.0.0
+*/
+class other_error : public exception
+{
+ public:
+ static other_error create(int id_, const std::string& what_arg)
+ {
+ std::string w = exception::name("other_error", id_) + what_arg;
+ return other_error(id_, w.c_str());
+ }
+
+ private:
+ JSON_HEDLEY_NON_NULL(3)
+ other_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
+};
+} // namespace detail
+} // namespace nlohmann
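For completeness (editorial sketch, not part of the diff): id 501 is reached through a failing JSON Patch `test` operation, assuming the vendored header.

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json doc = {{"baz", "qux"}};
    // a 'test' operation whose value does not match fails with other_error.501
    nlohmann::json patch = nlohmann::json::array(
        {{{"op", "test"}, {"path", "/baz"}, {"value", "bar"}}});
    try
    {
        doc.patch(patch);
    }
    catch (const nlohmann::json::other_error& e)
    {
        std::cout << e.what() << '\n';
    }
}
```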
+
+// #include <nlohmann/detail/macro_scope.hpp>
// #include <nlohmann/detail/meta/cpp_future.hpp>
-#include <ciso646> // not
#include <cstddef> // size_t
#include <type_traits> // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type
@@ -317,13 +2731,10 @@ constexpr T static_const<T>::value;
// #include <nlohmann/detail/meta/type_traits.hpp>
-#include <ciso646> // not
#include <limits> // numeric_limits
#include <type_traits> // false_type, is_constructible, is_integral, is_same, true_type
#include <utility> // declval
-// #include <nlohmann/json_fwd.hpp>
-
// #include <nlohmann/detail/iterators/iterator_traits.hpp>
@@ -336,11 +2747,11 @@ namespace nlohmann
{
namespace detail
{
-template <typename ...Ts> struct make_void
+template<typename ...Ts> struct make_void
{
using type = void;
};
-template <typename ...Ts> using void_t = typename make_void<Ts...>::type;
+template<typename ...Ts> using void_t = typename make_void<Ts...>::type;
} // namespace detail
} // namespace nlohmann
@@ -351,10 +2762,10 @@ namespace nlohmann
{
namespace detail
{
-template <typename It, typename = void>
+template<typename It, typename = void>
struct iterator_types {};
-template <typename It>
+template<typename It>
struct iterator_types <
It,
void_t<typename It::difference_type, typename It::value_type, typename It::pointer,
@@ -369,18 +2780,18 @@ struct iterator_types <
// This is required as some compilers implement std::iterator_traits in a way that
// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341.
-template <typename T, typename = void>
+template<typename T, typename = void>
struct iterator_traits
{
};
-template <typename T>
+template<typename T>
struct iterator_traits < T, enable_if_t < !std::is_pointer<T>::value >>
: iterator_types<T>
{
};
-template <typename T>
+template<typename T>
struct iterator_traits<T*, enable_if_t<std::is_object<T>::value>>
{
using iterator_category = std::random_access_iterator_tag;
@@ -389,8 +2800,10 @@ struct iterator_traits<T*, enable_if_t<std::is_object<T>::value>>
using pointer = T*;
using reference = T&;
};
-}
-}
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/macro_scope.hpp>
// #include <nlohmann/detail/meta/cpp_future.hpp>
@@ -402,7 +2815,7 @@ struct iterator_traits<T*, enable_if_t<std::is_object<T>::value>>
// #include <nlohmann/detail/meta/void_t.hpp>
-// http://en.cppreference.com/w/cpp/experimental/is_detected
+// https://en.cppreference.com/w/cpp/experimental/is_detected
namespace nlohmann
{
namespace detail
@@ -412,48 +2825,128 @@ struct nonesuch
nonesuch() = delete;
~nonesuch() = delete;
nonesuch(nonesuch const&) = delete;
+ nonesuch(nonesuch const&&) = delete;
void operator=(nonesuch const&) = delete;
+ void operator=(nonesuch&&) = delete;
};
-template <class Default,
- class AlwaysVoid,
- template <class...> class Op,
- class... Args>
+template<class Default,
+ class AlwaysVoid,
+ template<class...> class Op,
+ class... Args>
struct detector
{
using value_t = std::false_type;
using type = Default;
};
-template <class Default, template <class...> class Op, class... Args>
+template<class Default, template<class...> class Op, class... Args>
struct detector<Default, void_t<Op<Args...>>, Op, Args...>
{
using value_t = std::true_type;
using type = Op<Args...>;
};
-template <template <class...> class Op, class... Args>
+template<template<class...> class Op, class... Args>
using is_detected = typename detector<nonesuch, void, Op, Args...>::value_t;
-template <template <class...> class Op, class... Args>
+template<template<class...> class Op, class... Args>
using detected_t = typename detector<nonesuch, void, Op, Args...>::type;
-template <class Default, template <class...> class Op, class... Args>
+template<class Default, template<class...> class Op, class... Args>
using detected_or = detector<Default, void, Op, Args...>;
-template <class Default, template <class...> class Op, class... Args>
+template<class Default, template<class...> class Op, class... Args>
using detected_or_t = typename detected_or<Default, Op, Args...>::type;
-template <class Expected, template <class...> class Op, class... Args>
+template<class Expected, template<class...> class Op, class... Args>
using is_detected_exact = std::is_same<Expected, detected_t<Op, Args...>>;
-template <class To, template <class...> class Op, class... Args>
+template<class To, template<class...> class Op, class... Args>
using is_detected_convertible =
std::is_convertible<detected_t<Op, Args...>, To>;
} // namespace detail
} // namespace nlohmann
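An editorial aside on the detection idiom above: `is_detected<Op, Args...>` reports whether the archetype `Op<Args...>` is well-formed. A self-contained re-derivation under demo names (not the library's own identifiers):

```cpp
#include <map>
#include <type_traits>
#include <vector>

namespace demo {
template<typename...> using void_t = void;

// primary: Op<Args...> is ill-formed, so report false
template<class Default, class AlwaysVoid, template<class...> class Op, class... Args>
struct detector { using value_t = std::false_type; };

// partial specialization: Op<Args...> is well-formed, so report true
template<class Default, template<class...> class Op, class... Args>
struct detector<Default, void_t<Op<Args...>>, Op, Args...> { using value_t = std::true_type; };

template<template<class...> class Op, class... Args>
using is_detected = typename detector<void, void, Op, Args...>::value_t;

// archetype: "does T expose a nested mapped_type?"
template<typename T> using mapped_type_t = typename T::mapped_type;
} // namespace demo

static_assert(demo::is_detected<demo::mapped_type_t, std::map<int, int>>::value, "maps qualify");
static_assert(!demo::is_detected<demo::mapped_type_t, std::vector<int>>::value, "vectors do not");

int main() {}
```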
-// #include <nlohmann/detail/macro_scope.hpp>
+// #include <nlohmann/json_fwd.hpp>
+#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_
+#define INCLUDE_NLOHMANN_JSON_FWD_HPP_
+
+#include <cstdint> // int64_t, uint64_t
+#include <map> // map
+#include <memory> // allocator
+#include <string> // string
+#include <vector> // vector
+
+/*!
+@brief namespace for Niels Lohmann
+@see https://github.com/nlohmann
+@since version 1.0.0
+*/
+namespace nlohmann
+{
+/*!
+@brief default JSONSerializer template argument
+
+This serializer ignores the template arguments and uses ADL
+([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl))
+for serialization.
+*/
+template<typename T = void, typename SFINAE = void>
+struct adl_serializer;
+
+template<template<typename U, typename V, typename... Args> class ObjectType =
+ std::map,
+ template<typename U, typename... Args> class ArrayType = std::vector,
+ class StringType = std::string, class BooleanType = bool,
+ class NumberIntegerType = std::int64_t,
+ class NumberUnsignedType = std::uint64_t,
+ class NumberFloatType = double,
+ template<typename U> class AllocatorType = std::allocator,
+ template<typename T, typename SFINAE = void> class JSONSerializer =
+ adl_serializer,
+ class BinaryType = std::vector<std::uint8_t>>
+class basic_json;
+
+/*!
+@brief JSON Pointer
+
+A JSON pointer defines a string syntax for identifying a specific value
+within a JSON document. It can be used with functions `at` and
+`operator[]`. Furthermore, JSON pointers are the base for JSON patches.
+
+@sa [RFC 6901](https://tools.ietf.org/html/rfc6901)
+
+@since version 2.0.0
+*/
+template<typename BasicJsonType>
+class json_pointer;
+
+/*!
+@brief default JSON class
+
+This type is the default specialization of the @ref basic_json class which
+uses the standard template types.
+
+@since version 1.0.0
+*/
+using json = basic_json<>;
+
+template<class Key, class T, class IgnoredLess, class Allocator>
+struct ordered_map;
+
+/*!
+@brief ordered JSON class
+
+This type preserves the insertion order of object keys.
+
+@since version 3.9.0
+*/
+using ordered_json = basic_json<nlohmann::ordered_map>;
+
+} // namespace nlohmann
+
+#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_
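Worth noting for this forward-declaration header: `ordered_json` (new in 3.9.0) differs from plain `json` only in its object backing. A quick illustration, assuming the updated vendored header:

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::ordered_json oj;   // keeps insertion order (ordered_map)
    oj["zebra"] = 1;
    oj["apple"] = 2;
    std::cout << oj.dump() << '\n';  // {"zebra":1,"apple":2}

    nlohmann::json j;            // std::map backing: keys come out sorted
    j["zebra"] = 1;
    j["apple"] = 2;
    std::cout << j.dump() << '\n';   // {"apple":2,"zebra":1}
}
```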
namespace nlohmann
@@ -486,50 +2979,73 @@ template<typename> struct is_basic_json : std::false_type {};
NLOHMANN_BASIC_JSON_TPL_DECLARATION
struct is_basic_json<NLOHMANN_BASIC_JSON_TPL> : std::true_type {};
+//////////////////////
+// json_ref helpers //
+//////////////////////
+
+template<typename>
+class json_ref;
+
+template<typename>
+struct is_json_ref : std::false_type {};
+
+template<typename T>
+struct is_json_ref<json_ref<T>> : std::true_type {};
+
//////////////////////////
// aliases for detected //
//////////////////////////
-template <typename T>
+template<typename T>
using mapped_type_t = typename T::mapped_type;
-template <typename T>
+template<typename T>
using key_type_t = typename T::key_type;
-template <typename T>
+template<typename T>
using value_type_t = typename T::value_type;
-template <typename T>
+template<typename T>
using difference_type_t = typename T::difference_type;
-template <typename T>
+template<typename T>
using pointer_t = typename T::pointer;
-template <typename T>
+template<typename T>
using reference_t = typename T::reference;
-template <typename T>
+template<typename T>
using iterator_category_t = typename T::iterator_category;
-template <typename T>
+template<typename T>
using iterator_t = typename T::iterator;
-template <typename T, typename... Args>
+template<typename T, typename... Args>
using to_json_function = decltype(T::to_json(std::declval<Args>()...));
-template <typename T, typename... Args>
+template<typename T, typename... Args>
using from_json_function = decltype(T::from_json(std::declval<Args>()...));
-template <typename T, typename U>
+template<typename T, typename U>
using get_template_function = decltype(std::declval<T>().template get<U>());
// trait checking if JSONSerializer<T>::from_json(json const&, udt&) exists
-template <typename BasicJsonType, typename T, typename = void>
+template<typename BasicJsonType, typename T, typename = void>
struct has_from_json : std::false_type {};
+// trait checking if j.get<T> is valid
+// use this trait instead of std::is_constructible or std::is_convertible:
+// both rely on or make use of implicit conversions, and thus fail when T
+// has several constructors/operator= overloads (see https://github.com/nlohmann/json/issues/958)
template <typename BasicJsonType, typename T>
-struct has_from_json<BasicJsonType, T,
- enable_if_t<not is_basic_json<T>::value>>
+struct is_getable
+{
+ static constexpr bool value = is_detected<get_template_function, const BasicJsonType&, T>::value;
+};
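An aside on what `is_getable` actually checks: it probes whether the expression `j.get<T>()` is well-formed, sidestepping the implicit conversions that make `std::is_constructible`/`std::is_convertible` misfire (issue 958). A rough sketch of the probed expression, assuming the updated header:

```cpp
#include <nlohmann/json.hpp>
#include <type_traits>
#include <utility>

// the expression the trait detects: get<T>() on a const json value
template<typename T>
using get_expr_t = decltype(std::declval<const nlohmann::json&>().get<T>());

// int is retrievable, so is_getable<json, int> would hold
static_assert(std::is_same<get_expr_t<int>, int>::value, "get<int>() yields int");

int main() {}
```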
+
+template<typename BasicJsonType, typename T>
+struct has_from_json < BasicJsonType, T,
+ enable_if_t < !is_basic_json<T>::value >>
{
using serializer = typename BasicJsonType::template json_serializer<T, void>;
@@ -540,11 +3056,11 @@ struct has_from_json<BasicJsonType, T,
// This trait checks if JSONSerializer<T>::from_json(json const&) exists
// this overload is used for non-default-constructible user-defined-types
-template <typename BasicJsonType, typename T, typename = void>
+template<typename BasicJsonType, typename T, typename = void>
struct has_non_default_from_json : std::false_type {};
template<typename BasicJsonType, typename T>
-struct has_non_default_from_json<BasicJsonType, T, enable_if_t<not is_basic_json<T>::value>>
+struct has_non_default_from_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
{
using serializer = typename BasicJsonType::template json_serializer<T, void>;
@@ -555,11 +3071,11 @@ struct has_non_default_from_json<BasicJsonType, T, enable_if_t<not is_basic_json
// This trait checks if BasicJsonType::json_serializer<T>::to_json exists
// Do not evaluate the trait when T is a basic_json type, to avoid template instantiation infinite recursion.
-template <typename BasicJsonType, typename T, typename = void>
+template<typename BasicJsonType, typename T, typename = void>
struct has_to_json : std::false_type {};
-template <typename BasicJsonType, typename T>
-struct has_to_json<BasicJsonType, T, enable_if_t<not is_basic_json<T>::value>>
+template<typename BasicJsonType, typename T>
+struct has_to_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
{
using serializer = typename BasicJsonType::template json_serializer<T, void>;
@@ -573,10 +3089,10 @@ struct has_to_json<BasicJsonType, T, enable_if_t<not is_basic_json<T>::value>>
// is_ functions //
///////////////////
-template <typename T, typename = void>
+template<typename T, typename = void>
struct is_iterator_traits : std::false_type {};
-template <typename T>
+template<typename T>
struct is_iterator_traits<iterator_traits<T>>
{
private:
@@ -593,20 +3109,20 @@ struct is_iterator_traits<iterator_traits<T>>
// source: https://stackoverflow.com/a/37193089/4116453
-template <typename T, typename = void>
+template<typename T, typename = void>
struct is_complete_type : std::false_type {};
-template <typename T>
+template<typename T>
struct is_complete_type<T, decltype(void(sizeof(T)))> : std::true_type {};
-template <typename BasicJsonType, typename CompatibleObjectType,
- typename = void>
+template<typename BasicJsonType, typename CompatibleObjectType,
+ typename = void>
struct is_compatible_object_type_impl : std::false_type {};
-template <typename BasicJsonType, typename CompatibleObjectType>
+template<typename BasicJsonType, typename CompatibleObjectType>
struct is_compatible_object_type_impl <
BasicJsonType, CompatibleObjectType,
- enable_if_t<is_detected<mapped_type_t, CompatibleObjectType>::value and
+ enable_if_t < is_detected<mapped_type_t, CompatibleObjectType>::value&&
is_detected<key_type_t, CompatibleObjectType>::value >>
{
@@ -615,44 +3131,53 @@ struct is_compatible_object_type_impl <
// macOS's is_constructible does not play well with nonesuch...
static constexpr bool value =
std::is_constructible<typename object_t::key_type,
- typename CompatibleObjectType::key_type>::value and
+ typename CompatibleObjectType::key_type>::value &&
std::is_constructible<typename object_t::mapped_type,
typename CompatibleObjectType::mapped_type>::value;
};
-template <typename BasicJsonType, typename CompatibleObjectType>
+template<typename BasicJsonType, typename CompatibleObjectType>
struct is_compatible_object_type
: is_compatible_object_type_impl<BasicJsonType, CompatibleObjectType> {};
-template <typename BasicJsonType, typename ConstructibleObjectType,
- typename = void>
+template<typename BasicJsonType, typename ConstructibleObjectType,
+ typename = void>
struct is_constructible_object_type_impl : std::false_type {};
-template <typename BasicJsonType, typename ConstructibleObjectType>
+template<typename BasicJsonType, typename ConstructibleObjectType>
struct is_constructible_object_type_impl <
BasicJsonType, ConstructibleObjectType,
- enable_if_t<is_detected<mapped_type_t, ConstructibleObjectType>::value and
+ enable_if_t < is_detected<mapped_type_t, ConstructibleObjectType>::value&&
is_detected<key_type_t, ConstructibleObjectType>::value >>
{
using object_t = typename BasicJsonType::object_t;
static constexpr bool value =
- (std::is_constructible<typename ConstructibleObjectType::key_type, typename object_t::key_type>::value and
- std::is_same<typename object_t::mapped_type, typename ConstructibleObjectType::mapped_type>::value) or
- (has_from_json<BasicJsonType, typename ConstructibleObjectType::mapped_type>::value or
- has_non_default_from_json<BasicJsonType, typename ConstructibleObjectType::mapped_type >::value);
+ (std::is_default_constructible<ConstructibleObjectType>::value &&
+ (std::is_move_assignable<ConstructibleObjectType>::value ||
+ std::is_copy_assignable<ConstructibleObjectType>::value) &&
+ (std::is_constructible<typename ConstructibleObjectType::key_type,
+ typename object_t::key_type>::value &&
+ std::is_same <
+ typename object_t::mapped_type,
+ typename ConstructibleObjectType::mapped_type >::value)) ||
+ (has_from_json<BasicJsonType,
+ typename ConstructibleObjectType::mapped_type>::value ||
+ has_non_default_from_json <
+ BasicJsonType,
+ typename ConstructibleObjectType::mapped_type >::value);
};
-template <typename BasicJsonType, typename ConstructibleObjectType>
+template<typename BasicJsonType, typename ConstructibleObjectType>
struct is_constructible_object_type
: is_constructible_object_type_impl<BasicJsonType,
ConstructibleObjectType> {};
-template <typename BasicJsonType, typename CompatibleStringType,
- typename = void>
+template<typename BasicJsonType, typename CompatibleStringType,
+ typename = void>
struct is_compatible_string_type_impl : std::false_type {};
-template <typename BasicJsonType, typename CompatibleStringType>
+template<typename BasicJsonType, typename CompatibleStringType>
struct is_compatible_string_type_impl <
BasicJsonType, CompatibleStringType,
enable_if_t<is_detected_exact<typename BasicJsonType::string_t::value_type,
@@ -662,15 +3187,15 @@ struct is_compatible_string_type_impl <
std::is_constructible<typename BasicJsonType::string_t, CompatibleStringType>::value;
};
-template <typename BasicJsonType, typename ConstructibleStringType>
+template<typename BasicJsonType, typename ConstructibleStringType>
struct is_compatible_string_type
: is_compatible_string_type_impl<BasicJsonType, ConstructibleStringType> {};
-template <typename BasicJsonType, typename ConstructibleStringType,
- typename = void>
+template<typename BasicJsonType, typename ConstructibleStringType,
+ typename = void>
struct is_constructible_string_type_impl : std::false_type {};
-template <typename BasicJsonType, typename ConstructibleStringType>
+template<typename BasicJsonType, typename ConstructibleStringType>
struct is_constructible_string_type_impl <
BasicJsonType, ConstructibleStringType,
enable_if_t<is_detected_exact<typename BasicJsonType::string_t::value_type,
@@ -681,82 +3206,86 @@ struct is_constructible_string_type_impl <
typename BasicJsonType::string_t>::value;
};
-template <typename BasicJsonType, typename ConstructibleStringType>
+template<typename BasicJsonType, typename ConstructibleStringType>
struct is_constructible_string_type
: is_constructible_string_type_impl<BasicJsonType, ConstructibleStringType> {};
-template <typename BasicJsonType, typename CompatibleArrayType, typename = void>
+template<typename BasicJsonType, typename CompatibleArrayType, typename = void>
struct is_compatible_array_type_impl : std::false_type {};
-template <typename BasicJsonType, typename CompatibleArrayType>
+template<typename BasicJsonType, typename CompatibleArrayType>
struct is_compatible_array_type_impl <
BasicJsonType, CompatibleArrayType,
- enable_if_t<is_detected<value_type_t, CompatibleArrayType>::value and
- is_detected<iterator_t, CompatibleArrayType>::value and
+ enable_if_t < is_detected<value_type_t, CompatibleArrayType>::value&&
+ is_detected<iterator_t, CompatibleArrayType>::value&&
// This is needed because json_reverse_iterator has a ::iterator type...
// Therefore it is detected as a CompatibleArrayType.
// The real fix would be to have an Iterable concept.
- not is_iterator_traits<
- iterator_traits<CompatibleArrayType>>::value >>
+ !is_iterator_traits <
+ iterator_traits<CompatibleArrayType >>::value >>
{
static constexpr bool value =
std::is_constructible<BasicJsonType,
typename CompatibleArrayType::value_type>::value;
};
-template <typename BasicJsonType, typename CompatibleArrayType>
+template<typename BasicJsonType, typename CompatibleArrayType>
struct is_compatible_array_type
: is_compatible_array_type_impl<BasicJsonType, CompatibleArrayType> {};
-template <typename BasicJsonType, typename ConstructibleArrayType, typename = void>
+template<typename BasicJsonType, typename ConstructibleArrayType, typename = void>
struct is_constructible_array_type_impl : std::false_type {};
-template <typename BasicJsonType, typename ConstructibleArrayType>
+template<typename BasicJsonType, typename ConstructibleArrayType>
struct is_constructible_array_type_impl <
BasicJsonType, ConstructibleArrayType,
enable_if_t<std::is_same<ConstructibleArrayType,
typename BasicJsonType::value_type>::value >>
: std::true_type {};
-template <typename BasicJsonType, typename ConstructibleArrayType>
+template<typename BasicJsonType, typename ConstructibleArrayType>
struct is_constructible_array_type_impl <
BasicJsonType, ConstructibleArrayType,
- enable_if_t<not std::is_same<ConstructibleArrayType,
- typename BasicJsonType::value_type>::value and
- is_detected<value_type_t, ConstructibleArrayType>::value and
- is_detected<iterator_t, ConstructibleArrayType>::value and
- is_complete_type<
- detected_t<value_type_t, ConstructibleArrayType>>::value >>
+ enable_if_t < !std::is_same<ConstructibleArrayType,
+ typename BasicJsonType::value_type>::value&&
+ std::is_default_constructible<ConstructibleArrayType>::value&&
+(std::is_move_assignable<ConstructibleArrayType>::value ||
+ std::is_copy_assignable<ConstructibleArrayType>::value)&&
+is_detected<value_type_t, ConstructibleArrayType>::value&&
+is_detected<iterator_t, ConstructibleArrayType>::value&&
+is_complete_type <
+detected_t<value_type_t, ConstructibleArrayType >>::value >>
{
static constexpr bool value =
// This is needed because json_reverse_iterator has a ::iterator type,
- // furthermore, std::back_insert_iterator (and other iterators) have a base class `iterator`...
- // Therefore it is detected as a ConstructibleArrayType.
- // The real fix would be to have an Iterable concept.
- not is_iterator_traits <
- iterator_traits<ConstructibleArrayType >>::value and
-
- (std::is_same<typename ConstructibleArrayType::value_type, typename BasicJsonType::array_t::value_type>::value or
+ // furthermore, std::back_insert_iterator (and other iterators) have a
+ // base class `iterator`... Therefore it is detected as a
+ // ConstructibleArrayType. The real fix would be to have an Iterable
+ // concept.
+ !is_iterator_traits<iterator_traits<ConstructibleArrayType>>::value &&
+
+ (std::is_same<typename ConstructibleArrayType::value_type,
+ typename BasicJsonType::array_t::value_type>::value ||
has_from_json<BasicJsonType,
- typename ConstructibleArrayType::value_type>::value or
+ typename ConstructibleArrayType::value_type>::value ||
has_non_default_from_json <
BasicJsonType, typename ConstructibleArrayType::value_type >::value);
};
-template <typename BasicJsonType, typename ConstructibleArrayType>
+template<typename BasicJsonType, typename ConstructibleArrayType>
struct is_constructible_array_type
: is_constructible_array_type_impl<BasicJsonType, ConstructibleArrayType> {};
-template <typename RealIntegerType, typename CompatibleNumberIntegerType,
- typename = void>
+template<typename RealIntegerType, typename CompatibleNumberIntegerType,
+ typename = void>
struct is_compatible_integer_type_impl : std::false_type {};
-template <typename RealIntegerType, typename CompatibleNumberIntegerType>
+template<typename RealIntegerType, typename CompatibleNumberIntegerType>
struct is_compatible_integer_type_impl <
RealIntegerType, CompatibleNumberIntegerType,
- enable_if_t<std::is_integral<RealIntegerType>::value and
- std::is_integral<CompatibleNumberIntegerType>::value and
- not std::is_same<bool, CompatibleNumberIntegerType>::value >>
+ enable_if_t < std::is_integral<RealIntegerType>::value&&
+ std::is_integral<CompatibleNumberIntegerType>::value&&
+ !std::is_same<bool, CompatibleNumberIntegerType>::value >>
{
// is there an assert somewhere on overflows?
using RealLimits = std::numeric_limits<RealIntegerType>;
@@ -764,20 +3293,20 @@ struct is_compatible_integer_type_impl <
static constexpr auto value =
std::is_constructible<RealIntegerType,
- CompatibleNumberIntegerType>::value and
- CompatibleLimits::is_integer and
+ CompatibleNumberIntegerType>::value &&
+ CompatibleLimits::is_integer &&
RealLimits::is_signed == CompatibleLimits::is_signed;
};
-template <typename RealIntegerType, typename CompatibleNumberIntegerType>
+template<typename RealIntegerType, typename CompatibleNumberIntegerType>
struct is_compatible_integer_type
: is_compatible_integer_type_impl<RealIntegerType,
CompatibleNumberIntegerType> {};
-template <typename BasicJsonType, typename CompatibleType, typename = void>
+template<typename BasicJsonType, typename CompatibleType, typename = void>
struct is_compatible_type_impl: std::false_type {};
-template <typename BasicJsonType, typename CompatibleType>
+template<typename BasicJsonType, typename CompatibleType>
struct is_compatible_type_impl <
BasicJsonType, CompatibleType,
enable_if_t<is_complete_type<CompatibleType>::value >>
@@ -786,388 +3315,22 @@ struct is_compatible_type_impl <
has_to_json<BasicJsonType, CompatibleType>::value;
};
-template <typename BasicJsonType, typename CompatibleType>
+template<typename BasicJsonType, typename CompatibleType>
struct is_compatible_type
: is_compatible_type_impl<BasicJsonType, CompatibleType> {};
-} // namespace detail
-} // namespace nlohmann
-
-// #include <nlohmann/detail/exceptions.hpp>
-
-
-#include <exception> // exception
-#include <stdexcept> // runtime_error
-#include <string> // to_string
-
-// #include <nlohmann/detail/input/position_t.hpp>
-
-
-#include <cstddef> // size_t
-
-namespace nlohmann
-{
-namespace detail
-{
-/// struct to capture the start position of the current token
-struct position_t
-{
- /// the total number of characters read
- std::size_t chars_read_total = 0;
- /// the number of characters read in the current line
- std::size_t chars_read_current_line = 0;
- /// the number of lines read
- std::size_t lines_read = 0;
-
- /// conversion to size_t to preserve SAX interface
- constexpr operator size_t() const
- {
- return chars_read_total;
- }
-};
-
-}
-}
-
-
-namespace nlohmann
-{
-namespace detail
-{
-////////////////
-// exceptions //
-////////////////
-
-/*!
-@brief general exception of the @ref basic_json class
-
-This class is an extension of `std::exception` objects with a member @a id for
-exception ids. It is used as the base class for all exceptions thrown by the
-@ref basic_json class. This class can hence be used as "wildcard" to catch
-exceptions.
-
-Subclasses:
-- @ref parse_error for exceptions indicating a parse error
-- @ref invalid_iterator for exceptions indicating errors with iterators
-- @ref type_error for exceptions indicating executing a member function with
- a wrong type
-- @ref out_of_range for exceptions indicating access out of the defined range
-- @ref other_error for exceptions indicating other library errors
-
-@internal
-@note To have nothrow-copy-constructible exceptions, we internally use
- `std::runtime_error` which can cope with arbitrary-length error messages.
- Intermediate strings are built with static functions and then passed to
- the actual constructor.
-@endinternal
-
-@liveexample{The following code shows how arbitrary library exceptions can be
-caught.,exception}
-
-@since version 3.0.0
-*/
-class exception : public std::exception
-{
- public:
- /// returns the explanatory string
- const char* what() const noexcept override
- {
- return m.what();
- }
-
- /// the id of the exception
- const int id;
-
- protected:
- exception(int id_, const char* what_arg) : id(id_), m(what_arg) {}
-
- static std::string name(const std::string& ename, int id_)
- {
- return "[json.exception." + ename + "." + std::to_string(id_) + "] ";
- }
-
- private:
- /// an exception object as storage for error messages
- std::runtime_error m;
-};
-
-/*!
-@brief exception indicating a parse error
-
-This exception is thrown by the library when a parse error occurs. Parse errors
-can occur during the deserialization of JSON text, CBOR, MessagePack, as well
-as when using JSON Patch.
-
-Member @a byte holds the byte index of the last read character in the input
-file.
-
-Exceptions have ids 1xx.
-
-name / id | example message | description
------------------------------- | --------------- | -------------------------
-json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position.
-json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. Code points above above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point.
-json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid.
-json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects.
-json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors.
-json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`.
-json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character.
-json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences.
-json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number.
-json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read.
-json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read.
-json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read.
-json.exception.parse_error.114 | parse error: Unsupported BSON record type 0x0F | The parsing of the corresponding BSON record type is not implemented (yet).
-
-@note For an input with n bytes, 1 is the index of the first character and n+1
- is the index of the terminating null byte or the end of file. This also
- holds true when reading a byte vector (CBOR or MessagePack).
-
-@liveexample{The following code shows how a `parse_error` exception can be
-caught.,parse_error}
-
-@sa @ref exception for the base class of the library exceptions
-@sa @ref invalid_iterator for exceptions indicating errors with iterators
-@sa @ref type_error for exceptions indicating executing a member function with
- a wrong type
-@sa @ref out_of_range for exceptions indicating access out of the defined range
-@sa @ref other_error for exceptions indicating other library errors
-
-@since version 3.0.0
-*/
-class parse_error : public exception
-{
- public:
- /*!
- @brief create a parse error exception
- @param[in] id_ the id of the exception
- @param[in] position the position where the error occurred (or with
- chars_read_total=0 if the position cannot be
- determined)
- @param[in] what_arg the explanatory string
- @return parse_error object
- */
- static parse_error create(int id_, const position_t& pos, const std::string& what_arg)
- {
- std::string w = exception::name("parse_error", id_) + "parse error" +
- position_string(pos) + ": " + what_arg;
- return parse_error(id_, pos.chars_read_total, w.c_str());
- }
-
- static parse_error create(int id_, std::size_t byte_, const std::string& what_arg)
- {
- std::string w = exception::name("parse_error", id_) + "parse error" +
- (byte_ != 0 ? (" at byte " + std::to_string(byte_)) : "") +
- ": " + what_arg;
- return parse_error(id_, byte_, w.c_str());
- }
-
- /*!
- @brief byte index of the parse error
-
- The byte index of the last read character in the input file.
-
- @note For an input with n bytes, 1 is the index of the first character and
- n+1 is the index of the terminating null byte or the end of file.
- This also holds true when reading a byte vector (CBOR or MessagePack).
- */
- const std::size_t byte;
-
- private:
- parse_error(int id_, std::size_t byte_, const char* what_arg)
- : exception(id_, what_arg), byte(byte_) {}
-
- static std::string position_string(const position_t& pos)
- {
- return " at line " + std::to_string(pos.lines_read + 1) +
- ", column " + std::to_string(pos.chars_read_current_line);
- }
-};
-
-/*!
-@brief exception indicating errors with iterators
-
-This exception is thrown if iterators passed to a library function do not match
-the expected semantics.
-
-Exceptions have ids 2xx.
-
-name / id | example message | description
------------------------------------ | --------------- | -------------------------
-json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid.
-json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion.
-json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from.
-json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid.
-json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid.
-json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence to not define a valid range.
-json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key.
-json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered.
-json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered.
-json.exception.invalid_iterator.210 | iterators do not fit | The iterator range passed to the insert function are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid.
-json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to.
-json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container.
-json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered.
-json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different to @ref begin().
-
-@liveexample{The following code shows how an `invalid_iterator` exception can be
-caught.,invalid_iterator}
-
-@sa @ref exception for the base class of the library exceptions
-@sa @ref parse_error for exceptions indicating a parse error
-@sa @ref type_error for exceptions indicating executing a member function with
- a wrong type
-@sa @ref out_of_range for exceptions indicating access out of the defined range
-@sa @ref other_error for exceptions indicating other library errors
-
-@since version 3.0.0
-*/
-class invalid_iterator : public exception
-{
- public:
- static invalid_iterator create(int id_, const std::string& what_arg)
- {
- std::string w = exception::name("invalid_iterator", id_) + what_arg;
- return invalid_iterator(id_, w.c_str());
- }
-
- private:
- invalid_iterator(int id_, const char* what_arg)
- : exception(id_, what_arg) {}
-};
-/*!
-@brief exception indicating executing a member function with a wrong type
-
-This exception is thrown in case of a type error; that is, a library function is
-executed on a JSON value whose type does not match the expected semantics.
-
-Exceptions have ids 3xx.
-
-name / id | example message | description
------------------------------ | --------------- | -------------------------
-json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead.
-json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible to the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types.
-json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t&.
-json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types.
-json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types.
-json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types.
-json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types.
-json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types.
-json.exception.type_error.309 | cannot use insert() with | The @ref insert() member functions can only be executed for certain JSON types.
-json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types.
-json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types.
-json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types.
-json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined.
-json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers.
-json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive.
-json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. |
-json.exception.type_error.317 | JSON value cannot be serialized to requested format | The dynamic type of the object cannot be represented in the requested serialization format (e.g. a raw `true` or `null` JSON object cannot be serialized to BSON) |
-
-@liveexample{The following code shows how a `type_error` exception can be
-caught.,type_error}
+// https://en.cppreference.com/w/cpp/types/conjunction
+template<class...> struct conjunction : std::true_type { };
+template<class B1> struct conjunction<B1> : B1 { };
+template<class B1, class... Bn>
+struct conjunction<B1, Bn...>
+: std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {};
-@sa @ref exception for the base class of the library exceptions
-@sa @ref parse_error for exceptions indicating a parse error
-@sa @ref invalid_iterator for exceptions indicating errors with iterators
-@sa @ref out_of_range for exceptions indicating access out of the defined range
-@sa @ref other_error for exceptions indicating other library errors
+template<typename T1, typename T2>
+struct is_constructible_tuple : std::false_type {};
-@since version 3.0.0
-*/
-class type_error : public exception
-{
- public:
- static type_error create(int id_, const std::string& what_arg)
- {
- std::string w = exception::name("type_error", id_) + what_arg;
- return type_error(id_, w.c_str());
- }
-
- private:
- type_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
-};
-
-/*!
-@brief exception indicating access out of the defined range
-
-This exception is thrown in case a library function is called on an input
-parameter that exceeds the expected range, for instance in case of array
-indices or nonexisting object keys.
-
-Exceptions have ids 4xx.
-
-name / id | example message | description
-------------------------------- | --------------- | -------------------------
-json.exception.out_of_range.401 | array index 3 is out of range | The provided array index @a i is larger than @a size-1.
-json.exception.out_of_range.402 | array index '-' (3) is out of range | The special array index `-` in a JSON Pointer never describes a valid element of the array, but the index past the end. That is, it can only be used to add elements at this position, but not to read it.
-json.exception.out_of_range.403 | key 'foo' not found | The provided key was not found in the JSON object.
-json.exception.out_of_range.404 | unresolved reference token 'foo' | A reference token in a JSON Pointer could not be resolved.
-json.exception.out_of_range.405 | JSON pointer has no parent | The JSON Patch operations 'remove' and 'add' can not be applied to the root element of the JSON value.
-json.exception.out_of_range.406 | number overflow parsing '10E1000' | A parsed number could not be stored as without changing it to NaN or INF.
-json.exception.out_of_range.407 | number overflow serializing '9223372036854775808' | UBJSON and BSON only support integer numbers up to 9223372036854775807. |
-json.exception.out_of_range.408 | excessive array size: 8658170730974374167 | The size (following `#`) of an UBJSON array or object exceeds the maximal capacity. |
-json.exception.out_of_range.409 | BSON key cannot contain code point U+0000 (at byte 2) | Key identifiers to be serialized to BSON cannot contain code point U+0000, since the key is stored as zero-terminated c-string |
-
-@liveexample{The following code shows how an `out_of_range` exception can be
-caught.,out_of_range}
-
-@sa @ref exception for the base class of the library exceptions
-@sa @ref parse_error for exceptions indicating a parse error
-@sa @ref invalid_iterator for exceptions indicating errors with iterators
-@sa @ref type_error for exceptions indicating executing a member function with
- a wrong type
-@sa @ref other_error for exceptions indicating other library errors
-
-@since version 3.0.0
-*/
-class out_of_range : public exception
-{
- public:
- static out_of_range create(int id_, const std::string& what_arg)
- {
- std::string w = exception::name("out_of_range", id_) + what_arg;
- return out_of_range(id_, w.c_str());
- }
-
- private:
- out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {}
-};
-
-/*!
-@brief exception indicating other library errors
-
-This exception is thrown in case of errors that cannot be classified with the
-other exception types.
-
-Exceptions have ids 5xx.
-
-name / id | example message | description
------------------------------- | --------------- | -------------------------
-json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed.
-
-@sa @ref exception for the base class of the library exceptions
-@sa @ref parse_error for exceptions indicating a parse error
-@sa @ref invalid_iterator for exceptions indicating errors with iterators
-@sa @ref type_error for exceptions indicating executing a member function with
- a wrong type
-@sa @ref out_of_range for exceptions indicating access out of the defined range
-
-@liveexample{The following code shows how an `other_error` exception can be
-caught.,other_error}
-
-@since version 3.0.0
-*/
-class other_error : public exception
-{
- public:
- static other_error create(int id_, const std::string& what_arg)
- {
- std::string w = exception::name("other_error", id_) + what_arg;
- return other_error(id_, w.c_str());
- }
-
- private:
- other_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
-};
+template<typename T1, typename... Args>
+struct is_constructible_tuple<T1, std::tuple<Args...>> : conjunction<std::is_constructible<T1, Args>...> {};
} // namespace detail
} // namespace nlohmann
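The `conjunction` added above is the usual C++11 backport of `std::conjunction`: instantiation stops at the first false trait, which is what lets `is_constructible_tuple` expand `std::is_constructible<T1, Args>...` safely. A standalone mirror of it (demo copy, same shape as the hunk):

```cpp
#include <type_traits>

template<class...> struct conjunction : std::true_type {};
template<class B1> struct conjunction<B1> : B1 {};
template<class B1, class... Bn>
struct conjunction<B1, Bn...>
    : std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {};

static_assert(conjunction<std::is_integral<int>, std::is_signed<int>>::value,
              "all traits hold");
static_assert(!conjunction<std::is_integral<int>, std::is_signed<unsigned>>::value,
              "one trait fails");

int main() {}
```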
@@ -1175,9 +3338,9 @@ class other_error : public exception
#include <array> // array
-#include <ciso646> // and
#include <cstddef> // size_t
#include <cstdint> // uint8_t
+#include <string> // string
namespace nlohmann
{
@@ -1221,60 +3384,39 @@ enum class value_t : std::uint8_t
number_integer, ///< number value (signed integer)
number_unsigned, ///< number value (unsigned integer)
number_float, ///< number value (floating-point)
- discarded ///< discarded by the the parser callback function
+ binary, ///< binary array (ordered collection of bytes)
+ discarded ///< discarded by the parser callback function
};
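The new `binary` enumerator backs the first-class binary values introduced in 3.9.0 (used by the CBOR/MessagePack/BSON round-trips). A small usage sketch, assuming the updated header:

```cpp
#include <cassert>
#include <nlohmann/json.hpp>

int main()
{
    // binary values are stored as binary_t (std::vector<std::uint8_t> by default)
    nlohmann::json b = nlohmann::json::binary({0xCA, 0xFE});
    assert(b.is_binary());
    assert(!b.is_array());  // distinct from a regular array of numbers
}
```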
/*!
@brief comparison operator for JSON types
Returns an ordering that is similar to Python:
-- order: null < boolean < number < object < array < string
+- order: null < boolean < number < object < array < string < binary
- furthermore, each type is not smaller than itself
- discarded values are not comparable
+- binary is represented as a b"" string in Python and is directly comparable to
+  a string there; however, making a binary array directly comparable with a
+  string would be surprising behavior when working with JSON.
@since version 1.0.0
*/
inline bool operator<(const value_t lhs, const value_t rhs) noexcept
{
- static constexpr std::array<std::uint8_t, 8> order = {{
+ static constexpr std::array<std::uint8_t, 9> order = {{
0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */,
- 1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */
+ 1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */,
+ 6 /* binary */
}
};
const auto l_index = static_cast<std::size_t>(lhs);
const auto r_index = static_cast<std::size_t>(rhs);
- return l_index < order.size() and r_index < order.size() and order[l_index] < order[r_index];
+ return l_index < order.size() && r_index < order.size() && order[l_index] < order[r_index];
}
} // namespace detail
} // namespace nlohmann
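One user-visible effect of this ordering: comparing two `json` values of different types falls back to the type order. For instance (editorial sketch, assuming the updated header):

```cpp
#include <cassert>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json null_v = nullptr;
    nlohmann::json bool_v = false;
    nlohmann::json num_v  = 42;
    nlohmann::json str_v  = "x";

    // null < boolean < number < object < array < string < binary
    assert(null_v < bool_v);
    assert(bool_v < num_v);
    assert(num_v < str_v);
}
```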
-// #include <nlohmann/detail/conversions/from_json.hpp>
-
-
-#include <algorithm> // transform
-#include <array> // array
-#include <ciso646> // and, not
-#include <forward_list> // forward_list
-#include <iterator> // inserter, front_inserter, end
-#include <map> // map
-#include <string> // string
-#include <tuple> // tuple, make_tuple
-#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
-#include <unordered_map> // unordered_map
-#include <utility> // pair, declval
-#include <valarray> // valarray
-
-// #include <nlohmann/detail/exceptions.hpp>
-
-// #include <nlohmann/detail/macro_scope.hpp>
-
-// #include <nlohmann/detail/meta/cpp_future.hpp>
-
-// #include <nlohmann/detail/meta/type_traits.hpp>
-
-// #include <nlohmann/detail/value_t.hpp>
-
namespace nlohmann
{
@@ -1283,7 +3425,7 @@ namespace detail
template<typename BasicJsonType>
void from_json(const BasicJsonType& j, typename std::nullptr_t& n)
{
- if (JSON_UNLIKELY(not j.is_null()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_null()))
{
JSON_THROW(type_error::create(302, "type must be null, but is " + std::string(j.type_name())));
}
@@ -1291,10 +3433,10 @@ void from_json(const BasicJsonType& j, typename std::nullptr_t& n)
}
// overloads for basic_json template parameters
-template<typename BasicJsonType, typename ArithmeticType,
- enable_if_t<std::is_arithmetic<ArithmeticType>::value and
- not std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
- int> = 0>
+template < typename BasicJsonType, typename ArithmeticType,
+ enable_if_t < std::is_arithmetic<ArithmeticType>::value&&
+ !std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
+ int > = 0 >
void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val)
{
switch (static_cast<value_t>(j))
@@ -1323,7 +3465,7 @@ void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val)
template<typename BasicJsonType>
void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b)
{
- if (JSON_UNLIKELY(not j.is_boolean()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_boolean()))
{
JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(j.type_name())));
}
@@ -1333,7 +3475,7 @@ void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b)
template<typename BasicJsonType>
void from_json(const BasicJsonType& j, typename BasicJsonType::string_t& s)
{
- if (JSON_UNLIKELY(not j.is_string()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
{
JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name())));
}
@@ -1343,13 +3485,13 @@ void from_json(const BasicJsonType& j, typename BasicJsonType::string_t& s)
template <
typename BasicJsonType, typename ConstructibleStringType,
enable_if_t <
- is_constructible_string_type<BasicJsonType, ConstructibleStringType>::value and
- not std::is_same<typename BasicJsonType::string_t,
- ConstructibleStringType>::value,
+ is_constructible_string_type<BasicJsonType, ConstructibleStringType>::value&&
+ !std::is_same<typename BasicJsonType::string_t,
+ ConstructibleStringType>::value,
int > = 0 >
void from_json(const BasicJsonType& j, ConstructibleStringType& s)
{
- if (JSON_UNLIKELY(not j.is_string()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_string()))
{
JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name())));
}
@@ -1386,13 +3528,14 @@ void from_json(const BasicJsonType& j, EnumType& e)
// forward_list doesn't have an insert method
template<typename BasicJsonType, typename T, typename Allocator,
- enable_if_t<std::is_convertible<BasicJsonType, T>::value, int> = 0>
+ enable_if_t<is_getable<BasicJsonType, T>::value, int> = 0>
void from_json(const BasicJsonType& j, std::forward_list<T, Allocator>& l)
{
- if (JSON_UNLIKELY(not j.is_array()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
}
+ l.clear();
std::transform(j.rbegin(), j.rend(),
std::front_inserter(l), [](const BasicJsonType & i)
{
@@ -1402,15 +3545,29 @@ void from_json(const BasicJsonType& j, std::forward_list<T, Allocator>& l)
// valarray doesn't have an insert method
template<typename BasicJsonType, typename T,
- enable_if_t<std::is_convertible<BasicJsonType, T>::value, int> = 0>
+ enable_if_t<is_getable<BasicJsonType, T>::value, int> = 0>
void from_json(const BasicJsonType& j, std::valarray<T>& l)
{
- if (JSON_UNLIKELY(not j.is_array()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
}
l.resize(j.size());
- std::copy(j.m_value.array->begin(), j.m_value.array->end(), std::begin(l));
+ std::transform(j.begin(), j.end(), std::begin(l),
+ [](const BasicJsonType & elem)
+ {
+ return elem.template get<T>();
+ });
+}
+
+template<typename BasicJsonType, typename T, std::size_t N>
+auto from_json(const BasicJsonType& j, T (&arr)[N])
+-> decltype(j.template get<T>(), void())
+{
+ for (std::size_t i = 0; i < N; ++i)
+ {
+ arr[i] = j.at(i).template get<T>();
+ }
}
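
// Illustrative sketch (editorial, not part of the upstream patch): the
// overload above lets a JSON array be read straight into a raw C array,
// assuming the full <nlohmann/json.hpp> header and its default `json`
// alias (3.8+, where the matching get_to overload ships).

inline void example_c_array()
{
    nlohmann::json j = {1, 2, 3};
    int buf[3];
    j.get_to(buf);   // dispatches to from_json(j, T (&arr)[N]) above
}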
template<typename BasicJsonType>
@@ -1419,7 +3576,7 @@ void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_
arr = *j.template get_ptr<const typename BasicJsonType::array_t*>();
}
-template <typename BasicJsonType, typename T, std::size_t N>
+template<typename BasicJsonType, typename T, std::size_t N>
auto from_json_array_impl(const BasicJsonType& j, std::array<T, N>& arr,
priority_tag<2> /*unused*/)
-> decltype(j.template get<T>(), void())
@@ -1439,46 +3596,50 @@ auto from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, p
{
using std::end;
- arr.reserve(j.size());
+ ConstructibleArrayType ret;
+ ret.reserve(j.size());
std::transform(j.begin(), j.end(),
- std::inserter(arr, end(arr)), [](const BasicJsonType & i)
+ std::inserter(ret, end(ret)), [](const BasicJsonType & i)
{
// get<BasicJsonType>() returns *this, this won't call a from_json
// method when value_type is BasicJsonType
return i.template get<typename ConstructibleArrayType::value_type>();
});
+ arr = std::move(ret);
}
-template <typename BasicJsonType, typename ConstructibleArrayType>
+template<typename BasicJsonType, typename ConstructibleArrayType>
void from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr,
priority_tag<0> /*unused*/)
{
using std::end;
+ ConstructibleArrayType ret;
std::transform(
- j.begin(), j.end(), std::inserter(arr, end(arr)),
+ j.begin(), j.end(), std::inserter(ret, end(ret)),
[](const BasicJsonType & i)
{
// get<BasicJsonType>() returns *this, this won't call a from_json
// method when value_type is BasicJsonType
return i.template get<typename ConstructibleArrayType::value_type>();
});
+ arr = std::move(ret);
}
-template <typename BasicJsonType, typename ConstructibleArrayType,
- enable_if_t <
- is_constructible_array_type<BasicJsonType, ConstructibleArrayType>::value and
- not is_constructible_object_type<BasicJsonType, ConstructibleArrayType>::value and
- not is_constructible_string_type<BasicJsonType, ConstructibleArrayType>::value and
- not is_basic_json<ConstructibleArrayType>::value,
- int > = 0 >
-
+template < typename BasicJsonType, typename ConstructibleArrayType,
+ enable_if_t <
+ is_constructible_array_type<BasicJsonType, ConstructibleArrayType>::value&&
+ !is_constructible_object_type<BasicJsonType, ConstructibleArrayType>::value&&
+ !is_constructible_string_type<BasicJsonType, ConstructibleArrayType>::value&&
+ !std::is_same<ConstructibleArrayType, typename BasicJsonType::binary_t>::value&&
+ !is_basic_json<ConstructibleArrayType>::value,
+ int > = 0 >
auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr)
-> decltype(from_json_array_impl(j, arr, priority_tag<3> {}),
j.template get<typename ConstructibleArrayType::value_type>(),
void())
{
- if (JSON_UNLIKELY(not j.is_array()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, "type must be array, but is " +
std::string(j.type_name())));
@@ -1487,38 +3648,51 @@ void())
from_json_array_impl(j, arr, priority_tag<3> {});
}
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::binary_t& bin)
+{
+ if (JSON_HEDLEY_UNLIKELY(!j.is_binary()))
+ {
+ JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(j.type_name())));
+ }
+
+ bin = *j.template get_ptr<const typename BasicJsonType::binary_t*>();
+}
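
// Illustrative sketch (editorial, not part of the upstream patch):
// round-tripping the new binary value type, assuming the full header and its
// default `json` alias.

#include <cassert>

inline void example_binary_roundtrip()
{
    nlohmann::json j = nlohmann::json::binary({0xCA, 0xFE});
    nlohmann::json::binary_t b;
    j.get_to(b);                  // uses the binary_t from_json overload above
    assert(b == j.get_binary());  // same bytes come back out
}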
+
template<typename BasicJsonType, typename ConstructibleObjectType,
enable_if_t<is_constructible_object_type<BasicJsonType, ConstructibleObjectType>::value, int> = 0>
void from_json(const BasicJsonType& j, ConstructibleObjectType& obj)
{
- if (JSON_UNLIKELY(not j.is_object()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_object()))
{
JSON_THROW(type_error::create(302, "type must be object, but is " + std::string(j.type_name())));
}
+ ConstructibleObjectType ret;
auto inner_object = j.template get_ptr<const typename BasicJsonType::object_t*>();
using value_type = typename ConstructibleObjectType::value_type;
std::transform(
inner_object->begin(), inner_object->end(),
- std::inserter(obj, obj.begin()),
+ std::inserter(ret, ret.begin()),
[](typename BasicJsonType::object_t::value_type const & p)
{
return value_type(p.first, p.second.template get<typename ConstructibleObjectType::mapped_type>());
});
+ obj = std::move(ret);
}
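
// Editorial note on the change above: the object and array overloads now
// build into a local `ret` and move-assign only on success, so a throwing
// element conversion leaves the caller's container untouched, while the
// forward_list and map overloads clear() the target first; either way,
// deserializing into a non-empty container no longer leaves stale elements
// behind.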
// overload for arithmetic types, not chosen for basic_json template arguments
// (BooleanType, etc.); note: is it really necessary to provide explicit
// overloads for boolean_t etc. in case of a custom BooleanType which is not
// an arithmetic type?
-template<typename BasicJsonType, typename ArithmeticType,
- enable_if_t <
- std::is_arithmetic<ArithmeticType>::value and
- not std::is_same<ArithmeticType, typename BasicJsonType::number_unsigned_t>::value and
- not std::is_same<ArithmeticType, typename BasicJsonType::number_integer_t>::value and
- not std::is_same<ArithmeticType, typename BasicJsonType::number_float_t>::value and
- not std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
- int> = 0>
+template < typename BasicJsonType, typename ArithmeticType,
+ enable_if_t <
+ std::is_arithmetic<ArithmeticType>::value&&
+ !std::is_same<ArithmeticType, typename BasicJsonType::number_unsigned_t>::value&&
+ !std::is_same<ArithmeticType, typename BasicJsonType::number_integer_t>::value&&
+ !std::is_same<ArithmeticType, typename BasicJsonType::number_float_t>::value&&
+ !std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
+ int > = 0 >
void from_json(const BasicJsonType& j, ArithmeticType& val)
{
switch (static_cast<value_t>(j))
@@ -1567,18 +3741,19 @@ void from_json(const BasicJsonType& j, std::tuple<Args...>& t)
from_json_tuple_impl(j, t, index_sequence_for<Args...> {});
}
-template <typename BasicJsonType, typename Key, typename Value, typename Compare, typename Allocator,
- typename = enable_if_t<not std::is_constructible<
- typename BasicJsonType::string_t, Key>::value>>
+template < typename BasicJsonType, typename Key, typename Value, typename Compare, typename Allocator,
+ typename = enable_if_t < !std::is_constructible <
+ typename BasicJsonType::string_t, Key >::value >>
void from_json(const BasicJsonType& j, std::map<Key, Value, Compare, Allocator>& m)
{
- if (JSON_UNLIKELY(not j.is_array()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
}
+ m.clear();
for (const auto& p : j)
{
- if (JSON_UNLIKELY(not p.is_array()))
+ if (JSON_HEDLEY_UNLIKELY(!p.is_array()))
{
JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(p.type_name())));
}
@@ -1586,18 +3761,19 @@ void from_json(const BasicJsonType& j, std::map<Key, Value, Compare, Allocator>&
}
}
-template <typename BasicJsonType, typename Key, typename Value, typename Hash, typename KeyEqual, typename Allocator,
- typename = enable_if_t<not std::is_constructible<
- typename BasicJsonType::string_t, Key>::value>>
+template < typename BasicJsonType, typename Key, typename Value, typename Hash, typename KeyEqual, typename Allocator,
+ typename = enable_if_t < !std::is_constructible <
+ typename BasicJsonType::string_t, Key >::value >>
void from_json(const BasicJsonType& j, std::unordered_map<Key, Value, Hash, KeyEqual, Allocator>& m)
{
- if (JSON_UNLIKELY(not j.is_array()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_array()))
{
JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
}
+ m.clear();
for (const auto& p : j)
{
- if (JSON_UNLIKELY(not p.is_array()))
+ if (JSON_HEDLEY_UNLIKELY(!p.is_array()))
{
JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(p.type_name())));
}
@@ -1624,43 +3800,45 @@ namespace
{
constexpr const auto& from_json = detail::static_const<detail::from_json_fn>::value;
} // namespace
-} // namespace nlohmann
+} // namespace nlohmann
// #include <nlohmann/detail/conversions/to_json.hpp>
-#include <ciso646> // or, and, not
+#include <algorithm> // copy
#include <iterator> // begin, end
+#include <string> // string
#include <tuple> // tuple, get
#include <type_traits> // is_same, is_constructible, is_floating_point, is_enum, underlying_type
#include <utility> // move, forward, declval, pair
#include <valarray> // valarray
#include <vector> // vector
-// #include <nlohmann/detail/meta/cpp_future.hpp>
-
-// #include <nlohmann/detail/meta/type_traits.hpp>
-
-// #include <nlohmann/detail/value_t.hpp>
-
// #include <nlohmann/detail/iterators/iteration_proxy.hpp>
#include <cstddef> // size_t
-#include <string> // string, to_string
#include <iterator> // input_iterator_tag
+#include <string> // string, to_string
#include <tuple> // tuple_size, get, tuple_element
-// #include <nlohmann/detail/value_t.hpp>
-
// #include <nlohmann/detail/meta/type_traits.hpp>
+// #include <nlohmann/detail/value_t.hpp>
+
namespace nlohmann
{
namespace detail
{
-template <typename IteratorType> class iteration_proxy_value
+template<typename string_type>
+void int_to_string( string_type& target, std::size_t value )
+{
+ // For ADL
+ using std::to_string;
+ target = to_string(value);
+}
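// Illustrative sketch (editorial, not part of the upstream patch): the call
// site below invokes int_to_string unqualified, so one plausible hook for a
// custom key/string type is an ADL-found non-template overload in the type's
// own namespace (hypothetical `demo` namespace):

#include <cstddef>  // size_t
#include <string>   // string, to_string

namespace demo
{
struct my_string : std::string
{
    using std::string::string;
};

inline void int_to_string(my_string& target, std::size_t value)
{
    // non-template overload found via ADL on `target`, preferred over the
    // detail:: template for demo::my_string arguments
    target.assign(std::to_string(value));
}
} // namespace demo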
+template<typename IteratorType> class iteration_proxy_value
{
public:
using difference_type = std::ptrdiff_t;
@@ -1668,6 +3846,7 @@ template <typename IteratorType> class iteration_proxy_value
using pointer = value_type * ;
using reference = value_type & ;
using iterator_category = std::input_iterator_tag;
+ using string_type = typename std::remove_cv< typename std::remove_reference<decltype( std::declval<IteratorType>().key() ) >::type >::type;
private:
/// the iterator
@@ -1677,9 +3856,9 @@ template <typename IteratorType> class iteration_proxy_value
/// last stringified array index
mutable std::size_t array_index_last = 0;
/// a string representation of the array index
- mutable std::string array_index_str = "0";
+ mutable string_type array_index_str = "0";
/// an empty string (to return a reference for primitive values)
- const std::string empty_str = "";
+ const string_type empty_str = "";
public:
explicit iteration_proxy_value(IteratorType it) noexcept : anchor(it) {}
@@ -1700,21 +3879,21 @@ template <typename IteratorType> class iteration_proxy_value
}
/// equality operator (needed for InputIterator)
- bool operator==(const iteration_proxy_value& o) const noexcept
+ bool operator==(const iteration_proxy_value& o) const
{
return anchor == o.anchor;
}
/// inequality operator (needed for range-based for)
- bool operator!=(const iteration_proxy_value& o) const noexcept
+ bool operator!=(const iteration_proxy_value& o) const
{
return anchor != o.anchor;
}
/// return key of the iterator
- const std::string& key() const
+ const string_type& key() const
{
- assert(anchor.m_object != nullptr);
+ JSON_ASSERT(anchor.m_object != nullptr);
switch (anchor.m_object->type())
{
@@ -1723,7 +3902,7 @@ template <typename IteratorType> class iteration_proxy_value
{
if (array_index != array_index_last)
{
- array_index_str = std::to_string(array_index);
+ int_to_string( array_index_str, array_index );
array_index_last = array_index;
}
return array_index_str;
@@ -1773,7 +3952,7 @@ template<typename IteratorType> class iteration_proxy
// Structured Bindings Support
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
-template <std::size_t N, typename IteratorType, enable_if_t<N == 0, int> = 0>
+template<std::size_t N, typename IteratorType, enable_if_t<N == 0, int> = 0>
auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.key())
{
return i.key();
@@ -1781,7 +3960,7 @@ auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decl
// Structured Bindings Support
// For further reference see https://blog.tartanllama.xyz/structured-bindings/
// And see https://github.com/nlohmann/json/pull/1391
-template <std::size_t N, typename IteratorType, enable_if_t<N == 1, int> = 0>
+template<std::size_t N, typename IteratorType, enable_if_t<N == 1, int> = 0>
auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.value())
{
return i.value();
@@ -1795,11 +3974,16 @@ auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decl
// And see https://github.com/nlohmann/json/pull/1391
namespace std
{
-template <typename IteratorType>
+#if defined(__clang__)
+ // Fix: https://github.com/nlohmann/json/issues/1401
+ #pragma clang diagnostic push
+ #pragma clang diagnostic ignored "-Wmismatched-tags"
+#endif
+template<typename IteratorType>
class tuple_size<::nlohmann::detail::iteration_proxy_value<IteratorType>>
: public std::integral_constant<std::size_t, 2> {};
-template <std::size_t N, typename IteratorType>
+template<std::size_t N, typename IteratorType>
class tuple_element<N, ::nlohmann::detail::iteration_proxy_value<IteratorType >>
{
public:
@@ -1807,7 +3991,17 @@ class tuple_element<N, ::nlohmann::detail::iteration_proxy_value<IteratorType >>
get<N>(std::declval <
::nlohmann::detail::iteration_proxy_value<IteratorType >> ()));
};
-}
+#if defined(__clang__)
+ #pragma clang diagnostic pop
+#endif
+} // namespace std
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
namespace nlohmann
{
@@ -1850,9 +4044,9 @@ struct external_constructor<value_t::string>
j.assert_invariant();
}
- template<typename BasicJsonType, typename CompatibleStringType,
- enable_if_t<not std::is_same<CompatibleStringType, typename BasicJsonType::string_t>::value,
- int> = 0>
+ template < typename BasicJsonType, typename CompatibleStringType,
+ enable_if_t < !std::is_same<CompatibleStringType, typename BasicJsonType::string_t>::value,
+ int > = 0 >
static void construct(BasicJsonType& j, const CompatibleStringType& str)
{
j.m_type = value_t::string;
@@ -1862,6 +4056,28 @@ struct external_constructor<value_t::string>
};
template<>
+struct external_constructor<value_t::binary>
+{
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, const typename BasicJsonType::binary_t& b)
+ {
+ j.m_type = value_t::binary;
+ typename BasicJsonType::binary_t value{b};
+ j.m_value = value;
+ j.assert_invariant();
+ }
+
+ template<typename BasicJsonType>
+ static void construct(BasicJsonType& j, typename BasicJsonType::binary_t&& b)
+ {
+ j.m_type = value_t::binary;
+ typename BasicJsonType::binary_t value{std::move(b)};
+ j.m_value = value;
+ j.assert_invariant();
+ }
+};
+
+template<>
struct external_constructor<value_t::number_float>
{
template<typename BasicJsonType>
@@ -1916,9 +4132,9 @@ struct external_constructor<value_t::array>
j.assert_invariant();
}
- template<typename BasicJsonType, typename CompatibleArrayType,
- enable_if_t<not std::is_same<CompatibleArrayType, typename BasicJsonType::array_t>::value,
- int> = 0>
+ template < typename BasicJsonType, typename CompatibleArrayType,
+ enable_if_t < !std::is_same<CompatibleArrayType, typename BasicJsonType::array_t>::value,
+ int > = 0 >
static void construct(BasicJsonType& j, const CompatibleArrayType& arr)
{
using std::begin;
@@ -1948,7 +4164,10 @@ struct external_constructor<value_t::array>
j.m_type = value_t::array;
j.m_value = value_t::array;
j.m_value.array->resize(arr.size());
- std::copy(std::begin(arr), std::end(arr), j.m_value.array->begin());
+ if (arr.size() > 0)
+ {
+ std::copy(std::begin(arr), std::end(arr), j.m_value.array->begin());
+ }
j.assert_invariant();
}
};
@@ -1972,8 +4191,8 @@ struct external_constructor<value_t::object>
j.assert_invariant();
}
- template<typename BasicJsonType, typename CompatibleObjectType,
- enable_if_t<not std::is_same<CompatibleObjectType, typename BasicJsonType::object_t>::value, int> = 0>
+ template < typename BasicJsonType, typename CompatibleObjectType,
+ enable_if_t < !std::is_same<CompatibleObjectType, typename BasicJsonType::object_t>::value, int > = 0 >
static void construct(BasicJsonType& j, const CompatibleObjectType& obj)
{
using std::begin;
@@ -2044,19 +4263,25 @@ void to_json(BasicJsonType& j, const std::vector<bool>& e)
external_constructor<value_t::array>::construct(j, e);
}
-template <typename BasicJsonType, typename CompatibleArrayType,
- enable_if_t<is_compatible_array_type<BasicJsonType,
- CompatibleArrayType>::value and
- not is_compatible_object_type<
- BasicJsonType, CompatibleArrayType>::value and
- not is_compatible_string_type<BasicJsonType, CompatibleArrayType>::value and
- not is_basic_json<CompatibleArrayType>::value,
- int> = 0>
+template < typename BasicJsonType, typename CompatibleArrayType,
+ enable_if_t < is_compatible_array_type<BasicJsonType,
+ CompatibleArrayType>::value&&
+ !is_compatible_object_type<BasicJsonType, CompatibleArrayType>::value&&
+ !is_compatible_string_type<BasicJsonType, CompatibleArrayType>::value&&
+ !std::is_same<typename BasicJsonType::binary_t, CompatibleArrayType>::value&&
+ !is_basic_json<CompatibleArrayType>::value,
+ int > = 0 >
void to_json(BasicJsonType& j, const CompatibleArrayType& arr)
{
external_constructor<value_t::array>::construct(j, arr);
}
+template<typename BasicJsonType>
+void to_json(BasicJsonType& j, const typename BasicJsonType::binary_t& bin)
+{
+ external_constructor<value_t::binary>::construct(j, bin);
+}
+
template<typename BasicJsonType, typename T,
enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
void to_json(BasicJsonType& j, const std::valarray<T>& arr)
@@ -2070,8 +4295,8 @@ void to_json(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
external_constructor<value_t::array>::construct(j, std::move(arr));
}
-template<typename BasicJsonType, typename CompatibleObjectType,
- enable_if_t<is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value and not is_basic_json<CompatibleObjectType>::value, int> = 0>
+template < typename BasicJsonType, typename CompatibleObjectType,
+ enable_if_t < is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value&& !is_basic_json<CompatibleObjectType>::value, int > = 0 >
void to_json(BasicJsonType& j, const CompatibleObjectType& obj)
{
external_constructor<value_t::object>::construct(j, obj);
@@ -2085,23 +4310,23 @@ void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
template <
typename BasicJsonType, typename T, std::size_t N,
- enable_if_t<not std::is_constructible<typename BasicJsonType::string_t,
- const T(&)[N]>::value,
- int> = 0 >
+ enable_if_t < !std::is_constructible<typename BasicJsonType::string_t,
+ const T(&)[N]>::value,
+ int > = 0 >
void to_json(BasicJsonType& j, const T(&arr)[N])
{
external_constructor<value_t::array>::construct(j, arr);
}
-template<typename BasicJsonType, typename... Args>
-void to_json(BasicJsonType& j, const std::pair<Args...>& p)
+template < typename BasicJsonType, typename T1, typename T2, enable_if_t < std::is_constructible<BasicJsonType, T1>::value&& std::is_constructible<BasicJsonType, T2>::value, int > = 0 >
+void to_json(BasicJsonType& j, const std::pair<T1, T2>& p)
{
j = { p.first, p.second };
}
// for https://github.com/nlohmann/json/pull/1134
-template < typename BasicJsonType, typename T,
- enable_if_t<std::is_same<T, iteration_proxy_value<typename BasicJsonType::iterator>>::value, int> = 0>
+template<typename BasicJsonType, typename T,
+ enable_if_t<std::is_same<T, iteration_proxy_value<typename BasicJsonType::iterator>>::value, int> = 0>
void to_json(BasicJsonType& j, const T& b)
{
j = { {b.key(), b.value()} };
@@ -2113,10 +4338,10 @@ void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence<Idx...>
j = { std::get<Idx>(t)... };
}
-template<typename BasicJsonType, typename... Args>
-void to_json(BasicJsonType& j, const std::tuple<Args...>& t)
+template<typename BasicJsonType, typename T, enable_if_t<is_constructible_tuple<BasicJsonType, T>::value, int > = 0>
+void to_json(BasicJsonType& j, const T& t)
{
- to_json_tuple_impl(j, t, index_sequence_for<Args...> {});
+ to_json_tuple_impl(j, t, make_index_sequence<std::tuple_size<T>::value> {});
}
struct to_json_fn
@@ -2135,13 +4360,368 @@ namespace
{
constexpr const auto& to_json = detail::static_const<detail::to_json_fn>::value;
} // namespace
+} // namespace nlohmann
+
+
+namespace nlohmann
+{
+
+template<typename, typename>
+struct adl_serializer
+{
+ /*!
+ @brief convert a JSON value to any value type
+
+ This function is usually called by the `get()` function of the
+ @ref basic_json class (either explicit or via conversion operators).
+
+ @param[in] j JSON value to read from
+ @param[in,out] val value to write to
+ */
+ template<typename BasicJsonType, typename ValueType>
+ static auto from_json(BasicJsonType&& j, ValueType& val) noexcept(
+ noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), val)))
+ -> decltype(::nlohmann::from_json(std::forward<BasicJsonType>(j), val), void())
+ {
+ ::nlohmann::from_json(std::forward<BasicJsonType>(j), val);
+ }
+
+ /*!
+ @brief convert any value type to a JSON value
+
+ This function is usually called by the constructors of the @ref basic_json
+ class.
+
+ @param[in,out] j JSON value to write to
+ @param[in] val value to read from
+ */
+ template<typename BasicJsonType, typename ValueType>
+ static auto to_json(BasicJsonType& j, ValueType&& val) noexcept(
+ noexcept(::nlohmann::to_json(j, std::forward<ValueType>(val))))
+ -> decltype(::nlohmann::to_json(j, std::forward<ValueType>(val)), void())
+ {
+ ::nlohmann::to_json(j, std::forward<ValueType>(val));
+ }
+};
+
} // namespace nlohmann
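
// Illustrative sketch (editorial, not part of the upstream patch): the primary
// adl_serializer above forwards to from_json/to_json found via ADL, so a user
// type opts in by defining those two functions in its own namespace
// (hypothetical `demo` namespace, assuming the full header and default alias):

namespace demo
{
struct point
{
    double x;
    double y;
};

inline void to_json(nlohmann::json& j, const point& p)
{
    j = nlohmann::json{{"x", p.x}, {"y", p.y}};
}

inline void from_json(const nlohmann::json& j, point& p)
{
    j.at("x").get_to(p.x);
    j.at("y").get_to(p.y);
}
} // namespace demo
// With these in place, nlohmann::json(demo::point{1, 2}) and
// j.get<demo::point>() both work through adl_serializer.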
+// #include <nlohmann/byte_container_with_subtype.hpp>
+
+
+#include <cstdint> // uint8_t
+#include <tuple> // tie
+#include <utility> // move
+
+namespace nlohmann
+{
+
+/*!
+@brief an internal type for a backed binary type
+
+This type extends the template parameter @a BinaryType provided to `basic_json`
+with a subtype used by BSON and MessagePack. This type exists so that the user
+does not have to specify a type themselves with a specific naming scheme in
+order to override the binary type.
+
+@tparam BinaryType container to store bytes (`std::vector<std::uint8_t>` by
+ default)
+
+@since version 3.8.0
+*/
+template<typename BinaryType>
+class byte_container_with_subtype : public BinaryType
+{
+ public:
+ /// the type of the underlying container
+ using container_type = BinaryType;
+
+ byte_container_with_subtype() noexcept(noexcept(container_type()))
+ : container_type()
+ {}
+
+ byte_container_with_subtype(const container_type& b) noexcept(noexcept(container_type(b)))
+ : container_type(b)
+ {}
+
+ byte_container_with_subtype(container_type&& b) noexcept(noexcept(container_type(std::move(b))))
+ : container_type(std::move(b))
+ {}
+
+ byte_container_with_subtype(const container_type& b, std::uint8_t subtype) noexcept(noexcept(container_type(b)))
+ : container_type(b)
+ , m_subtype(subtype)
+ , m_has_subtype(true)
+ {}
+
+ byte_container_with_subtype(container_type&& b, std::uint8_t subtype) noexcept(noexcept(container_type(std::move(b))))
+ : container_type(std::move(b))
+ , m_subtype(subtype)
+ , m_has_subtype(true)
+ {}
+
+ bool operator==(const byte_container_with_subtype& rhs) const
+ {
+ return std::tie(static_cast<const BinaryType&>(*this), m_subtype, m_has_subtype) ==
+ std::tie(static_cast<const BinaryType&>(rhs), rhs.m_subtype, rhs.m_has_subtype);
+ }
+
+ bool operator!=(const byte_container_with_subtype& rhs) const
+ {
+ return !(rhs == *this);
+ }
+
+ /*!
+ @brief sets the binary subtype
+
+ Sets the binary subtype of the value, also flags a binary JSON value as
+ having a subtype, which has implications for serialization.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @sa @ref subtype() -- return the binary subtype
+ @sa @ref clear_subtype() -- clears the binary subtype
+ @sa @ref has_subtype() -- returns whether or not the binary value has a
+ subtype
+
+ @since version 3.8.0
+ */
+ void set_subtype(std::uint8_t subtype) noexcept
+ {
+ m_subtype = subtype;
+ m_has_subtype = true;
+ }
+
+ /*!
+ @brief return the binary subtype
+
+ Returns the numerical subtype of the value if it has a subtype. If it does
+ not have a subtype, this function returns 0 (the default subtype); use @ref
+ has_subtype() to distinguish that case from an explicit subtype of 0.
+
+ @return the numerical subtype of the binary value
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @sa @ref set_subtype() -- sets the binary subtype
+ @sa @ref clear_subtype() -- clears the binary subtype
+ @sa @ref has_subtype() -- returns whether or not the binary value has a
+ subtype
+
+ @since version 3.8.0
+ */
+ constexpr std::uint8_t subtype() const noexcept
+ {
+ return m_subtype;
+ }
+
+ /*!
+ @brief return whether the value has a subtype
+
+ @return whether the value has a subtype
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @sa @ref subtype() -- return the binary subtype
+ @sa @ref set_subtype() -- sets the binary subtype
+ @sa @ref clear_subtype() -- clears the binary subtype
+
+ @since version 3.8.0
+ */
+ constexpr bool has_subtype() const noexcept
+ {
+ return m_has_subtype;
+ }
+
+ /*!
+ @brief clears the binary subtype
+
+ Clears the binary subtype and flags the value as not having a subtype, which
+ has implications for serialization; for instance MessagePack will prefer the
+ bin family over the ext family.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @sa @ref subtype() -- return the binary subtype
+ @sa @ref set_subtype() -- sets the binary subtype
+ @sa @ref has_subtype() -- returns whether or not the binary value has a
+ subtype
+
+ @since version 3.8.0
+ */
+ void clear_subtype() noexcept
+ {
+ m_subtype = 0;
+ m_has_subtype = false;
+ }
+
+ private:
+ std::uint8_t m_subtype = 0;
+ bool m_has_subtype = false;
+};
+
+} // namespace nlohmann
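
// Illustrative sketch (editorial, not part of the upstream patch): exercising
// the subtype API on the container defined above.

#include <cstdint> // uint8_t
#include <vector>  // vector

inline void example_subtype()
{
    using bin_t = nlohmann::byte_container_with_subtype<std::vector<std::uint8_t>>;
    bin_t bytes{{0xDE, 0xAD}};
    bytes.set_subtype(0x10);       // e.g. a MessagePack ext type tag
    if (bytes.has_subtype())
    {
        bytes.clear_subtype();     // back to a plain, untagged byte blob
    }
}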
+
+// #include <nlohmann/detail/conversions/from_json.hpp>
+
+// #include <nlohmann/detail/conversions/to_json.hpp>
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/hash.hpp>
+
+
+#include <cstddef> // size_t, uint8_t
+#include <functional> // hash
+
+namespace nlohmann
+{
+namespace detail
+{
+
+// boost::hash_combine
+inline std::size_t combine(std::size_t seed, std::size_t h) noexcept
+{
+ seed ^= h + 0x9e3779b9 + (seed << 6U) + (seed >> 2U);
+ return seed;
+}
+
+/*!
+@brief hash a JSON value
+
+The hash function tries to rely on std::hash where possible. Furthermore, the
+type of the JSON value is taken into account to have different hash values for
+null, 0, 0U, and false, etc.
+
+@tparam BasicJsonType basic_json specialization
+@param j JSON value to hash
+@return hash value of j
+*/
+template<typename BasicJsonType>
+std::size_t hash(const BasicJsonType& j)
+{
+ using string_t = typename BasicJsonType::string_t;
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+
+ const auto type = static_cast<std::size_t>(j.type());
+ switch (j.type())
+ {
+ case BasicJsonType::value_t::null:
+ case BasicJsonType::value_t::discarded:
+ {
+ return combine(type, 0);
+ }
+
+ case BasicJsonType::value_t::object:
+ {
+ auto seed = combine(type, j.size());
+ for (const auto& element : j.items())
+ {
+ const auto h = std::hash<string_t> {}(element.key());
+ seed = combine(seed, h);
+ seed = combine(seed, hash(element.value()));
+ }
+ return seed;
+ }
+
+ case BasicJsonType::value_t::array:
+ {
+ auto seed = combine(type, j.size());
+ for (const auto& element : j)
+ {
+ seed = combine(seed, hash(element));
+ }
+ return seed;
+ }
+
+ case BasicJsonType::value_t::string:
+ {
+ const auto h = std::hash<string_t> {}(j.template get_ref<const string_t&>());
+ return combine(type, h);
+ }
+
+ case BasicJsonType::value_t::boolean:
+ {
+ const auto h = std::hash<bool> {}(j.template get<bool>());
+ return combine(type, h);
+ }
+
+ case BasicJsonType::value_t::number_integer:
+ {
+ const auto h = std::hash<number_integer_t> {}(j.template get<number_integer_t>());
+ return combine(type, h);
+ }
+
+ case nlohmann::detail::value_t::number_unsigned:
+ {
+ const auto h = std::hash<number_unsigned_t> {}(j.template get<number_unsigned_t>());
+ return combine(type, h);
+ }
+
+ case nlohmann::detail::value_t::number_float:
+ {
+ const auto h = std::hash<number_float_t> {}(j.template get<number_float_t>());
+ return combine(type, h);
+ }
+
+ case nlohmann::detail::value_t::binary:
+ {
+ auto seed = combine(type, j.get_binary().size());
+ const auto h = std::hash<bool> {}(j.get_binary().has_subtype());
+ seed = combine(seed, h);
+ seed = combine(seed, j.get_binary().subtype());
+ for (const auto byte : j.get_binary())
+ {
+ seed = combine(seed, std::hash<std::uint8_t> {}(byte));
+ }
+ return seed;
+ }
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+}
+
+} // namespace detail
+} // namespace nlohmann
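
// Illustrative sketch (editorial, not part of the upstream patch):
// detail::hash is what the std::hash<nlohmann::json> specialization delegates
// to; since the type tag is combined into the seed before the payload, null,
// 0, 0U, and false are all intended to hash differently even though their
// payloads are all "zero". Assuming the full header and its default alias:

inline std::size_t example_hash(const nlohmann::json& j)
{
    return nlohmann::detail::hash(j);
}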
+
+// #include <nlohmann/detail/input/binary_reader.hpp>
+
+
+#include <algorithm> // generate_n
+#include <array> // array
+#include <cmath> // ldexp
+#include <cstddef> // size_t
+#include <cstdint> // uint8_t, uint16_t, uint32_t, uint64_t
+#include <cstdio> // snprintf
+#include <cstring> // memcpy
+#include <iterator> // back_inserter
+#include <limits> // numeric_limits
+#include <string> // char_traits, string
+#include <utility> // make_pair, move
+
+// #include <nlohmann/detail/exceptions.hpp>
+
// #include <nlohmann/detail/input/input_adapters.hpp>
-#include <cassert> // assert
+#include <array> // array
#include <cstddef> // size_t
+#include <cstdio> //FILE *
#include <cstring> // strlen
#include <istream> // istream
#include <iterator> // begin, end, iterator_traits, random_access_iterator_tag, distance, next
@@ -2150,7 +4730,8 @@ constexpr const auto& to_json = detail::static_const<detail::to_json_fn>::value;
#include <string> // string, char_traits
#include <type_traits> // enable_if, is_base_of, is_pointer, is_integral, remove_pointer
#include <utility> // pair, declval
-#include <cstdio> //FILE *
+
+// #include <nlohmann/detail/iterators/iterator_traits.hpp>
// #include <nlohmann/detail/macro_scope.hpp>
@@ -2167,41 +4748,30 @@ enum class input_format_t { json, cbor, msgpack, ubjson, bson };
////////////////////
/*!
-@brief abstract input adapter interface
-
-Produces a stream of std::char_traits<char>::int_type characters from a
-std::istream, a buffer, or some other input type. Accepts the return of
-exactly one non-EOF character for future input. The int_type characters
-returned consist of all valid char values as positive values (typically
-unsigned char), plus an EOF value outside that range, specified by the value
-of the function std::char_traits<char>::eof(). This value is typically -1, but
-could be any arbitrary value which is not a valid char value.
-*/
-struct input_adapter_protocol
-{
- /// get a character [0,255] or std::char_traits<char>::eof().
- virtual std::char_traits<char>::int_type get_character() = 0;
- virtual ~input_adapter_protocol() = default;
-};
-
-/// a type to simplify interfaces
-using input_adapter_t = std::shared_ptr<input_adapter_protocol>;
-
-/*!
Input adapter for stdio file access. This adapter reads only one byte at a time
and does not use a buffer. It is a very low-level adapter.
*/
-class file_input_adapter : public input_adapter_protocol
+class file_input_adapter
{
public:
- explicit file_input_adapter(std::FILE* f) noexcept
+ using char_type = char;
+
+ JSON_HEDLEY_NON_NULL(2)
+ explicit file_input_adapter(std::FILE* f) noexcept
: m_file(f)
{}
- std::char_traits<char>::int_type get_character() noexcept override
+ // make class move-only
+ file_input_adapter(const file_input_adapter&) = delete;
+ file_input_adapter(file_input_adapter&&) = default;
+ file_input_adapter& operator=(const file_input_adapter&) = delete;
+ file_input_adapter& operator=(file_input_adapter&&) = delete;
+
+ std::char_traits<char>::int_type get_character() noexcept
{
return std::fgetc(m_file);
}
+
private:
/// the file pointer to read from
std::FILE* m_file;
@@ -2217,87 +4787,111 @@ characters following those used in parsing the JSON input. Clears the
std::istream flags; any input errors (e.g., EOF) will be detected by the first
subsequent call for input from the std::istream.
*/
-class input_stream_adapter : public input_adapter_protocol
+class input_stream_adapter
{
public:
- ~input_stream_adapter() override
+ using char_type = char;
+
+ ~input_stream_adapter()
{
// clear stream flags; we use the underlying streambuf I/O and do not
// maintain istream flags, except eof
- is.clear(is.rdstate() & std::ios::eofbit);
+ if (is != nullptr)
+ {
+ is->clear(is->rdstate() & std::ios::eofbit);
+ }
}
explicit input_stream_adapter(std::istream& i)
- : is(i), sb(*i.rdbuf())
+ : is(&i), sb(i.rdbuf())
{}
// delete because of pointer members
input_stream_adapter(const input_stream_adapter&) = delete;
input_stream_adapter& operator=(input_stream_adapter&) = delete;
- input_stream_adapter(input_stream_adapter&&) = delete;
- input_stream_adapter& operator=(input_stream_adapter&&) = delete;
+ input_stream_adapter& operator=(input_stream_adapter&& rhs) = delete;
+
+ input_stream_adapter(input_stream_adapter&& rhs) noexcept : is(rhs.is), sb(rhs.sb)
+ {
+ rhs.is = nullptr;
+ rhs.sb = nullptr;
+ }
// std::istream/std::streambuf use std::char_traits<char>::to_int_type, to
// ensure that std::char_traits<char>::eof() and the character 0xFF do not
// end up as the same value, e.g. 0xFFFFFFFF.
- std::char_traits<char>::int_type get_character() override
+ std::char_traits<char>::int_type get_character()
{
- auto res = sb.sbumpc();
+ auto res = sb->sbumpc();
// set eof manually, as we don't use the istream interface.
- if (res == EOF)
+ if (JSON_HEDLEY_UNLIKELY(res == EOF))
{
- is.clear(is.rdstate() | std::ios::eofbit);
+ is->clear(is->rdstate() | std::ios::eofbit);
}
return res;
}
private:
/// the associated input stream
- std::istream& is;
- std::streambuf& sb;
+ std::istream* is = nullptr;
+ std::streambuf* sb = nullptr;
};
-/// input adapter for buffer input
-class input_buffer_adapter : public input_adapter_protocol
+// General-purpose iterator-based adapter. It might not be as fast as
+// theoretically possible for some containers, but it is extremely versatile.
+template<typename IteratorType>
+class iterator_input_adapter
{
public:
- input_buffer_adapter(const char* b, const std::size_t l) noexcept
- : cursor(b), limit(b + l)
- {}
+ using char_type = typename std::iterator_traits<IteratorType>::value_type;
- // delete because of pointer members
- input_buffer_adapter(const input_buffer_adapter&) = delete;
- input_buffer_adapter& operator=(input_buffer_adapter&) = delete;
- input_buffer_adapter(input_buffer_adapter&&) = delete;
- input_buffer_adapter& operator=(input_buffer_adapter&&) = delete;
- ~input_buffer_adapter() override = default;
+ iterator_input_adapter(IteratorType first, IteratorType last)
+ : current(std::move(first)), end(std::move(last)) {}
- std::char_traits<char>::int_type get_character() noexcept override
+ typename std::char_traits<char_type>::int_type get_character()
{
- if (JSON_LIKELY(cursor < limit))
+ if (JSON_HEDLEY_LIKELY(current != end))
{
- return std::char_traits<char>::to_int_type(*(cursor++));
+ auto result = std::char_traits<char_type>::to_int_type(*current);
+ std::advance(current, 1);
+ return result;
+ }
+ else
+ {
+ return std::char_traits<char_type>::eof();
}
-
- return std::char_traits<char>::eof();
}
private:
- /// pointer to the current character
- const char* cursor;
- /// pointer past the last character
- const char* const limit;
+ IteratorType current;
+ IteratorType end;
+
+ template<typename BaseInputAdapter, size_t T>
+ friend struct wide_string_input_helper;
+
+ bool empty() const
+ {
+ return current == end;
+ }
+
};
-template<typename WideStringType, size_t T>
-struct wide_string_input_helper
+
+template<typename BaseInputAdapter, size_t T>
+struct wide_string_input_helper;
+
+template<typename BaseInputAdapter>
+struct wide_string_input_helper<BaseInputAdapter, 4>
{
// UTF-32
- static void fill_buffer(const WideStringType& str, size_t& current_wchar, std::array<std::char_traits<char>::int_type, 4>& utf8_bytes, size_t& utf8_bytes_index, size_t& utf8_bytes_filled)
+ static void fill_buffer(BaseInputAdapter& input,
+ std::array<std::char_traits<char>::int_type, 4>& utf8_bytes,
+ size_t& utf8_bytes_index,
+ size_t& utf8_bytes_filled)
{
utf8_bytes_index = 0;
- if (current_wchar == str.size())
+ if (JSON_HEDLEY_UNLIKELY(input.empty()))
{
utf8_bytes[0] = std::char_traits<char>::eof();
utf8_bytes_filled = 1;
@@ -2305,54 +4899,57 @@ struct wide_string_input_helper
else
{
// get the current character
- const auto wc = static_cast<int>(str[current_wchar++]);
+ const auto wc = input.get_character();
// UTF-32 to UTF-8 encoding
if (wc < 0x80)
{
- utf8_bytes[0] = wc;
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
utf8_bytes_filled = 1;
}
else if (wc <= 0x7FF)
{
- utf8_bytes[0] = 0xC0 | ((wc >> 6) & 0x1F);
- utf8_bytes[1] = 0x80 | (wc & 0x3F);
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xC0u | ((static_cast<unsigned int>(wc) >> 6u) & 0x1Fu));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
utf8_bytes_filled = 2;
}
else if (wc <= 0xFFFF)
{
- utf8_bytes[0] = 0xE0 | ((wc >> 12) & 0x0F);
- utf8_bytes[1] = 0x80 | ((wc >> 6) & 0x3F);
- utf8_bytes[2] = 0x80 | (wc & 0x3F);
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xE0u | ((static_cast<unsigned int>(wc) >> 12u) & 0x0Fu));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
+ utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
utf8_bytes_filled = 3;
}
else if (wc <= 0x10FFFF)
{
- utf8_bytes[0] = 0xF0 | ((wc >> 18) & 0x07);
- utf8_bytes[1] = 0x80 | ((wc >> 12) & 0x3F);
- utf8_bytes[2] = 0x80 | ((wc >> 6) & 0x3F);
- utf8_bytes[3] = 0x80 | (wc & 0x3F);
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xF0u | ((static_cast<unsigned int>(wc) >> 18u) & 0x07u));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 12u) & 0x3Fu));
+ utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
+ utf8_bytes[3] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
utf8_bytes_filled = 4;
}
else
{
// unknown character
- utf8_bytes[0] = wc;
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
utf8_bytes_filled = 1;
}
}
}
};
-template<typename WideStringType>
-struct wide_string_input_helper<WideStringType, 2>
+template<typename BaseInputAdapter>
+struct wide_string_input_helper<BaseInputAdapter, 2>
{
// UTF-16
- static void fill_buffer(const WideStringType& str, size_t& current_wchar, std::array<std::char_traits<char>::int_type, 4>& utf8_bytes, size_t& utf8_bytes_index, size_t& utf8_bytes_filled)
+ static void fill_buffer(BaseInputAdapter& input,
+ std::array<std::char_traits<char>::int_type, 4>& utf8_bytes,
+ size_t& utf8_bytes_index,
+ size_t& utf8_bytes_filled)
{
utf8_bytes_index = 0;
- if (current_wchar == str.size())
+ if (JSON_HEDLEY_UNLIKELY(input.empty()))
{
utf8_bytes[0] = std::char_traits<char>::eof();
utf8_bytes_filled = 1;
@@ -2360,44 +4957,42 @@ struct wide_string_input_helper<WideStringType, 2>
else
{
// get the current character
- const auto wc = static_cast<int>(str[current_wchar++]);
+ const auto wc = input.get_character();
// UTF-16 to UTF-8 encoding
if (wc < 0x80)
{
- utf8_bytes[0] = wc;
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
utf8_bytes_filled = 1;
}
else if (wc <= 0x7FF)
{
- utf8_bytes[0] = 0xC0 | ((wc >> 6));
- utf8_bytes[1] = 0x80 | (wc & 0x3F);
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xC0u | ((static_cast<unsigned int>(wc) >> 6u)));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
utf8_bytes_filled = 2;
}
- else if (0xD800 > wc or wc >= 0xE000)
+ else if (0xD800 > wc || wc >= 0xE000)
{
- utf8_bytes[0] = 0xE0 | ((wc >> 12));
- utf8_bytes[1] = 0x80 | ((wc >> 6) & 0x3F);
- utf8_bytes[2] = 0x80 | (wc & 0x3F);
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xE0u | ((static_cast<unsigned int>(wc) >> 12u)));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((static_cast<unsigned int>(wc) >> 6u) & 0x3Fu));
+ utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | (static_cast<unsigned int>(wc) & 0x3Fu));
utf8_bytes_filled = 3;
}
else
{
- if (current_wchar < str.size())
+ if (JSON_HEDLEY_UNLIKELY(!input.empty()))
{
- const auto wc2 = static_cast<int>(str[current_wchar++]);
- const int charcode = 0x10000 + (((wc & 0x3FF) << 10) | (wc2 & 0x3FF));
- utf8_bytes[0] = 0xf0 | (charcode >> 18);
- utf8_bytes[1] = 0x80 | ((charcode >> 12) & 0x3F);
- utf8_bytes[2] = 0x80 | ((charcode >> 6) & 0x3F);
- utf8_bytes[3] = 0x80 | (charcode & 0x3F);
+ const auto wc2 = static_cast<unsigned int>(input.get_character());
+ const auto charcode = 0x10000u + (((static_cast<unsigned int>(wc) & 0x3FFu) << 10u) | (wc2 & 0x3FFu));
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(0xF0u | (charcode >> 18u));
+ utf8_bytes[1] = static_cast<std::char_traits<char>::int_type>(0x80u | ((charcode >> 12u) & 0x3Fu));
+ utf8_bytes[2] = static_cast<std::char_traits<char>::int_type>(0x80u | ((charcode >> 6u) & 0x3Fu));
+ utf8_bytes[3] = static_cast<std::char_traits<char>::int_type>(0x80u | (charcode & 0x3Fu));
utf8_bytes_filled = 4;
}
else
{
- // unknown character
- ++current_wchar;
- utf8_bytes[0] = wc;
+ utf8_bytes[0] = static_cast<std::char_traits<char>::int_type>(wc);
utf8_bytes_filled = 1;
}
}
@@ -2405,44 +5000,42 @@ struct wide_string_input_helper<WideStringType, 2>
}
};
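
// Worked example for the surrogate-pair branch above (editorial, not part of
// the patch): U+1F600 arrives as the UTF-16 pair wc = 0xD83D, wc2 = 0xDE00:
//   charcode = 0x10000 + (((0xD83D & 0x3FF) << 10) | (0xDE00 & 0x3FF))
//            = 0x10000 + ((0x3D << 10) | 0x200)
//            = 0x10000 + 0xF600
//            = 0x1F600
// which the four assignments above emit as the UTF-8 bytes 0xF0 0x9F 0x98 0x80.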
-template<typename WideStringType>
-class wide_string_input_adapter : public input_adapter_protocol
+// Wraps another input adapter to convert wide character types into individual bytes.
+template<typename BaseInputAdapter, typename WideCharType>
+class wide_string_input_adapter
{
public:
- explicit wide_string_input_adapter(const WideStringType& w) noexcept
- : str(w)
- {}
+ using char_type = char;
- std::char_traits<char>::int_type get_character() noexcept override
+ wide_string_input_adapter(BaseInputAdapter base)
+ : base_adapter(base) {}
+
+ typename std::char_traits<char>::int_type get_character() noexcept
{
// check if buffer needs to be filled
if (utf8_bytes_index == utf8_bytes_filled)
{
- fill_buffer<sizeof(typename WideStringType::value_type)>();
+ fill_buffer<sizeof(WideCharType)>();
- assert(utf8_bytes_filled > 0);
- assert(utf8_bytes_index == 0);
+ JSON_ASSERT(utf8_bytes_filled > 0);
+ JSON_ASSERT(utf8_bytes_index == 0);
}
// use buffer
- assert(utf8_bytes_filled > 0);
- assert(utf8_bytes_index < utf8_bytes_filled);
+ JSON_ASSERT(utf8_bytes_filled > 0);
+ JSON_ASSERT(utf8_bytes_index < utf8_bytes_filled);
return utf8_bytes[utf8_bytes_index++];
}
private:
+ BaseInputAdapter base_adapter;
+
template<size_t T>
void fill_buffer()
{
- wide_string_input_helper<WideStringType, T>::fill_buffer(str, current_wchar, utf8_bytes, utf8_bytes_index, utf8_bytes_filled);
+ wide_string_input_helper<BaseInputAdapter, T>::fill_buffer(base_adapter, utf8_bytes, utf8_bytes_index, utf8_bytes_filled);
}
- /// the wstring to process
- const WideStringType& str;
-
- /// index of the current wchar in str
- std::size_t current_wchar = 0;
-
/// a buffer for UTF-8 bytes
std::array<std::char_traits<char>::int_type, 4> utf8_bytes = {{0, 0, 0, 0}};
@@ -2452,132 +5045,853 @@ class wide_string_input_adapter : public input_adapter_protocol
std::size_t utf8_bytes_filled = 0;
};
-class input_adapter
+
+template<typename IteratorType, typename Enable = void>
+struct iterator_input_adapter_factory
{
- public:
- // native support
- input_adapter(std::FILE* file)
- : ia(std::make_shared<file_input_adapter>(file)) {}
- /// input adapter for input stream
- input_adapter(std::istream& i)
- : ia(std::make_shared<input_stream_adapter>(i)) {}
+ using iterator_type = IteratorType;
+ using char_type = typename std::iterator_traits<iterator_type>::value_type;
+ using adapter_type = iterator_input_adapter<iterator_type>;
- /// input adapter for input stream
- input_adapter(std::istream&& i)
- : ia(std::make_shared<input_stream_adapter>(i)) {}
+ static adapter_type create(IteratorType first, IteratorType last)
+ {
+ return adapter_type(std::move(first), std::move(last));
+ }
+};
- input_adapter(const std::wstring& ws)
- : ia(std::make_shared<wide_string_input_adapter<std::wstring>>(ws)) {}
+template<typename T>
+struct is_iterator_of_multibyte
+{
+ using value_type = typename std::iterator_traits<T>::value_type;
+ enum
+ {
+ value = sizeof(value_type) > 1
+ };
+};
- input_adapter(const std::u16string& ws)
- : ia(std::make_shared<wide_string_input_adapter<std::u16string>>(ws)) {}
+template<typename IteratorType>
+struct iterator_input_adapter_factory<IteratorType, enable_if_t<is_iterator_of_multibyte<IteratorType>::value>>
+{
+ using iterator_type = IteratorType;
+ using char_type = typename std::iterator_traits<iterator_type>::value_type;
+ using base_adapter_type = iterator_input_adapter<iterator_type>;
+ using adapter_type = wide_string_input_adapter<base_adapter_type, char_type>;
- input_adapter(const std::u32string& ws)
- : ia(std::make_shared<wide_string_input_adapter<std::u32string>>(ws)) {}
+ static adapter_type create(IteratorType first, IteratorType last)
+ {
+ return adapter_type(base_adapter_type(std::move(first), std::move(last)));
+ }
+};
- /// input adapter for buffer
- template<typename CharT,
- typename std::enable_if<
- std::is_pointer<CharT>::value and
- std::is_integral<typename std::remove_pointer<CharT>::type>::value and
- sizeof(typename std::remove_pointer<CharT>::type) == 1,
- int>::type = 0>
- input_adapter(CharT b, std::size_t l)
- : ia(std::make_shared<input_buffer_adapter>(reinterpret_cast<const char*>(b), l)) {}
+// General purpose iterator-based input
+template<typename IteratorType>
+typename iterator_input_adapter_factory<IteratorType>::adapter_type input_adapter(IteratorType first, IteratorType last)
+{
+ using factory_type = iterator_input_adapter_factory<IteratorType>;
+ return factory_type::create(first, last);
+}
- // derived support
+// Convenience shorthand from container to iterator
+template<typename ContainerType>
+auto input_adapter(const ContainerType& container) -> decltype(input_adapter(begin(container), end(container)))
+{
+ // Enable ADL
+ using std::begin;
+ using std::end;
- /// input adapter for string literal
- template<typename CharT,
- typename std::enable_if<
- std::is_pointer<CharT>::value and
- std::is_integral<typename std::remove_pointer<CharT>::type>::value and
- sizeof(typename std::remove_pointer<CharT>::type) == 1,
- int>::type = 0>
- input_adapter(CharT b)
- : input_adapter(reinterpret_cast<const char*>(b),
- std::strlen(reinterpret_cast<const char*>(b))) {}
+ return input_adapter(begin(container), end(container));
+}
+
+// Special cases with fast paths
+inline file_input_adapter input_adapter(std::FILE* file)
+{
+ return file_input_adapter(file);
+}
+
+inline input_stream_adapter input_adapter(std::istream& stream)
+{
+ return input_stream_adapter(stream);
+}
+
+inline input_stream_adapter input_adapter(std::istream&& stream)
+{
+ return input_stream_adapter(stream);
+}
+
+using contiguous_bytes_input_adapter = decltype(input_adapter(std::declval<const char*>(), std::declval<const char*>()));
+
+// Null-terminated strings, and the like.
+template < typename CharT,
+ typename std::enable_if <
+ std::is_pointer<CharT>::value&&
+ !std::is_array<CharT>::value&&
+ std::is_integral<typename std::remove_pointer<CharT>::type>::value&&
+ sizeof(typename std::remove_pointer<CharT>::type) == 1,
+ int >::type = 0 >
+contiguous_bytes_input_adapter input_adapter(CharT b)
+{
+ auto length = std::strlen(reinterpret_cast<const char*>(b));
+ const auto* ptr = reinterpret_cast<const char*>(b);
+ return input_adapter(ptr, ptr + length);
+}
+
+template<typename T, std::size_t N>
+auto input_adapter(T (&array)[N]) -> decltype(input_adapter(array, array + N))
+{
+ return input_adapter(array, array + N);
+}
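
// Illustrative sketch (editorial, not part of the upstream patch): how the
// overload set above resolves for common inputs, assuming the full header.

#include <string> // string, u16string

inline void example_adapters()
{
    // string literal: the T (&)[N] overload, which spans the whole array
    // (note that this includes the trailing '\0')
    auto a1 = nlohmann::detail::input_adapter("null");

    // std::string: the container overload, iterating byte by byte
    std::string narrow = "true";
    auto a2 = nlohmann::detail::input_adapter(narrow);

    // std::u16string: value_type is wider than one byte, so the factory wraps
    // the iterator adapter in a wide_string_input_adapter
    std::u16string wide = u"1";
    auto a3 = nlohmann::detail::input_adapter(wide);

    static_cast<void>(a1);
    static_cast<void>(a2);
    static_cast<void>(a3);
}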
+
+// This class only handles inputs of input_buffer_adapter type.
+// It's required so that expressions like {ptr, len} can be implicitly converted
+// to the correct adapter.
+class span_input_adapter
+{
+ public:
+ template < typename CharT,
+ typename std::enable_if <
+ std::is_pointer<CharT>::value&&
+ std::is_integral<typename std::remove_pointer<CharT>::type>::value&&
+ sizeof(typename std::remove_pointer<CharT>::type) == 1,
+ int >::type = 0 >
+ span_input_adapter(CharT b, std::size_t l)
+ : ia(reinterpret_cast<const char*>(b), reinterpret_cast<const char*>(b) + l) {}
- /// input adapter for iterator range with contiguous storage
template<class IteratorType,
typename std::enable_if<
std::is_same<typename iterator_traits<IteratorType>::iterator_category, std::random_access_iterator_tag>::value,
int>::type = 0>
- input_adapter(IteratorType first, IteratorType last)
+ span_input_adapter(IteratorType first, IteratorType last)
+ : ia(input_adapter(first, last)) {}
+
+ contiguous_bytes_input_adapter&& get()
+ {
+ return std::move(ia);
+ }
+
+ private:
+ contiguous_bytes_input_adapter ia;
+};
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/input/json_sax.hpp>
+
+
+#include <cstddef>
+#include <string> // string
+#include <utility> // move
+#include <vector> // vector
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+namespace nlohmann
+{
+
+/*!
+@brief SAX interface
+
+This class describes the SAX interface used by @ref nlohmann::json::sax_parse.
+Each function is called in different situations while the input is parsed. The
+boolean return value informs the parser whether to continue processing the
+input.
+*/
+template<typename BasicJsonType>
+struct json_sax
+{
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+
+ /*!
+ @brief a null value was read
+ @return whether parsing should proceed
+ */
+ virtual bool null() = 0;
+
+ /*!
+ @brief a boolean value was read
+ @param[in] val boolean value
+ @return whether parsing should proceed
+ */
+ virtual bool boolean(bool val) = 0;
+
+ /*!
+ @brief an integer number was read
+ @param[in] val integer value
+ @return whether parsing should proceed
+ */
+ virtual bool number_integer(number_integer_t val) = 0;
+
+ /*!
+ @brief an unsigned integer number was read
+ @param[in] val unsigned integer value
+ @return whether parsing should proceed
+ */
+ virtual bool number_unsigned(number_unsigned_t val) = 0;
+
+ /*!
+ @brief a floating-point number was read
+ @param[in] val floating-point value
+ @param[in] s raw token value
+ @return whether parsing should proceed
+ */
+ virtual bool number_float(number_float_t val, const string_t& s) = 0;
+
+ /*!
+ @brief a string was read
+ @param[in] val string value
+ @return whether parsing should proceed
+ @note It is safe to move the passed string.
+ */
+ virtual bool string(string_t& val) = 0;
+
+ /*!
+ @brief a binary string was read
+ @param[in] val binary value
+ @return whether parsing should proceed
+ @note It is safe to move the passed binary.
+ */
+ virtual bool binary(binary_t& val) = 0;
+
+ /*!
+ @brief the beginning of an object was read
+ @param[in] elements number of object elements or -1 if unknown
+ @return whether parsing should proceed
+ @note binary formats may report the number of elements
+ */
+ virtual bool start_object(std::size_t elements) = 0;
+
+ /*!
+ @brief an object key was read
+ @param[in] val object key
+ @return whether parsing should proceed
+ @note It is safe to move the passed string.
+ */
+ virtual bool key(string_t& val) = 0;
+
+ /*!
+ @brief the end of an object was read
+ @return whether parsing should proceed
+ */
+ virtual bool end_object() = 0;
+
+ /*!
+ @brief the beginning of an array was read
+ @param[in] elements number of array elements or -1 if unknown
+ @return whether parsing should proceed
+ @note binary formats may report the number of elements
+ */
+ virtual bool start_array(std::size_t elements) = 0;
+
+ /*!
+ @brief the end of an array was read
+ @return whether parsing should proceed
+ */
+ virtual bool end_array() = 0;
+
+ /*!
+ @brief a parse error occurred
+ @param[in] position the position in the input where the error occurs
+ @param[in] last_token the last read token
+ @param[in] ex an exception object describing the error
+ @return whether parsing should proceed (must return false)
+ */
+ virtual bool parse_error(std::size_t position,
+ const std::string& last_token,
+ const detail::exception& ex) = 0;
+
+ virtual ~json_sax() = default;
+};
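
// Illustrative sketch (editorial, not part of the upstream patch): a minimal
// json_sax consumer that accepts every event, usable with
// basic_json::sax_parse to validate input without building a DOM.

template<typename BasicJsonType>
struct accept_all_sax : json_sax<BasicJsonType>
{
    bool null() override { return true; }
    bool boolean(bool) override { return true; }
    bool number_integer(typename BasicJsonType::number_integer_t) override { return true; }
    bool number_unsigned(typename BasicJsonType::number_unsigned_t) override { return true; }
    bool number_float(typename BasicJsonType::number_float_t, const typename BasicJsonType::string_t&) override { return true; }
    bool string(typename BasicJsonType::string_t&) override { return true; }
    bool binary(typename BasicJsonType::binary_t&) override { return true; }
    bool start_object(std::size_t) override { return true; }
    bool key(typename BasicJsonType::string_t&) override { return true; }
    bool end_object() override { return true; }
    bool start_array(std::size_t) override { return true; }
    bool end_array() override { return true; }
    bool parse_error(std::size_t, const std::string&, const detail::exception&) override { return false; }
};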
+
+
+namespace detail
+{
+/*!
+@brief SAX implementation to create a JSON value from SAX events
+
+This class implements the @ref json_sax interface and processes the SAX events
+to create a JSON value which makes it basically a DOM parser. The structure or
+hierarchy of the JSON value is managed by the stack `ref_stack` which contains
+a pointer to the respective array or object for each recursion depth.
+
+After successful parsing, the value that is passed by reference to the
+constructor contains the parsed value.
+
+@tparam BasicJsonType the JSON type
+*/
+template<typename BasicJsonType>
+class json_sax_dom_parser
+{
+ public:
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+
+ /*!
+ @param[in, out] r reference to a JSON value that is manipulated while
+ parsing
+ @param[in] allow_exceptions_ whether parse errors yield exceptions
+ */
+ explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true)
+ : root(r), allow_exceptions(allow_exceptions_)
+ {}
+
+ // make class move-only
+ json_sax_dom_parser(const json_sax_dom_parser&) = delete;
+ json_sax_dom_parser(json_sax_dom_parser&&) = default;
+ json_sax_dom_parser& operator=(const json_sax_dom_parser&) = delete;
+ json_sax_dom_parser& operator=(json_sax_dom_parser&&) = default;
+ ~json_sax_dom_parser() = default;
+
+ bool null()
+ {
+ handle_value(nullptr);
+ return true;
+ }
+
+ bool boolean(bool val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_integer(number_integer_t val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_unsigned(number_unsigned_t val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_float(number_float_t val, const string_t& /*unused*/)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool string(string_t& val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool binary(binary_t& val)
+ {
+ handle_value(std::move(val));
+ return true;
+ }
+
+ bool start_object(std::size_t len)
{
-#ifndef NDEBUG
- // assertion to check that the iterator range is indeed contiguous,
- // see http://stackoverflow.com/a/35008842/266378 for more discussion
- const auto is_contiguous = std::accumulate(
- first, last, std::pair<bool, int>(true, 0),
- [&first](std::pair<bool, int> res, decltype(*first) val)
+ ref_stack.push_back(handle_value(BasicJsonType::value_t::object));
+
+ if (JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size()))
{
- res.first &= (val == *(std::next(std::addressof(*first), res.second++)));
- return res;
- }).first;
- assert(is_contiguous);
-#endif
+ JSON_THROW(out_of_range::create(408,
+ "excessive object size: " + std::to_string(len)));
+ }
- // assertion to check that each element is 1 byte long
- static_assert(
- sizeof(typename iterator_traits<IteratorType>::value_type) == 1,
- "each element in the iterator range must have the size of 1 byte");
+ return true;
+ }
+
+ bool key(string_t& val)
+ {
+ // add null at given key and store the reference for later
+ object_element = &(ref_stack.back()->m_value.object->operator[](val));
+ return true;
+ }
- const auto len = static_cast<size_t>(std::distance(first, last));
- if (JSON_LIKELY(len > 0))
+ bool end_object()
+ {
+ ref_stack.pop_back();
+ return true;
+ }
+
+ bool start_array(std::size_t len)
+ {
+ ref_stack.push_back(handle_value(BasicJsonType::value_t::array));
+
+ if (JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size()))
{
- // there is at least one element: use the address of first
- ia = std::make_shared<input_buffer_adapter>(reinterpret_cast<const char*>(&(*first)), len);
+ JSON_THROW(out_of_range::create(408,
+ "excessive array size: " + std::to_string(len)));
}
- else
+
+ return true;
+ }
+
+ bool end_array()
+ {
+ ref_stack.pop_back();
+ return true;
+ }
+
+ template<class Exception>
+ bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
+ const Exception& ex)
+ {
+ errored = true;
+ static_cast<void>(ex);
+ if (allow_exceptions)
+ {
+ JSON_THROW(ex);
+ }
+ return false;
+ }
+
+ constexpr bool is_errored() const
+ {
+ return errored;
+ }
+
+ private:
+ /*!
+ @invariant If the ref stack is empty, then the passed value will be the new
+ root.
+ @invariant If the ref stack contains a value, then it is an array or an
+ object to which we can add elements
+ */
+ template<typename Value>
+ JSON_HEDLEY_RETURNS_NON_NULL
+ BasicJsonType* handle_value(Value&& v)
+ {
+ if (ref_stack.empty())
+ {
+ root = BasicJsonType(std::forward<Value>(v));
+ return &root;
+ }
+
+ JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());
+
+ if (ref_stack.back()->is_array())
+ {
+ ref_stack.back()->m_value.array->emplace_back(std::forward<Value>(v));
+ return &(ref_stack.back()->m_value.array->back());
+ }
+
+ JSON_ASSERT(ref_stack.back()->is_object());
+ JSON_ASSERT(object_element);
+ *object_element = BasicJsonType(std::forward<Value>(v));
+ return object_element;
+ }
+
+ /// the parsed JSON value
+ BasicJsonType& root;
+ /// stack to model hierarchy of values
+ std::vector<BasicJsonType*> ref_stack {};
+ /// helper to hold the reference for the next object element
+ BasicJsonType* object_element = nullptr;
+ /// whether a syntax error occurred
+ bool errored = false;
+ /// whether to throw exceptions in case of errors
+ const bool allow_exceptions = true;
+};
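+
+// Illustrative sketch, not part of the patch: feeding this handler the event
+// sequence for {"answer": 42} by hand builds the same DOM that json::parse
+// produces through it internally.
+//
+//   nlohmann::json root;
+//   nlohmann::detail::json_sax_dom_parser<nlohmann::json> dom(root);
+//   dom.start_object(std::size_t(-1));
+//   std::string k = "answer";
+//   dom.key(k);
+//   dom.number_unsigned(42);
+//   dom.end_object();
+//   // root == {"answer": 42}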
+
+template<typename BasicJsonType>
+class json_sax_dom_callback_parser
+{
+ public:
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+ using parser_callback_t = typename BasicJsonType::parser_callback_t;
+ using parse_event_t = typename BasicJsonType::parse_event_t;
+
+ json_sax_dom_callback_parser(BasicJsonType& r,
+ const parser_callback_t cb,
+ const bool allow_exceptions_ = true)
+ : root(r), callback(cb), allow_exceptions(allow_exceptions_)
+ {
+ keep_stack.push_back(true);
+ }
+
+ // make class move-only
+ json_sax_dom_callback_parser(const json_sax_dom_callback_parser&) = delete;
+ json_sax_dom_callback_parser(json_sax_dom_callback_parser&&) = default;
+ json_sax_dom_callback_parser& operator=(const json_sax_dom_callback_parser&) = delete;
+ json_sax_dom_callback_parser& operator=(json_sax_dom_callback_parser&&) = default;
+ ~json_sax_dom_callback_parser() = default;
+
+ bool null()
+ {
+ handle_value(nullptr);
+ return true;
+ }
+
+ bool boolean(bool val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_integer(number_integer_t val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_unsigned(number_unsigned_t val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool number_float(number_float_t val, const string_t& /*unused*/)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool string(string_t& val)
+ {
+ handle_value(val);
+ return true;
+ }
+
+ bool binary(binary_t& val)
+ {
+ handle_value(std::move(val));
+ return true;
+ }
+
+ bool start_object(std::size_t len)
+ {
+ // check callback for object start
+ const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::object_start, discarded);
+ keep_stack.push_back(keep);
+
+ auto val = handle_value(BasicJsonType::value_t::object, true);
+ ref_stack.push_back(val.second);
+
+ // check object limit
+ if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size()))
+ {
+ JSON_THROW(out_of_range::create(408, "excessive object size: " + std::to_string(len)));
+ }
+
+ return true;
+ }
+
+ bool key(string_t& val)
+ {
+ BasicJsonType k = BasicJsonType(val);
+
+ // check callback for key
+ const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::key, k);
+ key_keep_stack.push_back(keep);
+
+ // add discarded value at given key and store the reference for later
+ if (keep && ref_stack.back())
+ {
+ object_element = &(ref_stack.back()->m_value.object->operator[](val) = discarded);
+ }
+
+ return true;
+ }
+
+ bool end_object()
+ {
+ if (ref_stack.back() && !callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back()))
+ {
+ // discard object
+ *ref_stack.back() = discarded;
+ }
+
+ JSON_ASSERT(!ref_stack.empty());
+ JSON_ASSERT(!keep_stack.empty());
+ ref_stack.pop_back();
+ keep_stack.pop_back();
+
+ if (!ref_stack.empty() && ref_stack.back() && ref_stack.back()->is_structured())
{
- // the address of first cannot be used: use nullptr
- ia = std::make_shared<input_buffer_adapter>(nullptr, len);
+ // remove discarded value
+ for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it)
+ {
+ if (it->is_discarded())
+ {
+ ref_stack.back()->erase(it);
+ break;
+ }
+ }
}
+
+ return true;
}
- /// input adapter for array
- template<class T, std::size_t N>
- input_adapter(T (&array)[N])
- : input_adapter(std::begin(array), std::end(array)) {}
+ bool start_array(std::size_t len)
+ {
+ const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::array_start, discarded);
+ keep_stack.push_back(keep);
+
+ auto val = handle_value(BasicJsonType::value_t::array, true);
+ ref_stack.push_back(val.second);
+
+ // check array limit
+ if (ref_stack.back() && JSON_HEDLEY_UNLIKELY(len != std::size_t(-1) && len > ref_stack.back()->max_size()))
+ {
+ JSON_THROW(out_of_range::create(408, "excessive array size: " + std::to_string(len)));
+ }
+
+ return true;
+ }
+
+ bool end_array()
+ {
+ bool keep = true;
+
+ if (ref_stack.back())
+ {
+ keep = callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back());
+ if (!keep)
+ {
+ // discard array
+ *ref_stack.back() = discarded;
+ }
+ }
+
+ JSON_ASSERT(!ref_stack.empty());
+ JSON_ASSERT(!keep_stack.empty());
+ ref_stack.pop_back();
+ keep_stack.pop_back();
- /// input adapter for contiguous container
- template<class ContiguousContainer, typename
- std::enable_if<not std::is_pointer<ContiguousContainer>::value and
- std::is_base_of<std::random_access_iterator_tag, typename iterator_traits<decltype(std::begin(std::declval<ContiguousContainer const>()))>::iterator_category>::value,
- int>::type = 0>
- input_adapter(const ContiguousContainer& c)
- : input_adapter(std::begin(c), std::end(c)) {}
+ // remove discarded value
+ if (!keep && !ref_stack.empty() && ref_stack.back()->is_array())
+ {
+ ref_stack.back()->m_value.array->pop_back();
+ }
- operator input_adapter_t()
+ return true;
+ }
+
+ template<class Exception>
+ bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
+ const Exception& ex)
{
- return ia;
+ errored = true;
+ static_cast<void>(ex);
+ if (allow_exceptions)
+ {
+ JSON_THROW(ex);
+ }
+ return false;
+ }
+
+ constexpr bool is_errored() const
+ {
+ return errored;
}
private:
- /// the actual adapter
- input_adapter_t ia = nullptr;
+ /*!
+ @param[in] v value to add to the JSON value we build during parsing
+ @param[in] skip_callback whether we should skip calling the callback
+ function; this is required after start_array() and
+ start_object() SAX events, because otherwise we would call the
+ callback function with an empty array or object, respectively.
+
+ @invariant If the ref stack is empty, then the passed value will be the new
+ root.
+ @invariant If the ref stack contains a value, then it is an array or an
+ object to which we can add elements
+
+ @return pair of boolean (whether value should be kept) and pointer (to the
+ passed value in the ref_stack hierarchy; nullptr if not kept)
+ */
+ template<typename Value>
+ std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool skip_callback = false)
+ {
+ JSON_ASSERT(!keep_stack.empty());
+
+ // do not handle this value if we know it would be added to a discarded
+ // container
+ if (!keep_stack.back())
+ {
+ return {false, nullptr};
+ }
+
+ // create value
+ auto value = BasicJsonType(std::forward<Value>(v));
+
+ // check callback
+ const bool keep = skip_callback || callback(static_cast<int>(ref_stack.size()), parse_event_t::value, value);
+
+ // do not handle this value if we just learnt it shall be discarded
+ if (!keep)
+ {
+ return {false, nullptr};
+ }
+
+ if (ref_stack.empty())
+ {
+ root = std::move(value);
+ return {true, &root};
+ }
+
+ // skip this value if we already decided to skip the parent
+ // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
+ if (!ref_stack.back())
+ {
+ return {false, nullptr};
+ }
+
+ // we now only expect arrays and objects
+ JSON_ASSERT(ref_stack.back()->is_array() || ref_stack.back()->is_object());
+
+ // array
+ if (ref_stack.back()->is_array())
+ {
+ ref_stack.back()->m_value.array->push_back(std::move(value));
+ return {true, &(ref_stack.back()->m_value.array->back())};
+ }
+
+ // object
+ JSON_ASSERT(ref_stack.back()->is_object());
+ // check if we should store an element for the current key
+ JSON_ASSERT(!key_keep_stack.empty());
+ const bool store_element = key_keep_stack.back();
+ key_keep_stack.pop_back();
+
+ if (!store_element)
+ {
+ return {false, nullptr};
+ }
+
+ JSON_ASSERT(object_element);
+ *object_element = std::move(value);
+ return {true, object_element};
+ }
+
+ /// the parsed JSON value
+ BasicJsonType& root;
+ /// stack to model hierarchy of values
+ std::vector<BasicJsonType*> ref_stack {};
+ /// stack to manage which values to keep
+ std::vector<bool> keep_stack {};
+ /// stack to manage which object keys to keep
+ std::vector<bool> key_keep_stack {};
+ /// helper to hold the reference for the next object element
+ BasicJsonType* object_element = nullptr;
+ /// whether a syntax error occurred
+ bool errored = false;
+ /// callback function
+ const parser_callback_t callback = nullptr;
+ /// whether to throw exceptions in case of errors
+ const bool allow_exceptions = true;
+ /// a discarded value for the callback
+ BasicJsonType discarded = BasicJsonType::value_t::discarded;
+};
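+
+// Illustrative sketch, not part of the patch: this class backs the callback
+// overload of json::parse; the lambda below discards every object member
+// whose key is "debug".
+//
+//   using json = nlohmann::json;
+//   json j = json::parse(R"({"keep": 1, "debug": 2})",
+//       [](int /*depth*/, json::parse_event_t event, json& parsed)
+//       {
+//           return !(event == json::parse_event_t::key && parsed == "debug");
+//       });
+//   // j == {"keep": 1}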
+
+template<typename BasicJsonType>
+class json_sax_acceptor
+{
+ public:
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+
+ bool null()
+ {
+ return true;
+ }
+
+ bool boolean(bool /*unused*/)
+ {
+ return true;
+ }
+
+ bool number_integer(number_integer_t /*unused*/)
+ {
+ return true;
+ }
+
+ bool number_unsigned(number_unsigned_t /*unused*/)
+ {
+ return true;
+ }
+
+ bool number_float(number_float_t /*unused*/, const string_t& /*unused*/)
+ {
+ return true;
+ }
+
+ bool string(string_t& /*unused*/)
+ {
+ return true;
+ }
+
+ bool binary(binary_t& /*unused*/)
+ {
+ return true;
+ }
+
+ bool start_object(std::size_t /*unused*/ = std::size_t(-1))
+ {
+ return true;
+ }
+
+ bool key(string_t& /*unused*/)
+ {
+ return true;
+ }
+
+ bool end_object()
+ {
+ return true;
+ }
+
+ bool start_array(std::size_t /*unused*/ = std::size_t(-1))
+ {
+ return true;
+ }
+
+ bool end_array()
+ {
+ return true;
+ }
+
+ bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/)
+ {
+ return false;
+ }
};
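+
+// Illustrative sketch, not part of the patch: json_sax_acceptor is the
+// do-nothing handler behind json::accept, which validates input without
+// building a DOM and never throws.
+//
+//   bool ok  = nlohmann::json::accept(R"({"pi": 3.141})");  // true
+//   bool bad = nlohmann::json::accept(R"({"pi": })");       // false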
} // namespace detail
+
} // namespace nlohmann
// #include <nlohmann/detail/input/lexer.hpp>
+#include <array> // array
#include <clocale> // localeconv
#include <cstddef> // size_t
-#include <cstdlib> // strtof, strtod, strtold, strtoll, strtoull
#include <cstdio> // snprintf
+#include <cstdlib> // strtof, strtod, strtold, strtoll, strtoull
#include <initializer_list> // initializer_list
#include <string> // char_traits, string
+#include <utility> // move
#include <vector> // vector
-// #include <nlohmann/detail/macro_scope.hpp>
-
// #include <nlohmann/detail/input/input_adapters.hpp>
// #include <nlohmann/detail/input/position_t.hpp>
+// #include <nlohmann/detail/macro_scope.hpp>
+
namespace nlohmann
{
@@ -2587,19 +5901,9 @@ namespace detail
// lexer //
///////////
-/*!
-@brief lexical analysis
-
-This class organizes the lexical analysis during JSON deserialization.
-*/
template<typename BasicJsonType>
-class lexer
+class lexer_base
{
- using number_integer_t = typename BasicJsonType::number_integer_t;
- using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
- using number_float_t = typename BasicJsonType::number_float_t;
- using string_t = typename BasicJsonType::string_t;
-
public:
/// token types for the parser
enum class token_type
@@ -2624,6 +5928,8 @@ class lexer
};
/// return name of values of type token_type (only used for errors)
+ JSON_HEDLEY_RETURNS_NON_NULL
+ JSON_HEDLEY_CONST
static const char* token_type_name(const token_type t) noexcept
{
switch (t)
@@ -2638,9 +5944,9 @@ class lexer
return "null literal";
case token_type::value_string:
return "string literal";
- case lexer::token_type::value_unsigned:
- case lexer::token_type::value_integer:
- case lexer::token_type::value_float:
+ case token_type::value_unsigned:
+ case token_type::value_integer:
+ case token_type::value_float:
return "number literal";
case token_type::begin_array:
return "'['";
@@ -2666,15 +5972,36 @@ class lexer
// LCOV_EXCL_STOP
}
}
+};
+/*!
+@brief lexical analysis
+
+This class organizes the lexical analysis during JSON deserialization.
+*/
+template<typename BasicJsonType, typename InputAdapterType>
+class lexer : public lexer_base<BasicJsonType>
+{
+ using number_integer_t = typename BasicJsonType::number_integer_t;
+ using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
+ using string_t = typename BasicJsonType::string_t;
+ using char_type = typename InputAdapterType::char_type;
+ using char_int_type = typename std::char_traits<char_type>::int_type;
- explicit lexer(detail::input_adapter_t&& adapter)
- : ia(std::move(adapter)), decimal_point_char(get_decimal_point()) {}
+ public:
+ using token_type = typename lexer_base<BasicJsonType>::token_type;
+
+ explicit lexer(InputAdapterType&& adapter, bool ignore_comments_ = false)
+ : ia(std::move(adapter))
+ , ignore_comments(ignore_comments_)
+ , decimal_point_char(static_cast<char_int_type>(get_decimal_point()))
+ {}
// delete because of pointer members
lexer(const lexer&) = delete;
- lexer(lexer&&) = delete;
+ lexer(lexer&&) = default;
lexer& operator=(lexer&) = delete;
- lexer& operator=(lexer&&) = delete;
+ lexer& operator=(lexer&&) = default;
~lexer() = default;
private:
@@ -2683,10 +6010,11 @@ class lexer
/////////////////////
/// return the locale-dependent decimal point
+ JSON_HEDLEY_PURE
static char get_decimal_point() noexcept
{
- const auto loc = localeconv();
- assert(loc != nullptr);
+ const auto* loc = localeconv();
+ JSON_ASSERT(loc != nullptr);
return (loc->decimal_point == nullptr) ? '.' : *(loc->decimal_point);
}
@@ -2712,25 +6040,25 @@ class lexer
int get_codepoint()
{
// this function only makes sense after reading `\u`
- assert(current == 'u');
+ JSON_ASSERT(current == 'u');
int codepoint = 0;
- const auto factors = { 12, 8, 4, 0 };
+ const auto factors = { 12u, 8u, 4u, 0u };
for (const auto factor : factors)
{
get();
- if (current >= '0' and current <= '9')
+ if (current >= '0' && current <= '9')
{
- codepoint += ((current - 0x30) << factor);
+ codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x30u) << factor);
}
- else if (current >= 'A' and current <= 'F')
+ else if (current >= 'A' && current <= 'F')
{
- codepoint += ((current - 0x37) << factor);
+ codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x37u) << factor);
}
- else if (current >= 'a' and current <= 'f')
+ else if (current >= 'a' && current <= 'f')
{
- codepoint += ((current - 0x57) << factor);
+ codepoint += static_cast<int>((static_cast<unsigned int>(current) - 0x57u) << factor);
}
else
{
@@ -2738,7 +6066,7 @@ class lexer
}
}
- assert(0x0000 <= codepoint and codepoint <= 0xFFFF);
+ JSON_ASSERT(0x0000 <= codepoint && codepoint <= 0xFFFF);
return codepoint;
}
@@ -2757,15 +6085,15 @@ class lexer
@return true if and only if no range violation was detected
*/
- bool next_byte_in_range(std::initializer_list<int> ranges)
+ bool next_byte_in_range(std::initializer_list<char_int_type> ranges)
{
- assert(ranges.size() == 2 or ranges.size() == 4 or ranges.size() == 6);
+ JSON_ASSERT(ranges.size() == 2 || ranges.size() == 4 || ranges.size() == 6);
add(current);
for (auto range = ranges.begin(); range != ranges.end(); ++range)
{
get();
- if (JSON_LIKELY(*range <= current and current <= *(++range)))
+ if (JSON_HEDLEY_LIKELY(*range <= current && current <= *(++range)))
{
add(current);
}
@@ -2800,7 +6128,7 @@ class lexer
reset();
// we entered the function by reading an open quote
- assert(current == '\"');
+ JSON_ASSERT(current == '\"');
while (true)
{
@@ -2808,7 +6136,7 @@ class lexer
switch (get())
{
// end of file while parsing string
- case std::char_traits<char>::eof():
+ case std::char_traits<char_type>::eof():
{
error_message = "invalid string: missing closing quote";
return token_type::parse_error;
@@ -2864,55 +6192,55 @@ class lexer
const int codepoint1 = get_codepoint();
int codepoint = codepoint1; // start with codepoint1
- if (JSON_UNLIKELY(codepoint1 == -1))
+ if (JSON_HEDLEY_UNLIKELY(codepoint1 == -1))
{
error_message = "invalid string: '\\u' must be followed by 4 hex digits";
return token_type::parse_error;
}
// check if code point is a high surrogate
- if (0xD800 <= codepoint1 and codepoint1 <= 0xDBFF)
+ if (0xD800 <= codepoint1 && codepoint1 <= 0xDBFF)
{
// expect next \uxxxx entry
- if (JSON_LIKELY(get() == '\\' and get() == 'u'))
+ if (JSON_HEDLEY_LIKELY(get() == '\\' && get() == 'u'))
{
const int codepoint2 = get_codepoint();
- if (JSON_UNLIKELY(codepoint2 == -1))
+ if (JSON_HEDLEY_UNLIKELY(codepoint2 == -1))
{
error_message = "invalid string: '\\u' must be followed by 4 hex digits";
return token_type::parse_error;
}
// check if codepoint2 is a low surrogate
- if (JSON_LIKELY(0xDC00 <= codepoint2 and codepoint2 <= 0xDFFF))
+ if (JSON_HEDLEY_LIKELY(0xDC00 <= codepoint2 && codepoint2 <= 0xDFFF))
{
// overwrite codepoint
- codepoint =
- // high surrogate occupies the most significant 22 bits
- (codepoint1 << 10)
- // low surrogate occupies the least significant 15 bits
- + codepoint2
- // there is still the 0xD800, 0xDC00 and 0x10000 noise
- // in the result so we have to subtract with:
- // (0xD800 << 10) + DC00 - 0x10000 = 0x35FDC00
- - 0x35FDC00;
+ codepoint = static_cast<int>(
+ // high surrogate occupies the most significant 22 bits
+ (static_cast<unsigned int>(codepoint1) << 10u)
+ // low surrogate occupies the least significant 15 bits
+ + static_cast<unsigned int>(codepoint2)
+ // there is still the 0xD800, 0xDC00 and 0x10000 noise
+ // in the result so we have to subtract with:
+ // (0xD800 << 10) + DC00 - 0x10000 = 0x35FDC00
+ - 0x35FDC00u);
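+ // worked example: "\uD83D\uDE00" gives
+ // (0xD83D << 10) + 0xDE00 - 0x35FDC00 = 0x1F600 (U+1F600)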
}
else
{
- error_message = "invalid string: surrogate U+DC00..U+DFFF must be followed by U+DC00..U+DFFF";
+ error_message = "invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF";
return token_type::parse_error;
}
}
else
{
- error_message = "invalid string: surrogate U+DC00..U+DFFF must be followed by U+DC00..U+DFFF";
+ error_message = "invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF";
return token_type::parse_error;
}
}
else
{
- if (JSON_UNLIKELY(0xDC00 <= codepoint1 and codepoint1 <= 0xDFFF))
+ if (JSON_HEDLEY_UNLIKELY(0xDC00 <= codepoint1 && codepoint1 <= 0xDFFF))
{
error_message = "invalid string: surrogate U+DC00..U+DFFF must follow U+D800..U+DBFF";
return token_type::parse_error;
@@ -2920,34 +6248,34 @@ class lexer
}
// result of the above calculation yields a proper codepoint
- assert(0x00 <= codepoint and codepoint <= 0x10FFFF);
+ JSON_ASSERT(0x00 <= codepoint && codepoint <= 0x10FFFF);
// translate codepoint into bytes
if (codepoint < 0x80)
{
// 1-byte characters: 0xxxxxxx (ASCII)
- add(codepoint);
+ add(static_cast<char_int_type>(codepoint));
}
else if (codepoint <= 0x7FF)
{
// 2-byte characters: 110xxxxx 10xxxxxx
- add(0xC0 | (codepoint >> 6));
- add(0x80 | (codepoint & 0x3F));
+ add(static_cast<char_int_type>(0xC0u | (static_cast<unsigned int>(codepoint) >> 6u)));
+ add(static_cast<char_int_type>(0x80u | (static_cast<unsigned int>(codepoint) & 0x3Fu)));
}
else if (codepoint <= 0xFFFF)
{
// 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx
- add(0xE0 | (codepoint >> 12));
- add(0x80 | ((codepoint >> 6) & 0x3F));
- add(0x80 | (codepoint & 0x3F));
+ add(static_cast<char_int_type>(0xE0u | (static_cast<unsigned int>(codepoint) >> 12u)));
+ add(static_cast<char_int_type>(0x80u | ((static_cast<unsigned int>(codepoint) >> 6u) & 0x3Fu)));
+ add(static_cast<char_int_type>(0x80u | (static_cast<unsigned int>(codepoint) & 0x3Fu)));
}
else
{
// 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
- add(0xF0 | (codepoint >> 18));
- add(0x80 | ((codepoint >> 12) & 0x3F));
- add(0x80 | ((codepoint >> 6) & 0x3F));
- add(0x80 | (codepoint & 0x3F));
+ add(static_cast<char_int_type>(0xF0u | (static_cast<unsigned int>(codepoint) >> 18u)));
+ add(static_cast<char_int_type>(0x80u | ((static_cast<unsigned int>(codepoint) >> 12u) & 0x3Fu)));
+ add(static_cast<char_int_type>(0x80u | ((static_cast<unsigned int>(codepoint) >> 6u) & 0x3Fu)));
+ add(static_cast<char_int_type>(0x80u | (static_cast<unsigned int>(codepoint) & 0x3Fu)));
}
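+ // worked example: U+20AC takes the 3-byte branch above and is
+ // added to the token buffer as 0xE2 0x82 0xAC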
break;
@@ -3287,7 +6615,7 @@ class lexer
case 0xDE:
case 0xDF:
{
- if (JSON_UNLIKELY(not next_byte_in_range({0x80, 0xBF})))
+ if (JSON_HEDLEY_UNLIKELY(!next_byte_in_range({0x80, 0xBF})))
{
return token_type::parse_error;
}
@@ -3297,7 +6625,7 @@ class lexer
// U+0800..U+0FFF: bytes E0 A0..BF 80..BF
case 0xE0:
{
- if (JSON_UNLIKELY(not (next_byte_in_range({0xA0, 0xBF, 0x80, 0xBF}))))
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0xA0, 0xBF, 0x80, 0xBF}))))
{
return token_type::parse_error;
}
@@ -3321,7 +6649,7 @@ class lexer
case 0xEE:
case 0xEF:
{
- if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0xBF, 0x80, 0xBF}))))
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0xBF, 0x80, 0xBF}))))
{
return token_type::parse_error;
}
@@ -3331,7 +6659,7 @@ class lexer
// U+D000..U+D7FF: bytes ED 80..9F 80..BF
case 0xED:
{
- if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0x9F, 0x80, 0xBF}))))
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0x9F, 0x80, 0xBF}))))
{
return token_type::parse_error;
}
@@ -3341,7 +6669,7 @@ class lexer
// U+10000..U+3FFFF F0 90..BF 80..BF 80..BF
case 0xF0:
{
- if (JSON_UNLIKELY(not (next_byte_in_range({0x90, 0xBF, 0x80, 0xBF, 0x80, 0xBF}))))
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x90, 0xBF, 0x80, 0xBF, 0x80, 0xBF}))))
{
return token_type::parse_error;
}
@@ -3353,7 +6681,7 @@ class lexer
case 0xF2:
case 0xF3:
{
- if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0xBF, 0x80, 0xBF, 0x80, 0xBF}))))
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0xBF, 0x80, 0xBF, 0x80, 0xBF}))))
{
return token_type::parse_error;
}
@@ -3363,7 +6691,7 @@ class lexer
// U+100000..U+10FFFF F4 80..8F 80..BF 80..BF
case 0xF4:
{
- if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0x8F, 0x80, 0xBF, 0x80, 0xBF}))))
+ if (JSON_HEDLEY_UNLIKELY(!(next_byte_in_range({0x80, 0x8F, 0x80, 0xBF, 0x80, 0xBF}))))
{
return token_type::parse_error;
}
@@ -3380,16 +6708,90 @@ class lexer
}
}
+ /*!
+ * @brief scan a comment
+ * @return whether comment could be scanned successfully
+ */
+ bool scan_comment()
+ {
+ switch (get())
+ {
+ // single-line comments skip input until a newline or EOF is read
+ case '/':
+ {
+ while (true)
+ {
+ switch (get())
+ {
+ case '\n':
+ case '\r':
+ case std::char_traits<char_type>::eof():
+ case '\0':
+ return true;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ // multi-line comments skip input until */ is read
+ case '*':
+ {
+ while (true)
+ {
+ switch (get())
+ {
+ case std::char_traits<char_type>::eof():
+ case '\0':
+ {
+ error_message = "invalid comment; missing closing '*/'";
+ return false;
+ }
+
+ case '*':
+ {
+ switch (get())
+ {
+ case '/':
+ return true;
+
+ default:
+ {
+ unget();
+ continue;
+ }
+ }
+ }
+
+ default:
+ continue;
+ }
+ }
+ }
+
+ // unexpected character after reading '/'
+ default:
+ {
+ error_message = "invalid comment; expecting '/' or '*' after '/'";
+ return false;
+ }
+ }
+ }
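+
+ // Illustrative sketch, not part of the patch: scan_comment is only reached
+ // when comments are opted into, e.g. through json::parse's trailing
+ // ignore_comments parameter:
+ //
+ //   auto j = nlohmann::json::parse(R"({"a": 1 /* note */ })",
+ //                                  /*cb=*/nullptr,
+ //                                  /*allow_exceptions=*/true,
+ //                                  /*ignore_comments=*/true);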
+
+ JSON_HEDLEY_NON_NULL(2)
static void strtof(float& f, const char* str, char** endptr) noexcept
{
f = std::strtof(str, endptr);
}
+ JSON_HEDLEY_NON_NULL(2)
static void strtof(double& f, const char* str, char** endptr) noexcept
{
f = std::strtod(str, endptr);
}
+ JSON_HEDLEY_NON_NULL(2)
static void strtof(long double& f, const char* str, char** endptr) noexcept
{
f = std::strtold(str, endptr);
@@ -3412,7 +6814,7 @@ class lexer
minus | zero | any1 | [error] | [error] | [error] | [error] | [error]
zero | done | done | exponent | done | done | decimal1 | done
any1 | any1 | any1 | exponent | done | done | decimal1 | done
- decimal1 | decimal2 | [error] | [error] | [error] | [error] | [error] | [error]
+ decimal1 | decimal2 | decimal2 | [error] | [error] | [error] | [error] | [error]
decimal2 | decimal2 | decimal2 | exponent | done | done | done | done
exponent | any2 | any2 | [error] | sign | sign | [error] | [error]
sign | any2 | any2 | [error] | [error] | [error] | [error] | [error]
@@ -3473,13 +6875,9 @@ class lexer
goto scan_number_any1;
}
- // LCOV_EXCL_START
- default:
- {
- // all other characters are rejected outside scan_number()
- assert(false);
- }
- // LCOV_EXCL_STOP
+ // all other characters are rejected outside scan_number()
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
}
scan_number_minus:
@@ -3726,7 +7124,7 @@ scan_number_done:
const auto x = std::strtoull(token_buffer.data(), &endptr, 10);
// we checked the number format before
- assert(endptr == token_buffer.data() + token_buffer.size());
+ JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());
if (errno == 0)
{
@@ -3742,7 +7140,7 @@ scan_number_done:
const auto x = std::strtoll(token_buffer.data(), &endptr, 10);
// we checked the number format before
- assert(endptr == token_buffer.data() + token_buffer.size());
+ JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());
if (errno == 0)
{
@@ -3759,7 +7157,7 @@ scan_number_done:
strtof(value_float, token_buffer.data(), &endptr);
// we checked the number format before
- assert(endptr == token_buffer.data() + token_buffer.size());
+ JSON_ASSERT(endptr == token_buffer.data() + token_buffer.size());
return token_type::value_float;
}
@@ -3769,13 +7167,14 @@ scan_number_done:
@param[in] length the length of the passed literal text
@param[in] return_type the token type to return on success
*/
- token_type scan_literal(const char* literal_text, const std::size_t length,
+ JSON_HEDLEY_NON_NULL(2)
+ token_type scan_literal(const char_type* literal_text, const std::size_t length,
token_type return_type)
{
- assert(current == literal_text[0]);
+ JSON_ASSERT(std::char_traits<char_type>::to_char_type(current) == literal_text[0]);
for (std::size_t i = 1; i < length; ++i)
{
- if (JSON_UNLIKELY(get() != literal_text[i]))
+ if (JSON_HEDLEY_UNLIKELY(std::char_traits<char_type>::to_char_type(get()) != literal_text[i]))
{
error_message = "invalid literal";
return token_type::parse_error;
@@ -3793,7 +7192,7 @@ scan_number_done:
{
token_buffer.clear();
token_string.clear();
- token_string.push_back(std::char_traits<char>::to_char_type(current));
+ token_string.push_back(std::char_traits<char_type>::to_char_type(current));
}
/*
@@ -3806,7 +7205,7 @@ scan_number_done:
@return character read from the input
*/
- std::char_traits<char>::int_type get()
+ char_int_type get()
{
++position.chars_read_total;
++position.chars_read_current_line;
@@ -3818,18 +7217,18 @@ scan_number_done:
}
else
{
- current = ia->get_character();
+ current = ia.get_character();
}
- if (JSON_LIKELY(current != std::char_traits<char>::eof()))
+ if (JSON_HEDLEY_LIKELY(current != std::char_traits<char_type>::eof()))
{
- token_string.push_back(std::char_traits<char>::to_char_type(current));
+ token_string.push_back(std::char_traits<char_type>::to_char_type(current));
}
if (current == '\n')
{
++position.lines_read;
- ++position.chars_read_current_line = 0;
+ position.chars_read_current_line = 0;
}
return current;
@@ -3862,17 +7261,17 @@ scan_number_done:
--position.chars_read_current_line;
}
- if (JSON_LIKELY(current != std::char_traits<char>::eof()))
+ if (JSON_HEDLEY_LIKELY(current != std::char_traits<char_type>::eof()))
{
- assert(token_string.size() != 0);
+ JSON_ASSERT(!token_string.empty());
token_string.pop_back();
}
}
/// add a character to token_buffer
- void add(int c)
+ void add(char_int_type c)
{
- token_buffer.push_back(std::char_traits<char>::to_char_type(c));
+ token_buffer.push_back(static_cast<typename string_t::value_type>(c));
}
public:
@@ -3923,17 +7322,17 @@ scan_number_done:
std::string result;
for (const auto c : token_string)
{
- if ('\x00' <= c and c <= '\x1F')
+ if (static_cast<unsigned char>(c) <= '\x1F')
{
// escape control characters
- char cs[9];
- (std::snprintf)(cs, 9, "<U+%.4X>", static_cast<unsigned char>(c));
- result += cs;
+ std::array<char, 9> cs{{}};
+ (std::snprintf)(cs.data(), cs.size(), "<U+%.4X>", static_cast<unsigned char>(c));
+ result += cs.data();
}
else
{
// add character as is
- result.push_back(c);
+ result.push_back(static_cast<std::string::value_type>(c));
}
}
@@ -3941,6 +7340,7 @@ scan_number_done:
}
/// return syntax error message
+ JSON_HEDLEY_RETURNS_NON_NULL
constexpr const char* get_error_message() const noexcept
{
return error_message;
@@ -3959,7 +7359,7 @@ scan_number_done:
if (get() == 0xEF)
{
// check if we completely parse the BOM
- return get() == 0xBB and get() == 0xBF;
+ return get() == 0xBB && get() == 0xBF;
}
// the first character is not the beginning of the BOM; unget it to
@@ -3968,21 +7368,38 @@ scan_number_done:
return true;
}
+ void skip_whitespace()
+ {
+ do
+ {
+ get();
+ }
+ while (current == ' ' || current == '\t' || current == '\n' || current == '\r');
+ }
+
token_type scan()
{
// initially, skip the BOM
- if (position.chars_read_total == 0 and not skip_bom())
+ if (position.chars_read_total == 0 && !skip_bom())
{
error_message = "invalid BOM; must be 0xEF 0xBB 0xBF if given";
return token_type::parse_error;
}
// read next character and ignore whitespace
- do
+ skip_whitespace();
+
+ // ignore comments
+ while (ignore_comments && current == '/')
{
- get();
+ if (!scan_comment())
+ {
+ return token_type::parse_error;
+ }
+
+ // skip following whitespace
+ skip_whitespace();
}
- while (current == ' ' or current == '\t' or current == '\n' or current == '\r');
switch (current)
{
@@ -4002,11 +7419,20 @@ scan_number_done:
// literals
case 't':
- return scan_literal("true", 4, token_type::literal_true);
+ {
+ std::array<char_type, 4> true_literal = {{'t', 'r', 'u', 'e'}};
+ return scan_literal(true_literal.data(), true_literal.size(), token_type::literal_true);
+ }
case 'f':
- return scan_literal("false", 5, token_type::literal_false);
+ {
+ std::array<char_type, 5> false_literal = {{'f', 'a', 'l', 's', 'e'}};
+ return scan_literal(false_literal.data(), false_literal.size(), token_type::literal_false);
+ }
case 'n':
- return scan_literal("null", 4, token_type::literal_null);
+ {
+ std::array<char_type, 4> null_literal = {{'n', 'u', 'l', 'l'}};
+ return scan_literal(null_literal.data(), null_literal.size(), token_type::literal_null);
+ }
// string
case '\"':
@@ -4029,7 +7455,7 @@ scan_number_done:
// end of input (the null byte is needed when parsing from
// string literals)
case '\0':
- case std::char_traits<char>::eof():
+ case std::char_traits<char_type>::eof():
return token_type::end_of_input;
// error
@@ -4041,19 +7467,22 @@ scan_number_done:
private:
/// input adapter
- detail::input_adapter_t ia = nullptr;
+ InputAdapterType ia;
+
+ /// whether comments should be ignored (true) or signaled as errors (false)
+ const bool ignore_comments = false;
/// the current character
- std::char_traits<char>::int_type current = std::char_traits<char>::eof();
+ char_int_type current = std::char_traits<char_type>::eof();
/// whether the next get() call should just return current
bool next_unget = false;
/// the start position of the current token
- position_t position;
+ position_t position {};
/// raw input token string (for error messages)
- std::vector<char> token_string {};
+ std::vector<char_type> token_string {};
/// buffer for variable-length tokens (numbers, strings)
string_t token_buffer {};
@@ -4067,23 +7496,11 @@ scan_number_done:
number_float_t value_float = 0;
/// the decimal point
- const char decimal_point_char = '.';
+ const char_int_type decimal_point_char = '.';
};
} // namespace detail
} // namespace nlohmann
-// #include <nlohmann/detail/input/parser.hpp>
-
-
-#include <cassert> // assert
-#include <cmath> // isfinite
-#include <cstdint> // uint8_t
-#include <functional> // function
-#include <string> // string
-#include <utility> // move
-
-// #include <nlohmann/detail/exceptions.hpp>
-
// #include <nlohmann/detail/macro_scope.hpp>
// #include <nlohmann/detail/meta/is_sax.hpp>
@@ -4091,6 +7508,7 @@ scan_number_done:
#include <cstdint> // size_t
#include <utility> // declval
+#include <string> // string
// #include <nlohmann/detail/meta/detected.hpp>
@@ -4101,53 +7519,57 @@ namespace nlohmann
{
namespace detail
{
-template <typename T>
+template<typename T>
using null_function_t = decltype(std::declval<T&>().null());
-template <typename T>
+template<typename T>
using boolean_function_t =
decltype(std::declval<T&>().boolean(std::declval<bool>()));
-template <typename T, typename Integer>
+template<typename T, typename Integer>
using number_integer_function_t =
decltype(std::declval<T&>().number_integer(std::declval<Integer>()));
-template <typename T, typename Unsigned>
+template<typename T, typename Unsigned>
using number_unsigned_function_t =
decltype(std::declval<T&>().number_unsigned(std::declval<Unsigned>()));
-template <typename T, typename Float, typename String>
+template<typename T, typename Float, typename String>
using number_float_function_t = decltype(std::declval<T&>().number_float(
std::declval<Float>(), std::declval<const String&>()));
-template <typename T, typename String>
+template<typename T, typename String>
using string_function_t =
decltype(std::declval<T&>().string(std::declval<String&>()));
-template <typename T>
+template<typename T, typename Binary>
+using binary_function_t =
+ decltype(std::declval<T&>().binary(std::declval<Binary&>()));
+
+template<typename T>
using start_object_function_t =
decltype(std::declval<T&>().start_object(std::declval<std::size_t>()));
-template <typename T, typename String>
+template<typename T, typename String>
using key_function_t =
decltype(std::declval<T&>().key(std::declval<String&>()));
-template <typename T>
+template<typename T>
using end_object_function_t = decltype(std::declval<T&>().end_object());
-template <typename T>
+template<typename T>
using start_array_function_t =
decltype(std::declval<T&>().start_array(std::declval<std::size_t>()));
-template <typename T>
+template<typename T>
using end_array_function_t = decltype(std::declval<T&>().end_array());
-template <typename T, typename Exception>
+template<typename T, typename Exception>
using parse_error_function_t = decltype(std::declval<T&>().parse_error(
std::declval<std::size_t>(), std::declval<const std::string&>(),
std::declval<const Exception&>()));
-template <typename SAX, typename BasicJsonType>
+template<typename SAX, typename BasicJsonType>
struct is_sax
{
private:
@@ -4158,19 +7580,18 @@ struct is_sax
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
using exception_t = typename BasicJsonType::exception;
public:
static constexpr bool value =
is_detected_exact<bool, null_function_t, SAX>::value &&
is_detected_exact<bool, boolean_function_t, SAX>::value &&
- is_detected_exact<bool, number_integer_function_t, SAX,
- number_integer_t>::value &&
- is_detected_exact<bool, number_unsigned_function_t, SAX,
- number_unsigned_t>::value &&
- is_detected_exact<bool, number_float_function_t, SAX, number_float_t,
- string_t>::value &&
+ is_detected_exact<bool, number_integer_function_t, SAX, number_integer_t>::value &&
+ is_detected_exact<bool, number_unsigned_function_t, SAX, number_unsigned_t>::value &&
+ is_detected_exact<bool, number_float_function_t, SAX, number_float_t, string_t>::value &&
is_detected_exact<bool, string_function_t, SAX, string_t>::value &&
+ is_detected_exact<bool, binary_function_t, SAX, binary_t>::value &&
is_detected_exact<bool, start_object_function_t, SAX>::value &&
is_detected_exact<bool, key_function_t, SAX, string_t>::value &&
is_detected_exact<bool, end_object_function_t, SAX>::value &&
@@ -4179,7 +7600,7 @@ struct is_sax
is_detected_exact<bool, parse_error_function_t, SAX, exception_t>::value;
};
-template <typename SAX, typename BasicJsonType>
+template<typename SAX, typename BasicJsonType>
struct is_sax_static_asserts
{
private:
@@ -4190,6 +7611,7 @@ struct is_sax_static_asserts
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
using exception_t = typename BasicJsonType::exception;
public:
@@ -4213,6 +7635,9 @@ struct is_sax_static_asserts
static_assert(
is_detected_exact<bool, string_function_t, SAX, string_t>::value,
"Missing/invalid function: bool string(string_t&)");
+ static_assert(
+ is_detected_exact<bool, binary_function_t, SAX, binary_t>::value,
+ "Missing/invalid function: bool binary(binary_t&)");
static_assert(is_detected_exact<bool, start_object_function_t, SAX>::value,
"Missing/invalid function: bool start_object(std::size_t)");
static_assert(is_detected_exact<bool, key_function_t, SAX, string_t>::value,
@@ -4231,715 +7656,2473 @@ struct is_sax_static_asserts
} // namespace detail
} // namespace nlohmann
-// #include <nlohmann/detail/input/input_adapters.hpp>
-
-// #include <nlohmann/detail/input/json_sax.hpp>
+// #include <nlohmann/detail/value_t.hpp>
-#include <cstddef>
-#include <string>
-#include <vector>
+namespace nlohmann
+{
+namespace detail
+{
-// #include <nlohmann/detail/input/parser.hpp>
+/// how to treat CBOR tags
+enum class cbor_tag_handler_t
+{
+ error, ///< throw a parse_error exception in case of a tag
+ ignore ///< ignore tags
+};
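+
+// Illustrative sketch, not part of the patch: RFC 7049's tagged date example
+// (tag 1, epoch time) parses only when tags are ignored; with the default
+// handler the same bytes raise parse_error.112.
+//
+//   std::vector<std::uint8_t> v = {0xC1, 0x1A, 0x51, 0x4B, 0x67, 0xB0};
+//   auto j = nlohmann::json::from_cbor(v, true, true,
+//                                      nlohmann::json::cbor_tag_handler_t::ignore);
+//   // j == 1363896240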
-// #include <nlohmann/detail/exceptions.hpp>
+/*!
+@brief determine system byte order
+@return true if and only if system's byte order is little endian
-namespace nlohmann
+@note from https://stackoverflow.com/a/1001328/266378
+*/
+static inline bool little_endianess(int num = 1) noexcept
{
+ return *reinterpret_cast<char*>(&num) == 1;
+}
-/*!
-@brief SAX interface
-This class describes the SAX interface used by @ref nlohmann::json::sax_parse.
-Each function is called in different situations while the input is parsed. The
-boolean return value informs the parser whether to continue processing the
-input.
+///////////////////
+// binary reader //
+///////////////////
+
+/*!
+@brief deserialization of CBOR, MessagePack, and UBJSON values
*/
-template<typename BasicJsonType>
-struct json_sax
+template<typename BasicJsonType, typename InputAdapterType, typename SAX = json_sax_dom_parser<BasicJsonType>>
+class binary_reader
{
- /// type for (signed) integers
using number_integer_t = typename BasicJsonType::number_integer_t;
- /// type for unsigned integers
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
- /// type for floating-point numbers
using number_float_t = typename BasicJsonType::number_float_t;
- /// type for strings
using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+ using json_sax_t = SAX;
+ using char_type = typename InputAdapterType::char_type;
+ using char_int_type = typename std::char_traits<char_type>::int_type;
+ public:
/*!
- @brief a null value was read
- @return whether parsing should proceed
- */
- virtual bool null() = 0;
+ @brief create a binary reader
- /*!
- @brief a boolean value was read
- @param[in] val boolean value
- @return whether parsing should proceed
+ @param[in] adapter input adapter to read from
*/
- virtual bool boolean(bool val) = 0;
+ explicit binary_reader(InputAdapterType&& adapter) : ia(std::move(adapter))
+ {
+ (void)detail::is_sax_static_asserts<SAX, BasicJsonType> {};
+ }
- /*!
- @brief an integer number was read
- @param[in] val integer value
- @return whether parsing should proceed
- */
- virtual bool number_integer(number_integer_t val) = 0;
+ // make class move-only
+ binary_reader(const binary_reader&) = delete;
+ binary_reader(binary_reader&&) = default;
+ binary_reader& operator=(const binary_reader&) = delete;
+ binary_reader& operator=(binary_reader&&) = default;
+ ~binary_reader() = default;
/*!
- @brief an unsigned integer number was read
- @param[in] val unsigned integer value
- @return whether parsing should proceed
- */
- virtual bool number_unsigned(number_unsigned_t val) = 0;
+ @param[in] format the binary format to parse
+ @param[in] sax_ a SAX event processor
+ @param[in] strict whether to expect the input to be consumed completely
+ @param[in] tag_handler how to treat CBOR tags
- /*!
- @brief an floating-point number was read
- @param[in] val floating-point value
- @param[in] s raw token value
- @return whether parsing should proceed
+ @return whether parsing was successful
*/
- virtual bool number_float(number_float_t val, const string_t& s) = 0;
+ JSON_HEDLEY_NON_NULL(3)
+ bool sax_parse(const input_format_t format,
+ json_sax_t* sax_,
+ const bool strict = true,
+ const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error)
+ {
+ sax = sax_;
+ bool result = false;
- /*!
- @brief a string was read
- @param[in] val string value
- @return whether parsing should proceed
- @note It is safe to move the passed string.
- */
- virtual bool string(string_t& val) = 0;
+ switch (format)
+ {
+ case input_format_t::bson:
+ result = parse_bson_internal();
+ break;
- /*!
- @brief the beginning of an object was read
- @param[in] elements number of object elements or -1 if unknown
- @return whether parsing should proceed
- @note binary formats may report the number of elements
- */
- virtual bool start_object(std::size_t elements) = 0;
+ case input_format_t::cbor:
+ result = parse_cbor_internal(true, tag_handler);
+ break;
+
+ case input_format_t::msgpack:
+ result = parse_msgpack_internal();
+ break;
+
+ case input_format_t::ubjson:
+ result = parse_ubjson_internal();
+ break;
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+
+ // strict mode: next byte must be EOF
+ if (result && strict)
+ {
+ if (format == input_format_t::ubjson)
+ {
+ get_ignore_noop();
+ }
+ else
+ {
+ get();
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(current != std::char_traits<char_type>::eof()))
+ {
+ return sax->parse_error(chars_read, get_token_string(),
+ parse_error::create(110, chars_read, exception_message(format, "expected end of input; last byte: 0x" + get_token_string(), "value")));
+ }
+ }
+
+ return result;
+ }
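+
+ // Illustrative sketch, not part of the patch: the public from_* wrappers
+ // select the format for this reader, e.g.
+ //
+ //   std::vector<std::uint8_t> cbor = {0xA1, 0x61, 0x61, 0x01};  // {"a": 1}
+ //   nlohmann::json j = nlohmann::json::from_cbor(cbor);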
+
+ private:
+ //////////
+ // BSON //
+ //////////
/*!
- @brief an object key was read
- @param[in] val object key
- @return whether parsing should proceed
- @note It is safe to move the passed string.
+ @brief Reads in a BSON-object and passes it to the SAX-parser.
+ @return whether a valid BSON-value was passed to the SAX parser
*/
- virtual bool key(string_t& val) = 0;
+ bool parse_bson_internal()
+ {
+ std::int32_t document_size{};
+ get_number<std::int32_t, true>(input_format_t::bson, document_size);
+
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1))))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_list(/*is_array*/false)))
+ {
+ return false;
+ }
+
+ return sax->end_object();
+ }
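+
+ // Worked example, not part of the patch: the BSON encoding of {"a": 1} is
+ //
+ //   0C 00 00 00   10    61 00    01 00 00 00   00
+ //   size=12       int32 key "a"  value 1       terminator
+ //
+ // i.e. the size prefix read above, then the element list parsed below.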
/*!
- @brief the end of an object was read
- @return whether parsing should proceed
+ @brief Parses a C-style string from the BSON input.
+ @param[in, out] result A reference to the string variable where the read
+ string is to be stored.
+ @return `true` if the \x00-byte indicating the end of the string was
+ encountered before the EOF; `false` indicates an unexpected EOF.
*/
- virtual bool end_object() = 0;
+ bool get_bson_cstr(string_t& result)
+ {
+ auto out = std::back_inserter(result);
+ while (true)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::bson, "cstring")))
+ {
+ return false;
+ }
+ if (current == 0x00)
+ {
+ return true;
+ }
+ *out++ = static_cast<typename string_t::value_type>(current);
+ }
+ }
/*!
- @brief the beginning of an array was read
- @param[in] elements number of array elements or -1 if unknown
- @return whether parsing should proceed
- @note binary formats may report the number of elements
+ @brief Parses a zero-terminated string of length @a len from the BSON
+ input.
+ @param[in] len The length (including the zero-byte at the end) of the
+ string to be read.
+ @param[in, out] result A reference to the string variable where the read
+ string is to be stored.
+ @tparam NumberType The type of the length @a len
+ @pre len >= 1
+ @return `true` if the string was successfully parsed
*/
- virtual bool start_array(std::size_t elements) = 0;
+ template<typename NumberType>
+ bool get_bson_string(const NumberType len, string_t& result)
+ {
+ if (JSON_HEDLEY_UNLIKELY(len < 1))
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "string length must be at least 1, is " + std::to_string(len), "string")));
+ }
+
+ return get_string(input_format_t::bson, len - static_cast<NumberType>(1), result) && get() != std::char_traits<char_type>::eof();
+ }
/*!
- @brief the end of an array was read
- @return whether parsing should proceed
+ @brief Parses a byte array input of length @a len from the BSON input.
+ @param[in] len The length of the byte array to be read.
+ @param[in, out] result A reference to the binary variable where the read
+ array is to be stored.
+ @tparam NumberType The type of the length @a len
+ @pre len >= 0
+ @return `true` if the byte array was successfully parsed
*/
- virtual bool end_array() = 0;
+ template<typename NumberType>
+ bool get_bson_binary(const NumberType len, binary_t& result)
+ {
+ if (JSON_HEDLEY_UNLIKELY(len < 0))
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "byte array length cannot be negative, is " + std::to_string(len), "binary")));
+ }
+
+ // All BSON binary values have a subtype
+ std::uint8_t subtype{};
+ get_number<std::uint8_t>(input_format_t::bson, subtype);
+ result.set_subtype(subtype);
+
+ return get_binary(input_format_t::bson, len, result);
+ }
/*!
- @brief a parse error occurred
- @param[in] position the position in the input where the error occurs
- @param[in] last_token the last read token
- @param[in] ex an exception object describing the error
- @return whether parsing should proceed (must return false)
+ @brief Read a BSON document element of the given @a element_type.
+ @param[in] element_type The BSON element type, cf. http://bsonspec.org/spec.html
+ @param[in] element_type_parse_position The position in the input stream,
+ where the `element_type` was read.
+ @warning Not all BSON element types are supported yet. An unsupported
+ @a element_type will give rise to a parse_error.114:
+ Unsupported BSON record type 0x...
+ @return whether a valid BSON-object/array was passed to the SAX parser
*/
- virtual bool parse_error(std::size_t position,
- const std::string& last_token,
- const detail::exception& ex) = 0;
+ bool parse_bson_element_internal(const char_int_type element_type,
+ const std::size_t element_type_parse_position)
+ {
+ switch (element_type)
+ {
+ case 0x01: // double
+ {
+ double number{};
+ return get_number<double, true>(input_format_t::bson, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
- virtual ~json_sax() = default;
-};
+ case 0x02: // string
+ {
+ std::int32_t len{};
+ string_t value;
+ return get_number<std::int32_t, true>(input_format_t::bson, len) && get_bson_string(len, value) && sax->string(value);
+ }
+ case 0x03: // object
+ {
+ return parse_bson_internal();
+ }
-namespace detail
-{
-/*!
-@brief SAX implementation to create a JSON value from SAX events
+ case 0x04: // array
+ {
+ return parse_bson_array();
+ }
-This class implements the @ref json_sax interface and processes the SAX events
-to create a JSON value which makes it basically a DOM parser. The structure or
-hierarchy of the JSON value is managed by the stack `ref_stack` which contains
-a pointer to the respective array or object for each recursion depth.
+ case 0x05: // binary
+ {
+ std::int32_t len{};
+ binary_t value;
+ return get_number<std::int32_t, true>(input_format_t::bson, len) && get_bson_binary(len, value) && sax->binary(value);
+ }
-After successful parsing, the value that is passed by reference to the
-constructor contains the parsed value.
+ case 0x08: // boolean
+ {
+ return sax->boolean(get() != 0);
+ }
-@tparam BasicJsonType the JSON type
-*/
-template<typename BasicJsonType>
-class json_sax_dom_parser
-{
- public:
- using number_integer_t = typename BasicJsonType::number_integer_t;
- using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
- using number_float_t = typename BasicJsonType::number_float_t;
- using string_t = typename BasicJsonType::string_t;
+ case 0x0A: // null
+ {
+ return sax->null();
+ }
- /*!
- @param[in, out] r reference to a JSON value that is manipulated while
- parsing
- @param[in] allow_exceptions_ whether parse errors yield exceptions
- */
- explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true)
- : root(r), allow_exceptions(allow_exceptions_)
- {}
+ case 0x10: // int32
+ {
+ std::int32_t value{};
+ return get_number<std::int32_t, true>(input_format_t::bson, value) && sax->number_integer(value);
+ }
- bool null()
- {
- handle_value(nullptr);
- return true;
- }
+ case 0x12: // int64
+ {
+ std::int64_t value{};
+ return get_number<std::int64_t, true>(input_format_t::bson, value) && sax->number_integer(value);
+ }
- bool boolean(bool val)
- {
- handle_value(val);
- return true;
+ default: // anything else not supported (yet)
+ {
+ std::array<char, 3> cr{{}};
+ (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast<unsigned char>(element_type));
+ return sax->parse_error(element_type_parse_position, std::string(cr.data()), parse_error::create(114, element_type_parse_position, "Unsupported BSON record type 0x" + std::string(cr.data())));
+ }
+ }
}
- bool number_integer(number_integer_t val)
- {
- handle_value(val);
- return true;
- }
+ /*!
+ @brief Read a BSON element list (as specified in the BSON-spec)
- bool number_unsigned(number_unsigned_t val)
- {
- handle_value(val);
- return true;
- }
+ The same binary layout is used for objects and arrays, hence the argument
+ @a is_array must indicate which one is expected
+ (true --> array, false --> object).
- bool number_float(number_float_t val, const string_t& /*unused*/)
+ @param[in] is_array Determines if the element list being read is to be
+ treated as an object (@a is_array == false), or as an
+ array (@a is_array == true).
+ @return whether a valid BSON-object/array was passed to the SAX parser
+ */
+ bool parse_bson_element_list(const bool is_array)
{
- handle_value(val);
- return true;
- }
+ string_t key;
+
+ while (auto element_type = get())
+ {
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::bson, "element list")))
+ {
+ return false;
+ }
+
+ const std::size_t element_type_parse_position = chars_read;
+ if (JSON_HEDLEY_UNLIKELY(!get_bson_cstr(key)))
+ {
+ return false;
+ }
+
+ if (!is_array && !sax->key(key))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_internal(element_type, element_type_parse_position)))
+ {
+ return false;
+ }
+
+ // get_bson_cstr only appends
+ key.clear();
+ }
- bool string(string_t& val)
- {
- handle_value(val);
return true;
}
- bool start_object(std::size_t len)
+ /*!
+ @brief Reads an array from the BSON input and passes it to the SAX-parser.
+ @return whether a valid BSON-array was passed to the SAX parser
+ */
+ bool parse_bson_array()
{
- ref_stack.push_back(handle_value(BasicJsonType::value_t::object));
+ std::int32_t document_size{};
+ get_number<std::int32_t, true>(input_format_t::bson, document_size);
- if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size()))
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1))))
{
- JSON_THROW(out_of_range::create(408,
- "excessive object size: " + std::to_string(len)));
+ return false;
}
- return true;
- }
+ if (JSON_HEDLEY_UNLIKELY(!parse_bson_element_list(/*is_array*/true)))
+ {
+ return false;
+ }
- bool key(string_t& val)
- {
- // add null at given key and store the reference for later
- object_element = &(ref_stack.back()->m_value.object->operator[](val));
- return true;
+ return sax->end_array();
}
- bool end_object()
+ //////////
+ // CBOR //
+ //////////
+
+ /*!
+ @param[in] get_char whether a new character should be retrieved from the
+ input (true) or whether the last read character should
+ be considered instead (false)
+ @param[in] tag_handler how CBOR tags should be treated
+
+ @return whether a valid CBOR value was passed to the SAX parser
+ */
+ bool parse_cbor_internal(const bool get_char,
+ const cbor_tag_handler_t tag_handler)
{
- ref_stack.pop_back();
- return true;
+ switch (get_char ? get() : current)
+ {
+ // EOF
+ case std::char_traits<char_type>::eof():
+ return unexpect_eof(input_format_t::cbor, "value");
+
+ // Integer 0x00..0x17 (0..23)
+ case 0x00:
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ case 0x04:
+ case 0x05:
+ case 0x06:
+ case 0x07:
+ case 0x08:
+ case 0x09:
+ case 0x0A:
+ case 0x0B:
+ case 0x0C:
+ case 0x0D:
+ case 0x0E:
+ case 0x0F:
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ case 0x13:
+ case 0x14:
+ case 0x15:
+ case 0x16:
+ case 0x17:
+ return sax->number_unsigned(static_cast<number_unsigned_t>(current));
+
+ case 0x18: // Unsigned integer (one-byte uint8_t follows)
+ {
+ std::uint8_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_unsigned(number);
+ }
+
+ case 0x19: // Unsigned integer (two-byte uint16_t follows)
+ {
+ std::uint16_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_unsigned(number);
+ }
+
+ case 0x1A: // Unsigned integer (four-byte uint32_t follows)
+ {
+ std::uint32_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_unsigned(number);
+ }
+
+ case 0x1B: // Unsigned integer (eight-byte uint64_t follows)
+ {
+ std::uint64_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_unsigned(number);
+ }
+
+ // Negative integer -1-0x00..-1-0x17 (-1..-24)
+ case 0x20:
+ case 0x21:
+ case 0x22:
+ case 0x23:
+ case 0x24:
+ case 0x25:
+ case 0x26:
+ case 0x27:
+ case 0x28:
+ case 0x29:
+ case 0x2A:
+ case 0x2B:
+ case 0x2C:
+ case 0x2D:
+ case 0x2E:
+ case 0x2F:
+ case 0x30:
+ case 0x31:
+ case 0x32:
+ case 0x33:
+ case 0x34:
+ case 0x35:
+ case 0x36:
+ case 0x37:
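+ // e.g. byte 0x25 yields 0x20 - 1 - 0x25 = -6 (CBOR major type 1, value 5)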
+ return sax->number_integer(static_cast<std::int8_t>(0x20 - 1 - current));
+
+ case 0x38: // Negative integer (one-byte uint8_t follows)
+ {
+ std::uint8_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast<number_integer_t>(-1) - number);
+ }
+
+ case 0x39: // Negative integer -1-n (two-byte uint16_t follows)
+ {
+ std::uint16_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast<number_integer_t>(-1) - number);
+ }
+
+ case 0x3A: // Negative integer -1-n (four-byte uint32_t follows)
+ {
+ std::uint32_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast<number_integer_t>(-1) - number);
+ }
+
+ case 0x3B: // Negative integer -1-n (eight-byte uint64_t follows)
+ {
+ std::uint64_t number{};
+ return get_number(input_format_t::cbor, number) && sax->number_integer(static_cast<number_integer_t>(-1)
+ - static_cast<number_integer_t>(number));
+ }
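+ // For illustration: CBOR major type 1 stores a non-negative n and
+ // denotes the value -1 - n, so 0x20 decodes to -1 and the two-byte
+ // item 0x38 0x63 (n = 0x63 = 99) decodes to -100. Sketch via the
+ // public entry point rather than this internal reader:
+ //
+ //   json j = json::from_cbor(std::vector<std::uint8_t>{0x38, 0x63});
+ //   assert(j == -100);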
+
+ // Binary data (0x00..0x17 bytes follow)
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4A:
+ case 0x4B:
+ case 0x4C:
+ case 0x4D:
+ case 0x4E:
+ case 0x4F:
+ case 0x50:
+ case 0x51:
+ case 0x52:
+ case 0x53:
+ case 0x54:
+ case 0x55:
+ case 0x56:
+ case 0x57:
+ case 0x58: // Binary data (one-byte uint8_t for n follows)
+ case 0x59: // Binary data (two-byte uint16_t for n follow)
+ case 0x5A: // Binary data (four-byte uint32_t for n follow)
+ case 0x5B: // Binary data (eight-byte uint64_t for n follow)
+ case 0x5F: // Binary data (indefinite length)
+ {
+ binary_t b;
+ return get_cbor_binary(b) && sax->binary(b);
+ }
+
+ // UTF-8 string (0x00..0x17 bytes follow)
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ case 0x63:
+ case 0x64:
+ case 0x65:
+ case 0x66:
+ case 0x67:
+ case 0x68:
+ case 0x69:
+ case 0x6A:
+ case 0x6B:
+ case 0x6C:
+ case 0x6D:
+ case 0x6E:
+ case 0x6F:
+ case 0x70:
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ case 0x78: // UTF-8 string (one-byte uint8_t for n follows)
+ case 0x79: // UTF-8 string (two-byte uint16_t for n follow)
+ case 0x7A: // UTF-8 string (four-byte uint32_t for n follow)
+ case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow)
+ case 0x7F: // UTF-8 string (indefinite length)
+ {
+ string_t s;
+ return get_cbor_string(s) && sax->string(s);
+ }
+
+ // array (0x00..0x17 data items follow)
+ case 0x80:
+ case 0x81:
+ case 0x82:
+ case 0x83:
+ case 0x84:
+ case 0x85:
+ case 0x86:
+ case 0x87:
+ case 0x88:
+ case 0x89:
+ case 0x8A:
+ case 0x8B:
+ case 0x8C:
+ case 0x8D:
+ case 0x8E:
+ case 0x8F:
+ case 0x90:
+ case 0x91:
+ case 0x92:
+ case 0x93:
+ case 0x94:
+ case 0x95:
+ case 0x96:
+ case 0x97:
+ return get_cbor_array(static_cast<std::size_t>(static_cast<unsigned int>(current) & 0x1Fu), tag_handler);
+
+ case 0x98: // array (one-byte uint8_t for n follows)
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0x99: // array (two-byte uint16_t for n follow)
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0x9A: // array (four-byte uint32_t for n follow)
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0x9B: // array (eight-byte uint64_t for n follow)
+ {
+ std::uint64_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_array(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0x9F: // array (indefinite length)
+ return get_cbor_array(std::size_t(-1), tag_handler);
+
+ // map (0x00..0x17 pairs of data items follow)
+ case 0xA0:
+ case 0xA1:
+ case 0xA2:
+ case 0xA3:
+ case 0xA4:
+ case 0xA5:
+ case 0xA6:
+ case 0xA7:
+ case 0xA8:
+ case 0xA9:
+ case 0xAA:
+ case 0xAB:
+ case 0xAC:
+ case 0xAD:
+ case 0xAE:
+ case 0xAF:
+ case 0xB0:
+ case 0xB1:
+ case 0xB2:
+ case 0xB3:
+ case 0xB4:
+ case 0xB5:
+ case 0xB6:
+ case 0xB7:
+ return get_cbor_object(static_cast<std::size_t>(static_cast<unsigned int>(current) & 0x1Fu), tag_handler);
+
+ case 0xB8: // map (one-byte uint8_t for n follows)
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0xB9: // map (two-byte uint16_t for n follow)
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0xBA: // map (four-byte uint32_t for n follow)
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0xBB: // map (eight-byte uint64_t for n follow)
+ {
+ std::uint64_t len{};
+ return get_number(input_format_t::cbor, len) && get_cbor_object(static_cast<std::size_t>(len), tag_handler);
+ }
+
+ case 0xBF: // map (indefinite length)
+ return get_cbor_object(std::size_t(-1), tag_handler);
+
+ case 0xC6: // tagged item
+ case 0xC7:
+ case 0xC8:
+ case 0xC9:
+ case 0xCA:
+ case 0xCB:
+ case 0xCC:
+ case 0xCD:
+ case 0xCE:
+ case 0xCF:
+ case 0xD0:
+ case 0xD1:
+ case 0xD2:
+ case 0xD3:
+ case 0xD4:
+ case 0xD8: // tagged item (1 byte follows)
+ case 0xD9: // tagged item (2 bytes follow)
+ case 0xDA: // tagged item (4 bytes follow)
+ case 0xDB: // tagged item (8 bytes follow)
+ {
+ switch (tag_handler)
+ {
+ case cbor_tag_handler_t::error:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value")));
+ }
+
+ case cbor_tag_handler_t::ignore:
+ {
+ switch (current)
+ {
+ case 0xD8:
+ {
+ std::uint8_t len{};
+ get_number(input_format_t::cbor, len);
+ break;
+ }
+ case 0xD9:
+ {
+ std::uint16_t len{};
+ get_number(input_format_t::cbor, len);
+ break;
+ }
+ case 0xDA:
+ {
+ std::uint32_t len{};
+ get_number(input_format_t::cbor, len);
+ break;
+ }
+ case 0xDB:
+ {
+ std::uint64_t len{};
+ get_number(input_format_t::cbor, len);
+ break;
+ }
+ default:
+ break;
+ }
+ return parse_cbor_internal(true, tag_handler);
+ }
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+ }
+
+ case 0xF4: // false
+ return sax->boolean(false);
+
+ case 0xF5: // true
+ return sax->boolean(true);
+
+ case 0xF6: // null
+ return sax->null();
+
+ case 0xF9: // Half-Precision Float (two-byte IEEE 754)
+ {
+ const auto byte1_raw = get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "number")))
+ {
+ return false;
+ }
+ const auto byte2_raw = get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "number")))
+ {
+ return false;
+ }
+
+ const auto byte1 = static_cast<unsigned char>(byte1_raw);
+ const auto byte2 = static_cast<unsigned char>(byte2_raw);
+
+ // code from RFC 7049, Appendix D, Figure 3:
+ // As half-precision floating-point numbers were only added
+ // to IEEE 754 in 2008, today's programming platforms often
+ // still only have limited support for them. It is very
+ // easy to include at least decoding support for them even
+ // without such support. An example of a small decoder for
+ // half-precision floating-point numbers in the C language
+ // is shown in Fig. 3.
+ const auto half = static_cast<unsigned int>((byte1 << 8u) + byte2);
+ const double val = [&half]
+ {
+ const int exp = (half >> 10u) & 0x1Fu;
+ const unsigned int mant = half & 0x3FFu;
+ JSON_ASSERT(0 <= exp && exp <= 32);
+ JSON_ASSERT(mant <= 1024);
+ switch (exp)
+ {
+ case 0:
+ return std::ldexp(mant, -24);
+ case 31:
+ return (mant == 0)
+ ? std::numeric_limits<double>::infinity()
+ : std::numeric_limits<double>::quiet_NaN();
+ default:
+ return std::ldexp(mant + 1024, exp - 25);
+ }
+ }();
+ return sax->number_float((half & 0x8000u) != 0
+ ? static_cast<number_float_t>(-val)
+ : static_cast<number_float_t>(val), "");
+ }
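+ // For illustration: the half 0x3C00 has exp = 15 and mant = 0, so the
+ // default branch above yields ldexp(1024, -10) = 1.0; the complete
+ // CBOR item 0xF9 0x3C 0x00 therefore decodes to 1.0 (sketch using the
+ // public entry point):
+ //
+ //   json j = json::from_cbor(std::vector<std::uint8_t>{0xF9, 0x3C, 0x00});
+ //   assert(j.get<double>() == 1.0);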
+
+ case 0xFA: // Single-Precision Float (four-byte IEEE 754)
+ {
+ float number{};
+ return get_number(input_format_t::cbor, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ case 0xFB: // Double-Precision Float (eight-byte IEEE 754)
+ {
+ double number{};
+ return get_number(input_format_t::cbor, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ default: // anything else (0xFF is handled inside the other types)
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value")));
+ }
+ }
}
- bool start_array(std::size_t len)
- {
- ref_stack.push_back(handle_value(BasicJsonType::value_t::array));
+ /*!
+ @brief reads a CBOR string
+
+ This function first reads starting bytes to determine the expected
+ string length and then copies this number of bytes into a string.
+ Additionally, CBOR's strings with indefinite lengths are supported.
- if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size()))
+ @param[out] result created string
+
+ @return whether string creation completed
+ */
+ bool get_cbor_string(string_t& result)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "string")))
{
- JSON_THROW(out_of_range::create(408,
- "excessive array size: " + std::to_string(len)));
+ return false;
}
- return true;
- }
+ switch (current)
+ {
+ // UTF-8 string (0x00..0x17 bytes follow)
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ case 0x63:
+ case 0x64:
+ case 0x65:
+ case 0x66:
+ case 0x67:
+ case 0x68:
+ case 0x69:
+ case 0x6A:
+ case 0x6B:
+ case 0x6C:
+ case 0x6D:
+ case 0x6E:
+ case 0x6F:
+ case 0x70:
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ {
+ return get_string(input_format_t::cbor, static_cast<unsigned int>(current) & 0x1Fu, result);
+ }
- bool end_array()
- {
- ref_stack.pop_back();
- return true;
+ case 0x78: // UTF-8 string (one-byte uint8_t for n follows)
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result);
+ }
+
+ case 0x79: // UTF-8 string (two-byte uint16_t for n follow)
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result);
+ }
+
+ case 0x7A: // UTF-8 string (four-byte uint32_t for n follow)
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result);
+ }
+
+ case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow)
+ {
+ std::uint64_t len{};
+ return get_number(input_format_t::cbor, len) && get_string(input_format_t::cbor, len, result);
+ }
+
+ case 0x7F: // UTF-8 string (indefinite length)
+ {
+ while (get() != 0xFF)
+ {
+ string_t chunk;
+ if (!get_cbor_string(chunk))
+ {
+ return false;
+ }
+ result.append(chunk);
+ }
+ return true;
+ }
+
+ default:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x60-0x7B) or indefinite string type (0x7F); last byte: 0x" + last_token, "string")));
+ }
+ }
}
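+ // For illustration: an indefinite-length string is a 0xFF-terminated
+ // sequence of definite-length chunks that are concatenated, so the
+ // bytes 0x7F 0x61 'a' 0x61 'b' 0xFF decode to "ab" (sketch):
+ //
+ //   json j = json::from_cbor(
+ //       std::vector<std::uint8_t>{0x7F, 0x61, 0x61, 0x61, 0x62, 0xFF});
+ //   assert(j == "ab");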
- bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
- const detail::exception& ex)
+ /*!
+ @brief reads a CBOR byte array
+
+ This function first reads starting bytes to determine the expected
+ byte array length and then copies this number of bytes into the byte array.
+ Additionally, CBOR's byte arrays with indefinite lengths are supported.
+
+ @param[out] result created byte array
+
+ @return whether byte array creation completed
+ */
+ bool get_cbor_binary(binary_t& result)
{
- errored = true;
- if (allow_exceptions)
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::cbor, "binary")))
{
- // determine the proper exception type from the id
- switch ((ex.id / 100) % 100)
+ return false;
+ }
+
+ switch (current)
+ {
+ // Binary data (0x00..0x17 bytes follow)
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4A:
+ case 0x4B:
+ case 0x4C:
+ case 0x4D:
+ case 0x4E:
+ case 0x4F:
+ case 0x50:
+ case 0x51:
+ case 0x52:
+ case 0x53:
+ case 0x54:
+ case 0x55:
+ case 0x56:
+ case 0x57:
{
- case 1:
- JSON_THROW(*reinterpret_cast<const detail::parse_error*>(&ex));
- case 4:
- JSON_THROW(*reinterpret_cast<const detail::out_of_range*>(&ex));
- // LCOV_EXCL_START
- case 2:
- JSON_THROW(*reinterpret_cast<const detail::invalid_iterator*>(&ex));
- case 3:
- JSON_THROW(*reinterpret_cast<const detail::type_error*>(&ex));
- case 5:
- JSON_THROW(*reinterpret_cast<const detail::other_error*>(&ex));
- default:
- assert(false);
- // LCOV_EXCL_STOP
+ return get_binary(input_format_t::cbor, static_cast<unsigned int>(current) & 0x1Fu, result);
+ }
+
+ case 0x58: // Binary data (one-byte uint8_t for n follows)
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::cbor, len) &&
+ get_binary(input_format_t::cbor, len, result);
+ }
+
+ case 0x59: // Binary data (two-byte uint16_t for n follow)
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::cbor, len) &&
+ get_binary(input_format_t::cbor, len, result);
+ }
+
+ case 0x5A: // Binary data (four-byte uint32_t for n follow)
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::cbor, len) &&
+ get_binary(input_format_t::cbor, len, result);
+ }
+
+ case 0x5B: // Binary data (eight-byte uint64_t for n follow)
+ {
+ std::uint64_t len{};
+ return get_number(input_format_t::cbor, len) &&
+ get_binary(input_format_t::cbor, len, result);
+ }
+
+ case 0x5F: // Binary data (indefinite length)
+ {
+ while (get() != 0xFF)
+ {
+ binary_t chunk;
+ if (!get_cbor_binary(chunk))
+ {
+ return false;
+ }
+ result.insert(result.end(), chunk.begin(), chunk.end());
+ }
+ return true;
+ }
+
+ default:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x40-0x5B) or indefinite binary array type (0x5F); last byte: 0x" + last_token, "binary")));
}
}
- return false;
}
- constexpr bool is_errored() const
+ /*!
+ @param[in] len the length of the array or std::size_t(-1) for an
+ array of indefinite size
+ @param[in] tag_handler how CBOR tags should be treated
+ @return whether array creation completed
+ */
+ bool get_cbor_array(const std::size_t len,
+ const cbor_tag_handler_t tag_handler)
{
- return errored;
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(len)))
+ {
+ return false;
+ }
+
+ if (len != std::size_t(-1))
+ {
+ for (std::size_t i = 0; i < len; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler)))
+ {
+ return false;
+ }
+ }
+ }
+ else
+ {
+ while (get() != 0xFF)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(false, tag_handler)))
+ {
+ return false;
+ }
+ }
+ }
+
+ return sax->end_array();
}
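+ // For illustration: the definite-length item 0x83 0x01 0x02 0x03 and
+ // the indefinite form 0x9F 0x01 0x02 0x03 0xFF both decode to the
+ // array [1, 2, 3]; only the latter takes the 0xFF-terminated loop
+ // above (sketch):
+ //
+ //   json j = json::from_cbor(std::vector<std::uint8_t>{0x83, 1, 2, 3});
+ //   assert(j == json({1, 2, 3}));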
- private:
/*!
- @invariant If the ref stack is empty, then the passed value will be the new
- root.
- @invariant If the ref stack contains a value, then it is an array or an
- object to which we can add elements
+ @param[in] len the length of the object or std::size_t(-1) for an
+ object of indefinite size
+ @param[in] tag_handler how CBOR tags should be treated
+ @return whether object creation completed
*/
- template<typename Value>
- BasicJsonType* handle_value(Value&& v)
+ bool get_cbor_object(const std::size_t len,
+ const cbor_tag_handler_t tag_handler)
{
- if (ref_stack.empty())
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(len)))
{
- root = BasicJsonType(std::forward<Value>(v));
- return &root;
+ return false;
}
- assert(ref_stack.back()->is_array() or ref_stack.back()->is_object());
-
- if (ref_stack.back()->is_array())
+ string_t key;
+ if (len != std::size_t(-1))
{
- ref_stack.back()->m_value.array->emplace_back(std::forward<Value>(v));
- return &(ref_stack.back()->m_value.array->back());
+ for (std::size_t i = 0; i < len; ++i)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!get_cbor_string(key) || !sax->key(key)))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler)))
+ {
+ return false;
+ }
+ key.clear();
+ }
}
else
{
- assert(object_element);
- *object_element = BasicJsonType(std::forward<Value>(v));
- return object_element;
+ while (get() != 0xFF)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!get_cbor_string(key) || !sax->key(key)))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_cbor_internal(true, tag_handler)))
+ {
+ return false;
+ }
+ key.clear();
+ }
}
- }
- /// the parsed JSON value
- BasicJsonType& root;
- /// stack to model hierarchy of values
- std::vector<BasicJsonType*> ref_stack;
- /// helper to hold the reference for the next object element
- BasicJsonType* object_element = nullptr;
- /// whether a syntax error occurred
- bool errored = false;
- /// whether to throw exceptions in case of errors
- const bool allow_exceptions = true;
-};
+ return sax->end_object();
+ }
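+ // For illustration: the map 0xA1 0x61 'a' 0x01 (one pair, key "a",
+ // value 1) decodes to {"a": 1} (sketch using the public entry point):
+ //
+ //   json j = json::from_cbor(std::vector<std::uint8_t>{0xA1, 0x61, 0x61, 0x01});
+ //   assert(j == json({{"a", 1}}));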
-template<typename BasicJsonType>
-class json_sax_dom_callback_parser
-{
- public:
- using number_integer_t = typename BasicJsonType::number_integer_t;
- using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
- using number_float_t = typename BasicJsonType::number_float_t;
- using string_t = typename BasicJsonType::string_t;
- using parser_callback_t = typename BasicJsonType::parser_callback_t;
- using parse_event_t = typename BasicJsonType::parse_event_t;
+ /////////////
+ // MsgPack //
+ /////////////
- json_sax_dom_callback_parser(BasicJsonType& r,
- const parser_callback_t cb,
- const bool allow_exceptions_ = true)
- : root(r), callback(cb), allow_exceptions(allow_exceptions_)
+ /*!
+ @return whether a valid MessagePack value was passed to the SAX parser
+ */
+ bool parse_msgpack_internal()
{
- keep_stack.push_back(true);
- }
+ switch (get())
+ {
+ // EOF
+ case std::char_traits<char_type>::eof():
+ return unexpect_eof(input_format_t::msgpack, "value");
- bool null()
- {
- handle_value(nullptr);
- return true;
- }
+ // positive fixint
+ case 0x00:
+ case 0x01:
+ case 0x02:
+ case 0x03:
+ case 0x04:
+ case 0x05:
+ case 0x06:
+ case 0x07:
+ case 0x08:
+ case 0x09:
+ case 0x0A:
+ case 0x0B:
+ case 0x0C:
+ case 0x0D:
+ case 0x0E:
+ case 0x0F:
+ case 0x10:
+ case 0x11:
+ case 0x12:
+ case 0x13:
+ case 0x14:
+ case 0x15:
+ case 0x16:
+ case 0x17:
+ case 0x18:
+ case 0x19:
+ case 0x1A:
+ case 0x1B:
+ case 0x1C:
+ case 0x1D:
+ case 0x1E:
+ case 0x1F:
+ case 0x20:
+ case 0x21:
+ case 0x22:
+ case 0x23:
+ case 0x24:
+ case 0x25:
+ case 0x26:
+ case 0x27:
+ case 0x28:
+ case 0x29:
+ case 0x2A:
+ case 0x2B:
+ case 0x2C:
+ case 0x2D:
+ case 0x2E:
+ case 0x2F:
+ case 0x30:
+ case 0x31:
+ case 0x32:
+ case 0x33:
+ case 0x34:
+ case 0x35:
+ case 0x36:
+ case 0x37:
+ case 0x38:
+ case 0x39:
+ case 0x3A:
+ case 0x3B:
+ case 0x3C:
+ case 0x3D:
+ case 0x3E:
+ case 0x3F:
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4A:
+ case 0x4B:
+ case 0x4C:
+ case 0x4D:
+ case 0x4E:
+ case 0x4F:
+ case 0x50:
+ case 0x51:
+ case 0x52:
+ case 0x53:
+ case 0x54:
+ case 0x55:
+ case 0x56:
+ case 0x57:
+ case 0x58:
+ case 0x59:
+ case 0x5A:
+ case 0x5B:
+ case 0x5C:
+ case 0x5D:
+ case 0x5E:
+ case 0x5F:
+ case 0x60:
+ case 0x61:
+ case 0x62:
+ case 0x63:
+ case 0x64:
+ case 0x65:
+ case 0x66:
+ case 0x67:
+ case 0x68:
+ case 0x69:
+ case 0x6A:
+ case 0x6B:
+ case 0x6C:
+ case 0x6D:
+ case 0x6E:
+ case 0x6F:
+ case 0x70:
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ case 0x78:
+ case 0x79:
+ case 0x7A:
+ case 0x7B:
+ case 0x7C:
+ case 0x7D:
+ case 0x7E:
+ case 0x7F:
+ return sax->number_unsigned(static_cast<number_unsigned_t>(current));
- bool boolean(bool val)
- {
- handle_value(val);
- return true;
- }
+ // fixmap
+ case 0x80:
+ case 0x81:
+ case 0x82:
+ case 0x83:
+ case 0x84:
+ case 0x85:
+ case 0x86:
+ case 0x87:
+ case 0x88:
+ case 0x89:
+ case 0x8A:
+ case 0x8B:
+ case 0x8C:
+ case 0x8D:
+ case 0x8E:
+ case 0x8F:
+ return get_msgpack_object(static_cast<std::size_t>(static_cast<unsigned int>(current) & 0x0Fu));
- bool number_integer(number_integer_t val)
- {
- handle_value(val);
- return true;
+ // fixarray
+ case 0x90:
+ case 0x91:
+ case 0x92:
+ case 0x93:
+ case 0x94:
+ case 0x95:
+ case 0x96:
+ case 0x97:
+ case 0x98:
+ case 0x99:
+ case 0x9A:
+ case 0x9B:
+ case 0x9C:
+ case 0x9D:
+ case 0x9E:
+ case 0x9F:
+ return get_msgpack_array(static_cast<std::size_t>(static_cast<unsigned int>(current) & 0x0Fu));
+
+ // fixstr
+ case 0xA0:
+ case 0xA1:
+ case 0xA2:
+ case 0xA3:
+ case 0xA4:
+ case 0xA5:
+ case 0xA6:
+ case 0xA7:
+ case 0xA8:
+ case 0xA9:
+ case 0xAA:
+ case 0xAB:
+ case 0xAC:
+ case 0xAD:
+ case 0xAE:
+ case 0xAF:
+ case 0xB0:
+ case 0xB1:
+ case 0xB2:
+ case 0xB3:
+ case 0xB4:
+ case 0xB5:
+ case 0xB6:
+ case 0xB7:
+ case 0xB8:
+ case 0xB9:
+ case 0xBA:
+ case 0xBB:
+ case 0xBC:
+ case 0xBD:
+ case 0xBE:
+ case 0xBF:
+ case 0xD9: // str 8
+ case 0xDA: // str 16
+ case 0xDB: // str 32
+ {
+ string_t s;
+ return get_msgpack_string(s) && sax->string(s);
+ }
+
+ case 0xC0: // nil
+ return sax->null();
+
+ case 0xC2: // false
+ return sax->boolean(false);
+
+ case 0xC3: // true
+ return sax->boolean(true);
+
+ case 0xC4: // bin 8
+ case 0xC5: // bin 16
+ case 0xC6: // bin 32
+ case 0xC7: // ext 8
+ case 0xC8: // ext 16
+ case 0xC9: // ext 32
+ case 0xD4: // fixext 1
+ case 0xD5: // fixext 2
+ case 0xD6: // fixext 4
+ case 0xD7: // fixext 8
+ case 0xD8: // fixext 16
+ {
+ binary_t b;
+ return get_msgpack_binary(b) && sax->binary(b);
+ }
+
+ case 0xCA: // float 32
+ {
+ float number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ case 0xCB: // float 64
+ {
+ double number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
+
+ case 0xCC: // uint 8
+ {
+ std::uint8_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number);
+ }
+
+ case 0xCD: // uint 16
+ {
+ std::uint16_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number);
+ }
+
+ case 0xCE: // uint 32
+ {
+ std::uint32_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number);
+ }
+
+ case 0xCF: // uint 64
+ {
+ std::uint64_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_unsigned(number);
+ }
+
+ case 0xD0: // int 8
+ {
+ std::int8_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_integer(number);
+ }
+
+ case 0xD1: // int 16
+ {
+ std::int16_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_integer(number);
+ }
+
+ case 0xD2: // int 32
+ {
+ std::int32_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_integer(number);
+ }
+
+ case 0xD3: // int 64
+ {
+ std::int64_t number{};
+ return get_number(input_format_t::msgpack, number) && sax->number_integer(number);
+ }
+
+ case 0xDC: // array 16
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::msgpack, len) && get_msgpack_array(static_cast<std::size_t>(len));
+ }
+
+ case 0xDD: // array 32
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::msgpack, len) && get_msgpack_array(static_cast<std::size_t>(len));
+ }
+
+ case 0xDE: // map 16
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::msgpack, len) && get_msgpack_object(static_cast<std::size_t>(len));
+ }
+
+ case 0xDF: // map 32
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::msgpack, len) && get_msgpack_object(static_cast<std::size_t>(len));
+ }
+
+ // negative fixint
+ case 0xE0:
+ case 0xE1:
+ case 0xE2:
+ case 0xE3:
+ case 0xE4:
+ case 0xE5:
+ case 0xE6:
+ case 0xE7:
+ case 0xE8:
+ case 0xE9:
+ case 0xEA:
+ case 0xEB:
+ case 0xEC:
+ case 0xED:
+ case 0xEE:
+ case 0xEF:
+ case 0xF0:
+ case 0xF1:
+ case 0xF2:
+ case 0xF3:
+ case 0xF4:
+ case 0xF5:
+ case 0xF6:
+ case 0xF7:
+ case 0xF8:
+ case 0xF9:
+ case 0xFA:
+ case 0xFB:
+ case 0xFC:
+ case 0xFD:
+ case 0xFE:
+ case 0xFF:
+ return sax->number_integer(static_cast<std::int8_t>(current));
+
+ default: // anything else
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::msgpack, "invalid byte: 0x" + last_token, "value")));
+ }
+ }
}
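+ // For illustration: the MessagePack bytes 0x81 (fixmap, one pair),
+ // 0xA1 0x61 (fixstr "a"), 0x01 (positive fixint 1) decode to {"a": 1}
+ // (sketch using the public entry point):
+ //
+ //   json j = json::from_msgpack(std::vector<std::uint8_t>{0x81, 0xA1, 0x61, 0x01});
+ //   assert(j == json({{"a", 1}}));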
- bool number_unsigned(number_unsigned_t val)
+ /*!
+ @brief reads a MessagePack string
+
+ This function first reads starting bytes to determine the expected
+ string length and then copies this number of bytes into a string.
+
+ @param[out] result created string
+
+ @return whether string creation completed
+ */
+ bool get_msgpack_string(string_t& result)
{
- handle_value(val);
- return true;
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::msgpack, "string")))
+ {
+ return false;
+ }
+
+ switch (current)
+ {
+ // fixstr
+ case 0xA0:
+ case 0xA1:
+ case 0xA2:
+ case 0xA3:
+ case 0xA4:
+ case 0xA5:
+ case 0xA6:
+ case 0xA7:
+ case 0xA8:
+ case 0xA9:
+ case 0xAA:
+ case 0xAB:
+ case 0xAC:
+ case 0xAD:
+ case 0xAE:
+ case 0xAF:
+ case 0xB0:
+ case 0xB1:
+ case 0xB2:
+ case 0xB3:
+ case 0xB4:
+ case 0xB5:
+ case 0xB6:
+ case 0xB7:
+ case 0xB8:
+ case 0xB9:
+ case 0xBA:
+ case 0xBB:
+ case 0xBC:
+ case 0xBD:
+ case 0xBE:
+ case 0xBF:
+ {
+ return get_string(input_format_t::msgpack, static_cast<unsigned int>(current) & 0x1Fu, result);
+ }
+
+ case 0xD9: // str 8
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result);
+ }
+
+ case 0xDA: // str 16
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result);
+ }
+
+ case 0xDB: // str 32
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::msgpack, len) && get_string(input_format_t::msgpack, len, result);
+ }
+
+ default:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::msgpack, "expected length specification (0xA0-0xBF, 0xD9-0xDB); last byte: 0x" + last_token, "string")));
+ }
+ }
}
- bool number_float(number_float_t val, const string_t& /*unused*/)
+ /*!
+ @brief reads a MessagePack byte array
+
+ This function first reads starting bytes to determine the expected
+ byte array length and then copies this number of bytes into a byte array.
+
+ @param[out] result created byte array
+
+ @return whether byte array creation completed
+ */
+ bool get_msgpack_binary(binary_t& result)
{
- handle_value(val);
- return true;
+ // helper function to set the subtype
+ auto assign_and_return_true = [&result](std::int8_t subtype)
+ {
+ result.set_subtype(static_cast<std::uint8_t>(subtype));
+ return true;
+ };
+
+ switch (current)
+ {
+ case 0xC4: // bin 8
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_binary(input_format_t::msgpack, len, result);
+ }
+
+ case 0xC5: // bin 16
+ {
+ std::uint16_t len{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_binary(input_format_t::msgpack, len, result);
+ }
+
+ case 0xC6: // bin 32
+ {
+ std::uint32_t len{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_binary(input_format_t::msgpack, len, result);
+ }
+
+ case 0xC7: // ext 8
+ {
+ std::uint8_t len{};
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, len, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xC8: // ext 16
+ {
+ std::uint16_t len{};
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, len, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xC9: // ext 32
+ {
+ std::uint32_t len{};
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, len) &&
+ get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, len, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xD4: // fixext 1
+ {
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, 1, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xD5: // fixext 2
+ {
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, 2, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xD6: // fixext 4
+ {
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, 4, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xD7: // fixext 8
+ {
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, 8, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ case 0xD8: // fixext 16
+ {
+ std::int8_t subtype{};
+ return get_number(input_format_t::msgpack, subtype) &&
+ get_binary(input_format_t::msgpack, 16, result) &&
+ assign_and_return_true(subtype);
+ }
+
+ default: // LCOV_EXCL_LINE
+ return false; // LCOV_EXCL_LINE
+ }
}
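+ // For illustration: a fixext 1 item carries a subtype byte followed by
+ // one data byte, so 0xD4 0x2A 0xFF decodes to a one-byte binary value
+ // with subtype 42 (sketch; binary_t exposes the subtype):
+ //
+ //   json j = json::from_msgpack(std::vector<std::uint8_t>{0xD4, 0x2A, 0xFF});
+ //   assert(j.is_binary() && j.get_binary().subtype() == 42);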
- bool string(string_t& val)
+ /*!
+ @param[in] len the length of the array
+ @return whether array creation completed
+ */
+ bool get_msgpack_array(const std::size_t len)
{
- handle_value(val);
- return true;
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(len)))
+ {
+ return false;
+ }
+
+ for (std::size_t i = 0; i < len; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!parse_msgpack_internal()))
+ {
+ return false;
+ }
+ }
+
+ return sax->end_array();
}
- bool start_object(std::size_t len)
+ /*!
+ @param[in] len the length of the object
+ @return whether object creation completed
+ */
+ bool get_msgpack_object(const std::size_t len)
{
- // check callback for object start
- const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::object_start, discarded);
- keep_stack.push_back(keep);
-
- auto val = handle_value(BasicJsonType::value_t::object, true);
- ref_stack.push_back(val.second);
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(len)))
+ {
+ return false;
+ }
- // check object limit
- if (ref_stack.back())
+ string_t key;
+ for (std::size_t i = 0; i < len; ++i)
{
- if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size()))
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!get_msgpack_string(key) || !sax->key(key)))
+ {
+ return false;
+ }
+
+ if (JSON_HEDLEY_UNLIKELY(!parse_msgpack_internal()))
{
- JSON_THROW(out_of_range::create(408,
- "excessive object size: " + std::to_string(len)));
+ return false;
}
+ key.clear();
}
- return true;
+ return sax->end_object();
}
- bool key(string_t& val)
+ ////////////
+ // UBJSON //
+ ////////////
+
+ /*!
+ @param[in] get_char whether a new character should be retrieved from the
+ input (true, default) or whether the last read
+ character should be considered instead
+
+ @return whether a valid UBJSON value was passed to the SAX parser
+ */
+ bool parse_ubjson_internal(const bool get_char = true)
{
- BasicJsonType k = BasicJsonType(val);
+ return get_ubjson_value(get_char ? get_ignore_noop() : current);
+ }
- // check callback for key
- const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::key, k);
- key_keep_stack.push_back(keep);
+ /*!
+ @brief reads a UBJSON string
- // add discarded value at given key and store the reference for later
- if (keep and ref_stack.back())
+ This function is either called after reading the 'S' byte explicitly
+ indicating a string, or in case of an object key where the 'S' byte can be
+ left out.
+
+ @param[out] result created string
+ @param[in] get_char whether a new character should be retrieved from the
+ input (true, default) or whether the last read
+ character should be considered instead
+
+ @return whether string creation completed
+ */
+ bool get_ubjson_string(string_t& result, const bool get_char = true)
+ {
+ if (get_char)
{
- object_element = &(ref_stack.back()->m_value.object->operator[](val) = discarded);
+ get(); // TODO(niels): may we ignore N here?
}
- return true;
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "value")))
+ {
+ return false;
+ }
+
+ switch (current)
+ {
+ case 'U':
+ {
+ std::uint8_t len{};
+ return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result);
+ }
+
+ case 'i':
+ {
+ std::int8_t len{};
+ return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result);
+ }
+
+ case 'I':
+ {
+ std::int16_t len{};
+ return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result);
+ }
+
+ case 'l':
+ {
+ std::int32_t len{};
+ return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result);
+ }
+
+ case 'L':
+ {
+ std::int64_t len{};
+ return get_number(input_format_t::ubjson, len) && get_string(input_format_t::ubjson, len, result);
+ }
+
+ default:
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L); last byte: 0x" + last_token, "string")));
+ }
}
- bool end_object()
+ /*!
+ @param[out] result determined size
+ @return whether size determination completed
+ */
+ bool get_ubjson_size_value(std::size_t& result)
{
- if (ref_stack.back())
+ switch (get_ignore_noop())
{
- if (not callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back()))
+ case 'U':
{
- // discard object
- *ref_stack.back() = discarded;
+ std::uint8_t number{};
+ if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number)))
+ {
+ return false;
+ }
+ result = static_cast<std::size_t>(number);
+ return true;
}
- }
- assert(not ref_stack.empty());
- assert(not keep_stack.empty());
- ref_stack.pop_back();
- keep_stack.pop_back();
+ case 'i':
+ {
+ std::int8_t number{};
+ if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number)))
+ {
+ return false;
+ }
+ result = static_cast<std::size_t>(number);
+ return true;
+ }
- if (not ref_stack.empty() and ref_stack.back())
- {
- // remove discarded value
- if (ref_stack.back()->is_object())
+ case 'I':
{
- for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it)
+ std::int16_t number{};
+ if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number)))
{
- if (it->is_discarded())
- {
- ref_stack.back()->erase(it);
- break;
- }
+ return false;
}
+ result = static_cast<std::size_t>(number);
+ return true;
}
- }
- return true;
+ case 'l':
+ {
+ std::int32_t number{};
+ if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number)))
+ {
+ return false;
+ }
+ result = static_cast<std::size_t>(number);
+ return true;
+ }
+
+ case 'L':
+ {
+ std::int64_t number{};
+ if (JSON_HEDLEY_UNLIKELY(!get_number(input_format_t::ubjson, number)))
+ {
+ return false;
+ }
+ result = static_cast<std::size_t>(number);
+ return true;
+ }
+
+ default:
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L) after '#'; last byte: 0x" + last_token, "size")));
+ }
+ }
}
- bool start_array(std::size_t len)
+ /*!
+ @brief determine the type and size for a container
+
+ In the optimized UBJSON format, a type and a size can be provided to allow
+ for a more compact representation.
+
+ @param[out] result pair of the size and the type
+
+ @return whether pair creation completed
+ */
+ bool get_ubjson_size_type(std::pair<std::size_t, char_int_type>& result)
{
- const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::array_start, discarded);
- keep_stack.push_back(keep);
+ result.first = string_t::npos; // size
+ result.second = 0; // type
- auto val = handle_value(BasicJsonType::value_t::array, true);
- ref_stack.push_back(val.second);
+ get_ignore_noop();
- // check array limit
- if (ref_stack.back())
+ if (current == '$')
{
- if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size()))
+ result.second = get(); // must not ignore 'N', because 'N' may be the type
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "type")))
{
- JSON_THROW(out_of_range::create(408,
- "excessive array size: " + std::to_string(len)));
+ return false;
}
+
+ get_ignore_noop();
+ if (JSON_HEDLEY_UNLIKELY(current != '#'))
+ {
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "value")))
+ {
+ return false;
+ }
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "expected '#' after type information; last byte: 0x" + last_token, "size")));
+ }
+
+ return get_ubjson_size_value(result.first);
+ }
+
+ if (current == '#')
+ {
+ return get_ubjson_size_value(result.first);
}
return true;
}
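+ // For illustration: in the optimized format the array
+ // '[' '$' 'i' '#' 'i' 0x03 0x01 0x02 0x03 declares type int8 and size 3
+ // up front, so the three elements follow without per-element markers
+ // (sketch using the public entry point):
+ //
+ //   json j = json::from_ubjson(std::vector<std::uint8_t>{
+ //       '[', '$', 'i', '#', 'i', 3, 1, 2, 3});
+ //   assert(j == json({1, 2, 3}));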
- bool end_array()
+ /*!
+ @param prefix the previously read or set type prefix
+ @return whether value creation completed
+ */
+ bool get_ubjson_value(const char_int_type prefix)
{
- bool keep = true;
-
- if (ref_stack.back())
+ switch (prefix)
{
- keep = callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back());
- if (not keep)
+ case std::char_traits<char_type>::eof(): // EOF
+ return unexpect_eof(input_format_t::ubjson, "value");
+
+ case 'T': // true
+ return sax->boolean(true);
+ case 'F': // false
+ return sax->boolean(false);
+
+ case 'Z': // null
+ return sax->null();
+
+ case 'U':
{
- // discard array
- *ref_stack.back() = discarded;
+ std::uint8_t number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_unsigned(number);
}
- }
- assert(not ref_stack.empty());
- assert(not keep_stack.empty());
- ref_stack.pop_back();
- keep_stack.pop_back();
+ case 'i':
+ {
+ std::int8_t number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_integer(number);
+ }
- // remove discarded value
- if (not keep and not ref_stack.empty())
- {
- if (ref_stack.back()->is_array())
+ case 'I':
{
- ref_stack.back()->m_value.array->pop_back();
+ std::int16_t number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_integer(number);
}
- }
- return true;
- }
+ case 'l':
+ {
+ std::int32_t number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_integer(number);
+ }
- bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
- const detail::exception& ex)
- {
- errored = true;
- if (allow_exceptions)
- {
- // determine the proper exception type from the id
- switch ((ex.id / 100) % 100)
+ case 'L':
{
- case 1:
- JSON_THROW(*reinterpret_cast<const detail::parse_error*>(&ex));
- case 4:
- JSON_THROW(*reinterpret_cast<const detail::out_of_range*>(&ex));
- // LCOV_EXCL_START
- case 2:
- JSON_THROW(*reinterpret_cast<const detail::invalid_iterator*>(&ex));
- case 3:
- JSON_THROW(*reinterpret_cast<const detail::type_error*>(&ex));
- case 5:
- JSON_THROW(*reinterpret_cast<const detail::other_error*>(&ex));
- default:
- assert(false);
- // LCOV_EXCL_STOP
+ std::int64_t number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_integer(number);
}
- }
- return false;
- }
- constexpr bool is_errored() const
- {
- return errored;
- }
+ case 'd':
+ {
+ float number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
- private:
- /*!
- @param[in] v value to add to the JSON value we build during parsing
- @param[in] skip_callback whether we should skip calling the callback
- function; this is required after start_array() and
- start_object() SAX events, because otherwise we would call the
- callback function with an empty array or object, respectively.
+ case 'D':
+ {
+ double number{};
+ return get_number(input_format_t::ubjson, number) && sax->number_float(static_cast<number_float_t>(number), "");
+ }
- @invariant If the ref stack is empty, then the passed value will be the new
- root.
- @invariant If the ref stack contains a value, then it is an array or an
- object to which we can add elements
+ case 'H':
+ {
+ return get_ubjson_high_precision_number();
+ }
- @return pair of boolean (whether value should be kept) and pointer (to the
- passed value in the ref_stack hierarchy; nullptr if not kept)
+ case 'C': // char
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "char")))
+ {
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(current > 127))
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "byte after 'C' must be in range 0x00..0x7F; last byte: 0x" + last_token, "char")));
+ }
+ string_t s(1, static_cast<typename string_t::value_type>(current));
+ return sax->string(s);
+ }
+
+ case 'S': // string
+ {
+ string_t s;
+ return get_ubjson_string(s) && sax->string(s);
+ }
+
+ case '[': // array
+ return get_ubjson_array();
+
+ case '{': // object
+ return get_ubjson_object();
+
+ default: // anything else
+ {
+ auto last_token = get_token_string();
+ return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "invalid byte: 0x" + last_token, "value")));
+ }
+ }
+ }
+
+ /*!
+ @return whether array creation completed
*/
- template<typename Value>
- std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool skip_callback = false)
+ bool get_ubjson_array()
{
- assert(not keep_stack.empty());
+ std::pair<std::size_t, char_int_type> size_and_type;
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_size_type(size_and_type)))
+ {
+ return false;
+ }
- // do not handle this value if we know it would be added to a discarded
- // container
- if (not keep_stack.back())
+ if (size_and_type.first != string_t::npos)
{
- return {false, nullptr};
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(size_and_type.first)))
+ {
+ return false;
+ }
+
+ if (size_and_type.second != 0)
+ {
+ if (size_and_type.second != 'N')
+ {
+ for (std::size_t i = 0; i < size_and_type.first; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_value(size_and_type.second)))
+ {
+ return false;
+ }
+ }
+ }
+ }
+ else
+ {
+ for (std::size_t i = 0; i < size_and_type.first; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal()))
+ {
+ return false;
+ }
+ }
+ }
}
+ else
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1))))
+ {
+ return false;
+ }
- // create value
- auto value = BasicJsonType(std::forward<Value>(v));
+ while (current != ']')
+ {
+ if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal(false)))
+ {
+ return false;
+ }
+ get_ignore_noop();
+ }
+ }
- // check callback
- const bool keep = skip_callback or callback(static_cast<int>(ref_stack.size()), parse_event_t::value, value);
+ return sax->end_array();
+ }
- // do not handle this value if we just learnt it shall be discarded
- if (not keep)
+ /*!
+ @return whether object creation completed
+ */
+ bool get_ubjson_object()
+ {
+ std::pair<std::size_t, char_int_type> size_and_type;
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_size_type(size_and_type)))
{
- return {false, nullptr};
+ return false;
}
- if (ref_stack.empty())
+ string_t key;
+ if (size_and_type.first != string_t::npos)
{
- root = std::move(value);
- return {true, &root};
- }
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(size_and_type.first)))
+ {
+ return false;
+ }
- // skip this value if we already decided to skip the parent
- // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
- if (not ref_stack.back())
+ if (size_and_type.second != 0)
+ {
+ for (std::size_t i = 0; i < size_and_type.first; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key) || !sax->key(key)))
+ {
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_value(size_and_type.second)))
+ {
+ return false;
+ }
+ key.clear();
+ }
+ }
+ else
+ {
+ for (std::size_t i = 0; i < size_and_type.first; ++i)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key) || !sax->key(key)))
+ {
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal()))
+ {
+ return false;
+ }
+ key.clear();
+ }
+ }
+ }
+ else
{
- return {false, nullptr};
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1))))
+ {
+ return false;
+ }
+
+ while (current != '}')
+ {
+ if (JSON_HEDLEY_UNLIKELY(!get_ubjson_string(key, false) || !sax->key(key)))
+ {
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(!parse_ubjson_internal()))
+ {
+ return false;
+ }
+ get_ignore_noop();
+ key.clear();
+ }
}
- // we now only expect arrays and objects
- assert(ref_stack.back()->is_array() or ref_stack.back()->is_object());
+ return sax->end_object();
+ }
- if (ref_stack.back()->is_array())
+ // Note, no reader for UBJSON binary types is implemented because they do
+ // not exist
+
+ bool get_ubjson_high_precision_number()
+ {
+ // get size of following number string
+ std::size_t size{};
+ auto res = get_ubjson_size_value(size);
+ if (JSON_HEDLEY_UNLIKELY(!res))
{
- ref_stack.back()->m_value.array->push_back(std::move(value));
- return {true, &(ref_stack.back()->m_value.array->back())};
+ return res;
}
- else
- {
- // check if we should store an element for the current key
- assert(not key_keep_stack.empty());
- const bool store_element = key_keep_stack.back();
- key_keep_stack.pop_back();
- if (not store_element)
+ // get number string
+ std::vector<char> number_vector;
+ for (std::size_t i = 0; i < size; ++i)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(input_format_t::ubjson, "number")))
{
- return {false, nullptr};
+ return false;
}
+ number_vector.push_back(static_cast<char>(current));
+ }
+
+ // parse number string
+ auto number_ia = detail::input_adapter(std::forward<decltype(number_vector)>(number_vector));
+ auto number_lexer = detail::lexer<BasicJsonType, decltype(number_ia)>(std::move(number_ia), false);
+ const auto result_number = number_lexer.scan();
+ const auto number_string = number_lexer.get_token_string();
+ const auto result_remainder = number_lexer.scan();
- assert(object_element);
- *object_element = std::move(value);
- return {true, object_element};
+ using token_type = typename detail::lexer_base<BasicJsonType>::token_type;
+
+ if (JSON_HEDLEY_UNLIKELY(result_remainder != token_type::end_of_input))
+ {
+ return sax->parse_error(chars_read, number_string, parse_error::create(115, chars_read, exception_message(input_format_t::ubjson, "invalid number text: " + number_lexer.get_token_string(), "high-precision number")));
+ }
+
+ switch (result_number)
+ {
+ case token_type::value_integer:
+ return sax->number_integer(number_lexer.get_number_integer());
+ case token_type::value_unsigned:
+ return sax->number_unsigned(number_lexer.get_number_unsigned());
+ case token_type::value_float:
+ return sax->number_float(number_lexer.get_number_float(), std::move(number_string));
+ default:
+ return sax->parse_error(chars_read, number_string, parse_error::create(115, chars_read, exception_message(input_format_t::ubjson, "invalid number text: " + number_lexer.get_token_string(), "high-precision number")));
}
}
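+ // For illustration: a high-precision number is an 'H' marker followed
+ // by a length-prefixed JSON number literal that is re-tokenized by the
+ // lexer above, so 'H' 'i' 0x04 "3.14" decodes to the double 3.14
+ // (sketch):
+ //
+ //   json j = json::from_ubjson(std::vector<std::uint8_t>{
+ //       'H', 'i', 4, '3', '.', '1', '4'});
+ //   assert(j.get<double>() == 3.14);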
- /// the parsed JSON value
- BasicJsonType& root;
- /// stack to model hierarchy of values
- std::vector<BasicJsonType*> ref_stack;
- /// stack to manage which values to keep
- std::vector<bool> keep_stack;
- /// stack to manage which object keys to keep
- std::vector<bool> key_keep_stack;
- /// helper to hold the reference for the next object element
- BasicJsonType* object_element = nullptr;
- /// whether a syntax error occurred
- bool errored = false;
- /// callback function
- const parser_callback_t callback = nullptr;
- /// whether to throw exceptions in case of errors
- const bool allow_exceptions = true;
- /// a discarded value for the callback
- BasicJsonType discarded = BasicJsonType::value_t::discarded;
-};
+ ///////////////////////
+ // Utility functions //
+ ///////////////////////
-template<typename BasicJsonType>
-class json_sax_acceptor
-{
- public:
- using number_integer_t = typename BasicJsonType::number_integer_t;
- using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
- using number_float_t = typename BasicJsonType::number_float_t;
- using string_t = typename BasicJsonType::string_t;
+ /*!
+ @brief get next character from the input
- bool null()
- {
- return true;
- }
+ This function provides the interface to the used input adapter. It does
+ not throw when the input reaches EOF, but returns a negative
+ `std::char_traits<char_type>::eof()` value in that case.
- bool boolean(bool /*unused*/)
+ @return character read from the input
+ */
+ char_int_type get()
{
- return true;
+ ++chars_read;
+ return current = ia.get_character();
}
- bool number_integer(number_integer_t /*unused*/)
+ /*!
+ @return character read from the input after ignoring all 'N' entries
+ */
+ char_int_type get_ignore_noop()
{
- return true;
- }
+ do
+ {
+ get();
+ }
+ while (current == 'N');
- bool number_unsigned(number_unsigned_t /*unused*/)
- {
- return true;
+ return current;
}
- bool number_float(number_float_t /*unused*/, const string_t& /*unused*/)
- {
- return true;
- }
+ /*!
+ @brief read a number from the input
- bool string(string_t& /*unused*/)
- {
- return true;
- }
+ @tparam NumberType the type of the number
+ @param[in] format the current format (for diagnostics)
+ @param[out] result number of type @a NumberType
- bool start_object(std::size_t /*unused*/ = std::size_t(-1))
+ @return whether conversion completed
+
+ @note This function needs to respect the system's endianness, because
+ bytes in CBOR, MessagePack, and UBJSON are stored in network order
+ (big endian) and therefore need reordering on little endian systems.
+ */
+ template<typename NumberType, bool InputIsLittleEndian = false>
+ bool get_number(const input_format_t format, NumberType& result)
{
+ // step 1: read input into array with system's byte order
+ std::array<std::uint8_t, sizeof(NumberType)> vec;
+ for (std::size_t i = 0; i < sizeof(NumberType); ++i)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "number")))
+ {
+ return false;
+ }
+
+ // reverse byte order prior to conversion if necessary
+ if (is_little_endian != InputIsLittleEndian)
+ {
+ vec[sizeof(NumberType) - i - 1] = static_cast<std::uint8_t>(current);
+ }
+ else
+ {
+ vec[i] = static_cast<std::uint8_t>(current); // LCOV_EXCL_LINE
+ }
+ }
+
+ // step 2: convert array into number of type T and return
+ std::memcpy(&result, vec.data(), sizeof(NumberType));
return true;
}
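+ // For illustration: CBOR, MessagePack, and UBJSON store multi-byte
+ // numbers in network (big-endian) order, while BSON is little-endian,
+ // hence the InputIsLittleEndian parameter. The CBOR item 0x19 0x01 0x02
+ // (uint16 follows) decodes to 0x0102 = 258 regardless of the host's
+ // endianness (sketch):
+ //
+ //   json j = json::from_cbor(std::vector<std::uint8_t>{0x19, 0x01, 0x02});
+ //   assert(j == 258);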
- bool key(string_t& /*unused*/)
+ /*!
+ @brief create a string by reading characters from the input
+
+ @tparam NumberType the type of the length @a len
+ @param[in] format the current format (for diagnostics)
+ @param[in] len number of characters to read
+ @param[out] result string created by reading @a len bytes
+
+ @return whether string creation completed
+
+ @note We cannot reserve @a len bytes for the result, because @a len
+ may be too large. Usually, @ref unexpect_eof() detects the end of
+ the input before we run out of string memory.
+ */
+ template<typename NumberType>
+ bool get_string(const input_format_t format,
+ const NumberType len,
+ string_t& result)
{
- return true;
+ bool success = true;
+ for (NumberType i = 0; i < len; i++)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "string")))
+ {
+ success = false;
+ break;
+ }
+ result.push_back(static_cast<typename string_t::value_type>(current));
+ };
+ return success;
}
- bool end_object()
+ /*!
+ @brief create a byte array by reading bytes from the input
+
+ @tparam NumberType the type of the length @a len
+ @param[in] format the current format (for diagnostics)
+ @param[in] len number of bytes to read
+ @param[out] result byte array created by reading @a len bytes
+
+ @return whether byte array creation completed
+
+ @note We cannot reserve @a len bytes for the result, because @a len
+ may be too large. Usually, @ref unexpect_eof() detects the end of
+ the input before we run out of memory.
+ */
+ template<typename NumberType>
+ bool get_binary(const input_format_t format,
+ const NumberType len,
+ binary_t& result)
{
- return true;
+ bool success = true;
+ for (NumberType i = 0; i < len; i++)
+ {
+ get();
+ if (JSON_HEDLEY_UNLIKELY(!unexpect_eof(format, "binary")))
+ {
+ success = false;
+ break;
+ }
+ result.push_back(static_cast<std::uint8_t>(current));
+ }
+ return success;
}
- bool start_array(std::size_t /*unused*/ = std::size_t(-1))
+ /*!
+ @param[in] format the current format (for diagnostics)
+ @param[in] context further context information (for diagnostics)
+ @return whether the last read character is not EOF
+ */
+ JSON_HEDLEY_NON_NULL(3)
+ bool unexpect_eof(const input_format_t format, const char* context) const
{
+ if (JSON_HEDLEY_UNLIKELY(current == std::char_traits<char_type>::eof()))
+ {
+ return sax->parse_error(chars_read, "<end of file>",
+ parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context)));
+ }
return true;
}
- bool end_array()
+ /*!
+ @return a string representation of the last read byte
+ */
+ std::string get_token_string() const
{
- return true;
+ std::array<char, 3> cr{{}};
+ (std::snprintf)(cr.data(), cr.size(), "%.2hhX", static_cast<unsigned char>(current));
+ return std::string{cr.data()};
}
- bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/)
+ /*!
+ @param[in] format the current format
+ @param[in] detail a detailed error message
+ @param[in] context further context information
+ @return a message string to use in the parse_error exceptions
+ */
+ std::string exception_message(const input_format_t format,
+ const std::string& detail,
+ const std::string& context) const
{
- return false;
+ std::string error_msg = "syntax error while parsing ";
+
+ switch (format)
+ {
+ case input_format_t::cbor:
+ error_msg += "CBOR";
+ break;
+
+ case input_format_t::msgpack:
+ error_msg += "MessagePack";
+ break;
+
+ case input_format_t::ubjson:
+ error_msg += "UBJSON";
+ break;
+
+ case input_format_t::bson:
+ error_msg += "BSON";
+ break;
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+
+ return error_msg + " " + context + ": " + detail;
}
+
+ private:
+ /// input adapter
+ InputAdapterType ia;
+
+ /// the current character
+ char_int_type current = std::char_traits<char_type>::eof();
+
+ /// the number of characters read
+ std::size_t chars_read = 0;
+
+ /// whether we can assume little endianness
+ const bool is_little_endian = little_endianess();
+
+ /// the SAX parser
+ json_sax_t* sax = nullptr;
};
} // namespace detail
-
} // namespace nlohmann
+// #include <nlohmann/detail/input/input_adapters.hpp>
+
+// #include <nlohmann/detail/input/lexer.hpp>
+
+// #include <nlohmann/detail/input/parser.hpp>
+
+
+#include <cmath> // isfinite
+#include <cstdint> // uint8_t
+#include <functional> // function
+#include <string> // string
+#include <utility> // move
+#include <vector> // vector
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/input/input_adapters.hpp>
+
+// #include <nlohmann/detail/input/json_sax.hpp>
+
// #include <nlohmann/detail/input/lexer.hpp>
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/is_sax.hpp>
+
// #include <nlohmann/detail/value_t.hpp>
@@ -4951,46 +10134,50 @@ namespace detail
// parser //
////////////
+enum class parse_event_t : uint8_t
+{
+ /// the parser read `{` and started to process a JSON object
+ object_start,
+ /// the parser read `}` and finished processing a JSON object
+ object_end,
+ /// the parser read `[` and started to process a JSON array
+ array_start,
+ /// the parser read `]` and finished processing a JSON array
+ array_end,
+ /// the parser read a key of a value in an object
+ key,
+ /// the parser finished reading a JSON value
+ value
+};
+
+template<typename BasicJsonType>
+using parser_callback_t =
+ std::function<bool(int depth, parse_event_t event, BasicJsonType& parsed)>;
+
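+// For illustration: a callback of this shape can filter the DOM while it
+// is being built; returning false for a key event discards that key's
+// value. A minimal sketch using the public json::parse overload that
+// accepts a callback:
+//
+//   auto cb = [](int /*depth*/, json::parse_event_t event, json& parsed)
+//   {
+//       return !(event == json::parse_event_t::key && parsed == "debug");
+//   };
+//   json j = json::parse(R"({"a": 1, "debug": true})", cb);
+//   // j == {"a": 1}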
/*!
@brief syntax analysis
-This class implements a recursive decent parser.
+This class implements a recursive descent parser.
*/
-template<typename BasicJsonType>
+template<typename BasicJsonType, typename InputAdapterType>
class parser
{
using number_integer_t = typename BasicJsonType::number_integer_t;
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
using number_float_t = typename BasicJsonType::number_float_t;
using string_t = typename BasicJsonType::string_t;
- using lexer_t = lexer<BasicJsonType>;
+ using lexer_t = lexer<BasicJsonType, InputAdapterType>;
using token_type = typename lexer_t::token_type;
public:
- enum class parse_event_t : uint8_t
- {
- /// the parser read `{` and started to process a JSON object
- object_start,
- /// the parser read `}` and finished processing a JSON object
- object_end,
- /// the parser read `[` and started to process a JSON array
- array_start,
- /// the parser read `]` and finished processing a JSON array
- array_end,
- /// the parser read a key of a value in an object
- key,
- /// the parser finished reading a JSON value
- value
- };
-
- using parser_callback_t =
- std::function<bool(int depth, parse_event_t event, BasicJsonType& parsed)>;
-
/// a parser reading from an input adapter
- explicit parser(detail::input_adapter_t&& adapter,
- const parser_callback_t cb = nullptr,
- const bool allow_exceptions_ = true)
- : callback(cb), m_lexer(std::move(adapter)), allow_exceptions(allow_exceptions_)
+ explicit parser(InputAdapterType&& adapter,
+ const parser_callback_t<BasicJsonType> cb = nullptr,
+ const bool allow_exceptions_ = true,
+ const bool skip_comments = false)
+ : callback(cb)
+ , m_lexer(std::move(adapter), skip_comments)
+ , allow_exceptions(allow_exceptions_)
{
// read first token
get_token();
@@ -5015,7 +10202,7 @@ class parser
result.assert_invariant();
// in strict mode, input must be completely read
- if (strict and (get_token() != token_type::end_of_input))
+ if (strict && (get_token() != token_type::end_of_input))
{
sdp.parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
@@ -5044,7 +10231,7 @@ class parser
result.assert_invariant();
// in strict mode, input must be completely read
- if (strict and (get_token() != token_type::end_of_input))
+ if (strict && (get_token() != token_type::end_of_input))
{
sdp.parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
@@ -5073,14 +10260,15 @@ class parser
return sax_parse(&sax_acceptor, strict);
}
- template <typename SAX>
+ template<typename SAX>
+ JSON_HEDLEY_NON_NULL(2)
bool sax_parse(SAX* sax, const bool strict = true)
{
(void)detail::is_sax_static_asserts<SAX, BasicJsonType> {};
const bool result = sax_parse_internal(sax);
// strict mode: next byte must be EOF
- if (result and strict and (get_token() != token_type::end_of_input))
+ if (result && strict && (get_token() != token_type::end_of_input))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
@@ -5092,7 +10280,8 @@ class parser
}
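Strict mode, as enforced above, means the input must be consumed completely; trailing bytes make an otherwise valid document fail. Through the public API that looks roughly like this (a sketch):

    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json ok = json::parse("{}");           // fine: input fully consumed
        bool accepted = json::accept("{} x");  // false: trailing "x" after the value
        // json::parse("{} x") would throw parse_error.101, with a message built
        // by the parser's exception_message() further below.
        return accepted ? 0 : 1;
    }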
private:
- template <typename SAX>
+ template<typename SAX>
+ JSON_HEDLEY_NON_NULL(2)
bool sax_parse_internal(SAX* sax)
{
// stack to remember the hierarchy of structured values we are parsing
@@ -5103,14 +10292,14 @@ class parser
while (true)
{
- if (not skip_to_state_evaluation)
+ if (!skip_to_state_evaluation)
{
// invariant: get_token() was called before each iteration
switch (last_token)
{
case token_type::begin_object:
{
- if (JSON_UNLIKELY(not sax->start_object(std::size_t(-1))))
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_object(std::size_t(-1))))
{
return false;
}
@@ -5118,7 +10307,7 @@ class parser
// closing } -> we are done
if (get_token() == token_type::end_object)
{
- if (JSON_UNLIKELY(not sax->end_object()))
+ if (JSON_HEDLEY_UNLIKELY(!sax->end_object()))
{
return false;
}
@@ -5126,20 +10315,20 @@ class parser
}
// parse key
- if (JSON_UNLIKELY(last_token != token_type::value_string))
+ if (JSON_HEDLEY_UNLIKELY(last_token != token_type::value_string))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(),
exception_message(token_type::value_string, "object key")));
}
- if (JSON_UNLIKELY(not sax->key(m_lexer.get_string())))
+ if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string())))
{
return false;
}
// parse separator (:)
- if (JSON_UNLIKELY(get_token() != token_type::name_separator))
+ if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
@@ -5157,7 +10346,7 @@ class parser
case token_type::begin_array:
{
- if (JSON_UNLIKELY(not sax->start_array(std::size_t(-1))))
+ if (JSON_HEDLEY_UNLIKELY(!sax->start_array(std::size_t(-1))))
{
return false;
}
@@ -5165,7 +10354,7 @@ class parser
// closing ] -> we are done
if (get_token() == token_type::end_array)
{
- if (JSON_UNLIKELY(not sax->end_array()))
+ if (JSON_HEDLEY_UNLIKELY(!sax->end_array()))
{
return false;
}
@@ -5183,25 +10372,24 @@ class parser
{
const auto res = m_lexer.get_number_float();
- if (JSON_UNLIKELY(not std::isfinite(res)))
+ if (JSON_HEDLEY_UNLIKELY(!std::isfinite(res)))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
out_of_range::create(406, "number overflow parsing '" + m_lexer.get_token_string() + "'"));
}
- else
+
+ if (JSON_HEDLEY_UNLIKELY(!sax->number_float(res, m_lexer.get_string())))
{
- if (JSON_UNLIKELY(not sax->number_float(res, m_lexer.get_string())))
- {
- return false;
- }
- break;
+ return false;
}
+
+ break;
}
case token_type::literal_false:
{
- if (JSON_UNLIKELY(not sax->boolean(false)))
+ if (JSON_HEDLEY_UNLIKELY(!sax->boolean(false)))
{
return false;
}
@@ -5210,7 +10398,7 @@ class parser
case token_type::literal_null:
{
- if (JSON_UNLIKELY(not sax->null()))
+ if (JSON_HEDLEY_UNLIKELY(!sax->null()))
{
return false;
}
@@ -5219,7 +10407,7 @@ class parser
case token_type::literal_true:
{
- if (JSON_UNLIKELY(not sax->boolean(true)))
+ if (JSON_HEDLEY_UNLIKELY(!sax->boolean(true)))
{
return false;
}
@@ -5228,7 +10416,7 @@ class parser
case token_type::value_integer:
{
- if (JSON_UNLIKELY(not sax->number_integer(m_lexer.get_number_integer())))
+ if (JSON_HEDLEY_UNLIKELY(!sax->number_integer(m_lexer.get_number_integer())))
{
return false;
}
@@ -5237,7 +10425,7 @@ class parser
case token_type::value_string:
{
- if (JSON_UNLIKELY(not sax->string(m_lexer.get_string())))
+ if (JSON_HEDLEY_UNLIKELY(!sax->string(m_lexer.get_string())))
{
return false;
}
@@ -5246,7 +10434,7 @@ class parser
case token_type::value_unsigned:
{
- if (JSON_UNLIKELY(not sax->number_unsigned(m_lexer.get_number_unsigned())))
+ if (JSON_HEDLEY_UNLIKELY(!sax->number_unsigned(m_lexer.get_number_unsigned())))
{
return false;
}
@@ -5282,103 +10470,95 @@ class parser
// empty stack: we reached the end of the hierarchy: done
return true;
}
- else
+
+ if (states.back()) // array
{
- if (states.back()) // array
+ // comma -> next value
+ if (get_token() == token_type::value_separator)
{
- // comma -> next value
- if (get_token() == token_type::value_separator)
+ // parse a new value
+ get_token();
+ continue;
+ }
+
+ // closing ]
+ if (JSON_HEDLEY_LIKELY(last_token == token_type::end_array))
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->end_array()))
{
- // parse a new value
- get_token();
- continue;
+ return false;
}
- // closing ]
- if (JSON_LIKELY(last_token == token_type::end_array))
- {
- if (JSON_UNLIKELY(not sax->end_array()))
- {
- return false;
- }
+ // We are done with this array. Before we can parse a
+ // new value, we need to evaluate the new state first.
+ // By setting skip_to_state_evaluation to true, we
+ // skip reading a new token and jump straight to the
+ // state evaluation at the top of the loop.
+ JSON_ASSERT(!states.empty());
+ states.pop_back();
+ skip_to_state_evaluation = true;
+ continue;
+ }
- // We are done with this array. Before we can parse a
- // new value, we need to evaluate the new state first.
- // By setting skip_to_state_evaluation to false, we
- // are effectively jumping to the beginning of this if.
- assert(not states.empty());
- states.pop_back();
- skip_to_state_evaluation = true;
- continue;
- }
- else
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::end_array, "array")));
+ }
+ else // object
+ {
+ // comma -> next value
+ if (get_token() == token_type::value_separator)
+ {
+ // parse key
+ if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::value_string))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(),
- exception_message(token_type::end_array, "array")));
+ exception_message(token_type::value_string, "object key")));
}
- }
- else // object
- {
- // comma -> next value
- if (get_token() == token_type::value_separator)
- {
- // parse key
- if (JSON_UNLIKELY(get_token() != token_type::value_string))
- {
- return sax->parse_error(m_lexer.get_position(),
- m_lexer.get_token_string(),
- parse_error::create(101, m_lexer.get_position(),
- exception_message(token_type::value_string, "object key")));
- }
- else
- {
- if (JSON_UNLIKELY(not sax->key(m_lexer.get_string())))
- {
- return false;
- }
- }
- // parse separator (:)
- if (JSON_UNLIKELY(get_token() != token_type::name_separator))
- {
- return sax->parse_error(m_lexer.get_position(),
- m_lexer.get_token_string(),
- parse_error::create(101, m_lexer.get_position(),
- exception_message(token_type::name_separator, "object separator")));
- }
-
- // parse values
- get_token();
- continue;
- }
-
- // closing }
- if (JSON_LIKELY(last_token == token_type::end_object))
+ if (JSON_HEDLEY_UNLIKELY(!sax->key(m_lexer.get_string())))
{
- if (JSON_UNLIKELY(not sax->end_object()))
- {
- return false;
- }
-
- // We are done with this object. Before we can parse a
- // new value, we need to evaluate the new state first.
- // By setting skip_to_state_evaluation to false, we
- // are effectively jumping to the beginning of this if.
- assert(not states.empty());
- states.pop_back();
- skip_to_state_evaluation = true;
- continue;
+ return false;
}
- else
+
+ // parse separator (:)
+ if (JSON_HEDLEY_UNLIKELY(get_token() != token_type::name_separator))
{
return sax->parse_error(m_lexer.get_position(),
m_lexer.get_token_string(),
parse_error::create(101, m_lexer.get_position(),
- exception_message(token_type::end_object, "object")));
+ exception_message(token_type::name_separator, "object separator")));
+ }
+
+ // parse values
+ get_token();
+ continue;
+ }
+
+ // closing }
+ if (JSON_HEDLEY_LIKELY(last_token == token_type::end_object))
+ {
+ if (JSON_HEDLEY_UNLIKELY(!sax->end_object()))
+ {
+ return false;
}
+
+ // We are done with this object. Before we can parse a
+ // new value, we need to evaluate the new state first.
+ // By setting skip_to_state_evaluation to true, we
+ // skip reading a new token and jump straight to the
+ // state evaluation at the top of the loop.
+ JSON_ASSERT(!states.empty());
+ states.pop_back();
+ skip_to_state_evaluation = true;
+ continue;
}
+
+ return sax->parse_error(m_lexer.get_position(),
+ m_lexer.get_token_string(),
+ parse_error::create(101, m_lexer.get_position(),
+ exception_message(token_type::end_object, "object")));
}
}
}
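One way to read the loop above: sax_parse_internal() is iterative rather than recursive, with states holding one bool per open container (true = array, false = object), so nesting depth costs heap instead of call stack. A standalone toy with the same bookkeeping, not part of the library:

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy version of the parser's container bookkeeping: brackets only.
    bool balanced(const std::string& s)
    {
        std::vector<bool> states; // true = array, false = object
        for (const char c : s)
        {
            if (c == '[') { states.push_back(true); }
            else if (c == '{') { states.push_back(false); }
            else if (c == ']') { if (states.empty() || !states.back()) { return false; } states.pop_back(); }
            else if (c == '}') { if (states.empty() ||  states.back()) { return false; } states.pop_back(); }
        }
        return states.empty();
    }

    int main()
    {
        assert(balanced("[{}[]]"));
        assert(!balanced("[}"));
    }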
@@ -5386,14 +10566,14 @@ class parser
/// get next token from lexer
token_type get_token()
{
- return (last_token = m_lexer.scan());
+ return last_token = m_lexer.scan();
}
std::string exception_message(const token_type expected, const std::string& context)
{
std::string error_msg = "syntax error ";
- if (not context.empty())
+ if (!context.empty())
{
error_msg += "while parsing " + context + " ";
}
@@ -5420,7 +10600,7 @@ class parser
private:
/// callback function
- const parser_callback_t callback = nullptr;
+ const parser_callback_t<BasicJsonType> callback = nullptr;
/// the type of the last read token
token_type last_token = token_type::uninitialized;
/// the lexer
@@ -5431,6 +10611,9 @@ class parser
} // namespace detail
} // namespace nlohmann
+// #include <nlohmann/detail/iterators/internal_iterator.hpp>
+
+
// #include <nlohmann/detail/iterators/primitive_iterator.hpp>
@@ -5553,11 +10736,6 @@ class primitive_iterator_t
} // namespace detail
} // namespace nlohmann
-// #include <nlohmann/detail/iterators/internal_iterator.hpp>
-
-
-// #include <nlohmann/detail/iterators/primitive_iterator.hpp>
-
namespace nlohmann
{
@@ -5584,7 +10762,6 @@ template<typename BasicJsonType> struct internal_iterator
// #include <nlohmann/detail/iterators/iter_impl.hpp>
-#include <ciso646> // not
#include <iterator> // iterator, random_access_iterator_tag, bidirectional_iterator_tag, advance, next
#include <type_traits> // conditional, is_const, remove_const
@@ -5598,6 +10775,8 @@ template<typename BasicJsonType> struct internal_iterator
// #include <nlohmann/detail/meta/cpp_future.hpp>
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
// #include <nlohmann/detail/value_t.hpp>
@@ -5674,7 +10853,7 @@ class iter_impl
*/
explicit iter_impl(pointer object) noexcept : m_object(object)
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
@@ -5708,16 +10887,41 @@ class iter_impl
*/
/*!
+ @brief const copy constructor
+ @param[in] other const iterator to copy from
+ @note This copy constructor had to be defined explicitly to circumvent a bug
+ occurring on msvc v19.0 compiler (VS 2015) debug build. For more
+ information refer to: https://github.com/nlohmann/json/issues/1608
+ */
+ iter_impl(const iter_impl<const BasicJsonType>& other) noexcept
+ : m_object(other.m_object), m_it(other.m_it)
+ {}
+
+ /*!
+ @brief converting assignment
+ @param[in] other const iterator to copy from
+ @return const/non-const iterator
+ @note It is not checked whether @a other is initialized.
+ */
+ iter_impl& operator=(const iter_impl<const BasicJsonType>& other) noexcept
+ {
+ m_object = other.m_object;
+ m_it = other.m_it;
+ return *this;
+ }
+
+ /*!
@brief converting constructor
@param[in] other non-const iterator to copy from
@note It is not checked whether @a other is initialized.
*/
iter_impl(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept
- : m_object(other.m_object), m_it(other.m_it) {}
+ : m_object(other.m_object), m_it(other.m_it)
+ {}
/*!
@brief converting assignment
- @param[in,out] other non-const iterator to copy from
+ @param[in] other non-const iterator to copy from
@return const/non-const iterator
@note It is not checked whether @a other is initialized.
*/
@@ -5735,7 +10939,7 @@ class iter_impl
*/
void set_begin() noexcept
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
@@ -5772,7 +10976,7 @@ class iter_impl
*/
void set_end() noexcept
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
@@ -5803,19 +11007,19 @@ class iter_impl
*/
reference operator*() const
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
case value_t::object:
{
- assert(m_it.object_iterator != m_object->m_value.object->end());
+ JSON_ASSERT(m_it.object_iterator != m_object->m_value.object->end());
return m_it.object_iterator->second;
}
case value_t::array:
{
- assert(m_it.array_iterator != m_object->m_value.array->end());
+ JSON_ASSERT(m_it.array_iterator != m_object->m_value.array->end());
return *m_it.array_iterator;
}
@@ -5824,7 +11028,7 @@ class iter_impl
default:
{
- if (JSON_LIKELY(m_it.primitive_iterator.is_begin()))
+ if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin()))
{
return *m_object;
}
@@ -5840,25 +11044,25 @@ class iter_impl
*/
pointer operator->() const
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
case value_t::object:
{
- assert(m_it.object_iterator != m_object->m_value.object->end());
+ JSON_ASSERT(m_it.object_iterator != m_object->m_value.object->end());
return &(m_it.object_iterator->second);
}
case value_t::array:
{
- assert(m_it.array_iterator != m_object->m_value.array->end());
+ JSON_ASSERT(m_it.array_iterator != m_object->m_value.array->end());
return &*m_it.array_iterator;
}
default:
{
- if (JSON_LIKELY(m_it.primitive_iterator.is_begin()))
+ if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.is_begin()))
{
return m_object;
}
@@ -5885,7 +11089,7 @@ class iter_impl
*/
iter_impl& operator++()
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
@@ -5928,7 +11132,7 @@ class iter_impl
*/
iter_impl& operator--()
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
@@ -5961,12 +11165,12 @@ class iter_impl
bool operator==(const iter_impl& other) const
{
// if objects are not the same, the comparison is undefined
- if (JSON_UNLIKELY(m_object != other.m_object))
+ if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
{
JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers"));
}
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
@@ -5987,7 +11191,7 @@ class iter_impl
*/
bool operator!=(const iter_impl& other) const
{
- return not operator==(other);
+ return !operator==(other);
}
/*!
@@ -5997,12 +11201,12 @@ class iter_impl
bool operator<(const iter_impl& other) const
{
// if objects are not the same, the comparison is undefined
- if (JSON_UNLIKELY(m_object != other.m_object))
+ if (JSON_HEDLEY_UNLIKELY(m_object != other.m_object))
{
JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers"));
}
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
@@ -6023,7 +11227,7 @@ class iter_impl
*/
bool operator<=(const iter_impl& other) const
{
- return not other.operator < (*this);
+ return !other.operator < (*this);
}
/*!
@@ -6032,7 +11236,7 @@ class iter_impl
*/
bool operator>(const iter_impl& other) const
{
- return not operator<=(other);
+ return !operator<=(other);
}
/*!
@@ -6041,7 +11245,7 @@ class iter_impl
*/
bool operator>=(const iter_impl& other) const
{
- return not operator<(other);
+ return !operator<(other);
}
/*!
@@ -6050,7 +11254,7 @@ class iter_impl
*/
iter_impl& operator+=(difference_type i)
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
@@ -6121,7 +11325,7 @@ class iter_impl
*/
difference_type operator-(const iter_impl& other) const
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
@@ -6142,7 +11346,7 @@ class iter_impl
*/
reference operator[](difference_type n) const
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
switch (m_object->m_type)
{
@@ -6157,7 +11361,7 @@ class iter_impl
default:
{
- if (JSON_LIKELY(m_it.primitive_iterator.get_value() == -n))
+ if (JSON_HEDLEY_LIKELY(m_it.primitive_iterator.get_value() == -n))
{
return *m_object;
}
@@ -6173,9 +11377,9 @@ class iter_impl
*/
const typename object_t::key_type& key() const
{
- assert(m_object != nullptr);
+ JSON_ASSERT(m_object != nullptr);
- if (JSON_LIKELY(m_object->is_object()))
+ if (JSON_HEDLEY_LIKELY(m_object->is_object()))
{
return m_it.object_iterator->first;
}
@@ -6196,10 +11400,11 @@ class iter_impl
/// associated JSON instance
pointer m_object = nullptr;
/// the actual iterator of the associated instance
- internal_iterator<typename std::remove_const<BasicJsonType>::type> m_it;
+ internal_iterator<typename std::remove_const<BasicJsonType>::type> m_it {};
};
-} // namespace detail
+} // namespace detail
} // namespace nlohmann
+
// #include <nlohmann/detail/iterators/iteration_proxy.hpp>
// #include <nlohmann/detail/iterators/json_reverse_iterator.hpp>
@@ -6323,2132 +11528,1214 @@ class json_reverse_iterator : public std::reverse_iterator<Base>
} // namespace detail
} // namespace nlohmann
-// #include <nlohmann/detail/output/output_adapters.hpp>
+// #include <nlohmann/detail/iterators/primitive_iterator.hpp>
+// #include <nlohmann/detail/json_pointer.hpp>
-#include <algorithm> // copy
-#include <cstddef> // size_t
-#include <ios> // streamsize
-#include <iterator> // back_inserter
-#include <memory> // shared_ptr, make_shared
-#include <ostream> // basic_ostream
-#include <string> // basic_string
+
+#include <algorithm> // all_of
+#include <cctype> // isdigit
+#include <limits> // max
+#include <numeric> // accumulate
+#include <string> // string
+#include <utility> // move
#include <vector> // vector
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
namespace nlohmann
{
-namespace detail
-{
-/// abstract output adapter interface
-template<typename CharType> struct output_adapter_protocol
+template<typename BasicJsonType>
+class json_pointer
{
- virtual void write_character(CharType c) = 0;
- virtual void write_characters(const CharType* s, std::size_t length) = 0;
- virtual ~output_adapter_protocol() = default;
-};
-
-/// a type to simplify interfaces
-template<typename CharType>
-using output_adapter_t = std::shared_ptr<output_adapter_protocol<CharType>>;
+ // allow basic_json to access private members
+ NLOHMANN_BASIC_JSON_TPL_DECLARATION
+ friend class basic_json;
-/// output adapter for byte vectors
-template<typename CharType>
-class output_vector_adapter : public output_adapter_protocol<CharType>
-{
public:
- explicit output_vector_adapter(std::vector<CharType>& vec) noexcept
- : v(vec)
- {}
+ /*!
+ @brief create JSON pointer
- void write_character(CharType c) override
- {
- v.push_back(c);
- }
+ Create a JSON pointer according to the syntax described in
+ [Section 3 of RFC6901](https://tools.ietf.org/html/rfc6901#section-3).
- void write_characters(const CharType* s, std::size_t length) override
- {
- std::copy(s, s + length, std::back_inserter(v));
- }
+ @param[in] s string representing the JSON pointer; if omitted, the empty
+ string is assumed which references the whole JSON value
- private:
- std::vector<CharType>& v;
-};
+ @throw parse_error.107 if the given JSON pointer @a s is nonempty and does
+ not begin with a slash (`/`); see example below
-/// output adapter for output streams
-template<typename CharType>
-class output_stream_adapter : public output_adapter_protocol<CharType>
-{
- public:
- explicit output_stream_adapter(std::basic_ostream<CharType>& s) noexcept
- : stream(s)
+ @throw parse_error.108 if a tilde (`~`) in the given JSON pointer @a s is
+ not followed by `0` (representing `~`) or `1` (representing `/`); see
+ example below
+
+ @liveexample{The example shows the construction of several valid JSON pointers
+ as well as the exceptional behavior.,json_pointer}
+
+ @since version 2.0.0
+ */
+ explicit json_pointer(const std::string& s = "")
+ : reference_tokens(split(s))
{}
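A construction sketch matching the exceptions documented above (assuming the conventional json alias; the commented lines indicate what would throw):

    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json::json_pointer whole;            // "" refers to the whole document
        json::json_pointer item("/foo/0");   // first element of member "foo"
        // json::json_pointer bad("foo");    // parse_error.107: missing leading '/'
        // json::json_pointer bad2("/a~2b"); // parse_error.108: '~' not followed by '0' or '1'
    }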
- void write_character(CharType c) override
- {
- stream.put(c);
- }
+ /*!
+ @brief return a string representation of the JSON pointer
- void write_characters(const CharType* s, std::size_t length) override
- {
- stream.write(s, static_cast<std::streamsize>(length));
- }
+ @invariant For each JSON pointer `ptr`, it holds:
+ @code {.cpp}
+ ptr == json_pointer(ptr.to_string());
+ @endcode
- private:
- std::basic_ostream<CharType>& stream;
-};
+ @return a string representation of the JSON pointer
-/// output adapter for basic_string
-template<typename CharType, typename StringType = std::basic_string<CharType>>
-class output_string_adapter : public output_adapter_protocol<CharType>
-{
- public:
- explicit output_string_adapter(StringType& s) noexcept
- : str(s)
- {}
+ @liveexample{The example shows the result of `to_string`.,json_pointer__to_string}
- void write_character(CharType c) override
+ @since version 2.0.0
+ */
+ std::string to_string() const
{
- str.push_back(c);
+ return std::accumulate(reference_tokens.begin(), reference_tokens.end(),
+ std::string{},
+ [](const std::string & a, const std::string & b)
+ {
+ return a + "/" + escape(b);
+ });
}
- void write_characters(const CharType* s, std::size_t length) override
+ /// @copydoc to_string()
+ operator std::string() const
{
- str.append(s, length);
+ return to_string();
}
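The invariant above in practice: '~' is written '~0' and '/' is written '~1' inside a token, so to_string() restores the escaped form. A sketch:

    #include <cassert>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json::json_pointer p("/a~1b/m~0n");    // tokens: "a/b" and "m~n"
        assert(p.to_string() == "/a~1b/m~0n"); // escaping is restored
        assert(json::json_pointer(p.to_string()).to_string() == p.to_string());
    }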
- private:
- StringType& str;
-};
+ /*!
+ @brief append another JSON pointer at the end of this JSON pointer
-template<typename CharType, typename StringType = std::basic_string<CharType>>
-class output_adapter
-{
- public:
- output_adapter(std::vector<CharType>& vec)
- : oa(std::make_shared<output_vector_adapter<CharType>>(vec)) {}
+ @param[in] ptr JSON pointer to append
+ @return JSON pointer with @a ptr appended
- output_adapter(std::basic_ostream<CharType>& s)
- : oa(std::make_shared<output_stream_adapter<CharType>>(s)) {}
+ @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add}
- output_adapter(StringType& s)
- : oa(std::make_shared<output_string_adapter<CharType, StringType>>(s)) {}
+ @complexity Linear in the length of @a ptr.
- operator output_adapter_t<CharType>()
+ @sa @ref operator/=(std::string) to append a reference token
+ @sa @ref operator/=(std::size_t) to append an array index
+ @sa @ref operator/(const json_pointer&, const json_pointer&) for a binary operator
+
+ @since version 3.6.0
+ */
+ json_pointer& operator/=(const json_pointer& ptr)
{
- return oa;
+ reference_tokens.insert(reference_tokens.end(),
+ ptr.reference_tokens.begin(),
+ ptr.reference_tokens.end());
+ return *this;
}
- private:
- output_adapter_t<CharType> oa = nullptr;
-};
-} // namespace detail
-} // namespace nlohmann
-
-// #include <nlohmann/detail/input/binary_reader.hpp>
-
-
-#include <algorithm> // generate_n
-#include <array> // array
-#include <cassert> // assert
-#include <cmath> // ldexp
-#include <cstddef> // size_t
-#include <cstdint> // uint8_t, uint16_t, uint32_t, uint64_t
-#include <cstdio> // snprintf
-#include <cstring> // memcpy
-#include <iterator> // back_inserter
-#include <limits> // numeric_limits
-#include <string> // char_traits, string
-#include <utility> // make_pair, move
+ /*!
+ @brief append an unescaped reference token at the end of this JSON pointer
-// #include <nlohmann/detail/input/input_adapters.hpp>
+ @param[in] token reference token to append
+ @return JSON pointer with @a token appended without escaping @a token
-// #include <nlohmann/detail/input/json_sax.hpp>
+ @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add}
-// #include <nlohmann/detail/exceptions.hpp>
+ @complexity Amortized constant.
-// #include <nlohmann/detail/macro_scope.hpp>
+ @sa @ref operator/=(const json_pointer&) to append a JSON pointer
+ @sa @ref operator/=(std::size_t) to append an array index
+ @sa @ref operator/(const json_pointer&, std::size_t) for a binary operator
-// #include <nlohmann/detail/meta/is_sax.hpp>
+ @since version 3.6.0
+ */
+ json_pointer& operator/=(std::string token)
+ {
+ push_back(std::move(token));
+ return *this;
+ }
-// #include <nlohmann/detail/value_t.hpp>
+ /*!
+ @brief append an array index at the end of this JSON pointer
+ @param[in] array_idx array index to append
+ @return JSON pointer with @a array_idx appended
-namespace nlohmann
-{
-namespace detail
-{
-///////////////////
-// binary reader //
-///////////////////
+ @liveexample{The example shows the usage of `operator/=`.,json_pointer__operator_add}
-/*!
-@brief deserialization of CBOR, MessagePack, and UBJSON values
-*/
-template<typename BasicJsonType, typename SAX = json_sax_dom_parser<BasicJsonType>>
-class binary_reader
-{
- using number_integer_t = typename BasicJsonType::number_integer_t;
- using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
- using number_float_t = typename BasicJsonType::number_float_t;
- using string_t = typename BasicJsonType::string_t;
- using json_sax_t = SAX;
+ @complexity Amortized constant.
- public:
- /*!
- @brief create a binary reader
+ @sa @ref operator/=(const json_pointer&) to append a JSON pointer
+ @sa @ref operator/=(std::string) to append a reference token
+ @sa @ref operator/(const json_pointer&, std::string) for a binary operator
- @param[in] adapter input adapter to read from
+ @since version 3.6.0
*/
- explicit binary_reader(input_adapter_t adapter) : ia(std::move(adapter))
+ json_pointer& operator/=(std::size_t array_idx)
{
- (void)detail::is_sax_static_asserts<SAX, BasicJsonType> {};
- assert(ia);
+ return *this /= std::to_string(array_idx);
}
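The three /= overloads chain naturally; a sketch (resulting pointer shown in comments):

    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json::json_pointer p;                     // ""
        p /= "settings";                          // "/settings"
        p /= json::json_pointer("/display/mode"); // "/settings/display/mode"
        p /= 2;                                   // "/settings/display/mode/2"
    }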
/*!
- @param[in] format the binary format to parse
- @param[in] sax_ a SAX event processor
- @param[in] strict whether to expect the input to be consumed completed
+ @brief create a new JSON pointer by appending the right JSON pointer at the end of the left JSON pointer
- @return
- */
- bool sax_parse(const input_format_t format,
- json_sax_t* sax_,
- const bool strict = true)
- {
- sax = sax_;
- bool result = false;
+ @param[in] lhs JSON pointer
+ @param[in] rhs JSON pointer
+ @return a new JSON pointer with @a rhs appended to @a lhs
- switch (format)
- {
- case input_format_t::bson:
- result = parse_bson_internal();
- break;
+ @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary}
- case input_format_t::cbor:
- result = parse_cbor_internal();
- break;
+ @complexity Linear in the length of @a lhs and @a rhs.
- case input_format_t::msgpack:
- result = parse_msgpack_internal();
- break;
+ @sa @ref operator/=(const json_pointer&) to append a JSON pointer
- case input_format_t::ubjson:
- result = parse_ubjson_internal();
- break;
-
- // LCOV_EXCL_START
- default:
- assert(false);
- // LCOV_EXCL_STOP
- }
+ @since version 3.6.0
+ */
+ friend json_pointer operator/(const json_pointer& lhs,
+ const json_pointer& rhs)
+ {
+ return json_pointer(lhs) /= rhs;
+ }
- // strict mode: next byte must be EOF
- if (result and strict)
- {
- if (format == input_format_t::ubjson)
- {
- get_ignore_noop();
- }
- else
- {
- get();
- }
+ /*!
+ @brief create a new JSON pointer by appending the unescaped token at the end of the JSON pointer
- if (JSON_UNLIKELY(current != std::char_traits<char>::eof()))
- {
- return sax->parse_error(chars_read, get_token_string(),
- parse_error::create(110, chars_read, exception_message(format, "expected end of input; last byte: 0x" + get_token_string(), "value")));
- }
- }
+ @param[in] ptr JSON pointer
+ @param[in] token reference token
+ @return a new JSON pointer with unescaped @a token appended to @a ptr
- return result;
- }
+ @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary}
- /*!
- @brief determine system byte order
+ @complexity Linear in the length of @a ptr.
- @return true if and only if system's byte order is little endian
+ @sa @ref operator/=(std::string) to append a reference token
- @note from http://stackoverflow.com/a/1001328/266378
+ @since version 3.6.0
*/
- static constexpr bool little_endianess(int num = 1) noexcept
+ friend json_pointer operator/(const json_pointer& ptr, std::string token)
{
- return (*reinterpret_cast<char*>(&num) == 1);
+ return json_pointer(ptr) /= std::move(token);
}
- private:
- //////////
- // BSON //
- //////////
-
/*!
- @brief Reads in a BSON-object and passes it to the SAX-parser.
- @return whether a valid BSON-value was passed to the SAX parser
- */
- bool parse_bson_internal()
- {
- std::int32_t document_size;
- get_number<std::int32_t, true>(input_format_t::bson, document_size);
+ @brief create a new JSON pointer by appending the array-index-token at the end of the JSON pointer
- if (JSON_UNLIKELY(not sax->start_object(std::size_t(-1))))
- {
- return false;
- }
+ @param[in] ptr JSON pointer
+ @param[in] array_idx array index
+ @return a new JSON pointer with @a array_idx appended to @a ptr
- if (JSON_UNLIKELY(not parse_bson_element_list(/*is_array*/false)))
- {
- return false;
- }
+ @liveexample{The example shows the usage of `operator/`.,json_pointer__operator_add_binary}
- return sax->end_object();
- }
+ @complexity Linear in the length of @a ptr.
- /*!
- @brief Parses a C-style string from the BSON input.
- @param[in, out] result A reference to the string variable where the read
- string is to be stored.
- @return `true` if the \x00-byte indicating the end of the string was
- encountered before the EOF; false` indicates an unexpected EOF.
+ @sa @ref operator/=(std::size_t) to append an array index
+
+ @since version 3.6.0
*/
- bool get_bson_cstr(string_t& result)
+ friend json_pointer operator/(const json_pointer& ptr, std::size_t array_idx)
{
- auto out = std::back_inserter(result);
- while (true)
- {
- get();
- if (JSON_UNLIKELY(not unexpect_eof(input_format_t::bson, "cstring")))
- {
- return false;
- }
- if (current == 0x00)
- {
- return true;
- }
- *out++ = static_cast<char>(current);
- }
-
- return true;
+ return json_pointer(ptr) /= array_idx;
}
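Unlike /=, the binary operator/ leaves its operands untouched; a sketch:

    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        const json::json_pointer base("/users");
        json::json_pointer q = base / 7 / "name"; // "/users/7/name"; base is unchanged
    }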
/*!
- @brief Parses a zero-terminated string of length @a len from the BSON
- input.
- @param[in] len The length (including the zero-byte at the end) of the
- string to be read.
- @param[in, out] result A reference to the string variable where the read
- string is to be stored.
- @tparam NumberType The type of the length @a len
- @pre len >= 1
- @return `true` if the string was successfully parsed
+ @brief returns the parent of this JSON pointer
+
+ @return parent of this JSON pointer; in case this JSON pointer is the root,
+ the root itself is returned
+
+ @complexity Linear in the length of the JSON pointer.
+
+ @liveexample{The example shows the result of `parent_pointer` for different
+ JSON Pointers.,json_pointer__parent_pointer}
+
+ @since version 3.6.0
*/
- template<typename NumberType>
- bool get_bson_string(const NumberType len, string_t& result)
+ json_pointer parent_pointer() const
{
- if (JSON_UNLIKELY(len < 1))
+ if (empty())
{
- auto last_token = get_token_string();
- return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "string length must be at least 1, is " + std::to_string(len), "string")));
+ return *this;
}
- return get_string(input_format_t::bson, len - static_cast<NumberType>(1), result) and get() != std::char_traits<char>::eof();
+ json_pointer res = *this;
+ res.pop_back();
+ return res;
}
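A sketch of parent_pointer(), including the root case called out above:

    #include <cassert>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json::json_pointer p("/foo/bar");
        assert(p.parent_pointer().to_string() == "/foo");
        assert(p.parent_pointer().parent_pointer().to_string().empty());
        assert(json::json_pointer().parent_pointer().to_string().empty()); // root is its own parent
    }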
/*!
- @brief Read a BSON document element of the given @a element_type.
- @param[in] element_type The BSON element type, c.f. http://bsonspec.org/spec.html
- @param[in] element_type_parse_position The position in the input stream,
- where the `element_type` was read.
- @warning Not all BSON element types are supported yet. An unsupported
- @a element_type will give rise to a parse_error.114:
- Unsupported BSON record type 0x...
- @return whether a valid BSON-object/array was passed to the SAX parser
- */
- bool parse_bson_element_internal(const int element_type,
- const std::size_t element_type_parse_position)
- {
- switch (element_type)
- {
- case 0x01: // double
- {
- double number;
- return get_number<double, true>(input_format_t::bson, number) and sax->number_float(static_cast<number_float_t>(number), "");
- }
-
- case 0x02: // string
- {
- std::int32_t len;
- string_t value;
- return get_number<std::int32_t, true>(input_format_t::bson, len) and get_bson_string(len, value) and sax->string(value);
- }
-
- case 0x03: // object
- {
- return parse_bson_internal();
- }
+ @brief remove last reference token
- case 0x04: // array
- {
- return parse_bson_array();
- }
+ @pre not `empty()`
- case 0x08: // boolean
- {
- return sax->boolean(get() != 0);
- }
+ @liveexample{The example shows the usage of `pop_back`.,json_pointer__pop_back}
- case 0x0A: // null
- {
- return sax->null();
- }
-
- case 0x10: // int32
- {
- std::int32_t value;
- return get_number<std::int32_t, true>(input_format_t::bson, value) and sax->number_integer(value);
- }
-
- case 0x12: // int64
- {
- std::int64_t value;
- return get_number<std::int64_t, true>(input_format_t::bson, value) and sax->number_integer(value);
- }
-
- default: // anything else not supported (yet)
- {
- char cr[3];
- (std::snprintf)(cr, sizeof(cr), "%.2hhX", static_cast<unsigned char>(element_type));
- return sax->parse_error(element_type_parse_position, std::string(cr), parse_error::create(114, element_type_parse_position, "Unsupported BSON record type 0x" + std::string(cr)));
- }
- }
- }
-
- /*!
- @brief Read a BSON element list (as specified in the BSON-spec)
+ @complexity Constant.
- The same binary layout is used for objects and arrays, hence it must be
- indicated with the argument @a is_array which one is expected
- (true --> array, false --> object).
+ @throw out_of_range.405 if JSON pointer has no parent
- @param[in] is_array Determines if the element list being read is to be
- treated as an object (@a is_array == false), or as an
- array (@a is_array == true).
- @return whether a valid BSON-object/array was passed to the SAX parser
+ @since version 3.6.0
*/
- bool parse_bson_element_list(const bool is_array)
+ void pop_back()
{
- string_t key;
- while (int element_type = get())
+ if (JSON_HEDLEY_UNLIKELY(empty()))
{
- if (JSON_UNLIKELY(not unexpect_eof(input_format_t::bson, "element list")))
- {
- return false;
- }
-
- const std::size_t element_type_parse_position = chars_read;
- if (JSON_UNLIKELY(not get_bson_cstr(key)))
- {
- return false;
- }
-
- if (not is_array)
- {
- if (not sax->key(key))
- {
- return false;
- }
- }
-
- if (JSON_UNLIKELY(not parse_bson_element_internal(element_type, element_type_parse_position)))
- {
- return false;
- }
-
- // get_bson_cstr only appends
- key.clear();
+ JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent"));
}
- return true;
+ reference_tokens.pop_back();
}
/*!
- @brief Reads an array from the BSON input and passes it to the SAX-parser.
- @return whether a valid BSON-array was passed to the SAX parser
- */
- bool parse_bson_array()
- {
- std::int32_t document_size;
- get_number<std::int32_t, true>(input_format_t::bson, document_size);
+ @brief return last reference token
- if (JSON_UNLIKELY(not sax->start_array(std::size_t(-1))))
- {
- return false;
- }
+ @pre not `empty()`
+ @return last reference token
- if (JSON_UNLIKELY(not parse_bson_element_list(/*is_array*/true)))
- {
- return false;
- }
-
- return sax->end_array();
- }
+ @liveexample{The example shows the usage of `back`.,json_pointer__back}
- //////////
- // CBOR //
- //////////
+ @complexity Constant.
- /*!
- @param[in] get_char whether a new character should be retrieved from the
- input (true, default) or whether the last read
- character should be considered instead
+ @throw out_of_range.405 if JSON pointer has no parent
- @return whether a valid CBOR value was passed to the SAX parser
+ @since version 3.6.0
*/
- bool parse_cbor_internal(const bool get_char = true)
+ const std::string& back() const
{
- switch (get_char ? get() : current)
+ if (JSON_HEDLEY_UNLIKELY(empty()))
{
- // EOF
- case std::char_traits<char>::eof():
- return unexpect_eof(input_format_t::cbor, "value");
+ JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent"));
+ }
- // Integer 0x00..0x17 (0..23)
- case 0x00:
- case 0x01:
- case 0x02:
- case 0x03:
- case 0x04:
- case 0x05:
- case 0x06:
- case 0x07:
- case 0x08:
- case 0x09:
- case 0x0A:
- case 0x0B:
- case 0x0C:
- case 0x0D:
- case 0x0E:
- case 0x0F:
- case 0x10:
- case 0x11:
- case 0x12:
- case 0x13:
- case 0x14:
- case 0x15:
- case 0x16:
- case 0x17:
- return sax->number_unsigned(static_cast<number_unsigned_t>(current));
+ return reference_tokens.back();
+ }
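pop_back() and back() together, as a sketch (both throw out_of_range.405 on an empty pointer, per the checks above):

    #include <cassert>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json::json_pointer p("/a/b/c");
        assert(p.back() == "c");
        p.pop_back();
        assert(p.to_string() == "/a/b");
    }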
- case 0x18: // Unsigned integer (one-byte uint8_t follows)
- {
- uint8_t number;
- return get_number(input_format_t::cbor, number) and sax->number_unsigned(number);
- }
+ /*!
+ @brief append an unescaped token at the end of the reference pointer
- case 0x19: // Unsigned integer (two-byte uint16_t follows)
- {
- uint16_t number;
- return get_number(input_format_t::cbor, number) and sax->number_unsigned(number);
- }
+ @param[in] token token to add
- case 0x1A: // Unsigned integer (four-byte uint32_t follows)
- {
- uint32_t number;
- return get_number(input_format_t::cbor, number) and sax->number_unsigned(number);
- }
+ @complexity Amortized constant.
- case 0x1B: // Unsigned integer (eight-byte uint64_t follows)
- {
- uint64_t number;
- return get_number(input_format_t::cbor, number) and sax->number_unsigned(number);
- }
+ @liveexample{The example shows the result of `push_back` for different
+ JSON Pointers.,json_pointer__push_back}
- // Negative integer -1-0x00..-1-0x17 (-1..-24)
- case 0x20:
- case 0x21:
- case 0x22:
- case 0x23:
- case 0x24:
- case 0x25:
- case 0x26:
- case 0x27:
- case 0x28:
- case 0x29:
- case 0x2A:
- case 0x2B:
- case 0x2C:
- case 0x2D:
- case 0x2E:
- case 0x2F:
- case 0x30:
- case 0x31:
- case 0x32:
- case 0x33:
- case 0x34:
- case 0x35:
- case 0x36:
- case 0x37:
- return sax->number_integer(static_cast<int8_t>(0x20 - 1 - current));
+ @since version 3.6.0
+ */
+ void push_back(const std::string& token)
+ {
+ reference_tokens.push_back(token);
+ }
- case 0x38: // Negative integer (one-byte uint8_t follows)
- {
- uint8_t number;
- return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast<number_integer_t>(-1) - number);
- }
+ /// @copydoc push_back(const std::string&)
+ void push_back(std::string&& token)
+ {
+ reference_tokens.push_back(std::move(token));
+ }
- case 0x39: // Negative integer -1-n (two-byte uint16_t follows)
- {
- uint16_t number;
- return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast<number_integer_t>(-1) - number);
- }
+ /*!
+ @brief return whether pointer points to the root document
- case 0x3A: // Negative integer -1-n (four-byte uint32_t follows)
- {
- uint32_t number;
- return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast<number_integer_t>(-1) - number);
- }
+ @return true iff the JSON pointer points to the root document
- case 0x3B: // Negative integer -1-n (eight-byte uint64_t follows)
- {
- uint64_t number;
- return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast<number_integer_t>(-1)
- - static_cast<number_integer_t>(number));
- }
+ @complexity Constant.
- // UTF-8 string (0x00..0x17 bytes follow)
- case 0x60:
- case 0x61:
- case 0x62:
- case 0x63:
- case 0x64:
- case 0x65:
- case 0x66:
- case 0x67:
- case 0x68:
- case 0x69:
- case 0x6A:
- case 0x6B:
- case 0x6C:
- case 0x6D:
- case 0x6E:
- case 0x6F:
- case 0x70:
- case 0x71:
- case 0x72:
- case 0x73:
- case 0x74:
- case 0x75:
- case 0x76:
- case 0x77:
- case 0x78: // UTF-8 string (one-byte uint8_t for n follows)
- case 0x79: // UTF-8 string (two-byte uint16_t for n follow)
- case 0x7A: // UTF-8 string (four-byte uint32_t for n follow)
- case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow)
- case 0x7F: // UTF-8 string (indefinite length)
- {
- string_t s;
- return get_cbor_string(s) and sax->string(s);
- }
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
- // array (0x00..0x17 data items follow)
- case 0x80:
- case 0x81:
- case 0x82:
- case 0x83:
- case 0x84:
- case 0x85:
- case 0x86:
- case 0x87:
- case 0x88:
- case 0x89:
- case 0x8A:
- case 0x8B:
- case 0x8C:
- case 0x8D:
- case 0x8E:
- case 0x8F:
- case 0x90:
- case 0x91:
- case 0x92:
- case 0x93:
- case 0x94:
- case 0x95:
- case 0x96:
- case 0x97:
- return get_cbor_array(static_cast<std::size_t>(current & 0x1F));
+ @liveexample{The example shows the result of `empty` for different JSON
+ Pointers.,json_pointer__empty}
- case 0x98: // array (one-byte uint8_t for n follows)
- {
- uint8_t len;
- return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast<std::size_t>(len));
- }
+ @since version 3.6.0
+ */
+ bool empty() const noexcept
+ {
+ return reference_tokens.empty();
+ }
- case 0x99: // array (two-byte uint16_t for n follow)
- {
- uint16_t len;
- return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast<std::size_t>(len));
- }
+ private:
+ /*!
+ @param[in] s reference token to be converted into an array index
- case 0x9A: // array (four-byte uint32_t for n follow)
- {
- uint32_t len;
- return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast<std::size_t>(len));
- }
+ @return integer representation of @a s
- case 0x9B: // array (eight-byte uint64_t for n follow)
- {
- uint64_t len;
- return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast<std::size_t>(len));
- }
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index does not begin with a digit
+ @throw out_of_range.404 if string @a s could not be converted to an integer
+ @throw out_of_range.410 if an array index exceeds size_type
+ */
+ static typename BasicJsonType::size_type array_index(const std::string& s)
+ {
+ using size_type = typename BasicJsonType::size_type;
- case 0x9F: // array (indefinite length)
- return get_cbor_array(std::size_t(-1));
+ // error condition (cf. RFC 6901, Sect. 4)
+ if (JSON_HEDLEY_UNLIKELY(s.size() > 1 && s[0] == '0'))
+ {
+ JSON_THROW(detail::parse_error::create(106, 0,
+ "array index '" + s +
+ "' must not begin with '0'"));
+ }
- // map (0x00..0x17 pairs of data items follow)
- case 0xA0:
- case 0xA1:
- case 0xA2:
- case 0xA3:
- case 0xA4:
- case 0xA5:
- case 0xA6:
- case 0xA7:
- case 0xA8:
- case 0xA9:
- case 0xAA:
- case 0xAB:
- case 0xAC:
- case 0xAD:
- case 0xAE:
- case 0xAF:
- case 0xB0:
- case 0xB1:
- case 0xB2:
- case 0xB3:
- case 0xB4:
- case 0xB5:
- case 0xB6:
- case 0xB7:
- return get_cbor_object(static_cast<std::size_t>(current & 0x1F));
+ // error condition (cf. RFC 6901, Sect. 4)
+ if (JSON_HEDLEY_UNLIKELY(s.size() > 1 && !(s[0] >= '1' && s[0] <= '9')))
+ {
+ JSON_THROW(detail::parse_error::create(109, 0, "array index '" + s + "' is not a number"));
+ }
- case 0xB8: // map (one-byte uint8_t for n follows)
- {
- uint8_t len;
- return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast<std::size_t>(len));
- }
+ std::size_t processed_chars = 0;
+ unsigned long long res = 0;
+ JSON_TRY
+ {
+ res = std::stoull(s, &processed_chars);
+ }
+ JSON_CATCH(std::out_of_range&)
+ {
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'"));
+ }
- case 0xB9: // map (two-byte uint16_t for n follow)
- {
- uint16_t len;
- return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast<std::size_t>(len));
- }
+ // check if the string was completely read
+ if (JSON_HEDLEY_UNLIKELY(processed_chars != s.size()))
+ {
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'"));
+ }
- case 0xBA: // map (four-byte uint32_t for n follow)
- {
- uint32_t len;
- return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast<std::size_t>(len));
- }
+ // only triggered on special platforms (like 32bit), see also
+ // https://github.com/nlohmann/json/pull/2203
+ if (res >= static_cast<unsigned long long>((std::numeric_limits<size_type>::max)()))
+ {
+ JSON_THROW(detail::out_of_range::create(410, "array index " + s + " exceeds size_type")); // LCOV_EXCL_LINE
+ }
- case 0xBB: // map (eight-byte uint64_t for n follow)
- {
- uint64_t len;
- return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast<std::size_t>(len));
- }
+ return static_cast<size_type>(res);
+ }
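array_index() is private, but its rules surface through any pointer-based element access; a sketch via at() (the commented lines indicate what would throw):

    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json j = json::parse(R"({"list":[10,20,30]})");
        int v = j.at(json::json_pointer("/list/0"));  // 10: plain "0" is accepted
        // j.at(json::json_pointer("/list/01"));      // parse_error.106: leading zero
        // j.at(json::json_pointer("/list/two"));     // parse_error.109: not a number
        return v == 10 ? 0 : 1;
    }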
- case 0xBF: // map (indefinite length)
- return get_cbor_object(std::size_t(-1));
+ json_pointer top() const
+ {
+ if (JSON_HEDLEY_UNLIKELY(empty()))
+ {
+ JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent"));
+ }
- case 0xF4: // false
- return sax->boolean(false);
+ json_pointer result = *this;
+ result.reference_tokens = {reference_tokens[0]};
+ return result;
+ }
- case 0xF5: // true
- return sax->boolean(true);
+ /*!
+ @brief create and return a reference to the pointed to value
- case 0xF6: // null
- return sax->null();
+ @complexity Linear in the number of reference tokens.
- case 0xF9: // Half-Precision Float (two-byte IEEE 754)
+ @throw parse_error.109 if array index is not a number
+ @throw type_error.313 if value cannot be unflattened
+ */
+ BasicJsonType& get_and_create(BasicJsonType& j) const
+ {
+ auto result = &j;
+
+ // in case no reference tokens exist, return a reference to the JSON value
+ // j which will be overwritten by a primitive value
+ for (const auto& reference_token : reference_tokens)
+ {
+ switch (result->type())
{
- const int byte1_raw = get();
- if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "number")))
+ case detail::value_t::null:
{
- return false;
+ if (reference_token == "0")
+ {
+ // start a new array if reference token is 0
+ result = &result->operator[](0);
+ }
+ else
+ {
+ // start a new object otherwise
+ result = &result->operator[](reference_token);
+ }
+ break;
}
- const int byte2_raw = get();
- if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "number")))
+
+ case detail::value_t::object:
{
- return false;
+ // create an entry in the object
+ result = &result->operator[](reference_token);
+ break;
}
- const auto byte1 = static_cast<unsigned char>(byte1_raw);
- const auto byte2 = static_cast<unsigned char>(byte2_raw);
-
- // code from RFC 7049, Appendix D, Figure 3:
- // As half-precision floating-point numbers were only added
- // to IEEE 754 in 2008, today's programming platforms often
- // still only have limited support for them. It is very
- // easy to include at least decoding support for them even
- // without such support. An example of a small decoder for
- // half-precision floating-point numbers in the C language
- // is shown in Fig. 3.
- const int half = (byte1 << 8) + byte2;
- const double val = [&half]
+ case detail::value_t::array:
{
- const int exp = (half >> 10) & 0x1F;
- const int mant = half & 0x3FF;
- assert(0 <= exp and exp <= 32);
- assert(0 <= mant and mant <= 1024);
- switch (exp)
- {
- case 0:
- return std::ldexp(mant, -24);
- case 31:
- return (mant == 0)
- ? std::numeric_limits<double>::infinity()
- : std::numeric_limits<double>::quiet_NaN();
- default:
- return std::ldexp(mant + 1024, exp - 25);
- }
- }();
- return sax->number_float((half & 0x8000) != 0
- ? static_cast<number_float_t>(-val)
- : static_cast<number_float_t>(val), "");
- }
-
- case 0xFA: // Single-Precision Float (four-byte IEEE 754)
- {
- float number;
- return get_number(input_format_t::cbor, number) and sax->number_float(static_cast<number_float_t>(number), "");
- }
-
- case 0xFB: // Double-Precision Float (eight-byte IEEE 754)
- {
- double number;
- return get_number(input_format_t::cbor, number) and sax->number_float(static_cast<number_float_t>(number), "");
- }
+ // create an entry in the array
+ result = &result->operator[](array_index(reference_token));
+ break;
+ }
- default: // anything else (0xFF is handled inside the other types)
- {
- auto last_token = get_token_string();
- return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value")));
+ /*
+ The following code is only reached if there exists a reference
+ token _and_ the current value is primitive. In this case, we have
+ an error situation, because a primitive value may only occur on
+ its own, that is, with an empty list of reference tokens.
+ */
+ default:
+ JSON_THROW(detail::type_error::create(313, "invalid value to unflatten"));
}
}
+
+ return *result;
}
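get_and_create() is the machinery behind basic_json::unflatten(): each flattened key is walked token by token, materializing objects and arrays on demand ("0" starts an array, anything else an object). A sketch of the observable behavior:

    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json flat = json::parse(R"({"/answer/everything": 42, "/list/0": 1})");
        json tree = flat.unflatten();
        // tree == {"answer":{"everything":42},"list":[1]}
    }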
/*!
- @brief reads a CBOR string
+ @brief return a reference to the pointed to value
- This function first reads starting bytes to determine the expected
- string length and then copies this number of bytes into a string.
- Additionally, CBOR's strings with indefinite lengths are supported.
+ @note This version does not throw if a value is not present, but tries to
+ create nested values instead. For instance, calling this function
+ with pointer `"/this/that"` on a null value is equivalent to calling
+ `operator[]("this").operator[]("that")` on that value, effectively
+ changing the null value to an object.
- @param[out] result created string
+ @param[in] ptr a JSON value
- @return whether string creation completed
+ @return reference to the JSON value pointed to by the JSON pointer
+
+ @complexity Linear in the length of the JSON pointer.
+
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ @throw out_of_range.404 if the JSON pointer can not be resolved
*/
- bool get_cbor_string(string_t& result)
+ BasicJsonType& get_unchecked(BasicJsonType* ptr) const
{
- if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "string")))
- {
- return false;
- }
-
- switch (current)
+ for (const auto& reference_token : reference_tokens)
{
- // UTF-8 string (0x00..0x17 bytes follow)
- case 0x60:
- case 0x61:
- case 0x62:
- case 0x63:
- case 0x64:
- case 0x65:
- case 0x66:
- case 0x67:
- case 0x68:
- case 0x69:
- case 0x6A:
- case 0x6B:
- case 0x6C:
- case 0x6D:
- case 0x6E:
- case 0x6F:
- case 0x70:
- case 0x71:
- case 0x72:
- case 0x73:
- case 0x74:
- case 0x75:
- case 0x76:
- case 0x77:
- {
- return get_string(input_format_t::cbor, current & 0x1F, result);
- }
-
- case 0x78: // UTF-8 string (one-byte uint8_t for n follows)
- {
- uint8_t len;
- return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result);
- }
-
- case 0x79: // UTF-8 string (two-byte uint16_t for n follow)
+ // convert null values to arrays or objects before continuing
+ if (ptr->is_null())
{
- uint16_t len;
- return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result);
- }
+ // check if reference token is a number
+ const bool nums =
+ std::all_of(reference_token.begin(), reference_token.end(),
+ [](const unsigned char x)
+ {
+ return std::isdigit(x);
+ });
- case 0x7A: // UTF-8 string (four-byte uint32_t for n follow)
- {
- uint32_t len;
- return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result);
+ // change value to array for numbers or "-" or to object otherwise
+ *ptr = (nums || reference_token == "-")
+ ? detail::value_t::array
+ : detail::value_t::object;
}
- case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow)
+ switch (ptr->type())
{
- uint64_t len;
- return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result);
- }
+ case detail::value_t::object:
+ {
+ // use unchecked object access
+ ptr = &ptr->operator[](reference_token);
+ break;
+ }
- case 0x7F: // UTF-8 string (indefinite length)
- {
- while (get() != 0xFF)
+ case detail::value_t::array:
{
- string_t chunk;
- if (not get_cbor_string(chunk))
+ if (reference_token == "-")
{
- return false;
+ // explicitly treat "-" as index beyond the end
+ ptr = &ptr->operator[](ptr->m_value.array->size());
}
- result.append(chunk);
+ else
+ {
+ // convert array index to number; unchecked access
+ ptr = &ptr->operator[](array_index(reference_token));
+ }
+ break;
}
- return true;
- }
- default:
- {
- auto last_token = get_token_string();
- return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x60-0x7B) or indefinite string type (0x7F); last byte: 0x" + last_token, "string")));
+ default:
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
}
}
+
+ return *ptr;
}
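This is the code path behind the non-const operator[](json_pointer): missing values are created rather than reported. A sketch:

    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json j;                                   // null
        j[json::json_pointer("/this/that")] = 27; // creates {"this":{"that":27}}
        j[json::json_pointer("/nums/0")] = 1;     // "0" starts an array: "nums":[1]
        j[json::json_pointer("/nums/-")] = 2;     // "-" appends: "nums":[1,2]
    }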
/*!
- @param[in] len the length of the array or std::size_t(-1) for an
- array of indefinite size
- @return whether array creation completed
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ @throw out_of_range.402 if the array index '-' is used
+ @throw out_of_range.404 if the JSON pointer can not be resolved
*/
- bool get_cbor_array(const std::size_t len)
+ BasicJsonType& get_checked(BasicJsonType* ptr) const
{
- if (JSON_UNLIKELY(not sax->start_array(len)))
- {
- return false;
- }
-
- if (len != std::size_t(-1))
+ for (const auto& reference_token : reference_tokens)
{
- for (std::size_t i = 0; i < len; ++i)
+ switch (ptr->type())
{
- if (JSON_UNLIKELY(not parse_cbor_internal()))
+ case detail::value_t::object:
{
- return false;
+ // note: at performs range check
+ ptr = &ptr->at(reference_token);
+ break;
}
- }
- }
- else
- {
- while (get() != 0xFF)
- {
- if (JSON_UNLIKELY(not parse_cbor_internal(false)))
+
+ case detail::value_t::array:
{
- return false;
+ if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
+ {
+ // "-" always fails the range check
+ JSON_THROW(detail::out_of_range::create(402,
+ "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
+ ") is out of range"));
+ }
+
+ // note: at performs range check
+ ptr = &ptr->at(array_index(reference_token));
+ break;
}
+
+ default:
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
}
}
- return sax->end_array();
+ return *ptr;
}
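get_checked() backs at(json_pointer); every step is range-checked and "-" is always rejected, as documented above. A sketch:

    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main()
    {
        json j = json::parse(R"({"list":[1,2,3]})");
        int second = j.at(json::json_pointer("/list/1")); // 2
        // j.at(json::json_pointer("/list/-"));           // out_of_range.402: "-" never
        //                                                // passes the range check
        return second == 2 ? 0 : 1;
    }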
/*!
- @param[in] len the length of the object or std::size_t(-1) for an
- object of indefinite size
- @return whether object creation completed
+ @brief return a const reference to the pointed to value
+
+ @param[in] ptr a JSON value
+
+ @return const reference to the JSON value pointed to by the JSON
+ pointer
+
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ @throw out_of_range.402 if the array index '-' is used
+ @throw out_of_range.404 if the JSON pointer can not be resolved
*/
- bool get_cbor_object(const std::size_t len)
+ const BasicJsonType& get_unchecked(const BasicJsonType* ptr) const
{
- if (not JSON_UNLIKELY(sax->start_object(len)))
- {
- return false;
- }
-
- string_t key;
- if (len != std::size_t(-1))
+ for (const auto& reference_token : reference_tokens)
{
- for (std::size_t i = 0; i < len; ++i)
+ switch (ptr->type())
{
- get();
- if (JSON_UNLIKELY(not get_cbor_string(key) or not sax->key(key)))
+ case detail::value_t::object:
{
- return false;
+ // use unchecked object access
+ ptr = &ptr->operator[](reference_token);
+ break;
}
- if (JSON_UNLIKELY(not parse_cbor_internal()))
+ case detail::value_t::array:
{
- return false;
+ if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
+ {
+ // "-" cannot be used for const access
+ JSON_THROW(detail::out_of_range::create(402,
+ "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
+ ") is out of range"));
+ }
+
+ // use unchecked array access
+ ptr = &ptr->operator[](array_index(reference_token));
+ break;
}
- key.clear();
+
+ default:
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
}
}
- else
+
+ return *ptr;
+ }
+
+ /*!
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+ @throw out_of_range.402 if the array index '-' is used
+ @throw out_of_range.404 if the JSON pointer cannot be resolved
+ */
+ const BasicJsonType& get_checked(const BasicJsonType* ptr) const
+ {
+ for (const auto& reference_token : reference_tokens)
{
- while (get() != 0xFF)
+ switch (ptr->type())
{
- if (JSON_UNLIKELY(not get_cbor_string(key) or not sax->key(key)))
+ case detail::value_t::object:
{
- return false;
+ // note: at performs range check
+ ptr = &ptr->at(reference_token);
+ break;
}
- if (JSON_UNLIKELY(not parse_cbor_internal()))
+ case detail::value_t::array:
{
- return false;
+ if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
+ {
+ // "-" always fails the range check
+ JSON_THROW(detail::out_of_range::create(402,
+ "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
+ ") is out of range"));
+ }
+
+ // note: at performs range check
+ ptr = &ptr->at(array_index(reference_token));
+ break;
}
- key.clear();
+
+ default:
+ JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
}
}
- return sax->end_object();
+ return *ptr;
}
- /////////////
- // MsgPack //
- /////////////
-
/*!
- @return whether a valid MessagePack value was passed to the SAX parser
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
*/
- bool parse_msgpack_internal()
+ bool contains(const BasicJsonType* ptr) const
{
- switch (get())
+ for (const auto& reference_token : reference_tokens)
{
- // EOF
- case std::char_traits<char>::eof():
- return unexpect_eof(input_format_t::msgpack, "value");
-
- // positive fixint
- case 0x00:
- case 0x01:
- case 0x02:
- case 0x03:
- case 0x04:
- case 0x05:
- case 0x06:
- case 0x07:
- case 0x08:
- case 0x09:
- case 0x0A:
- case 0x0B:
- case 0x0C:
- case 0x0D:
- case 0x0E:
- case 0x0F:
- case 0x10:
- case 0x11:
- case 0x12:
- case 0x13:
- case 0x14:
- case 0x15:
- case 0x16:
- case 0x17:
- case 0x18:
- case 0x19:
- case 0x1A:
- case 0x1B:
- case 0x1C:
- case 0x1D:
- case 0x1E:
- case 0x1F:
- case 0x20:
- case 0x21:
- case 0x22:
- case 0x23:
- case 0x24:
- case 0x25:
- case 0x26:
- case 0x27:
- case 0x28:
- case 0x29:
- case 0x2A:
- case 0x2B:
- case 0x2C:
- case 0x2D:
- case 0x2E:
- case 0x2F:
- case 0x30:
- case 0x31:
- case 0x32:
- case 0x33:
- case 0x34:
- case 0x35:
- case 0x36:
- case 0x37:
- case 0x38:
- case 0x39:
- case 0x3A:
- case 0x3B:
- case 0x3C:
- case 0x3D:
- case 0x3E:
- case 0x3F:
- case 0x40:
- case 0x41:
- case 0x42:
- case 0x43:
- case 0x44:
- case 0x45:
- case 0x46:
- case 0x47:
- case 0x48:
- case 0x49:
- case 0x4A:
- case 0x4B:
- case 0x4C:
- case 0x4D:
- case 0x4E:
- case 0x4F:
- case 0x50:
- case 0x51:
- case 0x52:
- case 0x53:
- case 0x54:
- case 0x55:
- case 0x56:
- case 0x57:
- case 0x58:
- case 0x59:
- case 0x5A:
- case 0x5B:
- case 0x5C:
- case 0x5D:
- case 0x5E:
- case 0x5F:
- case 0x60:
- case 0x61:
- case 0x62:
- case 0x63:
- case 0x64:
- case 0x65:
- case 0x66:
- case 0x67:
- case 0x68:
- case 0x69:
- case 0x6A:
- case 0x6B:
- case 0x6C:
- case 0x6D:
- case 0x6E:
- case 0x6F:
- case 0x70:
- case 0x71:
- case 0x72:
- case 0x73:
- case 0x74:
- case 0x75:
- case 0x76:
- case 0x77:
- case 0x78:
- case 0x79:
- case 0x7A:
- case 0x7B:
- case 0x7C:
- case 0x7D:
- case 0x7E:
- case 0x7F:
- return sax->number_unsigned(static_cast<number_unsigned_t>(current));
-
- // fixmap
- case 0x80:
- case 0x81:
- case 0x82:
- case 0x83:
- case 0x84:
- case 0x85:
- case 0x86:
- case 0x87:
- case 0x88:
- case 0x89:
- case 0x8A:
- case 0x8B:
- case 0x8C:
- case 0x8D:
- case 0x8E:
- case 0x8F:
- return get_msgpack_object(static_cast<std::size_t>(current & 0x0F));
-
- // fixarray
- case 0x90:
- case 0x91:
- case 0x92:
- case 0x93:
- case 0x94:
- case 0x95:
- case 0x96:
- case 0x97:
- case 0x98:
- case 0x99:
- case 0x9A:
- case 0x9B:
- case 0x9C:
- case 0x9D:
- case 0x9E:
- case 0x9F:
- return get_msgpack_array(static_cast<std::size_t>(current & 0x0F));
-
- // fixstr
- case 0xA0:
- case 0xA1:
- case 0xA2:
- case 0xA3:
- case 0xA4:
- case 0xA5:
- case 0xA6:
- case 0xA7:
- case 0xA8:
- case 0xA9:
- case 0xAA:
- case 0xAB:
- case 0xAC:
- case 0xAD:
- case 0xAE:
- case 0xAF:
- case 0xB0:
- case 0xB1:
- case 0xB2:
- case 0xB3:
- case 0xB4:
- case 0xB5:
- case 0xB6:
- case 0xB7:
- case 0xB8:
- case 0xB9:
- case 0xBA:
- case 0xBB:
- case 0xBC:
- case 0xBD:
- case 0xBE:
- case 0xBF:
+ switch (ptr->type())
{
- string_t s;
- return get_msgpack_string(s) and sax->string(s);
- }
-
- case 0xC0: // nil
- return sax->null();
-
- case 0xC2: // false
- return sax->boolean(false);
-
- case 0xC3: // true
- return sax->boolean(true);
-
- case 0xCA: // float 32
- {
- float number;
- return get_number(input_format_t::msgpack, number) and sax->number_float(static_cast<number_float_t>(number), "");
- }
-
- case 0xCB: // float 64
- {
- double number;
- return get_number(input_format_t::msgpack, number) and sax->number_float(static_cast<number_float_t>(number), "");
- }
-
- case 0xCC: // uint 8
- {
- uint8_t number;
- return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number);
- }
-
- case 0xCD: // uint 16
- {
- uint16_t number;
- return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number);
- }
-
- case 0xCE: // uint 32
- {
- uint32_t number;
- return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number);
- }
-
- case 0xCF: // uint 64
- {
- uint64_t number;
- return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number);
- }
-
- case 0xD0: // int 8
- {
- int8_t number;
- return get_number(input_format_t::msgpack, number) and sax->number_integer(number);
- }
-
- case 0xD1: // int 16
- {
- int16_t number;
- return get_number(input_format_t::msgpack, number) and sax->number_integer(number);
- }
-
- case 0xD2: // int 32
- {
- int32_t number;
- return get_number(input_format_t::msgpack, number) and sax->number_integer(number);
- }
-
- case 0xD3: // int 64
- {
- int64_t number;
- return get_number(input_format_t::msgpack, number) and sax->number_integer(number);
- }
-
- case 0xD9: // str 8
- case 0xDA: // str 16
- case 0xDB: // str 32
- {
- string_t s;
- return get_msgpack_string(s) and sax->string(s);
- }
-
- case 0xDC: // array 16
- {
- uint16_t len;
- return get_number(input_format_t::msgpack, len) and get_msgpack_array(static_cast<std::size_t>(len));
- }
+ case detail::value_t::object:
+ {
+ if (!ptr->contains(reference_token))
+ {
+ // we did not find the key in the object
+ return false;
+ }
- case 0xDD: // array 32
- {
- uint32_t len;
- return get_number(input_format_t::msgpack, len) and get_msgpack_array(static_cast<std::size_t>(len));
- }
+ ptr = &ptr->operator[](reference_token);
+ break;
+ }
- case 0xDE: // map 16
- {
- uint16_t len;
- return get_number(input_format_t::msgpack, len) and get_msgpack_object(static_cast<std::size_t>(len));
- }
+ case detail::value_t::array:
+ {
+ if (JSON_HEDLEY_UNLIKELY(reference_token == "-"))
+ {
+ // "-" always fails the range check
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(reference_token.size() == 1 && !("0" <= reference_token && reference_token <= "9")))
+ {
+ // invalid char
+ return false;
+ }
+ if (JSON_HEDLEY_UNLIKELY(reference_token.size() > 1))
+ {
+ if (JSON_HEDLEY_UNLIKELY(!('1' <= reference_token[0] && reference_token[0] <= '9')))
+ {
+ // first char should be between '1' and '9'
+ return false;
+ }
+ for (std::size_t i = 1; i < reference_token.size(); i++)
+ {
+ if (JSON_HEDLEY_UNLIKELY(!('0' <= reference_token[i] && reference_token[i] <= '9')))
+ {
+ // other char should be between '0' and '9'
+ return false;
+ }
+ }
+ }
- case 0xDF: // map 32
- {
- uint32_t len;
- return get_number(input_format_t::msgpack, len) and get_msgpack_object(static_cast<std::size_t>(len));
- }
+ const auto idx = array_index(reference_token);
+ if (idx >= ptr->size())
+ {
+ // index out of range
+ return false;
+ }
- // negative fixint
- case 0xE0:
- case 0xE1:
- case 0xE2:
- case 0xE3:
- case 0xE4:
- case 0xE5:
- case 0xE6:
- case 0xE7:
- case 0xE8:
- case 0xE9:
- case 0xEA:
- case 0xEB:
- case 0xEC:
- case 0xED:
- case 0xEE:
- case 0xEF:
- case 0xF0:
- case 0xF1:
- case 0xF2:
- case 0xF3:
- case 0xF4:
- case 0xF5:
- case 0xF6:
- case 0xF7:
- case 0xF8:
- case 0xF9:
- case 0xFA:
- case 0xFB:
- case 0xFC:
- case 0xFD:
- case 0xFE:
- case 0xFF:
- return sax->number_integer(static_cast<int8_t>(current));
+ ptr = &ptr->operator[](idx);
+ break;
+ }
- default: // anything else
- {
- auto last_token = get_token_string();
- return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::msgpack, "invalid byte: 0x" + last_token, "value")));
+ default:
+ {
+ // we do not expect primitive values if there is still a
+ // reference token to process
+ return false;
+ }
}
}
+
+ // no reference token left means the pointer resolved to an existing value
+ return true;
}
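+
+ // Sketch of the non-throwing checks above (assumes the public
+ // contains(json_pointer) overload):
+ //
+ //   nlohmann::json j = {{"a", {1, 2, 3}}};
+ //   j.contains("/a/2"_json_pointer);   // true
+ //   j.contains("/a/3"_json_pointer);   // false (index out of range)
+ //   j.contains("/a/01"_json_pointer);  // false (leading zero rejected)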
/*!
- @brief reads a MessagePack string
-
- This function first reads starting bytes to determine the expected
- string length and then copies this number of bytes into a string.
+ @brief split the input string into reference tokens
- @param[out] result created string
+ @note This function is only called by the json_pointer constructor.
+ All exceptions below are documented there.
- @return whether string creation completed
+ @throw parse_error.107 if the pointer is not empty or begins with '/'
+ @throw parse_error.108 if character '~' is not followed by '0' or '1'
*/
- bool get_msgpack_string(string_t& result)
+ static std::vector<std::string> split(const std::string& reference_string)
{
- if (JSON_UNLIKELY(not unexpect_eof(input_format_t::msgpack, "string")))
+ std::vector<std::string> result;
+
+ // special case: empty reference string -> no reference tokens
+ if (reference_string.empty())
{
- return false;
+ return result;
}
- switch (current)
+ // check if nonempty reference string begins with slash
+ if (JSON_HEDLEY_UNLIKELY(reference_string[0] != '/'))
{
- // fixstr
- case 0xA0:
- case 0xA1:
- case 0xA2:
- case 0xA3:
- case 0xA4:
- case 0xA5:
- case 0xA6:
- case 0xA7:
- case 0xA8:
- case 0xA9:
- case 0xAA:
- case 0xAB:
- case 0xAC:
- case 0xAD:
- case 0xAE:
- case 0xAF:
- case 0xB0:
- case 0xB1:
- case 0xB2:
- case 0xB3:
- case 0xB4:
- case 0xB5:
- case 0xB6:
- case 0xB7:
- case 0xB8:
- case 0xB9:
- case 0xBA:
- case 0xBB:
- case 0xBC:
- case 0xBD:
- case 0xBE:
- case 0xBF:
- {
- return get_string(input_format_t::msgpack, current & 0x1F, result);
- }
+ JSON_THROW(detail::parse_error::create(107, 1,
+ "JSON pointer must be empty or begin with '/' - was: '" +
+ reference_string + "'"));
+ }
- case 0xD9: // str 8
- {
- uint8_t len;
- return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result);
- }
+ // extract the reference tokens:
+ // - slash: position of the last read slash (or end of string)
+ // - start: position after the previous slash
+ for (
+ // search for the first slash after the first character
+ std::size_t slash = reference_string.find_first_of('/', 1),
+ // set the beginning of the first reference token
+ start = 1;
+ // we can stop if start == 0 (if slash == std::string::npos)
+ start != 0;
+ // set the beginning of the next reference token
+ // (will eventually be 0 if slash == std::string::npos)
+ start = (slash == std::string::npos) ? 0 : slash + 1,
+ // find next slash
+ slash = reference_string.find_first_of('/', start))
+ {
+ // use the text between the beginning of the reference token
+ // (start) and the last slash (slash).
+ auto reference_token = reference_string.substr(start, slash - start);
- case 0xDA: // str 16
+ // check reference tokens are properly escaped
+ for (std::size_t pos = reference_token.find_first_of('~');
+ pos != std::string::npos;
+ pos = reference_token.find_first_of('~', pos + 1))
{
- uint16_t len;
- return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result);
- }
+ JSON_ASSERT(reference_token[pos] == '~');
- case 0xDB: // str 32
- {
- uint32_t len;
- return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result);
+ // ~ must be followed by 0 or 1
+ if (JSON_HEDLEY_UNLIKELY(pos == reference_token.size() - 1 ||
+ (reference_token[pos + 1] != '0' &&
+ reference_token[pos + 1] != '1')))
+ {
+ JSON_THROW(detail::parse_error::create(108, 0, "escape character '~' must be followed with '0' or '1'"));
+ }
}
- default:
- {
- auto last_token = get_token_string();
- return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::msgpack, "expected length specification (0xA0-0xBF, 0xD9-0xDB); last byte: 0x" + last_token, "string")));
- }
+ // finally, store the reference token
+ unescape(reference_token);
+ result.push_back(reference_token);
}
+
+ return result;
}
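+
+ // Tokenization sketch for the loop above:
+ //
+ //   split("/a~1b//~0c") -> {"a/b", "", "~c"}
+ //
+ // empty tokens between consecutive slashes are kept, and each token is
+ // unescaped only after it has been extracted.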
/*!
- @param[in] len the length of the array
- @return whether array creation completed
- */
- bool get_msgpack_array(const std::size_t len)
- {
- if (JSON_UNLIKELY(not sax->start_array(len)))
- {
- return false;
- }
+ @brief replace all occurrences of a substring by another string
- for (std::size_t i = 0; i < len; ++i)
- {
- if (JSON_UNLIKELY(not parse_msgpack_internal()))
- {
- return false;
- }
- }
+ @param[in,out] s the string to manipulate; changed so that all
+ occurrences of @a f are replaced with @a t
+ @param[in] f the substring to replace with @a t
+ @param[in] t the string to replace @a f
- return sax->end_array();
- }
+ @pre The search string @a f must not be empty. **This precondition is
+ enforced with an assertion.**
- /*!
- @param[in] len the length of the object
- @return whether object creation completed
+ @since version 2.0.0
*/
- bool get_msgpack_object(const std::size_t len)
+ static void replace_substring(std::string& s, const std::string& f,
+ const std::string& t)
{
- if (JSON_UNLIKELY(not sax->start_object(len)))
- {
- return false;
- }
-
- string_t key;
- for (std::size_t i = 0; i < len; ++i)
- {
- get();
- if (JSON_UNLIKELY(not get_msgpack_string(key) or not sax->key(key)))
- {
- return false;
- }
-
- if (JSON_UNLIKELY(not parse_msgpack_internal()))
- {
- return false;
- }
- key.clear();
- }
-
- return sax->end_object();
+ JSON_ASSERT(!f.empty());
+ for (auto pos = s.find(f); // find first occurrence of f
+ pos != std::string::npos; // make sure f was found
+ s.replace(pos, f.size(), t), // replace with t, and
+ pos = s.find(f, pos + t.size())) // find next occurrence of f
+ {}
}
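+
+ // Example of the loop above; resuming the search at pos + t.size() keeps
+ // a replacement that still contains @a f from being matched again:
+ //
+ //   std::string s = "a~b";
+ //   replace_substring(s, "~", "~0");  // s == "a~0b"; the loop terminates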
- ////////////
- // UBJSON //
- ////////////
-
- /*!
- @param[in] get_char whether a new character should be retrieved from the
- input (true, default) or whether the last read
- character should be considered instead
-
- @return whether a valid UBJSON value was passed to the SAX parser
- */
- bool parse_ubjson_internal(const bool get_char = true)
+ /// escape "~" to "~0" and "/" to "~1"
+ static std::string escape(std::string s)
{
- return get_ubjson_value(get_char ? get_ignore_noop() : current);
+ replace_substring(s, "~", "~0");
+ replace_substring(s, "/", "~1");
+ return s;
}
- /*!
- @brief reads a UBJSON string
-
- This function is either called after reading the 'S' byte explicitly
- indicating a string, or in case of an object key where the 'S' byte can be
- left out.
-
- @param[out] result created string
- @param[in] get_char whether a new character should be retrieved from the
- input (true, default) or whether the last read
- character should be considered instead
-
- @return whether string creation completed
- */
- bool get_ubjson_string(string_t& result, const bool get_char = true)
+ /// unescape "~1" to slash and "~0" to tilde (order is important!)
+ static void unescape(std::string& s)
{
- if (get_char)
- {
- get(); // TODO: may we ignore N here?
- }
-
- if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "value")))
- {
- return false;
- }
-
- switch (current)
- {
- case 'U':
- {
- uint8_t len;
- return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result);
- }
-
- case 'i':
- {
- int8_t len;
- return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result);
- }
-
- case 'I':
- {
- int16_t len;
- return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result);
- }
-
- case 'l':
- {
- int32_t len;
- return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result);
- }
-
- case 'L':
- {
- int64_t len;
- return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result);
- }
-
- default:
- auto last_token = get_token_string();
- return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L); last byte: 0x" + last_token, "string")));
- }
+ replace_substring(s, "~1", "/");
+ replace_substring(s, "~0", "~");
}
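+
+ // Round-trip sketch for the two helpers above:
+ //
+ //   escape("a/b~c") == "a~1b~0c"; unescape() restores "a/b~c"
+ //
+ // the replacement order in unescape() matters: rewriting "~0" first would
+ // turn the escaped token "~01" into "~1", which the second pass would then
+ // wrongly rewrite to "/".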
/*!
- @param[out] result determined size
- @return whether size determination completed
+ @param[in] reference_string the reference string to the current value
+ @param[in] value the value to consider
+ @param[in,out] result the result object to insert values to
+
+ @note Empty objects or arrays are flattened to `null`.
*/
- bool get_ubjson_size_value(std::size_t& result)
+ static void flatten(const std::string& reference_string,
+ const BasicJsonType& value,
+ BasicJsonType& result)
{
- switch (get_ignore_noop())
+ switch (value.type())
{
- case 'U':
- {
- uint8_t number;
- if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number)))
- {
- return false;
- }
- result = static_cast<std::size_t>(number);
- return true;
- }
-
- case 'i':
+ case detail::value_t::array:
{
- int8_t number;
- if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number)))
+ if (value.m_value.array->empty())
{
- return false;
+ // flatten empty array as null
+ result[reference_string] = nullptr;
}
- result = static_cast<std::size_t>(number);
- return true;
- }
-
- case 'I':
- {
- int16_t number;
- if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number)))
+ else
{
- return false;
+ // iterate array and use index as reference string
+ for (std::size_t i = 0; i < value.m_value.array->size(); ++i)
+ {
+ flatten(reference_string + "/" + std::to_string(i),
+ value.m_value.array->operator[](i), result);
+ }
}
- result = static_cast<std::size_t>(number);
- return true;
+ break;
}
- case 'l':
+ case detail::value_t::object:
{
- int32_t number;
- if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number)))
+ if (value.m_value.object->empty())
{
- return false;
+ // flatten empty object as null
+ result[reference_string] = nullptr;
}
- result = static_cast<std::size_t>(number);
- return true;
- }
-
- case 'L':
- {
- int64_t number;
- if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number)))
+ else
{
- return false;
+ // iterate object and use keys as reference string
+ for (const auto& element : *value.m_value.object)
+ {
+ flatten(reference_string + "/" + escape(element.first), element.second, result);
+ }
}
- result = static_cast<std::size_t>(number);
- return true;
+ break;
}
default:
{
- auto last_token = get_token_string();
- return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L) after '#'; last byte: 0x" + last_token, "size")));
+ // add primitive value with its reference string
+ result[reference_string] = value;
+ break;
}
}
}
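+
+ // Flattening sketch (assumes the public flatten() member, which calls
+ // this helper with an empty reference string):
+ //
+ //   nlohmann::json j = {{"a", {{"b", 1}}}, {"c", {2, 3}}};
+ //   j.flatten();  // {"/a/b": 1, "/c/0": 2, "/c/1": 3}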
/*!
- @brief determine the type and size for a container
-
- In the optimized UBJSON format, a type and a size can be provided to allow
- for a more compact representation.
+ @param[in] value flattened JSON
- @param[out] result pair of the size and the type
+ @return unflattened JSON
- @return whether pair creation completed
+ @throw parse_error.109 if array index is not a number
+ @throw type_error.314 if value is not an object
+ @throw type_error.315 if object values are not primitive
+ @throw type_error.313 if value cannot be unflattened
*/
- bool get_ubjson_size_type(std::pair<std::size_t, int>& result)
+ static BasicJsonType
+ unflatten(const BasicJsonType& value)
{
- result.first = string_t::npos; // size
- result.second = 0; // type
+ if (JSON_HEDLEY_UNLIKELY(!value.is_object()))
+ {
+ JSON_THROW(detail::type_error::create(314, "only objects can be unflattened"));
+ }
- get_ignore_noop();
+ BasicJsonType result;
- if (current == '$')
+ // iterate the JSON object values
+ for (const auto& element : *value.m_value.object)
{
- result.second = get(); // must not ignore 'N', because 'N' maybe the type
- if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "type")))
- {
- return false;
- }
-
- get_ignore_noop();
- if (JSON_UNLIKELY(current != '#'))
+ if (JSON_HEDLEY_UNLIKELY(!element.second.is_primitive()))
{
- if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "value")))
- {
- return false;
- }
- auto last_token = get_token_string();
- return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "expected '#' after type information; last byte: 0x" + last_token, "size")));
+ JSON_THROW(detail::type_error::create(315, "values in object must be primitive"));
}
- return get_ubjson_size_value(result.first);
- }
- else if (current == '#')
- {
- return get_ubjson_size_value(result.first);
+ // assign value to the reference pointed to by the JSON pointer. Note that if
+ // the JSON pointer is "" (i.e., points to the whole value), function
+ // get_and_create returns a reference to result itself. An assignment
+ // will then create a primitive value.
+ json_pointer(element.first).get_and_create(result) = element.second;
}
- return true;
+
+ return result;
}
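+
+ // Inverse of the flattening sketch above (assumes the public unflatten()
+ // member, which calls this helper):
+ //
+ //   nlohmann::json f = {{"/a/b", 1}, {"/c/0", 2}, {"/c/1", 3}};
+ //   f.unflatten();  // {"a": {"b": 1}, "c": [2, 3]}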
/*!
- @param prefix the previously read or set type prefix
- @return whether value creation completed
+ @brief compares two JSON pointers for equality
+
+ @param[in] lhs JSON pointer to compare
+ @param[in] rhs JSON pointer to compare
+ @return whether @a lhs is equal to @a rhs
+
+ @complexity Linear in the length of the JSON pointer
+
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
*/
- bool get_ubjson_value(const int prefix)
+ friend bool operator==(json_pointer const& lhs,
+ json_pointer const& rhs) noexcept
{
- switch (prefix)
- {
- case std::char_traits<char>::eof(): // EOF
- return unexpect_eof(input_format_t::ubjson, "value");
+ return lhs.reference_tokens == rhs.reference_tokens;
+ }
- case 'T': // true
- return sax->boolean(true);
- case 'F': // false
- return sax->boolean(false);
+ /*!
+ @brief compares two JSON pointers for inequality
- case 'Z': // null
- return sax->null();
+ @param[in] lhs JSON pointer to compare
+ @param[in] rhs JSON pointer to compare
+ @return whether @a lhs is not equal to @a rhs
- case 'U':
- {
- uint8_t number;
- return get_number(input_format_t::ubjson, number) and sax->number_unsigned(number);
- }
+ @complexity Linear in the length of the JSON pointer
- case 'i':
- {
- int8_t number;
- return get_number(input_format_t::ubjson, number) and sax->number_integer(number);
- }
+ @exceptionsafety No-throw guarantee: this function never throws exceptions.
+ */
+ friend bool operator!=(json_pointer const& lhs,
+ json_pointer const& rhs) noexcept
+ {
+ return !(lhs == rhs);
+ }
- case 'I':
- {
- int16_t number;
- return get_number(input_format_t::ubjson, number) and sax->number_integer(number);
- }
+ /// the reference tokens
+ std::vector<std::string> reference_tokens;
+};
+} // namespace nlohmann
- case 'l':
- {
- int32_t number;
- return get_number(input_format_t::ubjson, number) and sax->number_integer(number);
- }
+// #include <nlohmann/detail/json_ref.hpp>
- case 'L':
- {
- int64_t number;
- return get_number(input_format_t::ubjson, number) and sax->number_integer(number);
- }
- case 'd':
- {
- float number;
- return get_number(input_format_t::ubjson, number) and sax->number_float(static_cast<number_float_t>(number), "");
- }
+#include <initializer_list>
+#include <utility>
- case 'D':
- {
- double number;
- return get_number(input_format_t::ubjson, number) and sax->number_float(static_cast<number_float_t>(number), "");
- }
+// #include <nlohmann/detail/meta/type_traits.hpp>
- case 'C': // char
- {
- get();
- if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "char")))
- {
- return false;
- }
- if (JSON_UNLIKELY(current > 127))
- {
- auto last_token = get_token_string();
- return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "byte after 'C' must be in range 0x00..0x7F; last byte: 0x" + last_token, "char")));
- }
- string_t s(1, static_cast<char>(current));
- return sax->string(s);
- }
- case 'S': // string
- {
- string_t s;
- return get_ubjson_string(s) and sax->string(s);
- }
+namespace nlohmann
+{
+namespace detail
+{
+template<typename BasicJsonType>
+class json_ref
+{
+ public:
+ using value_type = BasicJsonType;
- case '[': // array
- return get_ubjson_array();
+ json_ref(value_type&& value)
+ : owned_value(std::move(value))
+ , value_ref(&owned_value)
+ , is_rvalue(true)
+ {}
- case '{': // object
- return get_ubjson_object();
+ json_ref(const value_type& value)
+ : value_ref(const_cast<value_type*>(&value))
+ , is_rvalue(false)
+ {}
- default: // anything else
- {
- auto last_token = get_token_string();
- return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "invalid byte: 0x" + last_token, "value")));
- }
- }
- }
+ json_ref(std::initializer_list<json_ref> init)
+ : owned_value(init)
+ , value_ref(&owned_value)
+ , is_rvalue(true)
+ {}
- /*!
- @return whether array creation completed
- */
- bool get_ubjson_array()
- {
- std::pair<std::size_t, int> size_and_type;
- if (JSON_UNLIKELY(not get_ubjson_size_type(size_and_type)))
- {
- return false;
- }
+ template <
+ class... Args,
+ enable_if_t<std::is_constructible<value_type, Args...>::value, int> = 0 >
+ json_ref(Args && ... args)
+ : owned_value(std::forward<Args>(args)...)
+ , value_ref(&owned_value)
+ , is_rvalue(true)
+ {}
- if (size_and_type.first != string_t::npos)
- {
- if (JSON_UNLIKELY(not sax->start_array(size_and_type.first)))
- {
- return false;
- }
+ // class should be movable only
+ json_ref(json_ref&&) = default;
+ json_ref(const json_ref&) = delete;
+ json_ref& operator=(const json_ref&) = delete;
+ json_ref& operator=(json_ref&&) = delete;
+ ~json_ref() = default;
- if (size_and_type.second != 0)
- {
- if (size_and_type.second != 'N')
- {
- for (std::size_t i = 0; i < size_and_type.first; ++i)
- {
- if (JSON_UNLIKELY(not get_ubjson_value(size_and_type.second)))
- {
- return false;
- }
- }
- }
- }
- else
- {
- for (std::size_t i = 0; i < size_and_type.first; ++i)
- {
- if (JSON_UNLIKELY(not parse_ubjson_internal()))
- {
- return false;
- }
- }
- }
- }
- else
+ value_type moved_or_copied() const
+ {
+ if (is_rvalue)
{
- if (JSON_UNLIKELY(not sax->start_array(std::size_t(-1))))
- {
- return false;
- }
-
- while (current != ']')
- {
- if (JSON_UNLIKELY(not parse_ubjson_internal(false)))
- {
- return false;
- }
- get_ignore_noop();
- }
+ return std::move(*value_ref);
}
+ return *value_ref;
+ }
- return sax->end_array();
+ value_type const& operator*() const
+ {
+ return *static_cast<value_type const*>(value_ref);
}
- /*!
- @return whether object creation completed
- */
- bool get_ubjson_object()
+ value_type const* operator->() const
{
- std::pair<std::size_t, int> size_and_type;
- if (JSON_UNLIKELY(not get_ubjson_size_type(size_and_type)))
- {
- return false;
- }
+ return static_cast<value_type const*>(value_ref);
+ }
- string_t key;
- if (size_and_type.first != string_t::npos)
- {
- if (JSON_UNLIKELY(not sax->start_object(size_and_type.first)))
- {
- return false;
- }
+ private:
+ mutable value_type owned_value = nullptr;
+ value_type* value_ref = nullptr;
+ const bool is_rvalue = true;
+};
+} // namespace detail
+} // namespace nlohmann
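+
+// Minimal sketch of what json_ref is for (inferred from the class above,
+// not from upstream documentation): initializer lists of basic_json may
+// mix lvalues and temporaries, and moved_or_copied() moves only when the
+// ref owns its value:
+//
+//   nlohmann::json a = 1;
+//   nlohmann::json obj = {{"k", a}, {"t", nlohmann::json::array()}};
+//   // 'a' is copied out of its json_ref; the temporary array is moved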
- if (size_and_type.second != 0)
- {
- for (std::size_t i = 0; i < size_and_type.first; ++i)
- {
- if (JSON_UNLIKELY(not get_ubjson_string(key) or not sax->key(key)))
- {
- return false;
- }
- if (JSON_UNLIKELY(not get_ubjson_value(size_and_type.second)))
- {
- return false;
- }
- key.clear();
- }
- }
- else
- {
- for (std::size_t i = 0; i < size_and_type.first; ++i)
- {
- if (JSON_UNLIKELY(not get_ubjson_string(key) or not sax->key(key)))
- {
- return false;
- }
- if (JSON_UNLIKELY(not parse_ubjson_internal()))
- {
- return false;
- }
- key.clear();
- }
- }
- }
- else
- {
- if (JSON_UNLIKELY(not sax->start_object(std::size_t(-1))))
- {
- return false;
- }
+// #include <nlohmann/detail/macro_scope.hpp>
- while (current != '}')
- {
- if (JSON_UNLIKELY(not get_ubjson_string(key, false) or not sax->key(key)))
- {
- return false;
- }
- if (JSON_UNLIKELY(not parse_ubjson_internal()))
- {
- return false;
- }
- get_ignore_noop();
- key.clear();
- }
- }
+// #include <nlohmann/detail/meta/cpp_future.hpp>
- return sax->end_object();
- }
+// #include <nlohmann/detail/meta/type_traits.hpp>
- ///////////////////////
- // Utility functions //
- ///////////////////////
+// #include <nlohmann/detail/output/binary_writer.hpp>
- /*!
- @brief get next character from the input
- This function provides the interface to the used input adapter. It does
- not throw in case the input reached EOF, but returns a -'ve valued
- `std::char_traits<char>::eof()` in that case.
+#include <algorithm> // reverse
+#include <array> // array
+#include <cstdint> // uint8_t, uint16_t, uint32_t, uint64_t
+#include <cstring> // memcpy
+#include <limits> // numeric_limits
+#include <string> // string
+#include <cmath> // isnan, isinf
- @return character read from the input
- */
- int get()
- {
- ++chars_read;
- return (current = ia->get_character());
- }
+// #include <nlohmann/detail/input/binary_reader.hpp>
- /*!
- @return character read from the input after ignoring all 'N' entries
- */
- int get_ignore_noop()
- {
- do
- {
- get();
- }
- while (current == 'N');
+// #include <nlohmann/detail/macro_scope.hpp>
- return current;
- }
+// #include <nlohmann/detail/output/output_adapters.hpp>
- /*
- @brief read a number from the input
- @tparam NumberType the type of the number
- @param[in] format the current format (for diagnostics)
- @param[out] result number of type @a NumberType
+#include <algorithm> // copy
+#include <cstddef> // size_t
+#include <ios> // streamsize
+#include <iterator> // back_inserter
+#include <memory> // shared_ptr, make_shared
+#include <ostream> // basic_ostream
+#include <string> // basic_string
+#include <vector> // vector
+// #include <nlohmann/detail/macro_scope.hpp>
- @return whether conversion completed
- @note This function needs to respect the system's endianess, because
- bytes in CBOR, MessagePack, and UBJSON are stored in network order
- (big endian) and therefore need reordering on little endian systems.
- */
- template<typename NumberType, bool InputIsLittleEndian = false>
- bool get_number(const input_format_t format, NumberType& result)
- {
- // step 1: read input into array with system's byte order
- std::array<uint8_t, sizeof(NumberType)> vec;
- for (std::size_t i = 0; i < sizeof(NumberType); ++i)
- {
- get();
- if (JSON_UNLIKELY(not unexpect_eof(format, "number")))
- {
- return false;
- }
+namespace nlohmann
+{
+namespace detail
+{
+/// abstract output adapter interface
+template<typename CharType> struct output_adapter_protocol
+{
+ virtual void write_character(CharType c) = 0;
+ virtual void write_characters(const CharType* s, std::size_t length) = 0;
+ virtual ~output_adapter_protocol() = default;
+};
- // reverse byte order prior to conversion if necessary
- if (is_little_endian && !InputIsLittleEndian)
- {
- vec[sizeof(NumberType) - i - 1] = static_cast<uint8_t>(current);
- }
- else
- {
- vec[i] = static_cast<uint8_t>(current); // LCOV_EXCL_LINE
- }
- }
+/// a type to simplify interfaces
+template<typename CharType>
+using output_adapter_t = std::shared_ptr<output_adapter_protocol<CharType>>;
- // step 2: convert array into number of type T and return
- std::memcpy(&result, vec.data(), sizeof(NumberType));
- return true;
+/// output adapter for byte vectors
+template<typename CharType>
+class output_vector_adapter : public output_adapter_protocol<CharType>
+{
+ public:
+ explicit output_vector_adapter(std::vector<CharType>& vec) noexcept
+ : v(vec)
+ {}
+
+ void write_character(CharType c) override
+ {
+ v.push_back(c);
}
- /*!
- @brief create a string by reading characters from the input
+ JSON_HEDLEY_NON_NULL(2)
+ void write_characters(const CharType* s, std::size_t length) override
+ {
+ std::copy(s, s + length, std::back_inserter(v));
+ }
- @tparam NumberType the type of the number
- @param[in] format the current format (for diagnostics)
- @param[in] len number of characters to read
- @param[out] result string created by reading @a len bytes
+ private:
+ std::vector<CharType>& v;
+};
- @return whether string creation completed
+/// output adapter for output streams
+template<typename CharType>
+class output_stream_adapter : public output_adapter_protocol<CharType>
+{
+ public:
+ explicit output_stream_adapter(std::basic_ostream<CharType>& s) noexcept
+ : stream(s)
+ {}
- @note We can not reserve @a len bytes for the result, because @a len
- may be too large. Usually, @ref unexpect_eof() detects the end of
- the input before we run out of string memory.
- */
- template<typename NumberType>
- bool get_string(const input_format_t format,
- const NumberType len,
- string_t& result)
+ void write_character(CharType c) override
{
- bool success = true;
- std::generate_n(std::back_inserter(result), len, [this, &success, &format]()
- {
- get();
- if (JSON_UNLIKELY(not unexpect_eof(format, "string")))
- {
- success = false;
- }
- return static_cast<char>(current);
- });
- return success;
+ stream.put(c);
}
- /*!
- @param[in] format the current format (for diagnostics)
- @param[in] context further context information (for diagnostics)
- @return whether the last read character is not EOF
- */
- bool unexpect_eof(const input_format_t format, const char* context) const
+ JSON_HEDLEY_NON_NULL(2)
+ void write_characters(const CharType* s, std::size_t length) override
{
- if (JSON_UNLIKELY(current == std::char_traits<char>::eof()))
- {
- return sax->parse_error(chars_read, "<end of file>",
- parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context)));
- }
- return true;
+ stream.write(s, static_cast<std::streamsize>(length));
}
- /*!
- @return a string representation of the last read byte
- */
- std::string get_token_string() const
+ private:
+ std::basic_ostream<CharType>& stream;
+};
+
+/// output adapter for basic_string
+template<typename CharType, typename StringType = std::basic_string<CharType>>
+class output_string_adapter : public output_adapter_protocol<CharType>
+{
+ public:
+ explicit output_string_adapter(StringType& s) noexcept
+ : str(s)
+ {}
+
+ void write_character(CharType c) override
{
- char cr[3];
- (std::snprintf)(cr, 3, "%.2hhX", static_cast<unsigned char>(current));
- return std::string{cr};
+ str.push_back(c);
}
- /*!
- @param[in] format the current format
- @param[in] detail a detailed error message
- @param[in] context further contect information
- @return a message string to use in the parse_error exceptions
- */
- std::string exception_message(const input_format_t format,
- const std::string& detail,
- const std::string& context) const
+ JSON_HEDLEY_NON_NULL(2)
+ void write_characters(const CharType* s, std::size_t length) override
{
- std::string error_msg = "syntax error while parsing ";
-
- switch (format)
- {
- case input_format_t::cbor:
- error_msg += "CBOR";
- break;
+ str.append(s, length);
+ }
- case input_format_t::msgpack:
- error_msg += "MessagePack";
- break;
+ private:
+ StringType& str;
+};
- case input_format_t::ubjson:
- error_msg += "UBJSON";
- break;
+template<typename CharType, typename StringType = std::basic_string<CharType>>
+class output_adapter
+{
+ public:
+ output_adapter(std::vector<CharType>& vec)
+ : oa(std::make_shared<output_vector_adapter<CharType>>(vec)) {}
- case input_format_t::bson:
- error_msg += "BSON";
- break;
+ output_adapter(std::basic_ostream<CharType>& s)
+ : oa(std::make_shared<output_stream_adapter<CharType>>(s)) {}
- // LCOV_EXCL_START
- default:
- assert(false);
- // LCOV_EXCL_STOP
- }
+ output_adapter(StringType& s)
+ : oa(std::make_shared<output_string_adapter<CharType, StringType>>(s)) {}
- return error_msg + " " + context + ": " + detail;
+ operator output_adapter_t<CharType>()
+ {
+ return oa;
}
private:
- /// input adapter
- input_adapter_t ia = nullptr;
-
- /// the current character
- int current = std::char_traits<char>::eof();
-
- /// the number of characters read
- std::size_t chars_read = 0;
-
- /// whether we can assume little endianess
- const bool is_little_endian = little_endianess();
-
- /// the SAX parser
- json_sax_t* sax = nullptr;
+ output_adapter_t<CharType> oa = nullptr;
};
} // namespace detail
} // namespace nlohmann
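+
+// Usage sketch for the adapter family above (internal detail:: API, shown
+// for illustration only; not part of the upstream header):
+//
+//   std::vector<std::uint8_t> out;
+//   nlohmann::detail::output_adapter<std::uint8_t> oa{out};
+//   nlohmann::detail::output_adapter_t<std::uint8_t> sink = oa;
+//   sink->write_character(0x42);   // out == {0x42}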
-// #include <nlohmann/detail/output/binary_writer.hpp>
-
-
-#include <algorithm> // reverse
-#include <array> // array
-#include <cstdint> // uint8_t, uint16_t, uint32_t, uint64_t
-#include <cstring> // memcpy
-#include <limits> // numeric_limits
-
-// #include <nlohmann/detail/input/binary_reader.hpp>
-
-// #include <nlohmann/detail/output/output_adapters.hpp>
-
namespace nlohmann
{
@@ -8465,6 +12752,8 @@ template<typename BasicJsonType, typename CharType>
class binary_writer
{
using string_t = typename BasicJsonType::string_t;
+ using binary_t = typename BasicJsonType::binary_t;
+ using number_float_t = typename BasicJsonType::number_float_t;
public:
/*!
@@ -8474,7 +12763,7 @@ class binary_writer
*/
explicit binary_writer(output_adapter_t<CharType> adapter) : oa(adapter)
{
- assert(oa);
+ JSON_ASSERT(oa);
}
/*!
@@ -8528,27 +12817,27 @@ class binary_writer
// code from the value_t::number_unsigned case here.
if (j.m_value.number_integer <= 0x17)
{
- write_number(static_cast<uint8_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_integer <= (std::numeric_limits<uint8_t>::max)())
+ else if (j.m_value.number_integer <= (std::numeric_limits<std::uint8_t>::max)())
{
oa->write_character(to_char_type(0x18));
- write_number(static_cast<uint8_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_integer <= (std::numeric_limits<uint16_t>::max)())
+ else if (j.m_value.number_integer <= (std::numeric_limits<std::uint16_t>::max)())
{
oa->write_character(to_char_type(0x19));
- write_number(static_cast<uint16_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint16_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_integer <= (std::numeric_limits<uint32_t>::max)())
+ else if (j.m_value.number_integer <= (std::numeric_limits<std::uint32_t>::max)())
{
oa->write_character(to_char_type(0x1A));
- write_number(static_cast<uint32_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint32_t>(j.m_value.number_integer));
}
else
{
oa->write_character(to_char_type(0x1B));
- write_number(static_cast<uint64_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint64_t>(j.m_value.number_integer));
}
}
else
@@ -8558,27 +12847,27 @@ class binary_writer
const auto positive_number = -1 - j.m_value.number_integer;
if (j.m_value.number_integer >= -24)
{
- write_number(static_cast<uint8_t>(0x20 + positive_number));
+ write_number(static_cast<std::uint8_t>(0x20 + positive_number));
}
- else if (positive_number <= (std::numeric_limits<uint8_t>::max)())
+ else if (positive_number <= (std::numeric_limits<std::uint8_t>::max)())
{
oa->write_character(to_char_type(0x38));
- write_number(static_cast<uint8_t>(positive_number));
+ write_number(static_cast<std::uint8_t>(positive_number));
}
- else if (positive_number <= (std::numeric_limits<uint16_t>::max)())
+ else if (positive_number <= (std::numeric_limits<std::uint16_t>::max)())
{
oa->write_character(to_char_type(0x39));
- write_number(static_cast<uint16_t>(positive_number));
+ write_number(static_cast<std::uint16_t>(positive_number));
}
- else if (positive_number <= (std::numeric_limits<uint32_t>::max)())
+ else if (positive_number <= (std::numeric_limits<std::uint32_t>::max)())
{
oa->write_character(to_char_type(0x3A));
- write_number(static_cast<uint32_t>(positive_number));
+ write_number(static_cast<std::uint32_t>(positive_number));
}
else
{
oa->write_character(to_char_type(0x3B));
- write_number(static_cast<uint64_t>(positive_number));
+ write_number(static_cast<std::uint64_t>(positive_number));
}
}
break;
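+
+ // Worked bytes for the two branches above (illustrative):
+ //
+ //   500  -> 0x19 0x01 0xF4                            (major type 0, uint16)
+ //   -500 -> positive_number = 499 -> 0x39 0x01 0xF3   (major type 1, uint16)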
@@ -8588,35 +12877,51 @@ class binary_writer
{
if (j.m_value.number_unsigned <= 0x17)
{
- write_number(static_cast<uint8_t>(j.m_value.number_unsigned));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_unsigned));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint8_t>::max)())
{
oa->write_character(to_char_type(0x18));
- write_number(static_cast<uint8_t>(j.m_value.number_unsigned));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_unsigned));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint16_t>::max)())
{
oa->write_character(to_char_type(0x19));
- write_number(static_cast<uint16_t>(j.m_value.number_unsigned));
+ write_number(static_cast<std::uint16_t>(j.m_value.number_unsigned));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint32_t>::max)())
{
oa->write_character(to_char_type(0x1A));
- write_number(static_cast<uint32_t>(j.m_value.number_unsigned));
+ write_number(static_cast<std::uint32_t>(j.m_value.number_unsigned));
}
else
{
oa->write_character(to_char_type(0x1B));
- write_number(static_cast<uint64_t>(j.m_value.number_unsigned));
+ write_number(static_cast<std::uint64_t>(j.m_value.number_unsigned));
}
break;
}
case value_t::number_float:
{
- oa->write_character(get_cbor_float_prefix(j.m_value.number_float));
- write_number(j.m_value.number_float);
+ if (std::isnan(j.m_value.number_float))
+ {
+ // NaN is 0xf97e00 in CBOR
+ oa->write_character(to_char_type(0xF9));
+ oa->write_character(to_char_type(0x7E));
+ oa->write_character(to_char_type(0x00));
+ }
+ else if (std::isinf(j.m_value.number_float))
+ {
+ // Infinity is 0xf97c00, -Infinity is 0xf9fc00
+ oa->write_character(to_char_type(0xF9));
+ oa->write_character(j.m_value.number_float > 0 ? to_char_type(0x7C) : to_char_type(0xFC));
+ oa->write_character(to_char_type(0x00));
+ }
+ else
+ {
+ write_compact_float(j.m_value.number_float, detail::input_format_t::cbor);
+ }
break;
}
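+
+ // The three-byte sequences above are IEEE 754 binary16 constants:
+ //
+ //   0xF9 0x7E 0x00 -> half-precision NaN
+ //   0xF9 0x7C 0x00 -> +Infinity;  0xF9 0xFC 0x00 -> -Infinity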
@@ -8626,28 +12931,28 @@ class binary_writer
const auto N = j.m_value.string->size();
if (N <= 0x17)
{
- write_number(static_cast<uint8_t>(0x60 + N));
+ write_number(static_cast<std::uint8_t>(0x60 + N));
}
- else if (N <= (std::numeric_limits<uint8_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint8_t>::max)())
{
oa->write_character(to_char_type(0x78));
- write_number(static_cast<uint8_t>(N));
+ write_number(static_cast<std::uint8_t>(N));
}
- else if (N <= (std::numeric_limits<uint16_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
{
oa->write_character(to_char_type(0x79));
- write_number(static_cast<uint16_t>(N));
+ write_number(static_cast<std::uint16_t>(N));
}
- else if (N <= (std::numeric_limits<uint32_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
{
oa->write_character(to_char_type(0x7A));
- write_number(static_cast<uint32_t>(N));
+ write_number(static_cast<std::uint32_t>(N));
}
// LCOV_EXCL_START
- else if (N <= (std::numeric_limits<uint64_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint64_t>::max)())
{
oa->write_character(to_char_type(0x7B));
- write_number(static_cast<uint64_t>(N));
+ write_number(static_cast<std::uint64_t>(N));
}
// LCOV_EXCL_STOP
@@ -8664,28 +12969,28 @@ class binary_writer
const auto N = j.m_value.array->size();
if (N <= 0x17)
{
- write_number(static_cast<uint8_t>(0x80 + N));
+ write_number(static_cast<std::uint8_t>(0x80 + N));
}
- else if (N <= (std::numeric_limits<uint8_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint8_t>::max)())
{
oa->write_character(to_char_type(0x98));
- write_number(static_cast<uint8_t>(N));
+ write_number(static_cast<std::uint8_t>(N));
}
- else if (N <= (std::numeric_limits<uint16_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
{
oa->write_character(to_char_type(0x99));
- write_number(static_cast<uint16_t>(N));
+ write_number(static_cast<std::uint16_t>(N));
}
- else if (N <= (std::numeric_limits<uint32_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
{
oa->write_character(to_char_type(0x9A));
- write_number(static_cast<uint32_t>(N));
+ write_number(static_cast<std::uint32_t>(N));
}
// LCOV_EXCL_START
- else if (N <= (std::numeric_limits<uint64_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint64_t>::max)())
{
oa->write_character(to_char_type(0x9B));
- write_number(static_cast<uint64_t>(N));
+ write_number(static_cast<std::uint64_t>(N));
}
// LCOV_EXCL_STOP
@@ -8697,34 +13002,79 @@ class binary_writer
break;
}
+ case value_t::binary:
+ {
+ if (j.m_value.binary->has_subtype())
+ {
+ write_number(static_cast<std::uint8_t>(0xd8));
+ write_number(j.m_value.binary->subtype());
+ }
+
+ // step 1: write control byte and the binary array size
+ const auto N = j.m_value.binary->size();
+ if (N <= 0x17)
+ {
+ write_number(static_cast<std::uint8_t>(0x40 + N));
+ }
+ else if (N <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ oa->write_character(to_char_type(0x58));
+ write_number(static_cast<std::uint8_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ oa->write_character(to_char_type(0x59));
+ write_number(static_cast<std::uint16_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ oa->write_character(to_char_type(0x5A));
+ write_number(static_cast<std::uint32_t>(N));
+ }
+ // LCOV_EXCL_START
+ else if (N <= (std::numeric_limits<std::uint64_t>::max)())
+ {
+ oa->write_character(to_char_type(0x5B));
+ write_number(static_cast<std::uint64_t>(N));
+ }
+ // LCOV_EXCL_STOP
+
+ // step 2: write the byte string
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(j.m_value.binary->data()),
+ N);
+
+ break;
+ }
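+
+ // Byte-layout sketch for the binary case above (illustrative):
+ //
+ //   {0xDE, 0xAD, 0xBE}         -> 0x43 0xDE 0xAD 0xBE        (0x40 + N)
+ //   same bytes with subtype 42 -> 0xD8 0x2A 0x43 0xDE 0xAD 0xBE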
+
case value_t::object:
{
// step 1: write control byte and the object size
const auto N = j.m_value.object->size();
if (N <= 0x17)
{
- write_number(static_cast<uint8_t>(0xA0 + N));
+ write_number(static_cast<std::uint8_t>(0xA0 + N));
}
- else if (N <= (std::numeric_limits<uint8_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint8_t>::max)())
{
oa->write_character(to_char_type(0xB8));
- write_number(static_cast<uint8_t>(N));
+ write_number(static_cast<std::uint8_t>(N));
}
- else if (N <= (std::numeric_limits<uint16_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
{
oa->write_character(to_char_type(0xB9));
- write_number(static_cast<uint16_t>(N));
+ write_number(static_cast<std::uint16_t>(N));
}
- else if (N <= (std::numeric_limits<uint32_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
{
oa->write_character(to_char_type(0xBA));
- write_number(static_cast<uint32_t>(N));
+ write_number(static_cast<std::uint32_t>(N));
}
// LCOV_EXCL_START
- else if (N <= (std::numeric_limits<uint64_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint64_t>::max)())
{
oa->write_character(to_char_type(0xBB));
- write_number(static_cast<uint64_t>(N));
+ write_number(static_cast<std::uint64_t>(N));
}
// LCOV_EXCL_STOP
@@ -8773,31 +13123,31 @@ class binary_writer
if (j.m_value.number_unsigned < 128)
{
// positive fixnum
- write_number(static_cast<uint8_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint8_t>::max)())
{
// uint 8
oa->write_character(to_char_type(0xCC));
- write_number(static_cast<uint8_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint16_t>::max)())
{
// uint 16
oa->write_character(to_char_type(0xCD));
- write_number(static_cast<uint16_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint16_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint32_t>::max)())
{
// uint 32
oa->write_character(to_char_type(0xCE));
- write_number(static_cast<uint32_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint32_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint64_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint64_t>::max)())
{
// uint 64
oa->write_character(to_char_type(0xCF));
- write_number(static_cast<uint64_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint64_t>(j.m_value.number_integer));
}
}
else
@@ -8805,35 +13155,35 @@ class binary_writer
if (j.m_value.number_integer >= -32)
{
// negative fixnum
- write_number(static_cast<int8_t>(j.m_value.number_integer));
+ write_number(static_cast<std::int8_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_integer >= (std::numeric_limits<int8_t>::min)() and
- j.m_value.number_integer <= (std::numeric_limits<int8_t>::max)())
+ else if (j.m_value.number_integer >= (std::numeric_limits<std::int8_t>::min)() &&
+ j.m_value.number_integer <= (std::numeric_limits<std::int8_t>::max)())
{
// int 8
oa->write_character(to_char_type(0xD0));
- write_number(static_cast<int8_t>(j.m_value.number_integer));
+ write_number(static_cast<std::int8_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_integer >= (std::numeric_limits<int16_t>::min)() and
- j.m_value.number_integer <= (std::numeric_limits<int16_t>::max)())
+ else if (j.m_value.number_integer >= (std::numeric_limits<std::int16_t>::min)() &&
+ j.m_value.number_integer <= (std::numeric_limits<std::int16_t>::max)())
{
// int 16
oa->write_character(to_char_type(0xD1));
- write_number(static_cast<int16_t>(j.m_value.number_integer));
+ write_number(static_cast<std::int16_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_integer >= (std::numeric_limits<int32_t>::min)() and
- j.m_value.number_integer <= (std::numeric_limits<int32_t>::max)())
+ else if (j.m_value.number_integer >= (std::numeric_limits<std::int32_t>::min)() &&
+ j.m_value.number_integer <= (std::numeric_limits<std::int32_t>::max)())
{
// int 32
oa->write_character(to_char_type(0xD2));
- write_number(static_cast<int32_t>(j.m_value.number_integer));
+ write_number(static_cast<std::int32_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_integer >= (std::numeric_limits<int64_t>::min)() and
- j.m_value.number_integer <= (std::numeric_limits<int64_t>::max)())
+ else if (j.m_value.number_integer >= (std::numeric_limits<std::int64_t>::min)() &&
+ j.m_value.number_integer <= (std::numeric_limits<std::int64_t>::max)())
{
// int 64
oa->write_character(to_char_type(0xD3));
- write_number(static_cast<int64_t>(j.m_value.number_integer));
+ write_number(static_cast<std::int64_t>(j.m_value.number_integer));
}
}
break;
@@ -8844,39 +13194,38 @@ class binary_writer
if (j.m_value.number_unsigned < 128)
{
// positive fixnum
- write_number(static_cast<uint8_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint8_t>::max)())
{
// uint 8
oa->write_character(to_char_type(0xCC));
- write_number(static_cast<uint8_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint8_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint16_t>::max)())
{
// uint 16
oa->write_character(to_char_type(0xCD));
- write_number(static_cast<uint16_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint16_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint32_t>::max)())
{
// uint 32
oa->write_character(to_char_type(0xCE));
- write_number(static_cast<uint32_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint32_t>(j.m_value.number_integer));
}
- else if (j.m_value.number_unsigned <= (std::numeric_limits<uint64_t>::max)())
+ else if (j.m_value.number_unsigned <= (std::numeric_limits<std::uint64_t>::max)())
{
// uint 64
oa->write_character(to_char_type(0xCF));
- write_number(static_cast<uint64_t>(j.m_value.number_integer));
+ write_number(static_cast<std::uint64_t>(j.m_value.number_integer));
}
break;
}
case value_t::number_float:
{
- oa->write_character(get_msgpack_float_prefix(j.m_value.number_float));
- write_number(j.m_value.number_float);
+ write_compact_float(j.m_value.number_float, detail::input_format_t::msgpack);
break;
}
@@ -8887,25 +13236,25 @@ class binary_writer
if (N <= 31)
{
// fixstr
- write_number(static_cast<uint8_t>(0xA0 | N));
+ write_number(static_cast<std::uint8_t>(0xA0 | N));
}
- else if (N <= (std::numeric_limits<uint8_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint8_t>::max)())
{
// str 8
oa->write_character(to_char_type(0xD9));
- write_number(static_cast<uint8_t>(N));
+ write_number(static_cast<std::uint8_t>(N));
}
- else if (N <= (std::numeric_limits<uint16_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
{
// str 16
oa->write_character(to_char_type(0xDA));
- write_number(static_cast<uint16_t>(N));
+ write_number(static_cast<std::uint16_t>(N));
}
- else if (N <= (std::numeric_limits<uint32_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
{
// str 32
oa->write_character(to_char_type(0xDB));
- write_number(static_cast<uint32_t>(N));
+ write_number(static_cast<std::uint32_t>(N));
}
// step 2: write the string
@@ -8922,19 +13271,19 @@ class binary_writer
if (N <= 15)
{
// fixarray
- write_number(static_cast<uint8_t>(0x90 | N));
+ write_number(static_cast<std::uint8_t>(0x90 | N));
}
- else if (N <= (std::numeric_limits<uint16_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
{
// array 16
oa->write_character(to_char_type(0xDC));
- write_number(static_cast<uint16_t>(N));
+ write_number(static_cast<std::uint16_t>(N));
}
- else if (N <= (std::numeric_limits<uint32_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
{
// array 32
oa->write_character(to_char_type(0xDD));
- write_number(static_cast<uint32_t>(N));
+ write_number(static_cast<std::uint32_t>(N));
}
// step 2: write each element
@@ -8945,6 +13294,89 @@ class binary_writer
break;
}
+ case value_t::binary:
+ {
+ // step 0: determine whether the binary value has a subtype; if so, use
+ // the ext or fixext types instead of the bin types
+ const bool use_ext = j.m_value.binary->has_subtype();
+
+ // step 1: write control byte and the byte string length
+ const auto N = j.m_value.binary->size();
+ if (N <= (std::numeric_limits<std::uint8_t>::max)())
+ {
+ std::uint8_t output_type{};
+ bool fixed = true;
+ if (use_ext)
+ {
+ switch (N)
+ {
+ case 1:
+ output_type = 0xD4; // fixext 1
+ break;
+ case 2:
+ output_type = 0xD5; // fixext 2
+ break;
+ case 4:
+ output_type = 0xD6; // fixext 4
+ break;
+ case 8:
+ output_type = 0xD7; // fixext 8
+ break;
+ case 16:
+ output_type = 0xD8; // fixext 16
+ break;
+ default:
+ output_type = 0xC7; // ext 8
+ fixed = false;
+ break;
+ }
+
+ }
+ else
+ {
+ output_type = 0xC4; // bin 8
+ fixed = false;
+ }
+
+ oa->write_character(to_char_type(output_type));
+ if (!fixed)
+ {
+ write_number(static_cast<std::uint8_t>(N));
+ }
+ }
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
+ {
+ std::uint8_t output_type = use_ext
+ ? 0xC8 // ext 16
+ : 0xC5; // bin 16
+
+ oa->write_character(to_char_type(output_type));
+ write_number(static_cast<std::uint16_t>(N));
+ }
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
+ {
+ std::uint8_t output_type = use_ext
+ ? 0xC9 // ext 32
+ : 0xC6; // bin 32
+
+ oa->write_character(to_char_type(output_type));
+ write_number(static_cast<std::uint32_t>(N));
+ }
+
+ // step 1.5: if this is an ext type, write the subtype
+ if (use_ext)
+ {
+ write_number(static_cast<std::int8_t>(j.m_value.binary->subtype()));
+ }
+
+ // step 2: write the byte string
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(j.m_value.binary->data()),
+ N);
+
+ break;
+ }
+
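/*!
Editor's note, not part of the diff: a sketch of how the control bytes above
are chosen, using the binary API this release introduces. The expected byte
sequences are assumptions derived from the MessagePack spec, not test output.

@code{.cpp}
auto plain  = nlohmann::json::binary({0xCA, 0xFE, 0xBA, 0xBE});
auto tagged = nlohmann::json::binary({0xCA, 0xFE, 0xBA, 0xBE}, 42);
nlohmann::json::to_msgpack(plain);  // bin 8:    C4 04 CA FE BA BE
nlohmann::json::to_msgpack(tagged); // fixext 4: D6 2A CA FE BA BE (2A = subtype 42)
@endcode
*/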
case value_t::object:
{
// step 1: write control byte and the object size
@@ -8952,19 +13384,19 @@ class binary_writer
if (N <= 15)
{
// fixmap
- write_number(static_cast<uint8_t>(0x80 | (N & 0xF)));
+ write_number(static_cast<std::uint8_t>(0x80 | (N & 0xF)));
}
- else if (N <= (std::numeric_limits<uint16_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint16_t>::max)())
{
// map 16
oa->write_character(to_char_type(0xDE));
- write_number(static_cast<uint16_t>(N));
+ write_number(static_cast<std::uint16_t>(N));
}
- else if (N <= (std::numeric_limits<uint32_t>::max)())
+ else if (N <= (std::numeric_limits<std::uint32_t>::max)())
{
// map 32
oa->write_character(to_char_type(0xDF));
- write_number(static_cast<uint32_t>(N));
+ write_number(static_cast<std::uint32_t>(N));
}
// step 2: write each element
@@ -9051,9 +13483,9 @@ class binary_writer
}
bool prefix_required = true;
- if (use_type and not j.m_value.array->empty())
+ if (use_type && !j.m_value.array->empty())
{
- assert(use_count);
+ JSON_ASSERT(use_count);
const CharType first_prefix = ubjson_prefix(j.front());
const bool same_prefix = std::all_of(j.begin() + 1, j.end(),
[this, first_prefix](const BasicJsonType & v)
@@ -9080,7 +13512,50 @@ class binary_writer
write_ubjson(el, use_count, use_type, prefix_required);
}
- if (not use_count)
+ if (!use_count)
+ {
+ oa->write_character(to_char_type(']'));
+ }
+
+ break;
+ }
+
+ case value_t::binary:
+ {
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('['));
+ }
+
+ if (use_type && !j.m_value.binary->empty())
+ {
+ JSON_ASSERT(use_count);
+ oa->write_character(to_char_type('$'));
+ oa->write_character('U');
+ }
+
+ if (use_count)
+ {
+ oa->write_character(to_char_type('#'));
+ write_number_with_ubjson_prefix(j.m_value.binary->size(), true);
+ }
+
+ if (use_type)
+ {
+ oa->write_characters(
+ reinterpret_cast<const CharType*>(j.m_value.binary->data()),
+ j.m_value.binary->size());
+ }
+ else
+ {
+ for (size_t i = 0; i < j.m_value.binary->size(); ++i)
+ {
+ oa->write_character(to_char_type('U'));
+ oa->write_character(j.m_value.binary->data()[i]);
+ }
+ }
+
+ if (!use_count)
{
oa->write_character(to_char_type(']'));
}
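/*!
Editor's note, not part of the diff: with the size and type optimizations
enabled, a three-byte binary value is framed as a strongly typed uint8 array.
The expected bytes below are an assumption from the UBJSON draft spec, not
test output.

@code{.cpp}
auto j = nlohmann::json::binary({1, 2, 3});
nlohmann::json::to_ubjson(j, true, true); // '[' '$' 'U' '#' 'i' 3 01 02 03
// no closing ']' when the count is given; without use_type, each byte
// would be written as 'U' <byte> instead
@endcode
*/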
@@ -9096,9 +13571,9 @@ class binary_writer
}
bool prefix_required = true;
- if (use_type and not j.m_value.object->empty())
+ if (use_type && !j.m_value.object->empty())
{
- assert(use_count);
+ JSON_ASSERT(use_count);
const CharType first_prefix = ubjson_prefix(j.front());
const bool same_prefix = std::all_of(j.begin(), j.end(),
[this, first_prefix](const BasicJsonType & v)
@@ -9129,7 +13604,7 @@ class binary_writer
write_ubjson(el.second, use_count, use_type, prefix_required);
}
- if (not use_count)
+ if (!use_count)
{
oa->write_character(to_char_type('}'));
}
@@ -9154,7 +13629,7 @@ class binary_writer
static std::size_t calc_bson_entry_header_size(const string_t& name)
{
const auto it = name.find(static_cast<typename string_t::value_type>(0));
- if (JSON_UNLIKELY(it != BasicJsonType::string_t::npos))
+ if (JSON_HEDLEY_UNLIKELY(it != BasicJsonType::string_t::npos))
{
JSON_THROW(out_of_range::create(409,
"BSON key cannot contain code point U+0000 (at byte " + std::to_string(it) + ")"));
@@ -9230,14 +13705,9 @@ class binary_writer
*/
static std::size_t calc_bson_integer_size(const std::int64_t value)
{
- if ((std::numeric_limits<std::int32_t>::min)() <= value and value <= (std::numeric_limits<std::int32_t>::max)())
- {
- return sizeof(std::int32_t);
- }
- else
- {
- return sizeof(std::int64_t);
- }
+ return (std::numeric_limits<std::int32_t>::min)() <= value && value <= (std::numeric_limits<std::int32_t>::max)()
+ ? sizeof(std::int32_t)
+ : sizeof(std::int64_t);
}
/*!
@@ -9246,7 +13716,7 @@ class binary_writer
void write_bson_integer(const string_t& name,
const std::int64_t value)
{
- if ((std::numeric_limits<std::int32_t>::min)() <= value and value <= (std::numeric_limits<std::int32_t>::max)())
+ if ((std::numeric_limits<std::int32_t>::min)() <= value && value <= (std::numeric_limits<std::int32_t>::max)())
{
write_bson_entry_header(name, 0x10); // int32
write_number<std::int32_t, true>(static_cast<std::int32_t>(value));
@@ -9305,18 +13775,25 @@ class binary_writer
*/
static std::size_t calc_bson_array_size(const typename BasicJsonType::array_t& value)
{
- std::size_t embedded_document_size = 0ul;
std::size_t array_index = 0ul;
- for (const auto& el : value)
+ const std::size_t embedded_document_size = std::accumulate(std::begin(value), std::end(value), std::size_t(0), [&array_index](std::size_t result, const typename BasicJsonType::array_t::value_type & el)
{
- embedded_document_size += calc_bson_element_size(std::to_string(array_index++), el);
- }
+ return result + calc_bson_element_size(std::to_string(array_index++), el);
+ });
return sizeof(std::int32_t) + embedded_document_size + 1ul;
}
/*!
+ @return The size of the BSON-encoded binary array @a value
+ */
+ static std::size_t calc_bson_binary_size(const typename BasicJsonType::binary_t& value)
+ {
+ return sizeof(std::int32_t) + value.size() + 1ul;
+ }
+
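/*!
Editor's note, not part of the diff: the trailing "+ 1ul" above is the single
subtype byte of a BSON binary element, mirroring what write_bson_binary below
emits:

@code{.cpp}
// layout: int32 length | uint8 subtype | <length> payload bytes
static_assert(sizeof(std::int32_t) + 16 + 1ul == 21,
              "a 16-byte payload costs 21 bytes, key and header aside");
@endcode
*/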
+ /*!
@brief Writes a BSON element with key @a name and array @a value
*/
void write_bson_array(const string_t& name,
@@ -9336,6 +13813,20 @@ class binary_writer
}
/*!
+ @brief Writes a BSON element with key @a name and binary value @a value
+ */
+ void write_bson_binary(const string_t& name,
+ const binary_t& value)
+ {
+ write_bson_entry_header(name, 0x05);
+
+ write_number<std::int32_t, true>(static_cast<std::int32_t>(value.size()));
+ write_number(value.has_subtype() ? value.subtype() : std::uint8_t(0x00));
+
+ oa->write_characters(reinterpret_cast<const CharType*>(value.data()), value.size());
+ }
+
+ /*!
@brief Calculates the size necessary to serialize the JSON value @a j with its @a name
@return The calculated size for the BSON document entry for @a j with the given @a name.
*/
@@ -9351,6 +13842,9 @@ class binary_writer
case value_t::array:
return header_size + calc_bson_array_size(*j.m_value.array);
+ case value_t::binary:
+ return header_size + calc_bson_binary_size(*j.m_value.binary);
+
case value_t::boolean:
return header_size + 1ul;
@@ -9371,10 +13865,10 @@ class binary_writer
// LCOV_EXCL_START
default:
- assert(false);
+ JSON_ASSERT(false);
return 0ul;
// LCOV_EXCL_STOP
- };
+ }
}
/*!
@@ -9395,6 +13889,9 @@ class binary_writer
case value_t::array:
return write_bson_array(name, *j.m_value.array);
+ case value_t::binary:
+ return write_bson_binary(name, *j.m_value.binary);
+
case value_t::boolean:
return write_bson_boolean(name, j.m_value.boolean);
@@ -9415,10 +13912,10 @@ class binary_writer
// LCOV_EXCL_START
default:
- assert(false);
+ JSON_ASSERT(false);
return;
// LCOV_EXCL_STOP
- };
+ }
}
/*!
@@ -9429,7 +13926,7 @@ class binary_writer
*/
static std::size_t calc_bson_object_size(const typename BasicJsonType::object_t& value)
{
- std::size_t document_size = std::accumulate(value.begin(), value.end(), 0ul,
+ std::size_t document_size = std::accumulate(value.begin(), value.end(), std::size_t(0),
[](size_t result, const typename BasicJsonType::object_t::value_type & el)
{
return result += calc_bson_element_size(el.first, el.second);
@@ -9505,115 +14002,129 @@ class binary_writer
void write_number_with_ubjson_prefix(const NumberType n,
const bool add_prefix)
{
- if (n <= static_cast<uint64_t>((std::numeric_limits<int8_t>::max)()))
+ if (n <= static_cast<std::uint64_t>((std::numeric_limits<std::int8_t>::max)()))
{
if (add_prefix)
{
oa->write_character(to_char_type('i')); // int8
}
- write_number(static_cast<uint8_t>(n));
+ write_number(static_cast<std::uint8_t>(n));
}
- else if (n <= (std::numeric_limits<uint8_t>::max)())
+ else if (n <= (std::numeric_limits<std::uint8_t>::max)())
{
if (add_prefix)
{
oa->write_character(to_char_type('U')); // uint8
}
- write_number(static_cast<uint8_t>(n));
+ write_number(static_cast<std::uint8_t>(n));
}
- else if (n <= static_cast<uint64_t>((std::numeric_limits<int16_t>::max)()))
+ else if (n <= static_cast<std::uint64_t>((std::numeric_limits<std::int16_t>::max)()))
{
if (add_prefix)
{
oa->write_character(to_char_type('I')); // int16
}
- write_number(static_cast<int16_t>(n));
+ write_number(static_cast<std::int16_t>(n));
}
- else if (n <= static_cast<uint64_t>((std::numeric_limits<int32_t>::max)()))
+ else if (n <= static_cast<std::uint64_t>((std::numeric_limits<std::int32_t>::max)()))
{
if (add_prefix)
{
oa->write_character(to_char_type('l')); // int32
}
- write_number(static_cast<int32_t>(n));
+ write_number(static_cast<std::int32_t>(n));
}
- else if (n <= static_cast<uint64_t>((std::numeric_limits<int64_t>::max)()))
+ else if (n <= static_cast<std::uint64_t>((std::numeric_limits<std::int64_t>::max)()))
{
if (add_prefix)
{
oa->write_character(to_char_type('L')); // int64
}
- write_number(static_cast<int64_t>(n));
+ write_number(static_cast<std::int64_t>(n));
}
else
{
- JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(n) + " cannot be represented by UBJSON as it does not fit int64"));
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('H')); // high-precision number
+ }
+
+ const auto number = BasicJsonType(n).dump();
+ write_number_with_ubjson_prefix(number.size(), true);
+ for (std::size_t i = 0; i < number.size(); ++i)
+ {
+ oa->write_character(to_char_type(static_cast<std::uint8_t>(number[i])));
+ }
}
}
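/*!
Editor's note, not part of the diff: a sketch of the new 'H' fallback that
replaces the error-407 throw, assuming a value just above int64's range:

@code{.cpp}
nlohmann::json j = 9223372036854775808ull; // 2^63
nlohmann::json::to_ubjson(j);
// 'H' 'i' 19 "9223372036854775808"
// the decimal text is preceded by its own UBJSON-encoded length (int8 19)
@endcode
*/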
// UBJSON: write number (signed integer)
- template<typename NumberType, typename std::enable_if<
- std::is_signed<NumberType>::value and
- not std::is_floating_point<NumberType>::value, int>::type = 0>
+ template < typename NumberType, typename std::enable_if <
+ std::is_signed<NumberType>::value&&
+ !std::is_floating_point<NumberType>::value, int >::type = 0 >
void write_number_with_ubjson_prefix(const NumberType n,
const bool add_prefix)
{
- if ((std::numeric_limits<int8_t>::min)() <= n and n <= (std::numeric_limits<int8_t>::max)())
+ if ((std::numeric_limits<std::int8_t>::min)() <= n && n <= (std::numeric_limits<std::int8_t>::max)())
{
if (add_prefix)
{
oa->write_character(to_char_type('i')); // int8
}
- write_number(static_cast<int8_t>(n));
+ write_number(static_cast<std::int8_t>(n));
}
- else if (static_cast<int64_t>((std::numeric_limits<uint8_t>::min)()) <= n and n <= static_cast<int64_t>((std::numeric_limits<uint8_t>::max)()))
+ else if (static_cast<std::int64_t>((std::numeric_limits<std::uint8_t>::min)()) <= n && n <= static_cast<std::int64_t>((std::numeric_limits<std::uint8_t>::max)()))
{
if (add_prefix)
{
oa->write_character(to_char_type('U')); // uint8
}
- write_number(static_cast<uint8_t>(n));
+ write_number(static_cast<std::uint8_t>(n));
}
- else if ((std::numeric_limits<int16_t>::min)() <= n and n <= (std::numeric_limits<int16_t>::max)())
+ else if ((std::numeric_limits<std::int16_t>::min)() <= n && n <= (std::numeric_limits<std::int16_t>::max)())
{
if (add_prefix)
{
oa->write_character(to_char_type('I')); // int16
}
- write_number(static_cast<int16_t>(n));
+ write_number(static_cast<std::int16_t>(n));
}
- else if ((std::numeric_limits<int32_t>::min)() <= n and n <= (std::numeric_limits<int32_t>::max)())
+ else if ((std::numeric_limits<std::int32_t>::min)() <= n && n <= (std::numeric_limits<std::int32_t>::max)())
{
if (add_prefix)
{
oa->write_character(to_char_type('l')); // int32
}
- write_number(static_cast<int32_t>(n));
+ write_number(static_cast<std::int32_t>(n));
}
- else if ((std::numeric_limits<int64_t>::min)() <= n and n <= (std::numeric_limits<int64_t>::max)())
+ else if ((std::numeric_limits<std::int64_t>::min)() <= n && n <= (std::numeric_limits<std::int64_t>::max)())
{
if (add_prefix)
{
oa->write_character(to_char_type('L')); // int64
}
- write_number(static_cast<int64_t>(n));
+ write_number(static_cast<std::int64_t>(n));
}
// LCOV_EXCL_START
else
{
- JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(n) + " cannot be represented by UBJSON as it does not fit int64"));
+ if (add_prefix)
+ {
+ oa->write_character(to_char_type('H')); // high-precision number
+ }
+
+ const auto number = BasicJsonType(n).dump();
+ write_number_with_ubjson_prefix(number.size(), true);
+ for (std::size_t i = 0; i < number.size(); ++i)
+ {
+ oa->write_character(to_char_type(static_cast<std::uint8_t>(number[i])));
+ }
}
// LCOV_EXCL_STOP
}
/*!
@brief determine the type prefix of container values
-
- @note This function does not need to be 100% accurate when it comes to
- integer limits. In case a number exceeds the limits of int64_t,
- this will be detected by a later call to function
- write_number_with_ubjson_prefix. Therefore, we return 'L' for any
- value that does not fit the previous limits.
*/
CharType ubjson_prefix(const BasicJsonType& j) const noexcept
{
@@ -9627,46 +14138,54 @@ class binary_writer
case value_t::number_integer:
{
- if ((std::numeric_limits<int8_t>::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits<int8_t>::max)())
+ if ((std::numeric_limits<std::int8_t>::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits<std::int8_t>::max)())
{
return 'i';
}
- if ((std::numeric_limits<uint8_t>::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits<uint8_t>::max)())
+ if ((std::numeric_limits<std::uint8_t>::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits<std::uint8_t>::max)())
{
return 'U';
}
- if ((std::numeric_limits<int16_t>::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits<int16_t>::max)())
+ if ((std::numeric_limits<std::int16_t>::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits<std::int16_t>::max)())
{
return 'I';
}
- if ((std::numeric_limits<int32_t>::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits<int32_t>::max)())
+ if ((std::numeric_limits<std::int32_t>::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits<std::int32_t>::max)())
{
return 'l';
}
- // no check and assume int64_t (see note above)
- return 'L';
+ if ((std::numeric_limits<std::int64_t>::min)() <= j.m_value.number_integer && j.m_value.number_integer <= (std::numeric_limits<std::int64_t>::max)())
+ {
+ return 'L';
+ }
+ // anything else is treated as high-precision number
+ return 'H'; // LCOV_EXCL_LINE
}
case value_t::number_unsigned:
{
- if (j.m_value.number_unsigned <= (std::numeric_limits<int8_t>::max)())
+ if (j.m_value.number_unsigned <= static_cast<std::uint64_t>((std::numeric_limits<std::int8_t>::max)()))
{
return 'i';
}
- if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)())
+ if (j.m_value.number_unsigned <= static_cast<std::uint64_t>((std::numeric_limits<std::uint8_t>::max)()))
{
return 'U';
}
- if (j.m_value.number_unsigned <= (std::numeric_limits<int16_t>::max)())
+ if (j.m_value.number_unsigned <= static_cast<std::uint64_t>((std::numeric_limits<std::int16_t>::max)()))
{
return 'I';
}
- if (j.m_value.number_unsigned <= (std::numeric_limits<int32_t>::max)())
+ if (j.m_value.number_unsigned <= static_cast<std::uint64_t>((std::numeric_limits<std::int32_t>::max)()))
{
return 'l';
}
- // no check and assume int64_t (see note above)
- return 'L';
+ if (j.m_value.number_unsigned <= static_cast<std::uint64_t>((std::numeric_limits<std::int64_t>::max)()))
+ {
+ return 'L';
+ }
+ // anything else is treated as high-precision number
+ return 'H'; // LCOV_EXCL_LINE
}
case value_t::number_float:
@@ -9675,7 +14194,8 @@ class binary_writer
case value_t::string:
return 'S';
- case value_t::array:
+ case value_t::array: // fallthrough
+ case value_t::binary:
return '[';
case value_t::object:
@@ -9719,7 +14239,7 @@ class binary_writer
std::memcpy(vec.data(), &n, sizeof(NumberType));
// step 2: write array to output (with possible reordering)
- if (is_little_endian and not OutputIsLittleEndian)
+ if (is_little_endian != OutputIsLittleEndian)
{
// reverse byte order prior to conversion if necessary
std::reverse(vec.begin(), vec.end());
@@ -9728,24 +14248,44 @@ class binary_writer
oa->write_characters(vec.data(), sizeof(NumberType));
}
+ void write_compact_float(const number_float_t n, detail::input_format_t format)
+ {
+ if (static_cast<double>(n) >= static_cast<double>(std::numeric_limits<float>::lowest()) &&
+ static_cast<double>(n) <= static_cast<double>((std::numeric_limits<float>::max)()) &&
+ static_cast<double>(static_cast<float>(n)) == static_cast<double>(n))
+ {
+ oa->write_character(format == detail::input_format_t::cbor
+ ? get_cbor_float_prefix(static_cast<float>(n))
+ : get_msgpack_float_prefix(static_cast<float>(n)));
+ write_number(static_cast<float>(n));
+ }
+ else
+ {
+ oa->write_character(format == detail::input_format_t::cbor
+ ? get_cbor_float_prefix(n)
+ : get_msgpack_float_prefix(n));
+ write_number(n);
+ }
+ }
+
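/*!
Editor's note, not part of the diff: a sketch of what write_compact_float
buys, assuming the CBOR float case routes through the same helper as the
MessagePack call above. Doubles that survive a float round-trip shrink from
9 to 5 bytes on the wire:

@code{.cpp}
nlohmann::json::to_cbor(nlohmann::json(0.5)); // FA 3F 00 00 00  (float32)
nlohmann::json::to_cbor(nlohmann::json(0.1)); // FB ...          (float64; 0.1f != 0.1)
@endcode
*/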
public:
// The following to_char_type functions implement the conversion
// between uint8_t and CharType. In case CharType is not unsigned,
// such a conversion is required to allow values greater than 128.
// See <https://github.com/nlohmann/json/issues/1286> for a discussion.
template < typename C = CharType,
- enable_if_t < std::is_signed<C>::value and std::is_signed<char>::value > * = nullptr >
+ enable_if_t < std::is_signed<C>::value && std::is_signed<char>::value > * = nullptr >
static constexpr CharType to_char_type(std::uint8_t x) noexcept
{
return *reinterpret_cast<char*>(&x);
}
template < typename C = CharType,
- enable_if_t < std::is_signed<C>::value and std::is_unsigned<char>::value > * = nullptr >
+ enable_if_t < std::is_signed<C>::value && std::is_unsigned<char>::value > * = nullptr >
static CharType to_char_type(std::uint8_t x) noexcept
{
static_assert(sizeof(std::uint8_t) == sizeof(CharType), "size of CharType must be equal to std::uint8_t");
- static_assert(std::is_pod<CharType>::value, "CharType must be POD");
+ static_assert(std::is_trivial<CharType>::value, "CharType must be trivial");
CharType result;
std::memcpy(&result, &x, sizeof(x));
return result;
@@ -9760,8 +14300,8 @@ class binary_writer
template < typename InputCharType, typename C = CharType,
enable_if_t <
- std::is_signed<C>::value and
- std::is_signed<char>::value and
+ std::is_signed<C>::value &&
+ std::is_signed<char>::value &&
std::is_same<char, typename std::remove_cv<InputCharType>::type>::value
> * = nullptr >
static constexpr CharType to_char_type(InputCharType x) noexcept
@@ -9771,7 +14311,7 @@ class binary_writer
private:
/// whether we can assume little endianness
- const bool is_little_endian = binary_reader<BasicJsonType>::little_endianess();
+ const bool is_little_endian = little_endianess();
/// the output
output_adapter_t<CharType> oa = nullptr;
@@ -9779,32 +14319,35 @@ class binary_writer
} // namespace detail
} // namespace nlohmann
+// #include <nlohmann/detail/output/output_adapters.hpp>
+
// #include <nlohmann/detail/output/serializer.hpp>
#include <algorithm> // reverse, remove, fill, find, none_of
#include <array> // array
-#include <cassert> // assert
-#include <ciso646> // and, or
#include <clocale> // localeconv, lconv
#include <cmath> // labs, isfinite, isnan, signbit
#include <cstddef> // size_t, ptrdiff_t
#include <cstdint> // uint8_t
#include <cstdio> // snprintf
#include <limits> // numeric_limits
-#include <string> // string
+#include <string> // string, char_traits
#include <type_traits> // is_same
-
-// #include <nlohmann/detail/exceptions.hpp>
+#include <utility> // move
// #include <nlohmann/detail/conversions/to_chars.hpp>
-#include <cassert> // assert
-#include <ciso646> // or, and, not
+#include <array> // array
#include <cmath> // signbit, isfinite
#include <cstdint> // intN_t, uintN_t
#include <cstring> // memcpy, memmove
+#include <limits> // numeric_limits
+#include <type_traits> // conditional
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
namespace nlohmann
{
@@ -9833,7 +14376,7 @@ For a detailed description of the algorithm see:
namespace dtoa_impl
{
-template <typename Target, typename Source>
+template<typename Target, typename Source>
Target reinterpret_bits(const Source source)
{
static_assert(sizeof(Target) == sizeof(Source), "size mismatch");
@@ -9847,10 +14390,10 @@ struct diyfp // f * 2^e
{
static constexpr int kPrecision = 64; // = q
- uint64_t f = 0;
+ std::uint64_t f = 0;
int e = 0;
- constexpr diyfp(uint64_t f_, int e_) noexcept : f(f_), e(e_) {}
+ constexpr diyfp(std::uint64_t f_, int e_) noexcept : f(f_), e(e_) {}
/*!
@brief returns x - y
@@ -9858,8 +14401,8 @@ struct diyfp // f * 2^e
*/
static diyfp sub(const diyfp& x, const diyfp& y) noexcept
{
- assert(x.e == y.e);
- assert(x.f >= y.f);
+ JSON_ASSERT(x.e == y.e);
+ JSON_ASSERT(x.f >= y.f);
return {x.f - y.f, x.e};
}
@@ -9895,23 +14438,23 @@ struct diyfp // f * 2^e
//
// = p_lo + 2^64 p_hi
- const uint64_t u_lo = x.f & 0xFFFFFFFF;
- const uint64_t u_hi = x.f >> 32;
- const uint64_t v_lo = y.f & 0xFFFFFFFF;
- const uint64_t v_hi = y.f >> 32;
+ const std::uint64_t u_lo = x.f & 0xFFFFFFFFu;
+ const std::uint64_t u_hi = x.f >> 32u;
+ const std::uint64_t v_lo = y.f & 0xFFFFFFFFu;
+ const std::uint64_t v_hi = y.f >> 32u;
- const uint64_t p0 = u_lo * v_lo;
- const uint64_t p1 = u_lo * v_hi;
- const uint64_t p2 = u_hi * v_lo;
- const uint64_t p3 = u_hi * v_hi;
+ const std::uint64_t p0 = u_lo * v_lo;
+ const std::uint64_t p1 = u_lo * v_hi;
+ const std::uint64_t p2 = u_hi * v_lo;
+ const std::uint64_t p3 = u_hi * v_hi;
- const uint64_t p0_hi = p0 >> 32;
- const uint64_t p1_lo = p1 & 0xFFFFFFFF;
- const uint64_t p1_hi = p1 >> 32;
- const uint64_t p2_lo = p2 & 0xFFFFFFFF;
- const uint64_t p2_hi = p2 >> 32;
+ const std::uint64_t p0_hi = p0 >> 32u;
+ const std::uint64_t p1_lo = p1 & 0xFFFFFFFFu;
+ const std::uint64_t p1_hi = p1 >> 32u;
+ const std::uint64_t p2_lo = p2 & 0xFFFFFFFFu;
+ const std::uint64_t p2_hi = p2 >> 32u;
- uint64_t Q = p0_hi + p1_lo + p2_lo;
+ std::uint64_t Q = p0_hi + p1_lo + p2_lo;
// The full product might now be computed as
//
@@ -9922,9 +14465,9 @@ struct diyfp // f * 2^e
// Effectively we only need to add the highest bit in p_lo to p_hi (and
// Q_hi + 1 does not overflow).
- Q += uint64_t{1} << (64 - 32 - 1); // round, ties up
+ Q += std::uint64_t{1} << (64u - 32u - 1u); // round, ties up
- const uint64_t h = p3 + p2_hi + p1_hi + (Q >> 32);
+ const std::uint64_t h = p3 + p2_hi + p1_hi + (Q >> 32u);
return {h, x.e + y.e + 64};
}
@@ -9935,11 +14478,11 @@ struct diyfp // f * 2^e
*/
static diyfp normalize(diyfp x) noexcept
{
- assert(x.f != 0);
+ JSON_ASSERT(x.f != 0);
- while ((x.f >> 63) == 0)
+ while ((x.f >> 63u) == 0)
{
- x.f <<= 1;
+ x.f <<= 1u;
x.e--;
}
@@ -9954,8 +14497,8 @@ struct diyfp // f * 2^e
{
const int delta = x.e - target_exponent;
- assert(delta >= 0);
- assert(((x.f << delta) >> delta) == x.f);
+ JSON_ASSERT(delta >= 0);
+ JSON_ASSERT(((x.f << delta) >> delta) == x.f);
return {x.f << delta, target_exponent};
}
@@ -9974,11 +14517,11 @@ boundaries.
@pre value must be finite and positive
*/
-template <typename FloatType>
+template<typename FloatType>
boundaries compute_boundaries(FloatType value)
{
- assert(std::isfinite(value));
- assert(value > 0);
+ JSON_ASSERT(std::isfinite(value));
+ JSON_ASSERT(value > 0);
// Convert the IEEE representation into a diyfp.
//
@@ -9993,15 +14536,15 @@ boundaries compute_boundaries(FloatType value)
constexpr int kPrecision = std::numeric_limits<FloatType>::digits; // = p (includes the hidden bit)
constexpr int kBias = std::numeric_limits<FloatType>::max_exponent - 1 + (kPrecision - 1);
constexpr int kMinExp = 1 - kBias;
- constexpr uint64_t kHiddenBit = uint64_t{1} << (kPrecision - 1); // = 2^(p-1)
+ constexpr std::uint64_t kHiddenBit = std::uint64_t{1} << (kPrecision - 1); // = 2^(p-1)
- using bits_type = typename std::conditional< kPrecision == 24, uint32_t, uint64_t >::type;
+ using bits_type = typename std::conditional<kPrecision == 24, std::uint32_t, std::uint64_t >::type;
- const uint64_t bits = reinterpret_bits<bits_type>(value);
- const uint64_t E = bits >> (kPrecision - 1);
- const uint64_t F = bits & (kHiddenBit - 1);
+ const std::uint64_t bits = reinterpret_bits<bits_type>(value);
+ const std::uint64_t E = bits >> (kPrecision - 1);
+ const std::uint64_t F = bits & (kHiddenBit - 1);
- const bool is_denormal = (E == 0);
+ const bool is_denormal = E == 0;
const diyfp v = is_denormal
? diyfp(F, kMinExp)
: diyfp(F + kHiddenBit, static_cast<int>(E) - kBias);
@@ -10027,7 +14570,7 @@ boundaries compute_boundaries(FloatType value)
// -----------------+------+------+-------------+-------------+--- (B)
// v- m- v m+ v+
- const bool lower_boundary_is_closer = (F == 0 and E > 1);
+ const bool lower_boundary_is_closer = F == 0 && E > 1;
const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1);
const diyfp m_minus = lower_boundary_is_closer
? diyfp(4 * v.f - 1, v.e - 2) // (B)
@@ -10102,7 +14645,7 @@ constexpr int kGamma = -32;
struct cached_power // c = f * 2^e ~= 10^k
{
- uint64_t f;
+ std::uint64_t f;
int e;
int k;
};
@@ -10126,7 +14669,7 @@ inline cached_power get_cached_power_for_binary_exponent(int e)
// ==> 2^(q - 1 + alpha) <= c * 2^(e + q)
// ==> 2^(alpha - e - 1) <= c
//
- // If c were an exakt power of ten, i.e. c = 10^k, one may determine k as
+ // If c were an exact power of ten, i.e. c = 10^k, one may determine k as
//
// k = ceil( log_10( 2^(alpha - e - 1) ) )
// = ceil( (alpha - e - 1) * log_10(2) )
@@ -10166,110 +14709,110 @@ inline cached_power get_cached_power_for_binary_exponent(int e)
// NB:
// Actually this function returns c, such that -60 <= e_c + e + 64 <= -34.
- constexpr int kCachedPowersSize = 79;
constexpr int kCachedPowersMinDecExp = -300;
constexpr int kCachedPowersDecStep = 8;
- static constexpr cached_power kCachedPowers[] =
- {
- { 0xAB70FE17C79AC6CA, -1060, -300 },
- { 0xFF77B1FCBEBCDC4F, -1034, -292 },
- { 0xBE5691EF416BD60C, -1007, -284 },
- { 0x8DD01FAD907FFC3C, -980, -276 },
- { 0xD3515C2831559A83, -954, -268 },
- { 0x9D71AC8FADA6C9B5, -927, -260 },
- { 0xEA9C227723EE8BCB, -901, -252 },
- { 0xAECC49914078536D, -874, -244 },
- { 0x823C12795DB6CE57, -847, -236 },
- { 0xC21094364DFB5637, -821, -228 },
- { 0x9096EA6F3848984F, -794, -220 },
- { 0xD77485CB25823AC7, -768, -212 },
- { 0xA086CFCD97BF97F4, -741, -204 },
- { 0xEF340A98172AACE5, -715, -196 },
- { 0xB23867FB2A35B28E, -688, -188 },
- { 0x84C8D4DFD2C63F3B, -661, -180 },
- { 0xC5DD44271AD3CDBA, -635, -172 },
- { 0x936B9FCEBB25C996, -608, -164 },
- { 0xDBAC6C247D62A584, -582, -156 },
- { 0xA3AB66580D5FDAF6, -555, -148 },
- { 0xF3E2F893DEC3F126, -529, -140 },
- { 0xB5B5ADA8AAFF80B8, -502, -132 },
- { 0x87625F056C7C4A8B, -475, -124 },
- { 0xC9BCFF6034C13053, -449, -116 },
- { 0x964E858C91BA2655, -422, -108 },
- { 0xDFF9772470297EBD, -396, -100 },
- { 0xA6DFBD9FB8E5B88F, -369, -92 },
- { 0xF8A95FCF88747D94, -343, -84 },
- { 0xB94470938FA89BCF, -316, -76 },
- { 0x8A08F0F8BF0F156B, -289, -68 },
- { 0xCDB02555653131B6, -263, -60 },
- { 0x993FE2C6D07B7FAC, -236, -52 },
- { 0xE45C10C42A2B3B06, -210, -44 },
- { 0xAA242499697392D3, -183, -36 },
- { 0xFD87B5F28300CA0E, -157, -28 },
- { 0xBCE5086492111AEB, -130, -20 },
- { 0x8CBCCC096F5088CC, -103, -12 },
- { 0xD1B71758E219652C, -77, -4 },
- { 0x9C40000000000000, -50, 4 },
- { 0xE8D4A51000000000, -24, 12 },
- { 0xAD78EBC5AC620000, 3, 20 },
- { 0x813F3978F8940984, 30, 28 },
- { 0xC097CE7BC90715B3, 56, 36 },
- { 0x8F7E32CE7BEA5C70, 83, 44 },
- { 0xD5D238A4ABE98068, 109, 52 },
- { 0x9F4F2726179A2245, 136, 60 },
- { 0xED63A231D4C4FB27, 162, 68 },
- { 0xB0DE65388CC8ADA8, 189, 76 },
- { 0x83C7088E1AAB65DB, 216, 84 },
- { 0xC45D1DF942711D9A, 242, 92 },
- { 0x924D692CA61BE758, 269, 100 },
- { 0xDA01EE641A708DEA, 295, 108 },
- { 0xA26DA3999AEF774A, 322, 116 },
- { 0xF209787BB47D6B85, 348, 124 },
- { 0xB454E4A179DD1877, 375, 132 },
- { 0x865B86925B9BC5C2, 402, 140 },
- { 0xC83553C5C8965D3D, 428, 148 },
- { 0x952AB45CFA97A0B3, 455, 156 },
- { 0xDE469FBD99A05FE3, 481, 164 },
- { 0xA59BC234DB398C25, 508, 172 },
- { 0xF6C69A72A3989F5C, 534, 180 },
- { 0xB7DCBF5354E9BECE, 561, 188 },
- { 0x88FCF317F22241E2, 588, 196 },
- { 0xCC20CE9BD35C78A5, 614, 204 },
- { 0x98165AF37B2153DF, 641, 212 },
- { 0xE2A0B5DC971F303A, 667, 220 },
- { 0xA8D9D1535CE3B396, 694, 228 },
- { 0xFB9B7CD9A4A7443C, 720, 236 },
- { 0xBB764C4CA7A44410, 747, 244 },
- { 0x8BAB8EEFB6409C1A, 774, 252 },
- { 0xD01FEF10A657842C, 800, 260 },
- { 0x9B10A4E5E9913129, 827, 268 },
- { 0xE7109BFBA19C0C9D, 853, 276 },
- { 0xAC2820D9623BF429, 880, 284 },
- { 0x80444B5E7AA7CF85, 907, 292 },
- { 0xBF21E44003ACDD2D, 933, 300 },
- { 0x8E679C2F5E44FF8F, 960, 308 },
- { 0xD433179D9C8CB841, 986, 316 },
- { 0x9E19DB92B4E31BA9, 1013, 324 },
+ static constexpr std::array<cached_power, 79> kCachedPowers =
+ {
+ {
+ { 0xAB70FE17C79AC6CA, -1060, -300 },
+ { 0xFF77B1FCBEBCDC4F, -1034, -292 },
+ { 0xBE5691EF416BD60C, -1007, -284 },
+ { 0x8DD01FAD907FFC3C, -980, -276 },
+ { 0xD3515C2831559A83, -954, -268 },
+ { 0x9D71AC8FADA6C9B5, -927, -260 },
+ { 0xEA9C227723EE8BCB, -901, -252 },
+ { 0xAECC49914078536D, -874, -244 },
+ { 0x823C12795DB6CE57, -847, -236 },
+ { 0xC21094364DFB5637, -821, -228 },
+ { 0x9096EA6F3848984F, -794, -220 },
+ { 0xD77485CB25823AC7, -768, -212 },
+ { 0xA086CFCD97BF97F4, -741, -204 },
+ { 0xEF340A98172AACE5, -715, -196 },
+ { 0xB23867FB2A35B28E, -688, -188 },
+ { 0x84C8D4DFD2C63F3B, -661, -180 },
+ { 0xC5DD44271AD3CDBA, -635, -172 },
+ { 0x936B9FCEBB25C996, -608, -164 },
+ { 0xDBAC6C247D62A584, -582, -156 },
+ { 0xA3AB66580D5FDAF6, -555, -148 },
+ { 0xF3E2F893DEC3F126, -529, -140 },
+ { 0xB5B5ADA8AAFF80B8, -502, -132 },
+ { 0x87625F056C7C4A8B, -475, -124 },
+ { 0xC9BCFF6034C13053, -449, -116 },
+ { 0x964E858C91BA2655, -422, -108 },
+ { 0xDFF9772470297EBD, -396, -100 },
+ { 0xA6DFBD9FB8E5B88F, -369, -92 },
+ { 0xF8A95FCF88747D94, -343, -84 },
+ { 0xB94470938FA89BCF, -316, -76 },
+ { 0x8A08F0F8BF0F156B, -289, -68 },
+ { 0xCDB02555653131B6, -263, -60 },
+ { 0x993FE2C6D07B7FAC, -236, -52 },
+ { 0xE45C10C42A2B3B06, -210, -44 },
+ { 0xAA242499697392D3, -183, -36 },
+ { 0xFD87B5F28300CA0E, -157, -28 },
+ { 0xBCE5086492111AEB, -130, -20 },
+ { 0x8CBCCC096F5088CC, -103, -12 },
+ { 0xD1B71758E219652C, -77, -4 },
+ { 0x9C40000000000000, -50, 4 },
+ { 0xE8D4A51000000000, -24, 12 },
+ { 0xAD78EBC5AC620000, 3, 20 },
+ { 0x813F3978F8940984, 30, 28 },
+ { 0xC097CE7BC90715B3, 56, 36 },
+ { 0x8F7E32CE7BEA5C70, 83, 44 },
+ { 0xD5D238A4ABE98068, 109, 52 },
+ { 0x9F4F2726179A2245, 136, 60 },
+ { 0xED63A231D4C4FB27, 162, 68 },
+ { 0xB0DE65388CC8ADA8, 189, 76 },
+ { 0x83C7088E1AAB65DB, 216, 84 },
+ { 0xC45D1DF942711D9A, 242, 92 },
+ { 0x924D692CA61BE758, 269, 100 },
+ { 0xDA01EE641A708DEA, 295, 108 },
+ { 0xA26DA3999AEF774A, 322, 116 },
+ { 0xF209787BB47D6B85, 348, 124 },
+ { 0xB454E4A179DD1877, 375, 132 },
+ { 0x865B86925B9BC5C2, 402, 140 },
+ { 0xC83553C5C8965D3D, 428, 148 },
+ { 0x952AB45CFA97A0B3, 455, 156 },
+ { 0xDE469FBD99A05FE3, 481, 164 },
+ { 0xA59BC234DB398C25, 508, 172 },
+ { 0xF6C69A72A3989F5C, 534, 180 },
+ { 0xB7DCBF5354E9BECE, 561, 188 },
+ { 0x88FCF317F22241E2, 588, 196 },
+ { 0xCC20CE9BD35C78A5, 614, 204 },
+ { 0x98165AF37B2153DF, 641, 212 },
+ { 0xE2A0B5DC971F303A, 667, 220 },
+ { 0xA8D9D1535CE3B396, 694, 228 },
+ { 0xFB9B7CD9A4A7443C, 720, 236 },
+ { 0xBB764C4CA7A44410, 747, 244 },
+ { 0x8BAB8EEFB6409C1A, 774, 252 },
+ { 0xD01FEF10A657842C, 800, 260 },
+ { 0x9B10A4E5E9913129, 827, 268 },
+ { 0xE7109BFBA19C0C9D, 853, 276 },
+ { 0xAC2820D9623BF429, 880, 284 },
+ { 0x80444B5E7AA7CF85, 907, 292 },
+ { 0xBF21E44003ACDD2D, 933, 300 },
+ { 0x8E679C2F5E44FF8F, 960, 308 },
+ { 0xD433179D9C8CB841, 986, 316 },
+ { 0x9E19DB92B4E31BA9, 1013, 324 },
+ }
};
// This computation gives exactly the same results for k as
// k = ceil((kAlpha - e - 1) * 0.30102999566398114)
// for |e| <= 1500, but doesn't require floating-point operations.
// NB: log_10(2) ~= 78913 / 2^18
- assert(e >= -1500);
- assert(e <= 1500);
+ JSON_ASSERT(e >= -1500);
+ JSON_ASSERT(e <= 1500);
const int f = kAlpha - e - 1;
const int k = (f * 78913) / (1 << 18) + static_cast<int>(f > 0);
const int index = (-kCachedPowersMinDecExp + k + (kCachedPowersDecStep - 1)) / kCachedPowersDecStep;
- assert(index >= 0);
- assert(index < kCachedPowersSize);
- static_cast<void>(kCachedPowersSize); // Fix warning.
+ JSON_ASSERT(index >= 0);
+ JSON_ASSERT(static_cast<std::size_t>(index) < kCachedPowers.size());
- const cached_power cached = kCachedPowers[index];
- assert(kAlpha <= cached.e + e + 64);
- assert(kGamma >= cached.e + e + 64);
+ const cached_power cached = kCachedPowers[static_cast<std::size_t>(index)];
+ JSON_ASSERT(kAlpha <= cached.e + e + 64);
+ JSON_ASSERT(kGamma >= cached.e + e + 64);
return cached;
}
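/*!
Editor's note, not part of the diff: a worked instance of the index
computation above, taking e = -52 as an example exponent:

@code{.cpp}
// f     = kAlpha - e - 1          = -60 + 52 - 1     = -9
// k     = (f * 78913) / (1 << 18) = -710217 / 262144 = -2  (truncates toward zero)
// index = (300 + k + 7) / 8       = 305 / 8          = 38
// kCachedPowers[38] == { 0x9C40000000000000, -50, 4 }      (c ~= 10^4)
// check: cached.e + e + 64 = -50 - 52 + 64 = -38, inside [kAlpha, kGamma]
@endcode
*/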
@@ -10278,7 +14821,7 @@ inline cached_power get_cached_power_for_binary_exponent(int e)
For n != 0, returns k, such that pow10 := 10^(k-1) <= n < 10^k.
For n == 0, returns 1 and sets pow10 := 1.
*/
-inline int find_largest_pow10(const uint32_t n, uint32_t& pow10)
+inline int find_largest_pow10(const std::uint32_t n, std::uint32_t& pow10)
{
// LCOV_EXCL_START
if (n >= 1000000000)
@@ -10334,13 +14877,13 @@ inline int find_largest_pow10(const uint32_t n, uint32_t& pow10)
}
}
-inline void grisu2_round(char* buf, int len, uint64_t dist, uint64_t delta,
- uint64_t rest, uint64_t ten_k)
+inline void grisu2_round(char* buf, int len, std::uint64_t dist, std::uint64_t delta,
+ std::uint64_t rest, std::uint64_t ten_k)
{
- assert(len >= 1);
- assert(dist <= delta);
- assert(rest <= delta);
- assert(ten_k > 0);
+ JSON_ASSERT(len >= 1);
+ JSON_ASSERT(dist <= delta);
+ JSON_ASSERT(rest <= delta);
+ JSON_ASSERT(ten_k > 0);
// <--------------------------- delta ---->
// <---- dist --------->
@@ -10362,10 +14905,10 @@ inline void grisu2_round(char* buf, int len, uint64_t dist, uint64_t delta,
// integer arithmetic.
while (rest < dist
- and delta - rest >= ten_k
- and (rest + ten_k < dist or dist - rest > rest + ten_k - dist))
+ && delta - rest >= ten_k
+ && (rest + ten_k < dist || dist - rest > rest + ten_k - dist))
{
- assert(buf[len - 1] != '0');
+ JSON_ASSERT(buf[len - 1] != '0');
buf[len - 1]--;
rest += ten_k;
}
@@ -10393,11 +14936,11 @@ inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent,
// Grisu2 generates the digits of M+ from left to right and stops as soon as
// V is in [M-,M+].
- assert(M_plus.e >= kAlpha);
- assert(M_plus.e <= kGamma);
+ JSON_ASSERT(M_plus.e >= kAlpha);
+ JSON_ASSERT(M_plus.e <= kGamma);
- uint64_t delta = diyfp::sub(M_plus, M_minus).f; // (significand of (M+ - M-), implicit exponent is e)
- uint64_t dist = diyfp::sub(M_plus, w ).f; // (significand of (M+ - w ), implicit exponent is e)
+ std::uint64_t delta = diyfp::sub(M_plus, M_minus).f; // (significand of (M+ - M-), implicit exponent is e)
+ std::uint64_t dist = diyfp::sub(M_plus, w ).f; // (significand of (M+ - w ), implicit exponent is e)
// Split M+ = f * 2^e into two parts p1 and p2 (note: e < 0):
//
@@ -10406,18 +14949,18 @@ inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent,
// = ((p1 ) * 2^-e + (p2 )) * 2^e
// = p1 + p2 * 2^e
- const diyfp one(uint64_t{1} << -M_plus.e, M_plus.e);
+ const diyfp one(std::uint64_t{1} << -M_plus.e, M_plus.e);
- auto p1 = static_cast<uint32_t>(M_plus.f >> -one.e); // p1 = f div 2^-e (Since -e >= 32, p1 fits into a 32-bit int.)
- uint64_t p2 = M_plus.f & (one.f - 1); // p2 = f mod 2^-e
+ auto p1 = static_cast<std::uint32_t>(M_plus.f >> -one.e); // p1 = f div 2^-e (Since -e >= 32, p1 fits into a 32-bit int.)
+ std::uint64_t p2 = M_plus.f & (one.f - 1); // p2 = f mod 2^-e
// 1)
//
// Generate the digits of the integral part p1 = d[n-1]...d[1]d[0]
- assert(p1 > 0);
+ JSON_ASSERT(p1 > 0);
- uint32_t pow10;
+ std::uint32_t pow10;
const int k = find_largest_pow10(p1, pow10);
// 10^(k-1) <= p1 < 10^k, pow10 = 10^(k-1)
@@ -10445,13 +14988,13 @@ inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent,
// M+ = buffer * 10^n + (p1 + p2 * 2^e) (buffer = 0 for n = k)
// pow10 = 10^(n-1) <= p1 < 10^n
//
- const uint32_t d = p1 / pow10; // d = p1 div 10^(n-1)
- const uint32_t r = p1 % pow10; // r = p1 mod 10^(n-1)
+ const std::uint32_t d = p1 / pow10; // d = p1 div 10^(n-1)
+ const std::uint32_t r = p1 % pow10; // r = p1 mod 10^(n-1)
//
// M+ = buffer * 10^n + (d * 10^(n-1) + r) + p2 * 2^e
// = (buffer * 10 + d) * 10^(n-1) + (r + p2 * 2^e)
//
- assert(d <= 9);
+ JSON_ASSERT(d <= 9);
buffer[length++] = static_cast<char>('0' + d); // buffer := buffer * 10 + d
//
// M+ = buffer * 10^(n-1) + (r + p2 * 2^e)
@@ -10471,7 +15014,7 @@ inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent,
// Note:
// Since rest and delta share the same exponent e, it suffices to
// compare the significands.
- const uint64_t rest = (uint64_t{p1} << -one.e) + p2;
+ const std::uint64_t rest = (std::uint64_t{p1} << -one.e) + p2;
if (rest <= delta)
{
// V = buffer * 10^n, with M- <= V <= M+.
@@ -10487,7 +15030,7 @@ inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent,
//
// 10^n = (10^n * 2^-e) * 2^e = ulp * 2^e
//
- const uint64_t ten_n = uint64_t{pow10} << -one.e;
+ const std::uint64_t ten_n = std::uint64_t{pow10} << -one.e;
grisu2_round(buffer, length, dist, delta, rest, ten_n);
return;
@@ -10538,7 +15081,7 @@ inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent,
//
// and stop as soon as 10^-m * r * 2^e <= delta * 2^e
- assert(p2 > delta);
+ JSON_ASSERT(p2 > delta);
int m = 0;
for (;;)
@@ -10549,16 +15092,16 @@ inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent,
// = buffer * 10^-m + 10^-m * (1/10 * (10 * p2) ) * 2^e
// = buffer * 10^-m + 10^-m * (1/10 * ((10*p2 div 2^-e) * 2^-e + (10*p2 mod 2^-e)) * 2^e
//
- assert(p2 <= UINT64_MAX / 10);
+ JSON_ASSERT(p2 <= (std::numeric_limits<std::uint64_t>::max)() / 10);
p2 *= 10;
- const uint64_t d = p2 >> -one.e; // d = (10 * p2) div 2^-e
- const uint64_t r = p2 & (one.f - 1); // r = (10 * p2) mod 2^-e
+ const std::uint64_t d = p2 >> -one.e; // d = (10 * p2) div 2^-e
+ const std::uint64_t r = p2 & (one.f - 1); // r = (10 * p2) mod 2^-e
//
// M+ = buffer * 10^-m + 10^-m * (1/10 * (d * 2^-e + r) * 2^e
// = buffer * 10^-m + 10^-m * (1/10 * (d + r * 2^e))
// = (buffer * 10 + d) * 10^(-m-1) + 10^(-m-1) * r * 2^e
//
- assert(d <= 9);
+ JSON_ASSERT(d <= 9);
buffer[length++] = static_cast<char>('0' + d); // buffer := buffer * 10 + d
//
// M+ = buffer * 10^(-m-1) + 10^(-m-1) * r * 2^e
@@ -10592,7 +15135,7 @@ inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent,
//
// 10^m * 10^-m = 1 = 2^-e * 2^e = ten_m * 2^e
//
- const uint64_t ten_m = one.f;
+ const std::uint64_t ten_m = one.f;
grisu2_round(buffer, length, dist, delta, p2, ten_m);
// By construction this algorithm generates the shortest possible decimal
@@ -10615,11 +15158,12 @@ v = buf * 10^decimal_exponent
len is the length of the buffer (number of decimal digits)
The buffer must be large enough, i.e. >= max_digits10.
*/
+JSON_HEDLEY_NON_NULL(1)
inline void grisu2(char* buf, int& len, int& decimal_exponent,
diyfp m_minus, diyfp v, diyfp m_plus)
{
- assert(m_plus.e == m_minus.e);
- assert(m_plus.e == v.e);
+ JSON_ASSERT(m_plus.e == m_minus.e);
+ JSON_ASSERT(m_plus.e == v.e);
// --------(-----------------------+-----------------------)-------- (A)
// m- v m+
@@ -10673,14 +15217,15 @@ v = buf * 10^decimal_exponent
len is the length of the buffer (number of decimal digits)
The buffer must be large enough, i.e. >= max_digits10.
*/
-template <typename FloatType>
+template<typename FloatType>
+JSON_HEDLEY_NON_NULL(1)
void grisu2(char* buf, int& len, int& decimal_exponent, FloatType value)
{
static_assert(diyfp::kPrecision >= std::numeric_limits<FloatType>::digits + 3,
"internal error: not enough precision");
- assert(std::isfinite(value));
- assert(value > 0);
+ JSON_ASSERT(std::isfinite(value));
+ JSON_ASSERT(value > 0);
// If the neighbors (and boundaries) of 'value' are always computed for double-precision
// numbers, all floats can be recovered using strtod (and strtof). However, the resulting
@@ -10712,10 +15257,12 @@ void grisu2(char* buf, int& len, int& decimal_exponent, FloatType value)
@return a pointer to the element following the exponent.
@pre -1000 < e < 1000
*/
+JSON_HEDLEY_NON_NULL(1)
+JSON_HEDLEY_RETURNS_NON_NULL
inline char* append_exponent(char* buf, int e)
{
- assert(e > -1000);
- assert(e < 1000);
+ JSON_ASSERT(e > -1000);
+ JSON_ASSERT(e < 1000);
if (e < 0)
{
@@ -10727,7 +15274,7 @@ inline char* append_exponent(char* buf, int e)
*buf++ = '+';
}
- auto k = static_cast<uint32_t>(e);
+ auto k = static_cast<std::uint32_t>(e);
if (k < 10)
{
// Always print at least two digits in the exponent.
@@ -10762,11 +15309,13 @@ notation. Otherwise it will be printed in exponential notation.
@pre min_exp < 0
@pre max_exp > 0
*/
+JSON_HEDLEY_NON_NULL(1)
+JSON_HEDLEY_RETURNS_NON_NULL
inline char* format_buffer(char* buf, int len, int decimal_exponent,
int min_exp, int max_exp)
{
- assert(min_exp < 0);
- assert(max_exp > 0);
+ JSON_ASSERT(min_exp < 0);
+ JSON_ASSERT(max_exp > 0);
const int k = len;
const int n = len + decimal_exponent;
@@ -10775,40 +15324,40 @@ inline char* format_buffer(char* buf, int len, int decimal_exponent,
// k is the length of the buffer (number of decimal digits)
// n is the position of the decimal point relative to the start of the buffer.
- if (k <= n and n <= max_exp)
+ if (k <= n && n <= max_exp)
{
// digits[000]
// len <= max_exp + 2
- std::memset(buf + k, '0', static_cast<size_t>(n - k));
+ std::memset(buf + k, '0', static_cast<size_t>(n) - static_cast<size_t>(k));
// Make it look like a floating-point number (#362, #378)
buf[n + 0] = '.';
buf[n + 1] = '0';
- return buf + (n + 2);
+ return buf + (static_cast<size_t>(n) + 2);
}
- if (0 < n and n <= max_exp)
+ if (0 < n && n <= max_exp)
{
// dig.its
// len <= max_digits10 + 1
- assert(k > n);
+ JSON_ASSERT(k > n);
- std::memmove(buf + (n + 1), buf + n, static_cast<size_t>(k - n));
+ std::memmove(buf + (static_cast<size_t>(n) + 1), buf + n, static_cast<size_t>(k) - static_cast<size_t>(n));
buf[n] = '.';
- return buf + (k + 1);
+ return buf + (static_cast<size_t>(k) + 1U);
}
- if (min_exp < n and n <= 0)
+ if (min_exp < n && n <= 0)
{
// 0.[000]digits
// len <= 2 + (-min_exp - 1) + max_digits10
- std::memmove(buf + (2 + -n), buf, static_cast<size_t>(k));
+ std::memmove(buf + (2 + static_cast<size_t>(-n)), buf, static_cast<size_t>(k));
buf[0] = '0';
buf[1] = '.';
std::memset(buf + 2, '0', static_cast<size_t>(-n));
- return buf + (2 + (-n) + k);
+ return buf + (2U + static_cast<size_t>(-n) + static_cast<size_t>(k));
}
if (k == 1)
@@ -10823,9 +15372,9 @@ inline char* format_buffer(char* buf, int len, int decimal_exponent,
// d.igitsE+123
// len <= max_digits10 + 1 + 5
- std::memmove(buf + 2, buf + 1, static_cast<size_t>(k - 1));
+ std::memmove(buf + 2, buf + 1, static_cast<size_t>(k) - 1);
buf[1] = '.';
- buf += 1 + k;
+ buf += 1 + static_cast<size_t>(k);
}
*buf++ = 'e';
@@ -10844,11 +15393,13 @@ format. Returns an iterator pointing past-the-end of the decimal representation.
@note The buffer must be large enough.
@note The result is NOT null-terminated.
*/
-template <typename FloatType>
+template<typename FloatType>
+JSON_HEDLEY_NON_NULL(1, 2)
+JSON_HEDLEY_RETURNS_NON_NULL
char* to_chars(char* first, const char* last, FloatType value)
{
static_cast<void>(last); // maybe unused - fix warning
- assert(std::isfinite(value));
+ JSON_ASSERT(std::isfinite(value));
// Use signbit(value) instead of (value < 0) since signbit works for -0.
if (std::signbit(value))
@@ -10866,7 +15417,7 @@ char* to_chars(char* first, const char* last, FloatType value)
return first;
}
- assert(last - first >= std::numeric_limits<FloatType>::max_digits10);
+ JSON_ASSERT(last - first >= std::numeric_limits<FloatType>::max_digits10);
// Compute v = buffer * 10^decimal_exponent.
// The decimal digits are stored in the buffer, which needs to be interpreted
@@ -10876,16 +15427,16 @@ char* to_chars(char* first, const char* last, FloatType value)
int decimal_exponent = 0;
dtoa_impl::grisu2(first, len, decimal_exponent, value);
- assert(len <= std::numeric_limits<FloatType>::max_digits10);
+ JSON_ASSERT(len <= std::numeric_limits<FloatType>::max_digits10);
// Format the buffer like printf("%.*g", prec, value)
constexpr int kMinExp = -4;
// Use digits10 here to increase compatibility with version 2.
constexpr int kMaxExp = std::numeric_limits<FloatType>::digits10;
- assert(last - first >= kMaxExp + 2);
- assert(last - first >= 2 + (-kMinExp - 1) + std::numeric_limits<FloatType>::max_digits10);
- assert(last - first >= std::numeric_limits<FloatType>::max_digits10 + 6);
+ JSON_ASSERT(last - first >= kMaxExp + 2);
+ JSON_ASSERT(last - first >= 2 + (-kMinExp - 1) + std::numeric_limits<FloatType>::max_digits10);
+ JSON_ASSERT(last - first >= std::numeric_limits<FloatType>::max_digits10 + 6);
return dtoa_impl::format_buffer(first, len, decimal_exponent, kMinExp, kMaxExp);
}
@@ -10893,6 +15444,8 @@ char* to_chars(char* first, const char* last, FloatType value)
} // namespace detail
} // namespace nlohmann
+// #include <nlohmann/detail/exceptions.hpp>
+
// #include <nlohmann/detail/macro_scope.hpp>
// #include <nlohmann/detail/meta/cpp_future.hpp>
@@ -10927,8 +15480,9 @@ class serializer
using number_float_t = typename BasicJsonType::number_float_t;
using number_integer_t = typename BasicJsonType::number_integer_t;
using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
- static constexpr uint8_t UTF8_ACCEPT = 0;
- static constexpr uint8_t UTF8_REJECT = 1;
+ using binary_char_t = typename BasicJsonType::binary_t::value_type;
+ static constexpr std::uint8_t UTF8_ACCEPT = 0;
+ static constexpr std::uint8_t UTF8_REJECT = 1;
public:
/*!
@@ -10940,8 +15494,8 @@ class serializer
error_handler_t error_handler_ = error_handler_t::strict)
: o(std::move(s))
, loc(std::localeconv())
- , thousands_sep(loc->thousands_sep == nullptr ? '\0' : * (loc->thousands_sep))
- , decimal_point(loc->decimal_point == nullptr ? '\0' : * (loc->decimal_point))
+ , thousands_sep(loc->thousands_sep == nullptr ? '\0' : std::char_traits<char>::to_char_type(* (loc->thousands_sep)))
+ , decimal_point(loc->decimal_point == nullptr ? '\0' : std::char_traits<char>::to_char_type(* (loc->decimal_point)))
, indent_char(ichar)
, indent_string(512, indent_char)
, error_handler(error_handler_)
@@ -10965,13 +15519,19 @@ class serializer
- strings and object keys are escaped using `escape_string()`
- integer numbers are converted implicitly via `operator<<`
- floating-point numbers are converted to a string using `"%g"` format
+ - binary values are serialized as objects containing the subtype and the
+ byte array
- @param[in] val value to serialize
- @param[in] pretty_print whether the output shall be pretty-printed
- @param[in] indent_step the indent level
- @param[in] current_indent the current indent level (only used internally)
+ @param[in] val value to serialize
+ @param[in] pretty_print whether the output shall be pretty-printed
+ @param[in] ensure_ascii If @a ensure_ascii is true, all non-ASCII characters
+ in the output are escaped with `\uXXXX` sequences, and the result consists
+ of ASCII characters only.
+ @param[in] indent_step the indent level
+ @param[in] current_indent the current indent level (only used internally)
*/
- void dump(const BasicJsonType& val, const bool pretty_print,
+ void dump(const BasicJsonType& val,
+ const bool pretty_print,
const bool ensure_ascii,
const unsigned int indent_step,
const unsigned int current_indent = 0)
@@ -10992,7 +15552,7 @@ class serializer
// variable to hold indentation for recursive calls
const auto new_indent = current_indent + indent_step;
- if (JSON_UNLIKELY(indent_string.size() < new_indent))
+ if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent))
{
indent_string.resize(indent_string.size() * 2, ' ');
}
@@ -11010,8 +15570,8 @@ class serializer
}
// last element
- assert(i != val.m_value.object->cend());
- assert(std::next(i) == val.m_value.object->cend());
+ JSON_ASSERT(i != val.m_value.object->cend());
+ JSON_ASSERT(std::next(i) == val.m_value.object->cend());
o->write_characters(indent_string.c_str(), new_indent);
o->write_character('\"');
dump_escaped(i->first, ensure_ascii);
@@ -11038,8 +15598,8 @@ class serializer
}
// last element
- assert(i != val.m_value.object->cend());
- assert(std::next(i) == val.m_value.object->cend());
+ JSON_ASSERT(i != val.m_value.object->cend());
+ JSON_ASSERT(std::next(i) == val.m_value.object->cend());
o->write_character('\"');
dump_escaped(i->first, ensure_ascii);
o->write_characters("\":", 2);
@@ -11065,7 +15625,7 @@ class serializer
// variable to hold indentation for recursive calls
const auto new_indent = current_indent + indent_step;
- if (JSON_UNLIKELY(indent_string.size() < new_indent))
+ if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent))
{
indent_string.resize(indent_string.size() * 2, ' ');
}
@@ -11080,7 +15640,7 @@ class serializer
}
// last element
- assert(not val.m_value.array->empty());
+ JSON_ASSERT(!val.m_value.array->empty());
o->write_characters(indent_string.c_str(), new_indent);
dump(val.m_value.array->back(), true, ensure_ascii, indent_step, new_indent);
@@ -11101,7 +15661,7 @@ class serializer
}
// last element
- assert(not val.m_value.array->empty());
+ JSON_ASSERT(!val.m_value.array->empty());
dump(val.m_value.array->back(), false, ensure_ascii, indent_step, current_indent);
o->write_character(']');
@@ -11118,6 +15678,79 @@ class serializer
return;
}
+ case value_t::binary:
+ {
+ if (pretty_print)
+ {
+ o->write_characters("{\n", 2);
+
+ // variable to hold indentation for recursive calls
+ const auto new_indent = current_indent + indent_step;
+ if (JSON_HEDLEY_UNLIKELY(indent_string.size() < new_indent))
+ {
+ indent_string.resize(indent_string.size() * 2, ' ');
+ }
+
+ o->write_characters(indent_string.c_str(), new_indent);
+
+ o->write_characters("\"bytes\": [", 10);
+
+ if (!val.m_value.binary->empty())
+ {
+ for (auto i = val.m_value.binary->cbegin();
+ i != val.m_value.binary->cend() - 1; ++i)
+ {
+ dump_integer(*i);
+ o->write_characters(", ", 2);
+ }
+ dump_integer(val.m_value.binary->back());
+ }
+
+ o->write_characters("],\n", 3);
+ o->write_characters(indent_string.c_str(), new_indent);
+
+ o->write_characters("\"subtype\": ", 11);
+ if (val.m_value.binary->has_subtype())
+ {
+ dump_integer(val.m_value.binary->subtype());
+ }
+ else
+ {
+ o->write_characters("null", 4);
+ }
+ o->write_character('\n');
+ o->write_characters(indent_string.c_str(), current_indent);
+ o->write_character('}');
+ }
+ else
+ {
+ o->write_characters("{\"bytes\":[", 10);
+
+ if (!val.m_value.binary->empty())
+ {
+ for (auto i = val.m_value.binary->cbegin();
+ i != val.m_value.binary->cend() - 1; ++i)
+ {
+ dump_integer(*i);
+ o->write_character(',');
+ }
+ dump_integer(val.m_value.binary->back());
+ }
+
+ o->write_characters("],\"subtype\":", 12);
+ if (val.m_value.binary->has_subtype())
+ {
+ dump_integer(val.m_value.binary->subtype());
+ o->write_character('}');
+ }
+ else
+ {
+ o->write_characters("null}", 5);
+ }
+ }
+ return;
+ }
+
case value_t::boolean:
{
if (val.m_value.boolean)
@@ -11160,6 +15793,9 @@ class serializer
o->write_characters("null", 4);
return;
}
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
}
}
@@ -11180,8 +15816,8 @@ class serializer
*/
void dump_escaped(const string_t& s, const bool ensure_ascii)
{
- uint32_t codepoint;
- uint8_t state = UTF8_ACCEPT;
+ std::uint32_t codepoint;
+ std::uint8_t state = UTF8_ACCEPT;
std::size_t bytes = 0; // number of bytes written to string_buffer
// number of bytes written at the point of the last valid byte
@@ -11251,19 +15887,19 @@ class serializer
{
// escape control characters (0x00..0x1F) or, if
// ensure_ascii parameter is used, non-ASCII characters
- if ((codepoint <= 0x1F) or (ensure_ascii and (codepoint >= 0x7F)))
+ if ((codepoint <= 0x1F) || (ensure_ascii && (codepoint >= 0x7F)))
{
if (codepoint <= 0xFFFF)
{
(std::snprintf)(string_buffer.data() + bytes, 7, "\\u%04x",
- static_cast<uint16_t>(codepoint));
+ static_cast<std::uint16_t>(codepoint));
bytes += 6;
}
else
{
(std::snprintf)(string_buffer.data() + bytes, 13, "\\u%04x\\u%04x",
- static_cast<uint16_t>(0xD7C0 + (codepoint >> 10)),
- static_cast<uint16_t>(0xDC00 + (codepoint & 0x3FF)));
+ static_cast<std::uint16_t>(0xD7C0u + (codepoint >> 10u)),
+ static_cast<std::uint16_t>(0xDC00u + (codepoint & 0x3FFu)));
bytes += 12;
}
}
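/*!
Editor's note, not part of the diff: 0xD7C0 is 0xD800 - (0x10000 >> 10), so
the offset subtraction of the usual surrogate-pair formula is folded into the
high-surrogate base. A worked example for U+1F600:

@code{.cpp}
// high = 0xD7C0 + (0x1F600 >> 10u)  = 0xD7C0 + 0x7D  = 0xD83D
// low  = 0xDC00 + (0x1F600 & 0x3FFu) = 0xDC00 + 0x200 = 0xDE00
// dump() with ensure_ascii therefore emits "\ud83d\ude00"
@endcode
*/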
@@ -11337,6 +15973,16 @@ class serializer
string_buffer[bytes++] = detail::binary_writer<BasicJsonType, char>::to_char_type('\xBF');
string_buffer[bytes++] = detail::binary_writer<BasicJsonType, char>::to_char_type('\xBD');
}
+
+ // write buffer and reset index; there must be 13 bytes
+ // left, as this is the maximal number of bytes to be
+ // written ("\uxxxx\uxxxx\0") for one code point
+ if (string_buffer.size() - bytes < 13)
+ {
+ o->write_characters(string_buffer.data(), bytes);
+ bytes = 0;
+ }
+
bytes_after_last_accept = bytes;
}
@@ -11346,13 +15992,16 @@ class serializer
state = UTF8_ACCEPT;
break;
}
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
}
break;
}
default: // decoder found a not-yet-complete multi-byte code point
{
- if (not ensure_ascii)
+ if (!ensure_ascii)
{
// code point will not be escaped - copy byte to buffer
string_buffer[bytes++] = s[i];
@@ -11364,7 +16013,7 @@ class serializer
}
// we finished processing the string
- if (JSON_LIKELY(state == UTF8_ACCEPT))
+ if (JSON_HEDLEY_LIKELY(state == UTF8_ACCEPT))
{
// write buffer
if (bytes > 0)
@@ -11380,7 +16029,7 @@ class serializer
case error_handler_t::strict:
{
std::string sn(3, '\0');
- (std::snprintf)(&sn[0], sn.size(), "%.2X", static_cast<uint8_t>(s.back()));
+ (std::snprintf)(&sn[0], sn.size(), "%.2X", static_cast<std::uint8_t>(s.back()));
JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + sn));
}
@@ -11406,7 +16055,44 @@ class serializer
}
break;
}
+
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ }
+ }
+ }
+
+ /*!
+ @brief count digits
+
+ Count the number of decimal (base 10) digits for an input unsigned integer.
+
+ @param[in] x unsigned integer whose decimal digits are counted
+ @return number of decimal digits
+ */
+ inline unsigned int count_digits(number_unsigned_t x) noexcept
+ {
+ unsigned int n_digits = 1;
+ for (;;)
+ {
+ if (x < 10)
+ {
+ return n_digits;
+ }
+ if (x < 100)
+ {
+ return n_digits + 1;
}
+ if (x < 1000)
+ {
+ return n_digits + 2;
+ }
+ if (x < 10000)
+ {
+ return n_digits + 3;
+ }
+ x = x / 10000u;
+ n_digits += 4;
}
}
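/*!
Editor's note, not part of the diff: the unrolled loop above strips four
decimal digits per pass, so a 64-bit value (at most 20 digits) needs at most
four divisions. A few spot checks:

@code{.cpp}
// count_digits(7)      == 1  (first branch)
// count_digits(4096)   == 4  (fourth branch)
// count_digits(123456) == 6  (one pass: x -> 12, n_digits -> 5, then + 1)
@endcode
*/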
@@ -11419,12 +16105,29 @@ class serializer
@param[in] x integer number (signed or unsigned) to dump
@tparam NumberType either @a number_integer_t or @a number_unsigned_t
*/
- template<typename NumberType, detail::enable_if_t<
- std::is_same<NumberType, number_unsigned_t>::value or
- std::is_same<NumberType, number_integer_t>::value,
- int> = 0>
+ template < typename NumberType, detail::enable_if_t <
+ std::is_same<NumberType, number_unsigned_t>::value ||
+ std::is_same<NumberType, number_integer_t>::value ||
+ std::is_same<NumberType, binary_char_t>::value,
+ int > = 0 >
void dump_integer(NumberType x)
{
+ static constexpr std::array<std::array<char, 2>, 100> digits_to_99
+ {
+ {
+ {{'0', '0'}}, {{'0', '1'}}, {{'0', '2'}}, {{'0', '3'}}, {{'0', '4'}}, {{'0', '5'}}, {{'0', '6'}}, {{'0', '7'}}, {{'0', '8'}}, {{'0', '9'}},
+ {{'1', '0'}}, {{'1', '1'}}, {{'1', '2'}}, {{'1', '3'}}, {{'1', '4'}}, {{'1', '5'}}, {{'1', '6'}}, {{'1', '7'}}, {{'1', '8'}}, {{'1', '9'}},
+ {{'2', '0'}}, {{'2', '1'}}, {{'2', '2'}}, {{'2', '3'}}, {{'2', '4'}}, {{'2', '5'}}, {{'2', '6'}}, {{'2', '7'}}, {{'2', '8'}}, {{'2', '9'}},
+ {{'3', '0'}}, {{'3', '1'}}, {{'3', '2'}}, {{'3', '3'}}, {{'3', '4'}}, {{'3', '5'}}, {{'3', '6'}}, {{'3', '7'}}, {{'3', '8'}}, {{'3', '9'}},
+ {{'4', '0'}}, {{'4', '1'}}, {{'4', '2'}}, {{'4', '3'}}, {{'4', '4'}}, {{'4', '5'}}, {{'4', '6'}}, {{'4', '7'}}, {{'4', '8'}}, {{'4', '9'}},
+ {{'5', '0'}}, {{'5', '1'}}, {{'5', '2'}}, {{'5', '3'}}, {{'5', '4'}}, {{'5', '5'}}, {{'5', '6'}}, {{'5', '7'}}, {{'5', '8'}}, {{'5', '9'}},
+ {{'6', '0'}}, {{'6', '1'}}, {{'6', '2'}}, {{'6', '3'}}, {{'6', '4'}}, {{'6', '5'}}, {{'6', '6'}}, {{'6', '7'}}, {{'6', '8'}}, {{'6', '9'}},
+ {{'7', '0'}}, {{'7', '1'}}, {{'7', '2'}}, {{'7', '3'}}, {{'7', '4'}}, {{'7', '5'}}, {{'7', '6'}}, {{'7', '7'}}, {{'7', '8'}}, {{'7', '9'}},
+ {{'8', '0'}}, {{'8', '1'}}, {{'8', '2'}}, {{'8', '3'}}, {{'8', '4'}}, {{'8', '5'}}, {{'8', '6'}}, {{'8', '7'}}, {{'8', '8'}}, {{'8', '9'}},
+ {{'9', '0'}}, {{'9', '1'}}, {{'9', '2'}}, {{'9', '3'}}, {{'9', '4'}}, {{'9', '5'}}, {{'9', '6'}}, {{'9', '7'}}, {{'9', '8'}}, {{'9', '9'}},
+ }
+ };
+
// special case for "0"
if (x == 0)
{
@@ -11432,28 +16135,57 @@ class serializer
return;
}
- const bool is_negative = std::is_same<NumberType, number_integer_t>::value and not (x >= 0); // see issue #755
- std::size_t i = 0;
+ // use a pointer to fill the buffer
+ auto buffer_ptr = number_buffer.begin();
+
+ const bool is_negative = std::is_same<NumberType, number_integer_t>::value && !(x >= 0); // see issue #755
+ number_unsigned_t abs_value;
- while (x != 0)
+ unsigned int n_chars;
+
+ if (is_negative)
{
- // spare 1 byte for '\0'
- assert(i < number_buffer.size() - 1);
+ *buffer_ptr = '-';
+ abs_value = remove_sign(static_cast<number_integer_t>(x));
- const auto digit = std::labs(static_cast<long>(x % 10));
- number_buffer[i++] = static_cast<char>('0' + digit);
- x /= 10;
+ // account one more byte for the minus sign
+ n_chars = 1 + count_digits(abs_value);
+ }
+ else
+ {
+ abs_value = static_cast<number_unsigned_t>(x);
+ n_chars = count_digits(abs_value);
}
- if (is_negative)
+ // spare 1 byte for '\0'
+ JSON_ASSERT(n_chars < number_buffer.size() - 1);
+
+ // jump to the end to generate the string from backward
+ // so we later avoid reversing the result
+ buffer_ptr += n_chars;
+
+ // Fast int2ascii implementation inspired by "Fastware" talk by Andrei Alexandrescu
+ // See: https://www.youtube.com/watch?v=o4-CwDo2zpg
+ while (abs_value >= 100)
+ {
+ const auto digits_index = static_cast<unsigned>((abs_value % 100));
+ abs_value /= 100;
+ *(--buffer_ptr) = digits_to_99[digits_index][1];
+ *(--buffer_ptr) = digits_to_99[digits_index][0];
+ }
+
+ if (abs_value >= 10)
+ {
+ const auto digits_index = static_cast<unsigned>(abs_value);
+ *(--buffer_ptr) = digits_to_99[digits_index][1];
+ *(--buffer_ptr) = digits_to_99[digits_index][0];
+ }
+ else
{
- // make sure there is capacity for the '-'
- assert(i < number_buffer.size() - 2);
- number_buffer[i++] = '-';
+ *(--buffer_ptr) = static_cast<char>('0' + abs_value);
}
- std::reverse(number_buffer.begin(), number_buffer.begin() + i);
- o->write_characters(number_buffer.data(), i);
+ o->write_characters(number_buffer.data(), n_chars);
}
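The digits_to_99 table lets the loop above emit two digits per division while filling number_buffer from the back, so no reversal is needed. A self-contained sketch of the same backward-fill technique; the flat 200-character table and the caller-supplied buffer end are assumptions of this sketch, not the member layout:

// Convert an unsigned value to decimal text, two digits per division,
// filling the buffer from the back.
inline char* u32_to_chars_sketch(char* end, unsigned int v)
{
    static const char digits[] =
        "0001020304050607080910111213141516171819"
        "2021222324252627282930313233343536373839"
        "4041424344454647484950515253545556575859"
        "6061626364656667686970717273747576777879"
        "8081828384858687888990919293949596979899";
    char* p = end;
    while (v >= 100)
    {
        const unsigned int idx = (v % 100) * 2;
        v /= 100;
        *--p = digits[idx + 1];
        *--p = digits[idx];
    }
    if (v >= 10)
    {
        const unsigned int idx = v * 2;
        *--p = digits[idx + 1];
        *--p = digits[idx];
    }
    else
    {
        *--p = static_cast<char>('0' + v);
    }
    return p; // first character of the result
}

// usage: char buf[16]; char* p = u32_to_chars_sketch(buf + 16, 12345);
// the five characters "12345" now start at p (no NUL is written)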
/*!
@@ -11467,7 +16199,7 @@ class serializer
void dump_float(number_float_t x)
{
// NaN / inf
- if (not std::isfinite(x))
+ if (!std::isfinite(x))
{
o->write_characters("null", 4);
return;
@@ -11479,8 +16211,8 @@ class serializer
//
// NB: The test below works if <long double> == <double>.
static constexpr bool is_ieee_single_or_double
- = (std::numeric_limits<number_float_t>::is_iec559 and std::numeric_limits<number_float_t>::digits == 24 and std::numeric_limits<number_float_t>::max_exponent == 128) or
- (std::numeric_limits<number_float_t>::is_iec559 and std::numeric_limits<number_float_t>::digits == 53 and std::numeric_limits<number_float_t>::max_exponent == 1024);
+ = (std::numeric_limits<number_float_t>::is_iec559 && std::numeric_limits<number_float_t>::digits == 24 && std::numeric_limits<number_float_t>::max_exponent == 128) ||
+ (std::numeric_limits<number_float_t>::is_iec559 && std::numeric_limits<number_float_t>::digits == 53 && std::numeric_limits<number_float_t>::max_exponent == 1024);
dump_float(x, std::integral_constant<bool, is_ieee_single_or_double>());
}
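The is_ieee_single_or_double constant above selects the specialized float-to-text path only for IEEE 754 binary32/binary64 layouts. A compile-time probe with the same conditions, written as a C++14 variable template for brevity (an assumption of this sketch; the library itself stays C++11):

#include <limits>

// True only for IEC 559 (IEEE 754) binary32 / binary64 layouts.
template <typename F>
constexpr bool is_ieee_single_or_double_sketch =
    (std::numeric_limits<F>::is_iec559 &&
     std::numeric_limits<F>::digits == 24 &&
     std::numeric_limits<F>::max_exponent == 128) ||
    (std::numeric_limits<F>::is_iec559 &&
     std::numeric_limits<F>::digits == 53 &&
     std::numeric_limits<F>::max_exponent == 1024);

static_assert(is_ieee_single_or_double_sketch<double>,
              "double is IEEE 754 binary64 on mainstream targets");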
@@ -11502,9 +16234,9 @@ class serializer
std::ptrdiff_t len = (std::snprintf)(number_buffer.data(), number_buffer.size(), "%.*g", d, x);
// negative value indicates an error
- assert(len > 0);
+ JSON_ASSERT(len > 0);
// check if buffer was large enough
- assert(static_cast<std::size_t>(len) < number_buffer.size());
+ JSON_ASSERT(static_cast<std::size_t>(len) < number_buffer.size());
// erase thousands separator
if (thousands_sep != '\0')
@@ -11512,12 +16244,12 @@ class serializer
const auto end = std::remove(number_buffer.begin(),
number_buffer.begin() + len, thousands_sep);
std::fill(end, number_buffer.end(), '\0');
- assert((end - number_buffer.begin()) <= len);
+ JSON_ASSERT((end - number_buffer.begin()) <= len);
len = (end - number_buffer.begin());
}
// convert decimal point to '.'
- if (decimal_point != '\0' and decimal_point != '.')
+ if (decimal_point != '\0' && decimal_point != '.')
{
const auto dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point);
if (dec_pos != number_buffer.end())
@@ -11533,7 +16265,7 @@ class serializer
std::none_of(number_buffer.begin(), number_buffer.begin() + len + 1,
[](char c)
{
- return (c == '.' or c == 'e');
+ return c == '.' || c == 'e';
});
if (value_is_int_like)
@@ -11563,9 +16295,9 @@ class serializer
@copyright Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de>
@sa http://bjoern.hoehrmann.de/utf-8/decoder/dfa/
*/
- static uint8_t decode(uint8_t& state, uint32_t& codep, const uint8_t byte) noexcept
+ static std::uint8_t decode(std::uint8_t& state, std::uint32_t& codep, const std::uint8_t byte) noexcept
{
- static const std::array<uint8_t, 400> utf8d =
+ static const std::array<std::uint8_t, 400> utf8d =
{
{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1F
@@ -11585,16 +16317,44 @@ class serializer
}
};
- const uint8_t type = utf8d[byte];
+ const std::uint8_t type = utf8d[byte];
codep = (state != UTF8_ACCEPT)
- ? (byte & 0x3fu) | (codep << 6)
- : static_cast<uint32_t>(0xff >> type) & (byte);
+ ? (byte & 0x3fu) | (codep << 6u)
+ : (0xFFu >> type) & (byte);
- state = utf8d[256u + state * 16u + type];
+ std::size_t index = 256u + static_cast<size_t>(state) * 16u + static_cast<size_t>(type);
+ JSON_ASSERT(index < 400);
+ state = utf8d[index];
return state;
}
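decode() implements Björn Höhrmann's DFA: the first table lookup classifies the byte, the second advances the state. A minimal validity check built on the same state machine; it assumes a free-standing decode with the signature above and the usual UTF8_ACCEPT == 0 / UTF8_REJECT == 1 constants:

#include <cstdint>
#include <string>

// Validate UTF-8 by feeding each byte through the DFA and requiring
// that the walk ends in the accept state (no truncated sequence).
inline bool is_valid_utf8_sketch(const std::string& s)
{
    std::uint8_t state = 0;    // UTF8_ACCEPT
    std::uint32_t codepoint = 0;
    for (const char c : s)
    {
        decode(state, codepoint, static_cast<std::uint8_t>(c));
        if (state == 1)        // UTF8_REJECT: invalid byte
        {
            return false;
        }
    }
    return state == 0;         // reject truncated multi-byte sequences
}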
+ /*
+ * Overload to make the compiler happy while it is instantiating
+ * dump_integer for number_unsigned_t.
+ * Must never be called.
+ */
+ number_unsigned_t remove_sign(number_unsigned_t x)
+ {
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
+ return x; // LCOV_EXCL_LINE
+ }
+
+ /*
+ * Helper function for dump_integer
+ *
+ * This function takes a negative signed integer and returns its absolute
+ * value as an unsigned integer. The plus/minus shuffling is necessary as we
+ * cannot directly remove the sign of an arbitrary signed integer, because
+ * the absolute values of INT_MIN and INT_MAX are usually not the same. See
+ * #1708 for details.
+ */
+ inline number_unsigned_t remove_sign(number_integer_t x) noexcept
+ {
+ JSON_ASSERT(x < 0 && x < (std::numeric_limits<number_integer_t>::max)());
+ return static_cast<number_unsigned_t>(-(x + 1)) + 1;
+ }
+
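The `-(x + 1) + 1` expression above avoids signed overflow: for any negative x, x + 1 is representable, its negation fits the signed type, and the trailing + 1 happens after the cast, in unsigned arithmetic. Checked for the worst case with explicit 64-bit types (an assumption of this sketch; number_integer_t is int64_t by default):

#include <cstdint>
#include <limits>

// |INT64_MIN| computed the remove_sign way, with no signed overflow:
//   x + 1           -> -9223372036854775807 (representable)
//   -(x + 1)        ->  9223372036854775807 (== INT64_MAX)
//   cast, then + 1  ->  9223372036854775808 (== |INT64_MIN|)
static_assert(
    static_cast<std::uint64_t>(
        -(std::numeric_limits<std::int64_t>::min() + 1)) + 1
        == 9223372036854775808ULL,
    "absolute value of INT64_MIN without overflow");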
private:
/// the output of the serializer
output_adapter_t<char> o = nullptr;
@@ -11623,827 +16383,178 @@ class serializer
} // namespace detail
} // namespace nlohmann
-// #include <nlohmann/detail/json_ref.hpp>
-
-
-#include <initializer_list>
-#include <utility>
-
-// #include <nlohmann/detail/meta/type_traits.hpp>
-
-
-namespace nlohmann
-{
-namespace detail
-{
-template<typename BasicJsonType>
-class json_ref
-{
- public:
- using value_type = BasicJsonType;
-
- json_ref(value_type&& value)
- : owned_value(std::move(value)), value_ref(&owned_value), is_rvalue(true)
- {}
-
- json_ref(const value_type& value)
- : value_ref(const_cast<value_type*>(&value)), is_rvalue(false)
- {}
-
- json_ref(std::initializer_list<json_ref> init)
- : owned_value(init), value_ref(&owned_value), is_rvalue(true)
- {}
-
- template <
- class... Args,
- enable_if_t<std::is_constructible<value_type, Args...>::value, int> = 0 >
- json_ref(Args && ... args)
- : owned_value(std::forward<Args>(args)...), value_ref(&owned_value),
- is_rvalue(true) {}
-
- // class should be movable only
- json_ref(json_ref&&) = default;
- json_ref(const json_ref&) = delete;
- json_ref& operator=(const json_ref&) = delete;
- json_ref& operator=(json_ref&&) = delete;
- ~json_ref() = default;
-
- value_type moved_or_copied() const
- {
- if (is_rvalue)
- {
- return std::move(*value_ref);
- }
- return *value_ref;
- }
-
- value_type const& operator*() const
- {
- return *static_cast<value_type const*>(value_ref);
- }
-
- value_type const* operator->() const
- {
- return static_cast<value_type const*>(value_ref);
- }
+// #include <nlohmann/detail/value_t.hpp>
- private:
- mutable value_type owned_value = nullptr;
- value_type* value_ref = nullptr;
- const bool is_rvalue;
-};
-} // namespace detail
-} // namespace nlohmann
+// #include <nlohmann/json_fwd.hpp>
-// #include <nlohmann/detail/json_pointer.hpp>
+// #include <nlohmann/ordered_map.hpp>
-#include <cassert> // assert
-#include <numeric> // accumulate
-#include <string> // string
+#include <functional> // less
+#include <memory> // allocator
+#include <utility> // pair
#include <vector> // vector
-// #include <nlohmann/detail/macro_scope.hpp>
-
-// #include <nlohmann/detail/exceptions.hpp>
-
-// #include <nlohmann/detail/value_t.hpp>
-
-
namespace nlohmann
{
-template<typename BasicJsonType>
-class json_pointer
-{
- // allow basic_json to access private members
- NLOHMANN_BASIC_JSON_TPL_DECLARATION
- friend class basic_json;
- public:
- /*!
- @brief create JSON pointer
-
- Create a JSON pointer according to the syntax described in
- [Section 3 of RFC6901](https://tools.ietf.org/html/rfc6901#section-3).
-
- @param[in] s string representing the JSON pointer; if omitted, the empty
- string is assumed which references the whole JSON value
-
- @throw parse_error.107 if the given JSON pointer @a s is nonempty and does
- not begin with a slash (`/`); see example below
-
- @throw parse_error.108 if a tilde (`~`) in the given JSON pointer @a s is
- not followed by `0` (representing `~`) or `1` (representing `/`); see
- example below
-
- @liveexample{The example shows the construction several valid JSON pointers
- as well as the exceptional behavior.,json_pointer}
-
- @since version 2.0.0
- */
- explicit json_pointer(const std::string& s = "")
- : reference_tokens(split(s))
- {}
-
- /*!
- @brief return a string representation of the JSON pointer
-
- @invariant For each JSON pointer `ptr`, it holds:
- @code {.cpp}
- ptr == json_pointer(ptr.to_string());
- @endcode
-
- @return a string representation of the JSON pointer
-
- @liveexample{The example shows the result of `to_string`.,
- json_pointer__to_string}
-
- @since version 2.0.0
- */
- std::string to_string() const
- {
- return std::accumulate(reference_tokens.begin(), reference_tokens.end(),
- std::string{},
- [](const std::string & a, const std::string & b)
- {
- return a + "/" + escape(b);
- });
- }
-
- /// @copydoc to_string()
- operator std::string() const
- {
- return to_string();
- }
-
- /*!
- @param[in] s reference token to be converted into an array index
-
- @return integer representation of @a s
-
- @throw out_of_range.404 if string @a s could not be converted to an integer
- */
- static int array_index(const std::string& s)
- {
- std::size_t processed_chars = 0;
- const int res = std::stoi(s, &processed_chars);
-
- // check if the string was completely read
- if (JSON_UNLIKELY(processed_chars != s.size()))
- {
- JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'"));
- }
+/// ordered_map: a minimal map-like container that preserves insertion order
+/// for use within nlohmann::basic_json<ordered_map>
+template <class Key, class T, class IgnoredLess = std::less<Key>,
+ class Allocator = std::allocator<std::pair<const Key, T>>>
+ struct ordered_map : std::vector<std::pair<const Key, T>, Allocator>
+{
+ using key_type = Key;
+ using mapped_type = T;
+ using Container = std::vector<std::pair<const Key, T>, Allocator>;
+ using typename Container::iterator;
+ using typename Container::const_iterator;
+ using typename Container::size_type;
+ using typename Container::value_type;
- return res;
- }
+ // Explicit constructors instead of `using Container::Container`;
+ // otherwise older compilers choke on it (GCC <= 5.5, Xcode <= 9.4)
+ ordered_map(const Allocator& alloc = Allocator()) : Container{alloc} {}
+ template <class It>
+ ordered_map(It first, It last, const Allocator& alloc = Allocator())
+ : Container{first, last, alloc} {}
+ ordered_map(std::initializer_list<T> init, const Allocator& alloc = Allocator() )
+ : Container{init, alloc} {}
- private:
- /*!
- @brief remove and return last reference pointer
- @throw out_of_range.405 if JSON pointer has no parent
- */
- std::string pop_back()
+ std::pair<iterator, bool> emplace(const key_type& key, T&& t)
{
- if (JSON_UNLIKELY(is_root()))
+ for (auto it = this->begin(); it != this->end(); ++it)
{
- JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent"));
+ if (it->first == key)
+ {
+ return {it, false};
+ }
}
-
- auto last = reference_tokens.back();
- reference_tokens.pop_back();
- return last;
+ Container::emplace_back(key, t);
+ return {--this->end(), true};
}
- /// return whether pointer points to the root document
- bool is_root() const noexcept
+ T& operator[](const Key& key)
{
- return reference_tokens.empty();
+ return emplace(key, T{}).first->second;
}
- json_pointer top() const
+ const T& operator[](const Key& key) const
{
- if (JSON_UNLIKELY(is_root()))
- {
- JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent"));
- }
-
- json_pointer result = *this;
- result.reference_tokens = {reference_tokens[0]};
- return result;
+ return at(key);
}
- /*!
- @brief create and return a reference to the pointed to value
-
- @complexity Linear in the number of reference tokens.
-
- @throw parse_error.109 if array index is not a number
- @throw type_error.313 if value cannot be unflattened
- */
- BasicJsonType& get_and_create(BasicJsonType& j) const
+ T& at(const Key& key)
{
- using size_type = typename BasicJsonType::size_type;
- auto result = &j;
-
- // in case no reference tokens exist, return a reference to the JSON value
- // j which will be overwritten by a primitive value
- for (const auto& reference_token : reference_tokens)
+ for (auto it = this->begin(); it != this->end(); ++it)
{
- switch (result->m_type)
+ if (it->first == key)
{
- case detail::value_t::null:
- {
- if (reference_token == "0")
- {
- // start a new array if reference token is 0
- result = &result->operator[](0);
- }
- else
- {
- // start a new object otherwise
- result = &result->operator[](reference_token);
- }
- break;
- }
-
- case detail::value_t::object:
- {
- // create an entry in the object
- result = &result->operator[](reference_token);
- break;
- }
-
- case detail::value_t::array:
- {
- // create an entry in the array
- JSON_TRY
- {
- result = &result->operator[](static_cast<size_type>(array_index(reference_token)));
- }
- JSON_CATCH(std::invalid_argument&)
- {
- JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number"));
- }
- break;
- }
-
- /*
- The following code is only reached if there exists a reference
- token _and_ the current value is primitive. In this case, we have
- an error situation, because primitive values may only occur as
- single value; that is, with an empty list of reference tokens.
- */
- default:
- JSON_THROW(detail::type_error::create(313, "invalid value to unflatten"));
+ return it->second;
}
}
- return *result;
+ throw std::out_of_range("key not found");
}
- /*!
- @brief return a reference to the pointed to value
-
- @note This version does not throw if a value is not present, but tries to
- create nested values instead. For instance, calling this function
- with pointer `"/this/that"` on a null value is equivalent to calling
- `operator[]("this").operator[]("that")` on that value, effectively
- changing the null value to an object.
-
- @param[in] ptr a JSON value
-
- @return reference to the JSON value pointed to by the JSON pointer
-
- @complexity Linear in the length of the JSON pointer.
-
- @throw parse_error.106 if an array index begins with '0'
- @throw parse_error.109 if an array index was not a number
- @throw out_of_range.404 if the JSON pointer can not be resolved
- */
- BasicJsonType& get_unchecked(BasicJsonType* ptr) const
+ const T& at(const Key& key) const
{
- using size_type = typename BasicJsonType::size_type;
- for (const auto& reference_token : reference_tokens)
+ for (auto it = this->begin(); it != this->end(); ++it)
{
- // convert null values to arrays or objects before continuing
- if (ptr->m_type == detail::value_t::null)
- {
- // check if reference token is a number
- const bool nums =
- std::all_of(reference_token.begin(), reference_token.end(),
- [](const char x)
- {
- return (x >= '0' and x <= '9');
- });
-
- // change value to array for numbers or "-" or to object otherwise
- *ptr = (nums or reference_token == "-")
- ? detail::value_t::array
- : detail::value_t::object;
- }
-
- switch (ptr->m_type)
+ if (it->first == key)
{
- case detail::value_t::object:
- {
- // use unchecked object access
- ptr = &ptr->operator[](reference_token);
- break;
- }
-
- case detail::value_t::array:
- {
- // error condition (cf. RFC 6901, Sect. 4)
- if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0'))
- {
- JSON_THROW(detail::parse_error::create(106, 0,
- "array index '" + reference_token +
- "' must not begin with '0'"));
- }
-
- if (reference_token == "-")
- {
- // explicitly treat "-" as index beyond the end
- ptr = &ptr->operator[](ptr->m_value.array->size());
- }
- else
- {
- // convert array index to number; unchecked access
- JSON_TRY
- {
- ptr = &ptr->operator[](
- static_cast<size_type>(array_index(reference_token)));
- }
- JSON_CATCH(std::invalid_argument&)
- {
- JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number"));
- }
- }
- break;
- }
-
- default:
- JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+ return it->second;
}
}
- return *ptr;
+ throw std::out_of_range("key not found");
}
- /*!
- @throw parse_error.106 if an array index begins with '0'
- @throw parse_error.109 if an array index was not a number
- @throw out_of_range.402 if the array index '-' is used
- @throw out_of_range.404 if the JSON pointer can not be resolved
- */
- BasicJsonType& get_checked(BasicJsonType* ptr) const
+ size_type erase(const Key& key)
{
- using size_type = typename BasicJsonType::size_type;
- for (const auto& reference_token : reference_tokens)
+ for (auto it = this->begin(); it != this->end(); ++it)
{
- switch (ptr->m_type)
+ if (it->first == key)
{
- case detail::value_t::object:
+ // Since we cannot move const Keys, re-construct them in place
+ for (auto next = it; ++next != this->end(); ++it)
{
- // note: at performs range check
- ptr = &ptr->at(reference_token);
- break;
- }
-
- case detail::value_t::array:
- {
- if (JSON_UNLIKELY(reference_token == "-"))
- {
- // "-" always fails the range check
- JSON_THROW(detail::out_of_range::create(402,
- "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
- ") is out of range"));
- }
-
- // error condition (cf. RFC 6901, Sect. 4)
- if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0'))
- {
- JSON_THROW(detail::parse_error::create(106, 0,
- "array index '" + reference_token +
- "' must not begin with '0'"));
- }
-
- // note: at performs range check
- JSON_TRY
- {
- ptr = &ptr->at(static_cast<size_type>(array_index(reference_token)));
- }
- JSON_CATCH(std::invalid_argument&)
- {
- JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number"));
- }
- break;
+ it->~value_type(); // Destroy but keep allocation
+ new (&*it) value_type{std::move(*next)};
}
-
- default:
- JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+ Container::pop_back();
+ return 1;
}
}
-
- return *ptr;
+ return 0;
}
- /*!
- @brief return a const reference to the pointed to value
-
- @param[in] ptr a JSON value
-
- @return const reference to the JSON value pointed to by the JSON
- pointer
-
- @throw parse_error.106 if an array index begins with '0'
- @throw parse_error.109 if an array index was not a number
- @throw out_of_range.402 if the array index '-' is used
- @throw out_of_range.404 if the JSON pointer can not be resolved
- */
- const BasicJsonType& get_unchecked(const BasicJsonType* ptr) const
+ iterator erase(iterator pos)
{
- using size_type = typename BasicJsonType::size_type;
- for (const auto& reference_token : reference_tokens)
- {
- switch (ptr->m_type)
- {
- case detail::value_t::object:
- {
- // use unchecked object access
- ptr = &ptr->operator[](reference_token);
- break;
- }
+ auto it = pos;
- case detail::value_t::array:
- {
- if (JSON_UNLIKELY(reference_token == "-"))
- {
- // "-" cannot be used for const access
- JSON_THROW(detail::out_of_range::create(402,
- "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
- ") is out of range"));
- }
-
- // error condition (cf. RFC 6901, Sect. 4)
- if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0'))
- {
- JSON_THROW(detail::parse_error::create(106, 0,
- "array index '" + reference_token +
- "' must not begin with '0'"));
- }
-
- // use unchecked array access
- JSON_TRY
- {
- ptr = &ptr->operator[](
- static_cast<size_type>(array_index(reference_token)));
- }
- JSON_CATCH(std::invalid_argument&)
- {
- JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number"));
- }
- break;
- }
-
- default:
- JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
- }
+ // Since we cannot move const Keys, re-construct them in place
+ for (auto next = it; ++next != this->end(); ++it)
+ {
+ it->~value_type(); // Destroy but keep allocation
+ new (&*it) value_type{std::move(*next)};
}
-
- return *ptr;
+ Container::pop_back();
+ return pos;
}
- /*!
- @throw parse_error.106 if an array index begins with '0'
- @throw parse_error.109 if an array index was not a number
- @throw out_of_range.402 if the array index '-' is used
- @throw out_of_range.404 if the JSON pointer can not be resolved
- */
- const BasicJsonType& get_checked(const BasicJsonType* ptr) const
+ size_type count(const Key& key) const
{
- using size_type = typename BasicJsonType::size_type;
- for (const auto& reference_token : reference_tokens)
+ for (auto it = this->begin(); it != this->end(); ++it)
{
- switch (ptr->m_type)
+ if (it->first == key)
{
- case detail::value_t::object:
- {
- // note: at performs range check
- ptr = &ptr->at(reference_token);
- break;
- }
-
- case detail::value_t::array:
- {
- if (JSON_UNLIKELY(reference_token == "-"))
- {
- // "-" always fails the range check
- JSON_THROW(detail::out_of_range::create(402,
- "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
- ") is out of range"));
- }
-
- // error condition (cf. RFC 6901, Sect. 4)
- if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0'))
- {
- JSON_THROW(detail::parse_error::create(106, 0,
- "array index '" + reference_token +
- "' must not begin with '0'"));
- }
-
- // note: at performs range check
- JSON_TRY
- {
- ptr = &ptr->at(static_cast<size_type>(array_index(reference_token)));
- }
- JSON_CATCH(std::invalid_argument&)
- {
- JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number"));
- }
- break;
- }
-
- default:
- JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+ return 1;
}
}
-
- return *ptr;
+ return 0;
}
- /*!
- @brief split the string input to reference tokens
-
- @note This function is only called by the json_pointer constructor.
- All exceptions below are documented there.
-
- @throw parse_error.107 if the pointer is not empty or begins with '/'
- @throw parse_error.108 if character '~' is not followed by '0' or '1'
- */
- static std::vector<std::string> split(const std::string& reference_string)
+ iterator find(const Key& key)
{
- std::vector<std::string> result;
-
- // special case: empty reference string -> no reference tokens
- if (reference_string.empty())
- {
- return result;
- }
-
- // check if nonempty reference string begins with slash
- if (JSON_UNLIKELY(reference_string[0] != '/'))
- {
- JSON_THROW(detail::parse_error::create(107, 1,
- "JSON pointer must be empty or begin with '/' - was: '" +
- reference_string + "'"));
- }
-
- // extract the reference tokens:
- // - slash: position of the last read slash (or end of string)
- // - start: position after the previous slash
- for (
- // search for the first slash after the first character
- std::size_t slash = reference_string.find_first_of('/', 1),
- // set the beginning of the first reference token
- start = 1;
- // we can stop if start == 0 (if slash == std::string::npos)
- start != 0;
- // set the beginning of the next reference token
- // (will eventually be 0 if slash == std::string::npos)
- start = (slash == std::string::npos) ? 0 : slash + 1,
- // find next slash
- slash = reference_string.find_first_of('/', start))
+ for (auto it = this->begin(); it != this->end(); ++it)
{
- // use the text between the beginning of the reference token
- // (start) and the last slash (slash).
- auto reference_token = reference_string.substr(start, slash - start);
-
- // check reference tokens are properly escaped
- for (std::size_t pos = reference_token.find_first_of('~');
- pos != std::string::npos;
- pos = reference_token.find_first_of('~', pos + 1))
+ if (it->first == key)
{
- assert(reference_token[pos] == '~');
-
- // ~ must be followed by 0 or 1
- if (JSON_UNLIKELY(pos == reference_token.size() - 1 or
- (reference_token[pos + 1] != '0' and
- reference_token[pos + 1] != '1')))
- {
- JSON_THROW(detail::parse_error::create(108, 0, "escape character '~' must be followed with '0' or '1'"));
- }
+ return it;
}
-
- // finally, store the reference token
- unescape(reference_token);
- result.push_back(reference_token);
}
-
- return result;
- }
-
- /*!
- @brief replace all occurrences of a substring by another string
-
- @param[in,out] s the string to manipulate; changed so that all
- occurrences of @a f are replaced with @a t
- @param[in] f the substring to replace with @a t
- @param[in] t the string to replace @a f
-
- @pre The search string @a f must not be empty. **This precondition is
- enforced with an assertion.**
-
- @since version 2.0.0
- */
- static void replace_substring(std::string& s, const std::string& f,
- const std::string& t)
- {
- assert(not f.empty());
- for (auto pos = s.find(f); // find first occurrence of f
- pos != std::string::npos; // make sure f was found
- s.replace(pos, f.size(), t), // replace with t, and
- pos = s.find(f, pos + t.size())) // find next occurrence of f
- {}
- }
-
- /// escape "~" to "~0" and "/" to "~1"
- static std::string escape(std::string s)
- {
- replace_substring(s, "~", "~0");
- replace_substring(s, "/", "~1");
- return s;
- }
-
- /// unescape "~1" to tilde and "~0" to slash (order is important!)
- static void unescape(std::string& s)
- {
- replace_substring(s, "~1", "/");
- replace_substring(s, "~0", "~");
+ return Container::end();
}
- /*!
- @param[in] reference_string the reference string to the current value
- @param[in] value the value to consider
- @param[in,out] result the result object to insert values to
-
- @note Empty objects or arrays are flattened to `null`.
- */
- static void flatten(const std::string& reference_string,
- const BasicJsonType& value,
- BasicJsonType& result)
+ const_iterator find(const Key& key) const
{
- switch (value.m_type)
+ for (auto it = this->begin(); it != this->end(); ++it)
{
- case detail::value_t::array:
+ if (it->first == key)
{
- if (value.m_value.array->empty())
- {
- // flatten empty array as null
- result[reference_string] = nullptr;
- }
- else
- {
- // iterate array and use index as reference string
- for (std::size_t i = 0; i < value.m_value.array->size(); ++i)
- {
- flatten(reference_string + "/" + std::to_string(i),
- value.m_value.array->operator[](i), result);
- }
- }
- break;
- }
-
- case detail::value_t::object:
- {
- if (value.m_value.object->empty())
- {
- // flatten empty object as null
- result[reference_string] = nullptr;
- }
- else
- {
- // iterate object and use keys as reference string
- for (const auto& element : *value.m_value.object)
- {
- flatten(reference_string + "/" + escape(element.first), element.second, result);
- }
- }
- break;
- }
-
- default:
- {
- // add primitive value with its reference string
- result[reference_string] = value;
- break;
+ return it;
}
}
+ return Container::end();
}
- /*!
- @param[in] value flattened JSON
-
- @return unflattened JSON
-
- @throw parse_error.109 if array index is not a number
- @throw type_error.314 if value is not an object
- @throw type_error.315 if object values are not primitive
- @throw type_error.313 if value cannot be unflattened
- */
- static BasicJsonType
- unflatten(const BasicJsonType& value)
+ std::pair<iterator, bool> insert( value_type&& value )
{
- if (JSON_UNLIKELY(not value.is_object()))
- {
- JSON_THROW(detail::type_error::create(314, "only objects can be unflattened"));
- }
-
- BasicJsonType result;
+ return emplace(value.first, std::move(value.second));
+ }
- // iterate the JSON object values
- for (const auto& element : *value.m_value.object)
+ std::pair<iterator, bool> insert( const value_type& value )
+ {
+ for (auto it = this->begin(); it != this->end(); ++it)
{
- if (JSON_UNLIKELY(not element.second.is_primitive()))
+ if (it->first == value.first)
{
- JSON_THROW(detail::type_error::create(315, "values in object must be primitive"));
+ return {it, false};
}
-
- // assign value to reference pointed to by JSON pointer; Note that if
- // the JSON pointer is "" (i.e., points to the whole value), function
- // get_and_create returns a reference to result itself. An assignment
- // will then create a primitive value.
- json_pointer(element.first).get_and_create(result) = element.second;
}
-
- return result;
- }
-
- friend bool operator==(json_pointer const& lhs,
- json_pointer const& rhs) noexcept
- {
- return (lhs.reference_tokens == rhs.reference_tokens);
- }
-
- friend bool operator!=(json_pointer const& lhs,
- json_pointer const& rhs) noexcept
- {
- return not (lhs == rhs);
- }
-
- /// the reference tokens
- std::vector<std::string> reference_tokens;
-};
-} // namespace nlohmann
-
-// #include <nlohmann/adl_serializer.hpp>
-
-
-#include <utility>
-
-// #include <nlohmann/detail/conversions/from_json.hpp>
-
-// #include <nlohmann/detail/conversions/to_json.hpp>
-
-
-namespace nlohmann
-{
-
-template<typename, typename>
-struct adl_serializer
-{
- /*!
- @brief convert a JSON value to any value type
-
- This function is usually called by the `get()` function of the
- @ref basic_json class (either explicit or via conversion operators).
-
- @param[in] j JSON value to read from
- @param[in,out] val value to write to
- */
- template<typename BasicJsonType, typename ValueType>
- static auto from_json(BasicJsonType&& j, ValueType& val) noexcept(
- noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), val)))
- -> decltype(::nlohmann::from_json(std::forward<BasicJsonType>(j), val), void())
- {
- ::nlohmann::from_json(std::forward<BasicJsonType>(j), val);
- }
-
- /*!
- @brief convert any value type to a JSON value
-
- This function is usually called by the constructors of the @ref basic_json
- class.
-
- @param[in,out] j JSON value to write to
- @param[in] val value to read from
- */
- template <typename BasicJsonType, typename ValueType>
- static auto to_json(BasicJsonType& j, ValueType&& val) noexcept(
- noexcept(::nlohmann::to_json(j, std::forward<ValueType>(val))))
- -> decltype(::nlohmann::to_json(j, std::forward<ValueType>(val)), void())
- {
- ::nlohmann::to_json(j, std::forward<ValueType>(val));
+ Container::push_back(value);
+ return {--this->end(), true};
}
};
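ordered_map is deliberately a std::vector of pairs with linear-scan lookup: keys stay in insertion order, and find/erase are O(n). A short usage sketch, assuming std::string keys and the declaration above:

#include <nlohmann/json.hpp> // provides nlohmann::ordered_map here
#include <iostream>
#include <string>

int main()
{
    nlohmann::ordered_map<std::string, int> m;
    m["one"] = 1;   // operator[] emplaces at the back: order preserved
    m["two"] = 2;
    m["one"] = 10;  // existing key: updated in place, position unchanged
    m.erase("one"); // later elements are shifted forward one slot

    for (const auto& kv : m)
    {
        std::cout << kv.first << '=' << kv.second << '\n'; // "two=2"
    }
}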
@@ -12475,6 +16586,9 @@ default; will be used in @ref number_integer_t)
`uint64_t` by default; will be used in @ref number_unsigned_t)
@tparam NumberFloatType type for JSON floating-point numbers (`double` by
default; will be used in @ref number_float_t)
+@tparam BinaryType type for packed binary data for compatibility with binary
+serialization formats (`std::vector<std::uint8_t>` by default; will be used in
+@ref binary_t)
@tparam AllocatorType type of the allocator to use (`std::allocator` by
default)
@tparam JSONSerializer the serializer to resolve internal calls to `to_json()`
@@ -12529,7 +16643,7 @@ relationship:
The invariants are checked by member function assert_invariant().
@internal
-@note ObjectType trick from http://stackoverflow.com/a/9860911
+@note ObjectType trick from https://stackoverflow.com/a/9860911
@endinternal
@see [RFC 7159: The JavaScript Object Notation (JSON) Data Interchange
@@ -12545,13 +16659,15 @@ class basic_json
private:
template<detail::value_t> friend struct detail::external_constructor;
friend ::nlohmann::json_pointer<basic_json>;
- friend ::nlohmann::detail::parser<basic_json>;
+
+ template<typename BasicJsonType, typename InputType>
+ friend class ::nlohmann::detail::parser;
friend ::nlohmann::detail::serializer<basic_json>;
template<typename BasicJsonType>
friend class ::nlohmann::detail::iter_impl;
template<typename BasicJsonType, typename CharType>
friend class ::nlohmann::detail::binary_writer;
- template<typename BasicJsonType, typename SAX>
+ template<typename BasicJsonType, typename InputType, typename SAX>
friend class ::nlohmann::detail::binary_reader;
template<typename BasicJsonType>
friend class ::nlohmann::detail::json_sax_dom_parser;
@@ -12562,8 +16678,19 @@ class basic_json
using basic_json_t = NLOHMANN_BASIC_JSON_TPL;
// convenience aliases for types residing in namespace detail;
- using lexer = ::nlohmann::detail::lexer<basic_json>;
- using parser = ::nlohmann::detail::parser<basic_json>;
+ using lexer = ::nlohmann::detail::lexer_base<basic_json>;
+
+ template<typename InputAdapterType>
+ static ::nlohmann::detail::parser<basic_json, InputAdapterType> parser(
+ InputAdapterType adapter,
+ detail::parser_callback_t<basic_json>cb = nullptr,
+ const bool allow_exceptions = true,
+ const bool ignore_comments = false
+ )
+ {
+ return ::nlohmann::detail::parser<basic_json, InputAdapterType>(std::move(adapter),
+ std::move(cb), allow_exceptions, ignore_comments);
+ }
using primitive_iterator_t = ::nlohmann::detail::primitive_iterator_t;
template<typename BasicJsonType>
@@ -12577,7 +16704,8 @@ class basic_json
template<typename CharType>
using output_adapter_t = ::nlohmann::detail::output_adapter_t<CharType>;
- using binary_reader = ::nlohmann::detail::binary_reader<basic_json>;
+ template<typename InputType>
+ using binary_reader = ::nlohmann::detail::binary_reader<basic_json, InputType>;
template<typename CharType> using binary_writer = ::nlohmann::detail::binary_writer<basic_json, CharType>;
using serializer = ::nlohmann::detail::serializer<basic_json>;
@@ -12590,6 +16718,8 @@ class basic_json
using json_serializer = JSONSerializer<T, SFINAE>;
/// how to treat decoding errors
using error_handler_t = detail::error_handler_t;
+ /// how to treat CBOR tags
+ using cbor_tag_handler_t = detail::cbor_tag_handler_t;
/// helper type for initializer lists of basic_json values
using initializer_list_t = std::initializer_list<detail::json_ref<basic_json>>;
@@ -12697,11 +16827,12 @@ class basic_json
@since 2.1.0
*/
+ JSON_HEDLEY_WARN_UNUSED_RESULT
static basic_json meta()
{
basic_json result;
- result["copyright"] = "(C) 2013-2017 Niels Lohmann";
+ result["copyright"] = "(C) 2013-2020 Niels Lohmann";
result["name"] = "JSON for Modern C++";
result["url"] = "https://github.com/nlohmann/json";
result["version"]["string"] =
@@ -13195,12 +17326,83 @@ class basic_json
*/
using number_float_t = NumberFloatType;
+ /*!
+ @brief a type for a packed binary type
+
+ This type is designed to carry binary data that appears in various
+ serialized formats, such as CBOR's Major Type 2, MessagePack's bin, and
+ BSON's generic binary subtype. This type is NOT a part of standard JSON and
+ exists solely for compatibility with these binary types. As such, it is
+ simply defined as an ordered sequence of zero or more byte values.
+
+ Additionally, as an implementation detail, the subtype of the binary data is
+ carried around as a `std::uint8_t`, which is compatible with both of the
+ binary data formats that use binary subtyping (though the specific
+ numberings are incompatible with each other, and it is up to the user to
+ translate between them).
+
+ [CBOR's RFC 7049](https://tools.ietf.org/html/rfc7049) describes this type
+ as:
+ > Major type 2: a byte string. The string's length in bytes is represented
+ > following the rules for positive integers (major type 0).
+
+ [MessagePack's documentation on the bin type
+ family](https://github.com/msgpack/msgpack/blob/master/spec.md#bin-format-family)
+ describes this type as:
+ > Bin format family stores an byte array in 2, 3, or 5 bytes of extra bytes
+ > in addition to the size of the byte array.
+
+ [BSON's specifications](http://bsonspec.org/spec.html) describe several
+ binary types; however, this type is intended to represent the generic binary
+ type which has the description:
+ > Generic binary subtype - This is the most commonly used binary subtype and
+ > should be the 'default' for drivers and tools.
+
+ None of these impose any limitations on the internal representation other
+ than that the basic unit of storage be some type of array whose parts are
+ decomposable into bytes.
+
+ The default representation of this binary format is a
+ `std::vector<std::uint8_t>`, which is a very common way to represent a byte
+ array in modern C++.
+
+ #### Default type
+
+ The default value for @a BinaryType is `std::vector<std::uint8_t>`
+
+ #### Storage
+
+ Binary arrays are stored as pointers in a @ref basic_json type. That is,
+ for any access to binary values, a pointer of the type `binary_t*` must be
+ dereferenced.
+
+ #### Notes on subtypes
+
+ - CBOR
+ - Binary values are represented as byte strings. No subtypes are
+ supported and will be ignored when CBOR is written.
+ - MessagePack
+ - If a subtype is given and the binary array contains exactly 1, 2, 4, 8,
+ or 16 elements, the fixext family (fixext1, fixext2, fixext4, fixext8,
+ fixext16) is used. For other sizes, the ext family (ext8, ext16, ext32)
+ is used. The subtype is then added as a signed 8-bit integer.
+ - If no subtype is given, the bin family (bin8, bin16, bin32) is used.
+ - BSON
+ - If a subtype is given, it is used and added as unsigned 8-bit integer.
+ - If no subtype is given, the generic binary subtype 0x00 is used.
+
+ @sa @ref binary -- create a binary array
+
+ @since version 3.8.0
+ */
+ using binary_t = nlohmann::byte_container_with_subtype<BinaryType>;
/// @}
private:
/// helper for exception-safe object creation
template<typename T, typename... Args>
+ JSON_HEDLEY_RETURNS_NON_NULL
static T* create(Args&& ... args)
{
AllocatorType<T> alloc;
@@ -13212,7 +17414,7 @@ class basic_json
};
std::unique_ptr<T, decltype(deleter)> object(AllocatorTraits::allocate(alloc, 1), deleter);
AllocatorTraits::construct(alloc, object.get(), std::forward<Args>(args)...);
- assert(object != nullptr);
+ JSON_ASSERT(object != nullptr);
return object.release();
}
@@ -13236,6 +17438,7 @@ class basic_json
number | number_integer | @ref number_integer_t
number | number_unsigned | @ref number_unsigned_t
number | number_float | @ref number_float_t
+ binary | binary | pointer to @ref binary_t
null | null | *no value is stored*
@note Variable-length types (objects, arrays, and strings) are stored as
@@ -13252,6 +17455,8 @@ class basic_json
array_t* array;
/// string (stored with pointer to save storage)
string_t* string;
+ /// binary (stored with pointer to save storage)
+ binary_t* binary;
/// boolean
boolean_t boolean;
/// number (integer)
@@ -13294,6 +17499,12 @@ class basic_json
break;
}
+ case value_t::binary:
+ {
+ binary = create<binary_t>();
+ break;
+ }
+
case value_t::boolean:
{
boolean = boolean_t(false);
@@ -13327,9 +17538,9 @@ class basic_json
default:
{
object = nullptr; // silence warning, see #821
- if (JSON_UNLIKELY(t == value_t::null))
+ if (JSON_HEDLEY_UNLIKELY(t == value_t::null))
{
- JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.5.0")); // LCOV_EXCL_LINE
+ JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.9.1")); // LCOV_EXCL_LINE
}
break;
}
@@ -13372,8 +17583,79 @@ class basic_json
array = create<array_t>(std::move(value));
}
+ /// constructor for binary arrays
+ json_value(const typename binary_t::container_type& value)
+ {
+ binary = create<binary_t>(value);
+ }
+
+ /// constructor for rvalue binary arrays
+ json_value(typename binary_t::container_type&& value)
+ {
+ binary = create<binary_t>(std::move(value));
+ }
+
+ /// constructor for binary arrays (internal type)
+ json_value(const binary_t& value)
+ {
+ binary = create<binary_t>(value);
+ }
+
+ /// constructor for rvalue binary arrays (internal type)
+ json_value(binary_t&& value)
+ {
+ binary = create<binary_t>(std::move(value));
+ }
+
void destroy(value_t t) noexcept
{
+ // flatten the current json_value to a heap-allocated stack
+ std::vector<basic_json> stack;
+
+ // move the top-level items to stack
+ if (t == value_t::array)
+ {
+ stack.reserve(array->size());
+ std::move(array->begin(), array->end(), std::back_inserter(stack));
+ }
+ else if (t == value_t::object)
+ {
+ stack.reserve(object->size());
+ for (auto&& it : *object)
+ {
+ stack.push_back(std::move(it.second));
+ }
+ }
+
+ while (!stack.empty())
+ {
+ // move the last item to local variable to be processed
+ basic_json current_item(std::move(stack.back()));
+ stack.pop_back();
+
+ // if current_item is array/object, move
+ // its children to the stack to be processed later
+ if (current_item.is_array())
+ {
+ std::move(current_item.m_value.array->begin(), current_item.m_value.array->end(),
+ std::back_inserter(stack));
+
+ current_item.m_value.array->clear();
+ }
+ else if (current_item.is_object())
+ {
+ for (auto&& it : *current_item.m_value.object)
+ {
+ stack.push_back(std::move(it.second));
+ }
+
+ current_item.m_value.object->clear();
+ }
+
+ // it is now safe for current_item to be destructed, since it
+ // no longer has any children
+ }
+
switch (t)
{
case value_t::object:
@@ -13400,6 +17682,14 @@ class basic_json
break;
}
+ case value_t::binary:
+ {
+ AllocatorType<binary_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, binary);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, binary, 1);
+ break;
+ }
+
default:
{
break;
@@ -13419,9 +17709,10 @@ class basic_json
*/
void assert_invariant() const noexcept
{
- assert(m_type != value_t::object or m_value.object != nullptr);
- assert(m_type != value_t::array or m_value.array != nullptr);
- assert(m_type != value_t::string or m_value.string != nullptr);
+ JSON_ASSERT(m_type != value_t::object || m_value.object != nullptr);
+ JSON_ASSERT(m_type != value_t::array || m_value.array != nullptr);
+ JSON_ASSERT(m_type != value_t::string || m_value.string != nullptr);
+ JSON_ASSERT(m_type != value_t::binary || m_value.binary != nullptr);
}
public:
@@ -13444,7 +17735,7 @@ class basic_json
@sa @ref parser_callback_t for more information and examples
*/
- using parse_event_t = typename parser::parse_event_t;
+ using parse_event_t = detail::parse_event_t;
/*!
@brief per-element parser callback type
@@ -13495,7 +17786,7 @@ class basic_json
@since version 1.0.0
*/
- using parser_callback_t = typename parser::parser_callback_t;
+ using parser_callback_t = detail::parser_callback_t<basic_json>;
//////////////////
// constructors //
@@ -13520,6 +17811,7 @@ class basic_json
number | `0`
object | `{}`
array | `[]`
+ binary | empty array
@param[in] v the type of the value to create
@@ -13591,6 +17883,12 @@ class basic_json
@ref number_float_t, and all convertible number types such as `int`,
`size_t`, `int64_t`, `float` or `double` can be used.
- **boolean**: @ref boolean_t / `bool` can be used.
+ - **binary**: @ref binary_t / `std::vector<uint8_t>` may be used;
+ unfortunately, because string literals cannot be distinguished from binary
+ character arrays by the C++ type system, all types compatible with `const
+ char*` will be directed to the string constructor instead. This is both
+ for backwards compatibility and because a binary type is not a standard
+ JSON type.
See the examples below.
@@ -13622,10 +17920,10 @@ class basic_json
@since version 2.1.0
*/
- template <typename CompatibleType,
- typename U = detail::uncvref_t<CompatibleType>,
- detail::enable_if_t<
- not detail::is_basic_json<U>::value and detail::is_compatible_type<basic_json_t, U>::value, int> = 0>
+ template < typename CompatibleType,
+ typename U = detail::uncvref_t<CompatibleType>,
+ detail::enable_if_t <
+ !detail::is_basic_json<U>::value && detail::is_compatible_type<basic_json_t, U>::value, int > = 0 >
basic_json(CompatibleType && val) noexcept(noexcept(
JSONSerializer<U>::to_json(std::declval<basic_json_t&>(),
std::forward<CompatibleType>(val))))
@@ -13660,9 +17958,9 @@ class basic_json
@since version 3.2.0
*/
- template <typename BasicJsonType,
- detail::enable_if_t<
- detail::is_basic_json<BasicJsonType>::value and not std::is_same<basic_json, BasicJsonType>::value, int> = 0>
+ template < typename BasicJsonType,
+ detail::enable_if_t <
+ detail::is_basic_json<BasicJsonType>::value&& !std::is_same<basic_json, BasicJsonType>::value, int > = 0 >
basic_json(const BasicJsonType& val)
{
using other_boolean_t = typename BasicJsonType::boolean_t;
@@ -13672,6 +17970,7 @@ class basic_json
using other_string_t = typename BasicJsonType::string_t;
using other_object_t = typename BasicJsonType::object_t;
using other_array_t = typename BasicJsonType::array_t;
+ using other_binary_t = typename BasicJsonType::binary_t;
switch (val.type())
{
@@ -13696,12 +17995,17 @@ class basic_json
case value_t::array:
JSONSerializer<other_array_t>::to_json(*this, val.template get_ref<const other_array_t&>());
break;
+ case value_t::binary:
+ JSONSerializer<other_binary_t>::to_json(*this, val.template get_ref<const other_binary_t&>());
+ break;
case value_t::null:
*this = nullptr;
break;
case value_t::discarded:
m_type = value_t::discarded;
break;
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
}
assert_invariant();
}
@@ -13789,11 +18093,11 @@ class basic_json
bool is_an_object = std::all_of(init.begin(), init.end(),
[](const detail::json_ref<basic_json>& element_ref)
{
- return (element_ref->is_array() and element_ref->size() == 2 and (*element_ref)[0].is_string());
+ return element_ref->is_array() && element_ref->size() == 2 && (*element_ref)[0].is_string();
});
// adjust type if type deduction is not wanted
- if (not type_deduction)
+ if (!type_deduction)
{
// if array is wanted, do not create an object though possible
if (manual_type == value_t::array)
@@ -13802,7 +18106,7 @@ class basic_json
}
// if object is wanted but impossible, throw an exception
- if (JSON_UNLIKELY(manual_type == value_t::object and not is_an_object))
+ if (JSON_HEDLEY_UNLIKELY(manual_type == value_t::object && !is_an_object))
{
JSON_THROW(type_error::create(301, "cannot create object from initializer list"));
}
@@ -13833,6 +18137,99 @@ class basic_json
}
/*!
+ @brief explicitly create a binary array (without subtype)
+
+ Creates a JSON binary array value from a given binary container. Binary
+ values are part of various binary formats, such as CBOR, MessagePack, and
+ BSON. This factory function creates a value for serialization to those
+ formats.
+
+ @note This function exists because of the difficulty in specifying the
+ correct template overload in the standard value ctor, as both JSON arrays
+ and JSON binary arrays are backed with some form of a `std::vector`.
+ Because JSON binary arrays are a non-standard extension, it was decided
+ that it would be best to prevent automatic initialization of a binary
+ array type, both for backwards compatibility and so that it does not
+ happen by accident.
+
+ @param[in] init container containing bytes to use as binary type
+
+ @return JSON binary array value
+
+ @complexity Linear in the size of @a init.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @since version 3.8.0
+ */
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json binary(const typename binary_t::container_type& init)
+ {
+ auto res = basic_json();
+ res.m_type = value_t::binary;
+ res.m_value = init;
+ return res;
+ }
+
+ /*!
+ @brief explicitly create a binary array (with subtype)
+
+ Creates a JSON binary array value from a given binary container. Binary
+ values are part of various binary formats, such as CBOR, MessagePack, and
+ BSON. This factory function creates a value for serialization to those
+ formats.
+
+ @note This function exists because of the difficulty in specifying the
+ correct template overload in the standard value ctor, as both JSON arrays
+ and JSON binary arrays are backed with some form of a `std::vector`.
+ Because JSON binary arrays are a non-standard extension, it was decided
+ that it would be best to prevent automatic initialization of a binary
+ array type, both for backwards compatibility and so that it does not
+ happen by accident.
+
+ @param[in] init container containing bytes to use as binary type
+ @param[in] subtype subtype to use in MessagePack and BSON
+
+ @return JSON binary array value
+
+ @complexity Linear in the size of @a init.
+
+ @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+ changes to any JSON value.
+
+ @since version 3.8.0
+ */
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json binary(const typename binary_t::container_type& init, std::uint8_t subtype)
+ {
+ auto res = basic_json();
+ res.m_type = value_t::binary;
+ res.m_value = binary_t(init, subtype);
+ return res;
+ }
+
+ /// @copydoc binary(const typename binary_t::container_type&)
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json binary(typename binary_t::container_type&& init)
+ {
+ auto res = basic_json();
+ res.m_type = value_t::binary;
+ res.m_value = std::move(init);
+ return res;
+ }
+
+ /// @copydoc binary(const typename binary_t::container_type&, std::uint8_t)
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json binary(typename binary_t::container_type&& init, std::uint8_t subtype)
+ {
+ auto res = basic_json();
+ res.m_type = value_t::binary;
+ res.m_value = binary_t(std::move(init), subtype);
+ return res;
+ }
+
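A brief usage sketch for the binary() factories above, assuming the default nlohmann::json alias: the bytes survive a CBOR/MessagePack round trip, while plain dump() falls back to the {"bytes", "subtype"} object described in dump()'s documentation.

#include <nlohmann/json.hpp>
using json = nlohmann::json;

int main()
{
    // binary value without a subtype
    json b1 = json::binary({0xCA, 0xFE});

    // binary value with subtype 42 (kept by MessagePack/BSON writers)
    json b2 = json::binary({0xDE, 0xAD, 0xBE, 0xEF}, 42);

    const auto cbor = json::to_cbor(b2); // byte string, major type 2
    const auto text = b1.dump();         // {"bytes":[202,254],"subtype":null}
}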
+ /*!
@brief explicitly create an array from an initializer list
Creates a JSON array value from a given initializer list. That is, given a
@@ -13869,6 +18266,7 @@ class basic_json
@since version 1.0.0
*/
+ JSON_HEDLEY_WARN_UNUSED_RESULT
static basic_json array(initializer_list_t init = {})
{
return basic_json(init, false, value_t::array);
@@ -13912,6 +18310,7 @@ class basic_json
@since version 1.0.0
*/
+ JSON_HEDLEY_WARN_UNUSED_RESULT
static basic_json object(initializer_list_t init = {})
{
return basic_json(init, false, value_t::object);
@@ -14001,16 +18400,16 @@ class basic_json
@since version 1.0.0
*/
- template<class InputIT, typename std::enable_if<
- std::is_same<InputIT, typename basic_json_t::iterator>::value or
- std::is_same<InputIT, typename basic_json_t::const_iterator>::value, int>::type = 0>
+ template < class InputIT, typename std::enable_if <
+ std::is_same<InputIT, typename basic_json_t::iterator>::value ||
+ std::is_same<InputIT, typename basic_json_t::const_iterator>::value, int >::type = 0 >
basic_json(InputIT first, InputIT last)
{
- assert(first.m_object != nullptr);
- assert(last.m_object != nullptr);
+ JSON_ASSERT(first.m_object != nullptr);
+ JSON_ASSERT(last.m_object != nullptr);
// make sure iterator fits the current value
- if (JSON_UNLIKELY(first.m_object != last.m_object))
+ if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object))
{
JSON_THROW(invalid_iterator::create(201, "iterators are not compatible"));
}
@@ -14027,8 +18426,8 @@ class basic_json
case value_t::number_unsigned:
case value_t::string:
{
- if (JSON_UNLIKELY(not first.m_it.primitive_iterator.is_begin()
- or not last.m_it.primitive_iterator.is_end()))
+ if (JSON_HEDLEY_UNLIKELY(!first.m_it.primitive_iterator.is_begin()
+ || !last.m_it.primitive_iterator.is_end()))
{
JSON_THROW(invalid_iterator::create(204, "iterators out of range"));
}
@@ -14085,6 +18484,12 @@ class basic_json
break;
}
+ case value_t::binary:
+ {
+ m_value = *first.m_object->m_value.binary;
+ break;
+ }
+
default:
JSON_THROW(invalid_iterator::create(206, "cannot construct with iterators from " +
std::string(first.m_object->type_name())));
@@ -14098,10 +18503,10 @@ class basic_json
// other constructors and destructor //
///////////////////////////////////////
- /// @private
- basic_json(const detail::json_ref<basic_json>& ref)
- : basic_json(ref.moved_or_copied())
- {}
+ template<typename JsonRef,
+ detail::enable_if_t<detail::conjunction<detail::is_json_ref<JsonRef>,
+ std::is_same<typename JsonRef::value_type, basic_json>>::value, int> = 0 >
+ basic_json(const JsonRef& ref) : basic_json(ref.moved_or_copied()) {}
/*!
@brief copy constructor
@@ -14178,6 +18583,12 @@ class basic_json
break;
}
+ case value_t::binary:
+ {
+ m_value = *other.m_value.binary;
+ break;
+ }
+
default:
break;
}
@@ -14249,9 +18660,9 @@ class basic_json
@since version 1.0.0
*/
basic_json& operator=(basic_json other) noexcept (
- std::is_nothrow_move_constructible<value_t>::value and
- std::is_nothrow_move_assignable<value_t>::value and
- std::is_nothrow_move_constructible<json_value>::value and
+ std::is_nothrow_move_constructible<value_t>::value&&
+ std::is_nothrow_move_assignable<value_t>::value&&
+ std::is_nothrow_move_constructible<json_value>::value&&
std::is_nothrow_move_assignable<json_value>::value
)
{
@@ -14317,12 +18728,17 @@ class basic_json
@param[in] error_handler how to react to decoding errors; there are three
possible values: `strict` (throws an exception in case a decoding error
occurs; default), `replace` (replace invalid UTF-8 sequences with U+FFFD),
- and `ignore` (ignore invalid UTF-8 sequences during serialization).
+ and `ignore` (ignore invalid UTF-8 sequences during serialization; all
+ bytes are copied to the output unchanged).
@return string containing the serialization of the JSON value
@throw type_error.316 if a string stored inside the JSON value is not
- UTF-8 encoded
+ UTF-8 encoded and @a error_handler is set to strict
+
+ @note Binary values are serialized as an object containing two keys:
+ - "bytes": an array of bytes as integers
+ - "subtype": the subtype as an integer, or "null" if the binary has no subtype
@complexity Linear.
@@ -14337,7 +18753,8 @@ class basic_json
@since version 1.0.0; indentation character @a indent_char, option
@a ensure_ascii and exceptions added in version 3.0.0; error
- handlers added in version 3.4.0.
+ handlers added in version 3.4.0; serialization of binary values added
+ in version 3.8.0.
*/
string_t dump(const int indent = -1,
const char indent_char = ' ',
@@ -14376,6 +18793,7 @@ class basic_json
number (floating-point) | value_t::number_float
object | value_t::object
array | value_t::array
+ binary | value_t::binary
discarded | value_t::discarded
@complexity Constant.
@@ -14418,12 +18836,13 @@ class basic_json
@sa @ref is_string() -- returns whether JSON value is a string
@sa @ref is_boolean() -- returns whether JSON value is a boolean
@sa @ref is_number() -- returns whether JSON value is a number
+ @sa @ref is_binary() -- returns whether JSON value is a binary array
@since version 1.0.0
*/
constexpr bool is_primitive() const noexcept
{
- return is_null() or is_string() or is_boolean() or is_number();
+ return is_null() || is_string() || is_boolean() || is_number() || is_binary();
}
/*!
@@ -14450,7 +18869,7 @@ class basic_json
*/
constexpr bool is_structured() const noexcept
{
- return is_array() or is_object();
+ return is_array() || is_object();
}
/*!
@@ -14472,7 +18891,7 @@ class basic_json
*/
constexpr bool is_null() const noexcept
{
- return (m_type == value_t::null);
+ return m_type == value_t::null;
}
/*!
@@ -14494,7 +18913,7 @@ class basic_json
*/
constexpr bool is_boolean() const noexcept
{
- return (m_type == value_t::boolean);
+ return m_type == value_t::boolean;
}
/*!
@@ -14524,7 +18943,7 @@ class basic_json
*/
constexpr bool is_number() const noexcept
{
- return is_number_integer() or is_number_float();
+ return is_number_integer() || is_number_float();
}
/*!
@@ -14553,7 +18972,7 @@ class basic_json
*/
constexpr bool is_number_integer() const noexcept
{
- return (m_type == value_t::number_integer or m_type == value_t::number_unsigned);
+ return m_type == value_t::number_integer || m_type == value_t::number_unsigned;
}
/*!
@@ -14581,7 +19000,7 @@ class basic_json
*/
constexpr bool is_number_unsigned() const noexcept
{
- return (m_type == value_t::number_unsigned);
+ return m_type == value_t::number_unsigned;
}
/*!
@@ -14609,7 +19028,7 @@ class basic_json
*/
constexpr bool is_number_float() const noexcept
{
- return (m_type == value_t::number_float);
+ return m_type == value_t::number_float;
}
/*!
@@ -14631,7 +19050,7 @@ class basic_json
*/
constexpr bool is_object() const noexcept
{
- return (m_type == value_t::object);
+ return m_type == value_t::object;
}
/*!
@@ -14653,7 +19072,7 @@ class basic_json
*/
constexpr bool is_array() const noexcept
{
- return (m_type == value_t::array);
+ return m_type == value_t::array;
}
/*!
@@ -14675,7 +19094,29 @@ class basic_json
*/
constexpr bool is_string() const noexcept
{
- return (m_type == value_t::string);
+ return m_type == value_t::string;
+ }
+
+ /*!
+ @brief return whether value is a binary array
+
+ This function returns true if and only if the JSON value is a binary array.
+
+ @return `true` if type is binary array, `false` otherwise.
+
+ @complexity Constant.
+
+ @exceptionsafety No-throw guarantee: this member function never throws
+ exceptions.
+
+ @liveexample{The following code exemplifies `is_binary()` for all JSON
+ types.,is_binary}
+
+ @since version 3.8.0
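+
+    A minimal sketch (illustrative):
+    @code {.cpp}
+    json::binary({0xFF}).is_binary();  // true
+    json(42).is_binary();              // false
+    @endcode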
+ */
+ constexpr bool is_binary() const noexcept
+ {
+ return m_type == value_t::binary;
}
/*!
@@ -14702,7 +19143,7 @@ class basic_json
*/
constexpr bool is_discarded() const noexcept
{
- return (m_type == value_t::discarded);
+ return m_type == value_t::discarded;
}
/*!
@@ -14741,7 +19182,7 @@ class basic_json
/// get a boolean (explicit)
boolean_t get_impl(boolean_t* /*unused*/) const
{
- if (JSON_LIKELY(is_boolean()))
+ if (JSON_HEDLEY_LIKELY(is_boolean()))
{
return m_value.boolean;
}
@@ -14833,6 +19274,18 @@ class basic_json
return is_number_float() ? &m_value.number_float : nullptr;
}
+ /// get a pointer to the value (binary)
+ binary_t* get_impl_ptr(binary_t* /*unused*/) noexcept
+ {
+ return is_binary() ? m_value.binary : nullptr;
+ }
+
+ /// get a pointer to the value (binary)
+ constexpr const binary_t* get_impl_ptr(const binary_t* /*unused*/) const noexcept
+ {
+ return is_binary() ? m_value.binary : nullptr;
+ }
+
/*!
@brief helper function to implement get_ref()
@@ -14850,7 +19303,7 @@ class basic_json
// delegate the call to get_ptr<>()
auto ptr = obj.template get_ptr<typename std::add_pointer<ReferenceType>::type>();
- if (JSON_LIKELY(ptr != nullptr))
+ if (JSON_HEDLEY_LIKELY(ptr != nullptr))
{
return *ptr;
}
@@ -14900,9 +19353,9 @@ class basic_json
@since version 3.2.0
*/
- template<typename BasicJsonType, detail::enable_if_t<
- not std::is_same<BasicJsonType, basic_json>::value and
- detail::is_basic_json<BasicJsonType>::value, int> = 0>
+ template < typename BasicJsonType, detail::enable_if_t <
+ !std::is_same<BasicJsonType, basic_json>::value&&
+ detail::is_basic_json<BasicJsonType>::value, int > = 0 >
BasicJsonType get() const
{
return *this;
@@ -14947,19 +19400,19 @@ class basic_json
@since version 2.1.0
*/
- template<typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>,
- detail::enable_if_t <
- not detail::is_basic_json<ValueType>::value and
- detail::has_from_json<basic_json_t, ValueType>::value and
- not detail::has_non_default_from_json<basic_json_t, ValueType>::value,
- int> = 0>
+ template < typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>,
+ detail::enable_if_t <
+ !detail::is_basic_json<ValueType>::value &&
+ detail::has_from_json<basic_json_t, ValueType>::value &&
+ !detail::has_non_default_from_json<basic_json_t, ValueType>::value,
+ int > = 0 >
ValueType get() const noexcept(noexcept(
JSONSerializer<ValueType>::from_json(std::declval<const basic_json_t&>(), std::declval<ValueType&>())))
{
// we cannot static_assert on ValueTypeCV being non-const, because
// there is support for get<const basic_json_t>(), which is why we
// still need the uncvref
- static_assert(not std::is_reference<ValueTypeCV>::value,
+ static_assert(!std::is_reference<ValueTypeCV>::value,
"get() cannot be used with reference types, you might want to use get_ref()");
static_assert(std::is_default_constructible<ValueType>::value,
"types must be DefaultConstructible when used with get()");
@@ -15000,16 +19453,16 @@ class basic_json
@since version 2.1.0
*/
- template<typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>,
- detail::enable_if_t<not std::is_same<basic_json_t, ValueType>::value and
- detail::has_non_default_from_json<basic_json_t, ValueType>::value,
- int> = 0>
+ template < typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>,
+ detail::enable_if_t < !std::is_same<basic_json_t, ValueType>::value &&
+ detail::has_non_default_from_json<basic_json_t, ValueType>::value,
+ int > = 0 >
ValueType get() const noexcept(noexcept(
- JSONSerializer<ValueTypeCV>::from_json(std::declval<const basic_json_t&>())))
+ JSONSerializer<ValueType>::from_json(std::declval<const basic_json_t&>())))
{
- static_assert(not std::is_reference<ValueTypeCV>::value,
+ static_assert(!std::is_reference<ValueTypeCV>::value,
"get() cannot be used with reference types, you might want to use get_ref()");
- return JSONSerializer<ValueTypeCV>::from_json(*this);
+ return JSONSerializer<ValueType>::from_json(*this);
}
/*!
@@ -15045,11 +19498,11 @@ class basic_json
@since version 3.3.0
*/
- template<typename ValueType,
- detail::enable_if_t <
- not detail::is_basic_json<ValueType>::value and
- detail::has_from_json<basic_json_t, ValueType>::value,
- int> = 0>
+ template < typename ValueType,
+ detail::enable_if_t <
+ !detail::is_basic_json<ValueType>::value&&
+ detail::has_from_json<basic_json_t, ValueType>::value,
+ int > = 0 >
ValueType & get_to(ValueType& v) const noexcept(noexcept(
JSONSerializer<ValueType>::from_json(std::declval<const basic_json_t&>(), v)))
{
@@ -15057,6 +19510,31 @@ class basic_json
return v;
}
+ // specialization to allow to call get_to with a basic_json value
+ // see https://github.com/nlohmann/json/issues/2175
+ template<typename ValueType,
+ detail::enable_if_t <
+ detail::is_basic_json<ValueType>::value,
+ int> = 0>
+ ValueType & get_to(ValueType& v) const
+ {
+ v = *this;
+ return v;
+ }
+
+ template <
+ typename T, std::size_t N,
+ typename Array = T (&)[N],
+ detail::enable_if_t <
+ detail::has_from_json<basic_json_t, Array>::value, int > = 0 >
+ Array get_to(T (&v)[N]) const
+ noexcept(noexcept(JSONSerializer<Array>::from_json(
+ std::declval<const basic_json_t&>(), v)))
+ {
+ JSONSerializer<Array>::from_json(*this, v);
+ return v;
+ }
+
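+    // A minimal usage sketch for the C-array overload above (illustrative):
+    //   json j = {1, 2, 3};
+    //   int a[3];
+    //   j.get_to(a);  // a == {1, 2, 3}
+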
/*!
@brief get a pointer value (implicit)
@@ -15096,9 +19574,9 @@ class basic_json
@brief get a pointer value (implicit)
@copydoc get_ptr()
*/
- template<typename PointerType, typename std::enable_if<
- std::is_pointer<PointerType>::value and
- std::is_const<typename std::remove_pointer<PointerType>::type>::value, int>::type = 0>
+ template < typename PointerType, typename std::enable_if <
+ std::is_pointer<PointerType>::value&&
+ std::is_const<typename std::remove_pointer<PointerType>::type>::value, int >::type = 0 >
constexpr auto get_ptr() const noexcept -> decltype(std::declval<const basic_json_t&>().get_impl_ptr(std::declval<PointerType>()))
{
// delegate the call to get_impl_ptr<>() const
@@ -15190,9 +19668,9 @@ class basic_json
@brief get a reference value (implicit)
@copydoc get_ref()
*/
- template<typename ReferenceType, typename std::enable_if<
- std::is_reference<ReferenceType>::value and
- std::is_const<typename std::remove_reference<ReferenceType>::type>::value, int>::type = 0>
+ template < typename ReferenceType, typename std::enable_if <
+ std::is_reference<ReferenceType>::value&&
+ std::is_const<typename std::remove_reference<ReferenceType>::type>::value, int >::type = 0 >
ReferenceType get_ref() const
{
// delegate call to get_ref_impl
@@ -15229,25 +19707,52 @@ class basic_json
@since version 1.0.0
*/
template < typename ValueType, typename std::enable_if <
- not std::is_pointer<ValueType>::value and
- not std::is_same<ValueType, detail::json_ref<basic_json>>::value and
- not std::is_same<ValueType, typename string_t::value_type>::value and
- not detail::is_basic_json<ValueType>::value
-
-#ifndef _MSC_VER // fix for issue #167 operator<< ambiguity under VS2015
- and not std::is_same<ValueType, std::initializer_list<typename string_t::value_type>>::value
-#if defined(JSON_HAS_CPP_17) && defined(_MSC_VER) and _MSC_VER <= 1914
- and not std::is_same<ValueType, typename std::string_view>::value
-#endif
+ !std::is_pointer<ValueType>::value&&
+ !std::is_same<ValueType, detail::json_ref<basic_json>>::value&&
+ !std::is_same<ValueType, typename string_t::value_type>::value&&
+ !detail::is_basic_json<ValueType>::value
+ && !std::is_same<ValueType, std::initializer_list<typename string_t::value_type>>::value
+#if defined(JSON_HAS_CPP_17) && (defined(__GNUC__) || (defined(_MSC_VER) && _MSC_VER >= 1910 && _MSC_VER <= 1914))
+ && !std::is_same<ValueType, typename std::string_view>::value
#endif
- and detail::is_detected<detail::get_template_function, const basic_json_t&, ValueType>::value
+ && detail::is_detected<detail::get_template_function, const basic_json_t&, ValueType>::value
, int >::type = 0 >
- operator ValueType() const
+ JSON_EXPLICIT operator ValueType() const
{
// delegate the call to get<>() const
return get<ValueType>();
}
+ /*!
+ @return reference to the binary value
+
+ @throw type_error.302 if the value is not binary
+
+ @sa @ref is_binary() to check if the value is binary
+
+ @since version 3.8.0
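+
+    A minimal sketch (illustrative):
+    @code {.cpp}
+    json j = json::binary({0xDE, 0xAD});
+    json::binary_t& bin = j.get_binary();   // OK: j holds a binary value
+    // json("text").get_binary();           // would throw type_error.302
+    @endcode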
+ */
+ binary_t& get_binary()
+ {
+ if (!is_binary())
+ {
+ JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(type_name())));
+ }
+
+ return *get_ptr<binary_t*>();
+ }
+
+ /// @copydoc get_binary()
+ const binary_t& get_binary() const
+ {
+ if (!is_binary())
+ {
+ JSON_THROW(type_error::create(302, "type must be binary, but is " + std::string(type_name())));
+ }
+
+ return *get_ptr<const binary_t*>();
+ }
+
/// @}
@@ -15288,7 +19793,7 @@ class basic_json
reference at(size_type idx)
{
// at only works for arrays
- if (JSON_LIKELY(is_array()))
+ if (JSON_HEDLEY_LIKELY(is_array()))
{
JSON_TRY
{
@@ -15335,7 +19840,7 @@ class basic_json
const_reference at(size_type idx) const
{
// at only works for arrays
- if (JSON_LIKELY(is_array()))
+ if (JSON_HEDLEY_LIKELY(is_array()))
{
JSON_TRY
{
@@ -15386,7 +19891,7 @@ class basic_json
reference at(const typename object_t::key_type& key)
{
// at only works for objects
- if (JSON_LIKELY(is_object()))
+ if (JSON_HEDLEY_LIKELY(is_object()))
{
JSON_TRY
{
@@ -15437,7 +19942,7 @@ class basic_json
const_reference at(const typename object_t::key_type& key) const
{
// at only works for objects
- if (JSON_LIKELY(is_object()))
+ if (JSON_HEDLEY_LIKELY(is_object()))
{
JSON_TRY
{
@@ -15491,7 +19996,7 @@ class basic_json
}
// operator[] only works for arrays
- if (JSON_LIKELY(is_array()))
+ if (JSON_HEDLEY_LIKELY(is_array()))
{
// fill up array with null values if given idx is outside range
if (idx >= m_value.array->size())
@@ -15529,7 +20034,7 @@ class basic_json
const_reference operator[](size_type idx) const
{
// const operator[] only works for arrays
- if (JSON_LIKELY(is_array()))
+ if (JSON_HEDLEY_LIKELY(is_array()))
{
return m_value.array->operator[](idx);
}
@@ -15575,7 +20080,7 @@ class basic_json
}
// operator[] only works for objects
- if (JSON_LIKELY(is_object()))
+ if (JSON_HEDLEY_LIKELY(is_object()))
{
return m_value.object->operator[](key);
}
@@ -15616,9 +20121,9 @@ class basic_json
const_reference operator[](const typename object_t::key_type& key) const
{
// const operator[] only works for objects
- if (JSON_LIKELY(is_object()))
+ if (JSON_HEDLEY_LIKELY(is_object()))
{
- assert(m_value.object->find(key) != m_value.object->end());
+ JSON_ASSERT(m_value.object->find(key) != m_value.object->end());
return m_value.object->find(key)->second;
}
@@ -15653,6 +20158,7 @@ class basic_json
@since version 1.1.0
*/
template<typename T>
+ JSON_HEDLEY_NON_NULL(2)
reference operator[](T* key)
{
// implicitly convert null to object
@@ -15664,7 +20170,7 @@ class basic_json
}
// at only works for objects
- if (JSON_LIKELY(is_object()))
+ if (JSON_HEDLEY_LIKELY(is_object()))
{
return m_value.object->operator[](key);
}
@@ -15703,12 +20209,13 @@ class basic_json
@since version 1.1.0
*/
template<typename T>
+ JSON_HEDLEY_NON_NULL(2)
const_reference operator[](T* key) const
{
// at only works for objects
- if (JSON_LIKELY(is_object()))
+ if (JSON_HEDLEY_LIKELY(is_object()))
{
- assert(m_value.object->find(key) != m_value.object->end());
+ JSON_ASSERT(m_value.object->find(key) != m_value.object->end());
return m_value.object->find(key)->second;
}
@@ -15748,6 +20255,8 @@ class basic_json
@return copy of the element at key @a key or @a default_value if @a key
is not found
+ @throw type_error.302 if @a default_value does not match the type of the
+ value at @a key
@throw type_error.306 if the JSON value is not an object; in that case,
using `value()` with a key makes no sense.
@@ -15763,18 +20272,20 @@ class basic_json
@since version 1.0.0
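+
+    A minimal sketch of the stricter type check (illustrative):
+    @code {.cpp}
+    json j = {{"k", "some string"}};
+    j.value("k", "fallback");  // OK, returns "some string"
+    j.value("k", 42);          // throws type_error.302: stored value is a string
+    @endcode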
*/
- template<class ValueType, typename std::enable_if<
- std::is_convertible<basic_json_t, ValueType>::value, int>::type = 0>
+ // using std::is_convertible in a std::enable_if will fail when using explicit conversions
+ template < class ValueType, typename std::enable_if <
+ detail::is_getable<basic_json_t, ValueType>::value
+ && !std::is_same<value_t, ValueType>::value, int >::type = 0 >
ValueType value(const typename object_t::key_type& key, const ValueType& default_value) const
{
// at only works for objects
- if (JSON_LIKELY(is_object()))
+ if (JSON_HEDLEY_LIKELY(is_object()))
{
// if key is found, return value and given default value otherwise
const auto it = find(key);
if (it != end())
{
- return *it;
+ return it->template get<ValueType>();
}
return default_value;
@@ -15821,6 +20332,8 @@ class basic_json
@return copy of the element at key @a key or @a default_value if @a key
is not found
+ @throw type_error.302 if @a default_value does not match the type of the
+ value at @a ptr
@throw type_error.306 if the JSON value is not an object; in that case,
using `value()` with a key makes no sense.
@@ -15834,16 +20347,16 @@ class basic_json
@since version 2.0.2
*/
template<class ValueType, typename std::enable_if<
- std::is_convertible<basic_json_t, ValueType>::value, int>::type = 0>
+ detail::is_getable<basic_json_t, ValueType>::value, int>::type = 0>
ValueType value(const json_pointer& ptr, const ValueType& default_value) const
{
// at only works for objects
- if (JSON_LIKELY(is_object()))
+ if (JSON_HEDLEY_LIKELY(is_object()))
{
// if pointer resolves a value, return it or use default value
JSON_TRY
{
- return ptr.get_checked(this);
+ return ptr.get_checked(this).template get<ValueType>();
}
JSON_INTERNAL_CATCH (out_of_range&)
{
@@ -15858,6 +20371,7 @@ class basic_json
@brief overload for a default value of type const char*
@copydoc basic_json::value(const json_pointer&, ValueType) const
*/
+ JSON_HEDLEY_NON_NULL(3)
string_t value(const json_pointer& ptr, const char* default_value) const
{
return value(ptr, string_t(default_value));
@@ -15870,8 +20384,8 @@ class basic_json
container `c`, the expression `c.front()` is equivalent to `*c.begin()`.
@return In case of a structured type (array or object), a reference to the
- first element is returned. In case of number, string, or boolean values, a
- reference to the value is returned.
+ first element is returned. In case of number, string, boolean, or binary
+ values, a reference to the value is returned.
@complexity Constant.
@@ -15913,8 +20427,8 @@ class basic_json
@endcode
@return In case of a structured type (array or object), a reference to the
- last element is returned. In case of number, string, or boolean values, a
- reference to the value is returned.
+ last element is returned. In case of number, string, boolean, or binary
+ values, a reference to the value is returned.
@complexity Constant.
@@ -15980,7 +20494,7 @@ class basic_json
@complexity The complexity depends on the type:
- objects: amortized constant
- arrays: linear in distance between @a pos and the end of the container
- - strings: linear in the length of the string
+ - strings and binary: linear in the length of the member
- other types: constant
@liveexample{The example shows the result of `erase()` for different JSON
@@ -15995,14 +20509,14 @@ class basic_json
@since version 1.0.0
*/
- template<class IteratorType, typename std::enable_if<
- std::is_same<IteratorType, typename basic_json_t::iterator>::value or
- std::is_same<IteratorType, typename basic_json_t::const_iterator>::value, int>::type
- = 0>
+ template < class IteratorType, typename std::enable_if <
+ std::is_same<IteratorType, typename basic_json_t::iterator>::value ||
+ std::is_same<IteratorType, typename basic_json_t::const_iterator>::value, int >::type
+ = 0 >
IteratorType erase(IteratorType pos)
{
// make sure iterator fits the current value
- if (JSON_UNLIKELY(this != pos.m_object))
+ if (JSON_HEDLEY_UNLIKELY(this != pos.m_object))
{
JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
}
@@ -16016,8 +20530,9 @@ class basic_json
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::string:
+ case value_t::binary:
{
- if (JSON_UNLIKELY(not pos.m_it.primitive_iterator.is_begin()))
+ if (JSON_HEDLEY_UNLIKELY(!pos.m_it.primitive_iterator.is_begin()))
{
JSON_THROW(invalid_iterator::create(205, "iterator out of range"));
}
@@ -16029,6 +20544,13 @@ class basic_json
std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.string, 1);
m_value.string = nullptr;
}
+ else if (is_binary())
+ {
+ AllocatorType<binary_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.binary);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.binary, 1);
+ m_value.binary = nullptr;
+ }
m_type = value_t::null;
assert_invariant();
@@ -16086,7 +20608,7 @@ class basic_json
- objects: `log(size()) + std::distance(first, last)`
- arrays: linear in the distance between @a first and @a last, plus linear
in the distance between @a last and end of the container
- - strings: linear in the length of the string
+ - strings and binary: linear in the length of the member
- other types: constant
@liveexample{The example shows the result of `erase()` for different JSON
@@ -16100,14 +20622,14 @@ class basic_json
@since version 1.0.0
*/
- template<class IteratorType, typename std::enable_if<
- std::is_same<IteratorType, typename basic_json_t::iterator>::value or
- std::is_same<IteratorType, typename basic_json_t::const_iterator>::value, int>::type
- = 0>
+ template < class IteratorType, typename std::enable_if <
+ std::is_same<IteratorType, typename basic_json_t::iterator>::value ||
+ std::is_same<IteratorType, typename basic_json_t::const_iterator>::value, int >::type
+ = 0 >
IteratorType erase(IteratorType first, IteratorType last)
{
// make sure iterator fits the current value
- if (JSON_UNLIKELY(this != first.m_object or this != last.m_object))
+ if (JSON_HEDLEY_UNLIKELY(this != first.m_object || this != last.m_object))
{
JSON_THROW(invalid_iterator::create(203, "iterators do not fit current value"));
}
@@ -16121,9 +20643,10 @@ class basic_json
case value_t::number_integer:
case value_t::number_unsigned:
case value_t::string:
+ case value_t::binary:
{
- if (JSON_LIKELY(not first.m_it.primitive_iterator.is_begin()
- or not last.m_it.primitive_iterator.is_end()))
+ if (JSON_HEDLEY_LIKELY(!first.m_it.primitive_iterator.is_begin()
+ || !last.m_it.primitive_iterator.is_end()))
{
JSON_THROW(invalid_iterator::create(204, "iterators out of range"));
}
@@ -16135,6 +20658,13 @@ class basic_json
std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.string, 1);
m_value.string = nullptr;
}
+ else if (is_binary())
+ {
+ AllocatorType<binary_t> alloc;
+ std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.binary);
+ std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.binary, 1);
+ m_value.binary = nullptr;
+ }
m_type = value_t::null;
assert_invariant();
@@ -16194,7 +20724,7 @@ class basic_json
size_type erase(const typename object_t::key_type& key)
{
// this erase only works for objects
- if (JSON_LIKELY(is_object()))
+ if (JSON_HEDLEY_LIKELY(is_object()))
{
return m_value.object->erase(key);
}
@@ -16229,9 +20759,9 @@ class basic_json
void erase(const size_type idx)
{
// this erase only works for arrays
- if (JSON_LIKELY(is_array()))
+ if (JSON_HEDLEY_LIKELY(is_array()))
{
- if (JSON_UNLIKELY(idx >= size()))
+ if (JSON_HEDLEY_UNLIKELY(idx >= size()))
{
JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
}
@@ -16274,6 +20804,8 @@ class basic_json
@liveexample{The example shows how `find()` is used.,find__key_type}
+ @sa @ref contains(KeyT&&) const -- checks whether a key exists
+
@since version 1.0.0
*/
template<typename KeyT>
@@ -16334,6 +20866,69 @@ class basic_json
return is_object() ? m_value.object->count(std::forward<KeyT>(key)) : 0;
}
+ /*!
+ @brief check the existence of an element in a JSON object
+
+ Check whether an element exists in a JSON object with key equivalent to
+ @a key. If the element is not found or the JSON value is not an object,
+ false is returned.
+
+ @note This method always returns false when executed on a JSON type
+ that is not an object.
+
+ @param[in] key key value to check its existence.
+
+    @return true if an element with the specified @a key exists; false if no
+    such element is found or the JSON value is not an object.
+
+ @complexity Logarithmic in the size of the JSON object.
+
+ @liveexample{The following code shows an example for `contains()`.,contains}
+
+ @sa @ref find(KeyT&&) -- returns an iterator to an object element
+ @sa @ref contains(const json_pointer&) const -- checks the existence for a JSON pointer
+
+ @since version 3.6.0
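+
+    A minimal sketch (illustrative):
+    @code {.cpp}
+    json j = {{"key", 1}};
+    j.contains("key");         // true
+    j.contains("nope");        // false
+    json(42).contains("key");  // false: not an object
+    @endcode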
+ */
+ template < typename KeyT, typename std::enable_if <
+ !std::is_same<typename std::decay<KeyT>::type, json_pointer>::value, int >::type = 0 >
+ bool contains(KeyT && key) const
+ {
+ return is_object() && m_value.object->find(std::forward<KeyT>(key)) != m_value.object->end();
+ }
+
+ /*!
+ @brief check the existence of an element in a JSON object given a JSON pointer
+
+ Check whether the given JSON pointer @a ptr can be resolved in the current
+ JSON value.
+
+ @note This method can be executed on any JSON value type.
+
+ @param[in] ptr JSON pointer to check its existence.
+
+ @return true if the JSON pointer can be resolved to a stored value, false
+ otherwise.
+
+ @post If `j.contains(ptr)` returns true, it is safe to call `j[ptr]`.
+
+ @throw parse_error.106 if an array index begins with '0'
+ @throw parse_error.109 if an array index was not a number
+
+ @complexity Logarithmic in the size of the JSON object.
+
+ @liveexample{The following code shows an example for `contains()`.,contains_json_pointer}
+
+ @sa @ref contains(KeyT &&) const -- checks the existence of a key
+
+ @since version 3.7.0
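+
+    A minimal sketch (illustrative):
+    @code {.cpp}
+    json j = {{"a", {{"b", 42}}}};
+    j.contains(json::json_pointer("/a/b"));   // true
+    j.contains(json::json_pointer("/a/c"));   // false
+    @endcode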
+ */
+ bool contains(const json_pointer& ptr) const
+ {
+ return ptr.contains(this);
+ }
+
/// @}
@@ -16675,7 +21270,7 @@ class basic_json
future 4.0.0 of the library. Please use @ref items() instead;
that is, replace `json::iterator_wrapper(j)` with `j.items()`.
*/
- JSON_DEPRECATED
+ JSON_HEDLEY_DEPRECATED_FOR(3.1.0, items())
static iteration_proxy<iterator> iterator_wrapper(reference ref) noexcept
{
return ref.items();
@@ -16684,7 +21279,7 @@ class basic_json
/*!
@copydoc iterator_wrapper(reference)
*/
- JSON_DEPRECATED
+ JSON_HEDLEY_DEPRECATED_FOR(3.1.0, items())
static iteration_proxy<const_iterator> iterator_wrapper(const_reference ref) noexcept
{
return ref.items();
@@ -16741,6 +21336,11 @@ class basic_json
element as string (see example). For primitive types (e.g., numbers),
`key()` returns an empty string.
+ @warning Using `items()` on temporary objects is dangerous. Make sure the
+             object's lifetime exceeds the iteration. See
+ <https://github.com/nlohmann/json/issues/2040> for more
+ information.
+
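+    For example (illustrative):
+    @code {.cpp}
+    // SAFE: j outlives the loop
+    json j = {{"a", 1}};
+    for (auto& el : j.items()) { std::cout << el.key() << '\n'; }
+
+    // DANGEROUS: the temporary is destroyed before the loop body runs
+    // for (auto& el : json::parse(R"({"a":1})").items()) { ... }
+    @endcode
+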
@return iteration proxy object wrapping @a ref with an interface to use in
range-based for loops
@@ -16789,6 +21389,7 @@ class basic_json
boolean | `false`
string | `false`
number | `false`
+ binary | `false`
object | result of function `object_t::empty()`
array | result of function `array_t::empty()`
@@ -16860,6 +21461,7 @@ class basic_json
boolean | `1`
string | `1`
number | `1`
+ binary | `1`
object | result of function object_t::size()
array | result of function array_t::size()
@@ -16934,6 +21536,7 @@ class basic_json
boolean | `1` (same as `size()`)
string | `1` (same as `size()`)
number | `1` (same as `size()`)
+ binary | `1` (same as `size()`)
object | result of function `object_t::max_size()`
array | result of function `array_t::max_size()`
@@ -17006,6 +21609,7 @@ class basic_json
boolean | `false`
string | `""`
number | `0`
+ binary | An empty byte vector
object | `{}`
array | `[]`
@@ -17063,6 +21667,12 @@ class basic_json
break;
}
+ case value_t::binary:
+ {
+ m_value.binary->clear();
+ break;
+ }
+
case value_t::array:
{
m_value.array->clear();
@@ -17103,7 +21713,7 @@ class basic_json
void push_back(basic_json&& val)
{
// push_back only works for null objects or arrays
- if (JSON_UNLIKELY(not(is_null() or is_array())))
+ if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array())))
{
JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name())));
}
@@ -17118,8 +21728,7 @@ class basic_json
// add element to array (move semantics)
m_value.array->push_back(std::move(val));
- // invalidate object
- val.m_type = value_t::null;
+        // if val is moved from, the basic_json move constructor marks it null, so we do not need to invalidate it here
}
/*!
@@ -17139,7 +21748,7 @@ class basic_json
void push_back(const basic_json& val)
{
// push_back only works for null objects or arrays
- if (JSON_UNLIKELY(not(is_null() or is_array())))
+ if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array())))
{
JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name())));
}
@@ -17189,7 +21798,7 @@ class basic_json
void push_back(const typename object_t::value_type& val)
{
// push_back only works for null objects or objects
- if (JSON_UNLIKELY(not(is_null() or is_object())))
+ if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_object())))
{
JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name())));
}
@@ -17243,7 +21852,7 @@ class basic_json
*/
void push_back(initializer_list_t init)
{
- if (is_object() and init.size() == 2 and (*init.begin())->is_string())
+ if (is_object() && init.size() == 2 && (*init.begin())->is_string())
{
basic_json&& key = init.begin()->moved_or_copied();
push_back(typename object_t::value_type(
@@ -17275,6 +21884,8 @@ class basic_json
@param[in] args arguments to forward to a constructor of @ref basic_json
@tparam Args compatible types to create a @ref basic_json object
+ @return reference to the inserted element
+
@throw type_error.311 when called on a type other than JSON array or
null; example: `"cannot use emplace_back() with number"`
@@ -17284,13 +21895,13 @@ class basic_json
elements to a JSON array. Note how the `null` value was silently converted
to a JSON array.,emplace_back}
- @since version 2.0.8
+ @since version 2.0.8, returns reference since 3.7.0
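+
+    A minimal sketch of the returned reference (illustrative):
+    @code {.cpp}
+    json j;                          // null
+    json& ref = j.emplace_back(1);   // j becomes [1]
+    ref = 2;                         // j is now [2]
+    @endcode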
*/
template<class... Args>
- void emplace_back(Args&& ... args)
+ reference emplace_back(Args&& ... args)
{
// emplace_back only works for null objects or arrays
- if (JSON_UNLIKELY(not(is_null() or is_array())))
+ if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_array())))
{
JSON_THROW(type_error::create(311, "cannot use emplace_back() with " + std::string(type_name())));
}
@@ -17304,7 +21915,12 @@ class basic_json
}
// add element to array (perfect forwarding)
+#ifdef JSON_HAS_CPP_17
+ return m_value.array->emplace_back(std::forward<Args>(args)...);
+#else
m_value.array->emplace_back(std::forward<Args>(args)...);
+ return m_value.array->back();
+#endif
}
/*!
@@ -17338,7 +21954,7 @@ class basic_json
std::pair<iterator, bool> emplace(Args&& ... args)
{
// emplace only works for null objects or arrays
- if (JSON_UNLIKELY(not(is_null() or is_object())))
+ if (JSON_HEDLEY_UNLIKELY(!(is_null() || is_object())))
{
JSON_THROW(type_error::create(311, "cannot use emplace() with " + std::string(type_name())));
}
@@ -17368,7 +21984,7 @@ class basic_json
iterator insert_iterator(const_iterator pos, Args&& ... args)
{
iterator result(this);
- assert(m_value.array != nullptr);
+ JSON_ASSERT(m_value.array != nullptr);
auto insert_pos = std::distance(m_value.array->begin(), pos.m_it.array_iterator);
m_value.array->insert(pos.m_it.array_iterator, std::forward<Args>(args)...);
@@ -17406,10 +22022,10 @@ class basic_json
iterator insert(const_iterator pos, const basic_json& val)
{
// insert only works for arrays
- if (JSON_LIKELY(is_array()))
+ if (JSON_HEDLEY_LIKELY(is_array()))
{
// check if iterator pos fits to this JSON value
- if (JSON_UNLIKELY(pos.m_object != this))
+ if (JSON_HEDLEY_UNLIKELY(pos.m_object != this))
{
JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
}
@@ -17457,10 +22073,10 @@ class basic_json
iterator insert(const_iterator pos, size_type cnt, const basic_json& val)
{
// insert only works for arrays
- if (JSON_LIKELY(is_array()))
+ if (JSON_HEDLEY_LIKELY(is_array()))
{
// check if iterator pos fits to this JSON value
- if (JSON_UNLIKELY(pos.m_object != this))
+ if (JSON_HEDLEY_UNLIKELY(pos.m_object != this))
{
JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
}
@@ -17505,24 +22121,24 @@ class basic_json
iterator insert(const_iterator pos, const_iterator first, const_iterator last)
{
// insert only works for arrays
- if (JSON_UNLIKELY(not is_array()))
+ if (JSON_HEDLEY_UNLIKELY(!is_array()))
{
JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
}
// check if iterator pos fits to this JSON value
- if (JSON_UNLIKELY(pos.m_object != this))
+ if (JSON_HEDLEY_UNLIKELY(pos.m_object != this))
{
JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
}
// check if range iterators belong to the same JSON object
- if (JSON_UNLIKELY(first.m_object != last.m_object))
+ if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object))
{
JSON_THROW(invalid_iterator::create(210, "iterators do not fit"));
}
- if (JSON_UNLIKELY(first.m_object == this))
+ if (JSON_HEDLEY_UNLIKELY(first.m_object == this))
{
JSON_THROW(invalid_iterator::create(211, "passed iterators may not belong to container"));
}
@@ -17558,13 +22174,13 @@ class basic_json
iterator insert(const_iterator pos, initializer_list_t ilist)
{
// insert only works for arrays
- if (JSON_UNLIKELY(not is_array()))
+ if (JSON_HEDLEY_UNLIKELY(!is_array()))
{
JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
}
// check if iterator pos fits to this JSON value
- if (JSON_UNLIKELY(pos.m_object != this))
+ if (JSON_HEDLEY_UNLIKELY(pos.m_object != this))
{
JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
}
@@ -17599,19 +22215,19 @@ class basic_json
void insert(const_iterator first, const_iterator last)
{
// insert only works for objects
- if (JSON_UNLIKELY(not is_object()))
+ if (JSON_HEDLEY_UNLIKELY(!is_object()))
{
JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
}
// check if range iterators belong to the same JSON object
- if (JSON_UNLIKELY(first.m_object != last.m_object))
+ if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object))
{
JSON_THROW(invalid_iterator::create(210, "iterators do not fit"));
}
// passed iterators must belong to objects
- if (JSON_UNLIKELY(not first.m_object->is_object()))
+ if (JSON_HEDLEY_UNLIKELY(!first.m_object->is_object()))
{
JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects"));
}
@@ -17648,11 +22264,11 @@ class basic_json
assert_invariant();
}
- if (JSON_UNLIKELY(not is_object()))
+ if (JSON_HEDLEY_UNLIKELY(!is_object()))
{
JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name())));
}
- if (JSON_UNLIKELY(not j.is_object()))
+ if (JSON_HEDLEY_UNLIKELY(!j.is_object()))
{
JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(j.type_name())));
}
@@ -17699,20 +22315,20 @@ class basic_json
assert_invariant();
}
- if (JSON_UNLIKELY(not is_object()))
+ if (JSON_HEDLEY_UNLIKELY(!is_object()))
{
JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name())));
}
// check if range iterators belong to the same JSON object
- if (JSON_UNLIKELY(first.m_object != last.m_object))
+ if (JSON_HEDLEY_UNLIKELY(first.m_object != last.m_object))
{
JSON_THROW(invalid_iterator::create(210, "iterators do not fit"));
}
// passed iterators must belong to objects
- if (JSON_UNLIKELY(not first.m_object->is_object()
- or not last.m_object->is_object()))
+ if (JSON_HEDLEY_UNLIKELY(!first.m_object->is_object()
+ || !last.m_object->is_object()))
{
JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects"));
}
@@ -17741,9 +22357,9 @@ class basic_json
@since version 1.0.0
*/
void swap(reference other) noexcept (
- std::is_nothrow_move_constructible<value_t>::value and
- std::is_nothrow_move_assignable<value_t>::value and
- std::is_nothrow_move_constructible<json_value>::value and
+ std::is_nothrow_move_constructible<value_t>::value&&
+ std::is_nothrow_move_assignable<value_t>::value&&
+ std::is_nothrow_move_constructible<json_value>::value&&
std::is_nothrow_move_assignable<json_value>::value
)
{
@@ -17755,6 +22371,34 @@ class basic_json
/*!
@brief exchanges the values
+    Exchanges the contents of the JSON value @a left with those of @a right. Does not
+    invoke any move, copy, or swap operations on individual elements. All
+    iterators and references remain valid. The past-the-end iterator is
+    invalidated. This overload is implemented as a friend function callable via ADL.
+
+ @param[in,out] left JSON value to exchange the contents with
+ @param[in,out] right JSON value to exchange the contents with
+
+ @complexity Constant.
+
+ @liveexample{The example below shows how JSON values can be swapped with
+ `swap()`.,swap__reference}
+
+ @since version 1.0.0
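+
+    A minimal sketch of the ADL-enabled swap (illustrative):
+    @code {.cpp}
+    json a = 17;
+    json b = "bar";
+    swap(a, b);   // a == "bar", b == 17, found via ADL
+    @endcode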
+ */
+ friend void swap(reference left, reference right) noexcept (
+ std::is_nothrow_move_constructible<value_t>::value&&
+ std::is_nothrow_move_assignable<value_t>::value&&
+ std::is_nothrow_move_constructible<json_value>::value&&
+ std::is_nothrow_move_assignable<json_value>::value
+ )
+ {
+ left.swap(right);
+ }
+
+ /*!
+ @brief exchanges the values
+
Exchanges the contents of a JSON array with those of @a other. Does not
invoke any move, copy, or swap operations on individual elements. All
iterators and references remain valid. The past-the-end iterator is
@@ -17775,7 +22419,7 @@ class basic_json
void swap(array_t& other)
{
// swap only works for arrays
- if (JSON_LIKELY(is_array()))
+ if (JSON_HEDLEY_LIKELY(is_array()))
{
std::swap(*(m_value.array), other);
}
@@ -17808,7 +22452,7 @@ class basic_json
void swap(object_t& other)
{
// swap only works for objects
- if (JSON_LIKELY(is_object()))
+ if (JSON_HEDLEY_LIKELY(is_object()))
{
std::swap(*(m_value.object), other);
}
@@ -17841,7 +22485,7 @@ class basic_json
void swap(string_t& other)
{
// swap only works for strings
- if (JSON_LIKELY(is_string()))
+ if (JSON_HEDLEY_LIKELY(is_string()))
{
std::swap(*(m_value.string), other);
}
@@ -17851,6 +22495,53 @@ class basic_json
}
}
+ /*!
+ @brief exchanges the values
+
+    Exchanges the contents of a JSON binary value with those of @a other. Does not
+ invoke any move, copy, or swap operations on individual elements. All
+ iterators and references remain valid. The past-the-end iterator is
+ invalidated.
+
+ @param[in,out] other binary to exchange the contents with
+
+    @throw type_error.310 when JSON value is not binary; example: `"cannot
+ use swap() with boolean"`
+
+ @complexity Constant.
+
+    @liveexample{The example below shows how binary values can be swapped with
+ `swap()`.,swap__binary_t}
+
+ @since version 3.8.0
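+
+    A minimal sketch (illustrative):
+    @code {.cpp}
+    json j = json::binary({1, 2, 3});
+    json::binary_t other({4, 5});
+    j.swap(other);   // j now holds {4, 5}; other holds {1, 2, 3}
+    @endcode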
+ */
+ void swap(binary_t& other)
+ {
+        // swap only works for binary values
+ if (JSON_HEDLEY_LIKELY(is_binary()))
+ {
+ std::swap(*(m_value.binary), other);
+ }
+ else
+ {
+ JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name())));
+ }
+ }
+
+ /// @copydoc swap(binary_t)
+ void swap(typename binary_t::container_type& other)
+ {
+        // swap only works for binary values
+ if (JSON_HEDLEY_LIKELY(is_binary()))
+ {
+ std::swap(*(m_value.binary), other);
+ }
+ else
+ {
+ JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name())));
+ }
+ }
+
/// @}
public:
@@ -17869,13 +22560,13 @@ class basic_json
their stored values are the same according to their respective
`operator==`.
- Integer and floating-point numbers are automatically converted before
- comparison. Note than two NaN values are always treated as unequal.
+ comparison. Note that two NaN values are always treated as unequal.
- Two JSON null values are equal.
@note Floating-point numbers inside JSON values are compared with
`json::number_float_t::operator==` which is `double::operator==` by
default. To compare floating-point while respecting an epsilon, an alternative
- [comparison function](https://github.com/mariokonrad/marnav/blob/master/src/marnav/math/floatingpoint.hpp#L34-#L39)
+ [comparison function](https://github.com/mariokonrad/marnav/blob/master/include/marnav/math/floatingpoint.hpp#L34-#L39)
could be used, for instance
@code {.cpp}
template<typename T, typename = typename std::enable_if<std::is_floating_point<T>::value, T>::type>
@@ -17884,6 +22575,22 @@ class basic_json
return std::abs(a - b) <= epsilon;
}
@endcode
+    Or you can define your own equality-comparison function like this:
+    @code {.cpp}
+    bool my_equal(const_reference lhs, const_reference rhs)
+    {
+        const auto lhs_type = lhs.type();
+        const auto rhs_type = rhs.type();
+        if (lhs_type == rhs_type)
+        {
+            switch (lhs_type)
+            {
+                // self-defined case
+                case value_t::number_float:
+                    // compare the stored numbers (basic_json itself has no operator-)
+                    return std::abs(lhs.get<double>() - rhs.get<double>()) <= std::numeric_limits<double>::epsilon();
+                // other cases remain the same as the original
+                ...
+            }
+        }
+        ...
+    }
+    @endcode
@note NaN values never compare equal to themselves or to other NaN values.
@@ -17910,56 +22617,59 @@ class basic_json
switch (lhs_type)
{
case value_t::array:
- return (*lhs.m_value.array == *rhs.m_value.array);
+ return *lhs.m_value.array == *rhs.m_value.array;
case value_t::object:
- return (*lhs.m_value.object == *rhs.m_value.object);
+ return *lhs.m_value.object == *rhs.m_value.object;
case value_t::null:
return true;
case value_t::string:
- return (*lhs.m_value.string == *rhs.m_value.string);
+ return *lhs.m_value.string == *rhs.m_value.string;
case value_t::boolean:
- return (lhs.m_value.boolean == rhs.m_value.boolean);
+ return lhs.m_value.boolean == rhs.m_value.boolean;
case value_t::number_integer:
- return (lhs.m_value.number_integer == rhs.m_value.number_integer);
+ return lhs.m_value.number_integer == rhs.m_value.number_integer;
case value_t::number_unsigned:
- return (lhs.m_value.number_unsigned == rhs.m_value.number_unsigned);
+ return lhs.m_value.number_unsigned == rhs.m_value.number_unsigned;
case value_t::number_float:
- return (lhs.m_value.number_float == rhs.m_value.number_float);
+ return lhs.m_value.number_float == rhs.m_value.number_float;
+
+ case value_t::binary:
+ return *lhs.m_value.binary == *rhs.m_value.binary;
default:
return false;
}
}
- else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_float)
+ else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_float)
{
- return (static_cast<number_float_t>(lhs.m_value.number_integer) == rhs.m_value.number_float);
+ return static_cast<number_float_t>(lhs.m_value.number_integer) == rhs.m_value.number_float;
}
- else if (lhs_type == value_t::number_float and rhs_type == value_t::number_integer)
+ else if (lhs_type == value_t::number_float && rhs_type == value_t::number_integer)
{
- return (lhs.m_value.number_float == static_cast<number_float_t>(rhs.m_value.number_integer));
+ return lhs.m_value.number_float == static_cast<number_float_t>(rhs.m_value.number_integer);
}
- else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_float)
+ else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_float)
{
- return (static_cast<number_float_t>(lhs.m_value.number_unsigned) == rhs.m_value.number_float);
+ return static_cast<number_float_t>(lhs.m_value.number_unsigned) == rhs.m_value.number_float;
}
- else if (lhs_type == value_t::number_float and rhs_type == value_t::number_unsigned)
+ else if (lhs_type == value_t::number_float && rhs_type == value_t::number_unsigned)
{
- return (lhs.m_value.number_float == static_cast<number_float_t>(rhs.m_value.number_unsigned));
+ return lhs.m_value.number_float == static_cast<number_float_t>(rhs.m_value.number_unsigned);
}
- else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_integer)
+ else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_integer)
{
- return (static_cast<number_integer_t>(lhs.m_value.number_unsigned) == rhs.m_value.number_integer);
+ return static_cast<number_integer_t>(lhs.m_value.number_unsigned) == rhs.m_value.number_integer;
}
- else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_unsigned)
+ else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_unsigned)
{
- return (lhs.m_value.number_integer == static_cast<number_integer_t>(rhs.m_value.number_unsigned));
+ return lhs.m_value.number_integer == static_cast<number_integer_t>(rhs.m_value.number_unsigned);
}
return false;
@@ -17973,7 +22683,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator==(const_reference lhs, const ScalarType rhs) noexcept
{
- return (lhs == basic_json(rhs));
+ return lhs == basic_json(rhs);
}
/*!
@@ -17984,7 +22694,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator==(const ScalarType lhs, const_reference rhs) noexcept
{
- return (basic_json(lhs) == rhs);
+ return basic_json(lhs) == rhs;
}
/*!
@@ -18007,7 +22717,7 @@ class basic_json
*/
friend bool operator!=(const_reference lhs, const_reference rhs) noexcept
{
- return not (lhs == rhs);
+ return !(lhs == rhs);
}
/*!
@@ -18018,7 +22728,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator!=(const_reference lhs, const ScalarType rhs) noexcept
{
- return (lhs != basic_json(rhs));
+ return lhs != basic_json(rhs);
}
/*!
@@ -18029,7 +22739,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator!=(const ScalarType lhs, const_reference rhs) noexcept
{
- return (basic_json(lhs) != rhs);
+ return basic_json(lhs) != rhs;
}
/*!
@@ -18068,54 +22778,59 @@ class basic_json
switch (lhs_type)
{
case value_t::array:
+ // note parentheses are necessary, see
+ // https://github.com/nlohmann/json/issues/1530
return (*lhs.m_value.array) < (*rhs.m_value.array);
case value_t::object:
- return *lhs.m_value.object < *rhs.m_value.object;
+ return (*lhs.m_value.object) < (*rhs.m_value.object);
case value_t::null:
return false;
case value_t::string:
- return *lhs.m_value.string < *rhs.m_value.string;
+ return (*lhs.m_value.string) < (*rhs.m_value.string);
case value_t::boolean:
- return lhs.m_value.boolean < rhs.m_value.boolean;
+ return (lhs.m_value.boolean) < (rhs.m_value.boolean);
case value_t::number_integer:
- return lhs.m_value.number_integer < rhs.m_value.number_integer;
+ return (lhs.m_value.number_integer) < (rhs.m_value.number_integer);
case value_t::number_unsigned:
- return lhs.m_value.number_unsigned < rhs.m_value.number_unsigned;
+ return (lhs.m_value.number_unsigned) < (rhs.m_value.number_unsigned);
case value_t::number_float:
- return lhs.m_value.number_float < rhs.m_value.number_float;
+ return (lhs.m_value.number_float) < (rhs.m_value.number_float);
+
+ case value_t::binary:
+ return (*lhs.m_value.binary) < (*rhs.m_value.binary);
default:
return false;
}
}
- else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_float)
+ else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_float)
{
return static_cast<number_float_t>(lhs.m_value.number_integer) < rhs.m_value.number_float;
}
- else if (lhs_type == value_t::number_float and rhs_type == value_t::number_integer)
+ else if (lhs_type == value_t::number_float && rhs_type == value_t::number_integer)
{
return lhs.m_value.number_float < static_cast<number_float_t>(rhs.m_value.number_integer);
}
- else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_float)
+ else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_float)
{
return static_cast<number_float_t>(lhs.m_value.number_unsigned) < rhs.m_value.number_float;
}
- else if (lhs_type == value_t::number_float and rhs_type == value_t::number_unsigned)
+ else if (lhs_type == value_t::number_float && rhs_type == value_t::number_unsigned)
{
return lhs.m_value.number_float < static_cast<number_float_t>(rhs.m_value.number_unsigned);
}
- else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_unsigned)
+ else if (lhs_type == value_t::number_integer && rhs_type == value_t::number_unsigned)
{
return lhs.m_value.number_integer < static_cast<number_integer_t>(rhs.m_value.number_unsigned);
}
- else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_integer)
+ else if (lhs_type == value_t::number_unsigned && rhs_type == value_t::number_integer)
{
return static_cast<number_integer_t>(lhs.m_value.number_unsigned) < rhs.m_value.number_integer;
}
@@ -18134,7 +22849,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator<(const_reference lhs, const ScalarType rhs) noexcept
{
- return (lhs < basic_json(rhs));
+ return lhs < basic_json(rhs);
}
/*!
@@ -18145,7 +22860,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator<(const ScalarType lhs, const_reference rhs) noexcept
{
- return (basic_json(lhs) < rhs);
+ return basic_json(lhs) < rhs;
}
/*!
@@ -18169,7 +22884,7 @@ class basic_json
*/
friend bool operator<=(const_reference lhs, const_reference rhs) noexcept
{
- return not (rhs < lhs);
+ return !(rhs < lhs);
}
/*!
@@ -18180,7 +22895,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator<=(const_reference lhs, const ScalarType rhs) noexcept
{
- return (lhs <= basic_json(rhs));
+ return lhs <= basic_json(rhs);
}
/*!
@@ -18191,7 +22906,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator<=(const ScalarType lhs, const_reference rhs) noexcept
{
- return (basic_json(lhs) <= rhs);
+ return basic_json(lhs) <= rhs;
}
/*!
@@ -18215,7 +22930,7 @@ class basic_json
*/
friend bool operator>(const_reference lhs, const_reference rhs) noexcept
{
- return not (lhs <= rhs);
+ return !(lhs <= rhs);
}
/*!
@@ -18226,7 +22941,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator>(const_reference lhs, const ScalarType rhs) noexcept
{
- return (lhs > basic_json(rhs));
+ return lhs > basic_json(rhs);
}
/*!
@@ -18237,7 +22952,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator>(const ScalarType lhs, const_reference rhs) noexcept
{
- return (basic_json(lhs) > rhs);
+ return basic_json(lhs) > rhs;
}
/*!
@@ -18261,7 +22976,7 @@ class basic_json
*/
friend bool operator>=(const_reference lhs, const_reference rhs) noexcept
{
- return not (lhs < rhs);
+ return !(lhs < rhs);
}
/*!
@@ -18272,7 +22987,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator>=(const_reference lhs, const ScalarType rhs) noexcept
{
- return (lhs >= basic_json(rhs));
+ return lhs >= basic_json(rhs);
}
/*!
@@ -18283,7 +22998,7 @@ class basic_json
std::is_scalar<ScalarType>::value, int>::type = 0>
friend bool operator>=(const ScalarType lhs, const_reference rhs) noexcept
{
- return (basic_json(lhs) >= rhs);
+ return basic_json(lhs) >= rhs;
}
/// @}
@@ -18329,8 +23044,8 @@ class basic_json
friend std::ostream& operator<<(std::ostream& o, const basic_json& j)
{
// read width member and use it as indentation parameter if nonzero
- const bool pretty_print = (o.width() > 0);
- const auto indentation = (pretty_print ? o.width() : 0);
+ const bool pretty_print = o.width() > 0;
+ const auto indentation = pretty_print ? o.width() : 0;
// reset width to 0 for subsequent calls to this stream
o.width(0);
@@ -18349,7 +23064,7 @@ class basic_json
instead; that is, replace calls like `j >> o;` with `o << j;`.
@since version 1.0.0; deprecated since version 3.0.0
*/
- JSON_DEPRECATED
+ JSON_HEDLEY_DEPRECATED_FOR(3.0.0, operator<<(std::ostream&, const basic_json&))
friend std::ostream& operator>>(const basic_json& j, std::ostream& o)
{
return o << j;
@@ -18368,32 +23083,13 @@ class basic_json
/*!
@brief deserialize from a compatible input
- This function reads from a compatible input. Examples are:
- - an array of 1-byte values
- - strings with character/literal type with size of 1 byte
- - input streams
- - container with contiguous storage of 1-byte values. Compatible container
- types include `std::vector`, `std::string`, `std::array`,
- `std::valarray`, and `std::initializer_list`. Furthermore, C-style
- arrays can be used with `std::begin()`/`std::end()`. User-defined
- containers can be used as long as they implement random-access iterators
- and a contiguous storage.
-
- @pre Each element of the container has a size of 1 byte. Violating this
- precondition yields undefined behavior. **This precondition is enforced
- with a static assertion.**
-
- @pre The container storage is contiguous. Violating this precondition
- yields undefined behavior. **This precondition is enforced with an
- assertion.**
- @pre Each element of the container has a size of 1 byte. Violating this
- precondition yields undefined behavior. **This precondition is enforced
- with a static assertion.**
-
- @warning There is no way to enforce all preconditions at compile-time. If
- the function is called with a noncompliant container and with
- assertions switched off, the behavior is undefined and will most
- likely yield segmentation violation.
+ @tparam InputType A compatible input, for instance
+ - an std::istream object
+ - a FILE pointer
+ - a C-style array of characters
+ - a pointer to a null-terminated string of single byte characters
+    - an object obj for which begin(obj) and end(obj) produce a valid pair of
+ iterators.
@param[in] i input to read from
@param[in] cb a parser callback function of type @ref parser_callback_t
@@ -18401,8 +23097,13 @@ class basic_json
(optional)
@param[in] allow_exceptions whether to throw exceptions in case of a
parse error (optional, true by default)
+ @param[in] ignore_comments whether comments should be ignored and treated
+    like whitespace (true) or yield a parse error (false); (optional, false by
+ default)
- @return result of the deserialization
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
@throw parse_error.101 if a parse error occurs; example: `""unexpected end
of input; expected string literal""`
@@ -18411,7 +23112,7 @@ class basic_json
@complexity Linear in the length of the input. The parser is a predictive
LL(1) parser. The complexity can be higher if the parser callback function
- @a cb has a super-linear complexity.
+ @a cb or reading from the input @a i has a super-linear complexity.
@note A UTF-8 byte order mark is silently ignored.
@@ -18427,20 +23128,122 @@ class basic_json
@liveexample{The example below demonstrates the `parse()` function reading
from a contiguous container.,parse__contiguouscontainer__parser_callback_t}
- @since version 2.0.3 (contiguous containers)
+    @since version 2.0.3 (contiguous containers); support for ignoring
+    comments added in version 3.9.0.
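+
+    A minimal sketch (illustrative):
+    @code {.cpp}
+    auto text = R"({"x": 1} // a comment)";
+    json j = json::parse(text, nullptr, true, /*ignore_comments=*/true);
+    @endcode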
*/
- static basic_json parse(detail::input_adapter&& i,
+ template<typename InputType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json parse(InputType&& i,
+ const parser_callback_t cb = nullptr,
+ const bool allow_exceptions = true,
+ const bool ignore_comments = false)
+ {
+ basic_json result;
+ parser(detail::input_adapter(std::forward<InputType>(i)), cb, allow_exceptions, ignore_comments).parse(true, result);
+ return result;
+ }
+
+ /*!
+ @brief deserialize from a pair of character iterators
+
+    The value_type of the iterator must be an integral type with a size of 1, 2,
+    or 4 bytes, which will be interpreted as UTF-8, UTF-16, and UTF-32, respectively.
+
+ @param[in] first iterator to start of character range
+ @param[in] last iterator to end of character range
+ @param[in] cb a parser callback function of type @ref parser_callback_t
+ which is used to control the deserialization by filtering unwanted values
+ (optional)
+ @param[in] allow_exceptions whether to throw exceptions in case of a
+ parse error (optional, true by default)
+ @param[in] ignore_comments whether comments should be ignored and treated
+    like whitespace (true) or yield a parse error (false); (optional, false by
+ default)
+
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
+
+ @throw parse_error.101 if a parse error occurs; example: `""unexpected end
+ of input; expected string literal""`
+    @throw parse_error.102 if to_unicode fails or a surrogate error occurs
+ @throw parse_error.103 if to_unicode fails
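+
+    A minimal sketch (illustrative):
+    @code {.cpp}
+    std::string s = "[1,2,3]";
+    json j = json::parse(s.begin(), s.end());
+    @endcode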
+ */
+ template<typename IteratorType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json parse(IteratorType first,
+ IteratorType last,
+ const parser_callback_t cb = nullptr,
+ const bool allow_exceptions = true,
+ const bool ignore_comments = false)
+ {
+ basic_json result;
+ parser(detail::input_adapter(std::move(first), std::move(last)), cb, allow_exceptions, ignore_comments).parse(true, result);
+ return result;
+ }
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, parse(ptr, ptr + len))
+ static basic_json parse(detail::span_input_adapter&& i,
const parser_callback_t cb = nullptr,
- const bool allow_exceptions = true)
+ const bool allow_exceptions = true,
+ const bool ignore_comments = false)
{
basic_json result;
- parser(i, cb, allow_exceptions).parse(true, result);
+ parser(i.get(), cb, allow_exceptions, ignore_comments).parse(true, result);
return result;
}
- static bool accept(detail::input_adapter&& i)
+ /*!
+ @brief check if the input is valid JSON
+
+ Unlike the @ref parse(InputType&&, const parser_callback_t,const bool)
+ function, this function neither throws an exception in case of invalid JSON
+ input (i.e., a parse error) nor creates diagnostic information.
+
+ @tparam InputType A compatible input, for instance
+ - an std::istream object
+ - a FILE pointer
+ - a C-style array of characters
+ - a pointer to a null-terminated string of single byte characters
+    - an object obj for which begin(obj) and end(obj) produce a valid pair of
+ iterators.
+
+ @param[in] i input to read from
+ @param[in] ignore_comments whether comments should be ignored and treated
+    like whitespace (true) or yield a parse error (false); (optional, false by
+ default)
+
+ @return Whether the input read from @a i is valid JSON.
+
+ @complexity Linear in the length of the input. The parser is a predictive
+ LL(1) parser.
+
+ @note A UTF-8 byte order mark is silently ignored.
+
+ @liveexample{The example below demonstrates the `accept()` function reading
+ from a string.,accept__string}
+ */
+ template<typename InputType>
+ static bool accept(InputType&& i,
+ const bool ignore_comments = false)
{
- return parser(i).accept(true);
+ return parser(detail::input_adapter(std::forward<InputType>(i)), nullptr, false, ignore_comments).accept(true);
+ }
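
A sketch of the non-throwing validation entry point (illustrative, not part
of the patch; note assert() is compiled out under NDEBUG):

    #include <cassert>
    #include <nlohmann/json.hpp>

    int main()
    {
        assert(nlohmann::json::accept("[1,2,3]"));            // valid JSON
        assert(!nlohmann::json::accept("[1,2,3,]"));          // invalid, but no throw
        assert(nlohmann::json::accept("[1] // tail", true));  // comments ignored
    }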
+
+ template<typename IteratorType>
+ static bool accept(IteratorType first, IteratorType last,
+ const bool ignore_comments = false)
+ {
+ return parser(detail::input_adapter(std::move(first), std::move(last)), nullptr, false, ignore_comments).accept(true);
+ }
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, accept(ptr, ptr + len))
+ static bool accept(detail::span_input_adapter&& i,
+ const bool ignore_comments = false)
+ {
+ return parser(i.get(), nullptr, false, ignore_comments).accept(true);
}
/*!
@@ -18449,36 +23252,20 @@ class basic_json
    The SAX event listener must follow the interface of @ref json_sax.
This function reads from a compatible input. Examples are:
- - an array of 1-byte values
- - strings with character/literal type with size of 1 byte
- - input streams
- - container with contiguous storage of 1-byte values. Compatible container
- types include `std::vector`, `std::string`, `std::array`,
- `std::valarray`, and `std::initializer_list`. Furthermore, C-style
- arrays can be used with `std::begin()`/`std::end()`. User-defined
- containers can be used as long as they implement random-access iterators
- and a contiguous storage.
-
- @pre Each element of the container has a size of 1 byte. Violating this
- precondition yields undefined behavior. **This precondition is enforced
- with a static assertion.**
-
- @pre The container storage is contiguous. Violating this precondition
- yields undefined behavior. **This precondition is enforced with an
- assertion.**
- @pre Each element of the container has a size of 1 byte. Violating this
- precondition yields undefined behavior. **This precondition is enforced
- with a static assertion.**
-
- @warning There is no way to enforce all preconditions at compile-time. If
- the function is called with a noncompliant container and with
- assertions switched off, the behavior is undefined and will most
- likely yield segmentation violation.
+ - an std::istream object
+ - a FILE pointer
+ - a C-style array of characters
+ - a pointer to a null-terminated string of single byte characters
+    - an object obj for which begin(obj) and end(obj) produce a valid pair of
+ iterators.
@param[in] i input to read from
@param[in,out] sax SAX event listener
@param[in] format the format to parse (JSON, CBOR, MessagePack, or UBJSON)
@param[in] strict whether the input has to be consumed completely
+ @param[in] ignore_comments whether comments should be ignored and treated
+    like whitespace (true) or yield a parse error (false); (optional, false by
+ default); only applies to the JSON file format.
@return return value of the last processed SAX event
@@ -18499,97 +23286,44 @@ class basic_json
@since version 3.2.0
*/
- template <typename SAX>
- static bool sax_parse(detail::input_adapter&& i, SAX* sax,
+ template <typename InputType, typename SAX>
+ JSON_HEDLEY_NON_NULL(2)
+ static bool sax_parse(InputType&& i, SAX* sax,
input_format_t format = input_format_t::json,
- const bool strict = true)
- {
- assert(sax);
- switch (format)
- {
- case input_format_t::json:
- return parser(std::move(i)).sax_parse(sax, strict);
- default:
- return detail::binary_reader<basic_json, SAX>(std::move(i)).sax_parse(format, sax, strict);
- }
- }
-
- /*!
- @brief deserialize from an iterator range with contiguous storage
-
- This function reads from an iterator range of a container with contiguous
- storage of 1-byte values. Compatible container types include
- `std::vector`, `std::string`, `std::array`, `std::valarray`, and
- `std::initializer_list`. Furthermore, C-style arrays can be used with
- `std::begin()`/`std::end()`. User-defined containers can be used as long
- as they implement random-access iterators and a contiguous storage.
-
- @pre The iterator range is contiguous. Violating this precondition yields
- undefined behavior. **This precondition is enforced with an assertion.**
- @pre Each element in the range has a size of 1 byte. Violating this
- precondition yields undefined behavior. **This precondition is enforced
- with a static assertion.**
-
- @warning There is no way to enforce all preconditions at compile-time. If
- the function is called with noncompliant iterators and with
- assertions switched off, the behavior is undefined and will most
- likely yield segmentation violation.
-
- @tparam IteratorType iterator of container with contiguous storage
- @param[in] first begin of the range to parse (included)
- @param[in] last end of the range to parse (excluded)
- @param[in] cb a parser callback function of type @ref parser_callback_t
- which is used to control the deserialization by filtering unwanted values
- (optional)
- @param[in] allow_exceptions whether to throw exceptions in case of a
- parse error (optional, true by default)
-
- @return result of the deserialization
-
- @throw parse_error.101 in case of an unexpected token
- @throw parse_error.102 if to_unicode fails or surrogate error
- @throw parse_error.103 if to_unicode fails
-
- @complexity Linear in the length of the input. The parser is a predictive
- LL(1) parser. The complexity can be higher if the parser callback function
- @a cb has a super-linear complexity.
-
- @note A UTF-8 byte order mark is silently ignored.
-
- @liveexample{The example below demonstrates the `parse()` function reading
- from an iterator range.,parse__iteratortype__parser_callback_t}
-
- @since version 2.0.3
- */
- template<class IteratorType, typename std::enable_if<
- std::is_base_of<
- std::random_access_iterator_tag,
- typename std::iterator_traits<IteratorType>::iterator_category>::value, int>::type = 0>
- static basic_json parse(IteratorType first, IteratorType last,
- const parser_callback_t cb = nullptr,
- const bool allow_exceptions = true)
+ const bool strict = true,
+ const bool ignore_comments = false)
{
- basic_json result;
- parser(detail::input_adapter(first, last), cb, allow_exceptions).parse(true, result);
- return result;
+ auto ia = detail::input_adapter(std::forward<InputType>(i));
+ return format == input_format_t::json
+ ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict)
+ : detail::binary_reader<basic_json, decltype(ia), SAX>(std::move(ia)).sax_parse(format, sax, strict);
}
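
The SAX overloads are duck-typed: any type providing the json_sax member
functions can be passed. A sketch of a minimal event-counting handler
(hypothetical, not part of the patch):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <nlohmann/json.hpp>

    // counts every SAX event; returning true means "keep parsing"
    struct event_counter
    {
        std::size_t events = 0;
        bool null() { ++events; return true; }
        bool boolean(bool) { ++events; return true; }
        bool number_integer(std::int64_t) { ++events; return true; }
        bool number_unsigned(std::uint64_t) { ++events; return true; }
        bool number_float(double, const std::string&) { ++events; return true; }
        bool string(std::string&) { ++events; return true; }
        bool binary(nlohmann::json::binary_t&) { ++events; return true; }
        bool start_object(std::size_t) { ++events; return true; }
        bool key(std::string&) { ++events; return true; }
        bool end_object() { ++events; return true; }
        bool start_array(std::size_t) { ++events; return true; }
        bool end_array() { ++events; return true; }
        bool parse_error(std::size_t, const std::string&,
                         const nlohmann::json::exception&) { return false; }
    };

    int main()
    {
        event_counter counter;
        nlohmann::json::sax_parse(R"({"k": [1, true]})", &counter);
        std::cout << counter.events << '\n';   // start_object, key, start_array, ...
    }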
- template<class IteratorType, typename std::enable_if<
- std::is_base_of<
- std::random_access_iterator_tag,
- typename std::iterator_traits<IteratorType>::iterator_category>::value, int>::type = 0>
- static bool accept(IteratorType first, IteratorType last)
+ template<class IteratorType, class SAX>
+ JSON_HEDLEY_NON_NULL(3)
+ static bool sax_parse(IteratorType first, IteratorType last, SAX* sax,
+ input_format_t format = input_format_t::json,
+ const bool strict = true,
+ const bool ignore_comments = false)
{
- return parser(detail::input_adapter(first, last)).accept(true);
+ auto ia = detail::input_adapter(std::move(first), std::move(last));
+ return format == input_format_t::json
+ ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict)
+ : detail::binary_reader<basic_json, decltype(ia), SAX>(std::move(ia)).sax_parse(format, sax, strict);
}
- template<class IteratorType, class SAX, typename std::enable_if<
- std::is_base_of<
- std::random_access_iterator_tag,
- typename std::iterator_traits<IteratorType>::iterator_category>::value, int>::type = 0>
- static bool sax_parse(IteratorType first, IteratorType last, SAX* sax)
+ template <typename SAX>
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, sax_parse(ptr, ptr + len, ...))
+ JSON_HEDLEY_NON_NULL(2)
+ static bool sax_parse(detail::span_input_adapter&& i, SAX* sax,
+ input_format_t format = input_format_t::json,
+ const bool strict = true,
+ const bool ignore_comments = false)
{
- return parser(detail::input_adapter(first, last)).sax_parse(sax);
+ auto ia = i.get();
+ return format == input_format_t::json
+ ? parser(std::move(ia), nullptr, true, ignore_comments).sax_parse(sax, strict)
+ : detail::binary_reader<basic_json, decltype(ia), SAX>(std::move(ia)).sax_parse(format, sax, strict);
}
/*!
@@ -18600,7 +23334,7 @@ class basic_json
instead; that is, replace calls like `j << i;` with `i >> j;`.
@since version 1.0.0; deprecated since version 3.0.0
*/
- JSON_DEPRECATED
+ JSON_HEDLEY_DEPRECATED_FOR(3.0.0, operator>>(std::istream&, basic_json&))
friend std::istream& operator<<(basic_json& j, std::istream& i)
{
return operator>>(i, j);
@@ -18658,6 +23392,7 @@ class basic_json
number | `"number"` (for all number types)
object | `"object"`
array | `"array"`
+ binary | `"binary"`
discarded | `"discarded"`
@exceptionsafety No-throw guarantee: this function never throws exceptions.
@@ -18673,6 +23408,7 @@ class basic_json
@since version 1.0.0, public since 2.1.0, `const char*` and `noexcept`
since 3.0.0
*/
+ JSON_HEDLEY_RETURNS_NON_NULL
const char* type_name() const noexcept
{
{
@@ -18688,6 +23424,8 @@ class basic_json
return "string";
case value_t::boolean:
return "boolean";
+ case value_t::binary:
+ return "binary";
case value_t::discarded:
return "discarded";
default:
@@ -18747,7 +23485,8 @@ class basic_json
number_unsigned | 256..65535 | Unsigned integer (2 bytes follow) | 0x19
number_unsigned | 65536..4294967295 | Unsigned integer (4 bytes follow) | 0x1A
number_unsigned | 4294967296..18446744073709551615 | Unsigned integer (8 bytes follow) | 0x1B
- number_float | *any value* | Double-Precision Float | 0xFB
+ number_float | *any value representable by a float* | Single-Precision Float | 0xFA
+ number_float | *any value NOT representable by a float* | Double-Precision Float | 0xFB
string | *length*: 0..23 | UTF-8 string | 0x60..0x77
    string          | *length*: 24..255                       | UTF-8 string (1 byte follow)      | 0x78
string | *length*: 256..65535 | UTF-8 string (2 bytes follow) | 0x79
@@ -18763,6 +23502,11 @@ class basic_json
object | *size*: 256..65535 | map (2 bytes follow) | 0xB9
object | *size*: 65536..4294967295 | map (4 bytes follow) | 0xBA
object | *size*: 4294967296..18446744073709551615 | map (8 bytes follow) | 0xBB
+ binary | *size*: 0..23 | byte string | 0x40..0x57
+    binary          | *size*: 24..255                         | byte string (1 byte follow)       | 0x58
+ binary | *size*: 256..65535 | byte string (2 bytes follow) | 0x59
+ binary | *size*: 65536..4294967295 | byte string (4 bytes follow) | 0x5A
+ binary | *size*: 4294967296..18446744073709551615 | byte string (8 bytes follow) | 0x5B
@note The mapping is **complete** in the sense that any JSON value type
can be converted to a CBOR value.
@@ -18772,23 +23516,22 @@ class basic_json
function which serializes NaN or Infinity to `null`.
@note The following CBOR types are not used in the conversion:
- - byte strings (0x40..0x5F)
- UTF-8 strings terminated by "break" (0x7F)
- arrays terminated by "break" (0x9F)
- maps terminated by "break" (0xBF)
+ - byte strings terminated by "break" (0x5F)
- date/time (0xC0..0xC1)
- bignum (0xC2..0xC3)
- decimal fraction (0xC4)
- bigfloat (0xC5)
- - tagged items (0xC6..0xD4, 0xD8..0xDB)
- expected conversions (0xD5..0xD7)
- simple values (0xE0..0xF3, 0xF8)
- undefined (0xF7)
- - half and single-precision floats (0xF9-0xFA)
+ - half-precision floats (0xF9)
- break (0xFF)
@param[in] j JSON value to serialize
- @return MessagePack serialization as byte vector
+ @return CBOR serialization as byte vector
@complexity Linear in the size of the JSON value @a j.
@@ -18796,13 +23539,14 @@ class basic_json
vector in CBOR format.,to_cbor}
@sa http://cbor.io
- @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool) for the
+ @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
analogous deserialization
@sa @ref to_msgpack(const basic_json&) for the related MessagePack format
@sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
related UBJSON format
- @since version 2.0.9
+ @since version 2.0.9; compact representation of floating-point numbers
+ since version 3.8.0
*/
static std::vector<uint8_t> to_cbor(const basic_json& j)
{
@@ -18851,7 +23595,8 @@ class basic_json
number_unsigned | 256..65535 | uint 16 | 0xCD
number_unsigned | 65536..4294967295 | uint 32 | 0xCE
number_unsigned | 4294967296..18446744073709551615 | uint 64 | 0xCF
- number_float | *any value* | float 64 | 0xCB
+ number_float | *any value representable by a float* | float 32 | 0xCA
+ number_float | *any value NOT representable by a float* | float 64 | 0xCB
string | *length*: 0..31 | fixstr | 0xA0..0xBF
string | *length*: 32..255 | str 8 | 0xD9
string | *length*: 256..65535 | str 16 | 0xDA
@@ -18862,21 +23607,19 @@ class basic_json
object | *size*: 0..15 | fix map | 0x80..0x8F
object | *size*: 16..65535 | map 16 | 0xDE
object | *size*: 65536..4294967295 | map 32 | 0xDF
+ binary | *size*: 0..255 | bin 8 | 0xC4
+ binary | *size*: 256..65535 | bin 16 | 0xC5
+ binary | *size*: 65536..4294967295 | bin 32 | 0xC6
@note The mapping is **complete** in the sense that any JSON value type
can be converted to a MessagePack value.
@note The following values can **not** be converted to a MessagePack value:
- strings with more than 4294967295 bytes
+ - byte strings with more than 4294967295 bytes
- arrays with more than 4294967295 elements
- objects with more than 4294967295 elements
- @note The following MessagePack types are not used in the conversion:
- - bin 8 - bin 32 (0xC4..0xC6)
- - ext 8 - ext 32 (0xC7..0xC9)
- - float 32 (0xCA)
- - fixext 1 - fixext 16 (0xD4..0xD8)
-
    @note Any MessagePack output created by @ref to_msgpack can be successfully
parsed by @ref from_msgpack.
@@ -18945,6 +23688,7 @@ class basic_json
number_unsigned | 256..32767 | int16 | `I`
number_unsigned | 32768..2147483647 | int32 | `l`
number_unsigned | 2147483648..9223372036854775807 | int64 | `L`
+    number_unsigned | 9223372036854775808..18446744073709551615 | high-precision | `H`
number_float | *any value* | float64 | `D`
string | *with shortest length indicator* | string | `S`
array | *see notes on optimized format* | array | `[`
@@ -18955,7 +23699,6 @@ class basic_json
@note The following values can **not** be converted to a UBJSON value:
- strings with more than 9223372036854775807 bytes (theoretical)
- - unsigned integer numbers above 9223372036854775807
@note The following markers are not used in the conversion:
- `Z`: no-op values are not created.
@@ -18978,6 +23721,12 @@ class basic_json
the benefit of this parameter is that the receiving side is
          immediately informed of the number of elements in the container.
+ @note If the JSON data contains the binary type, the value stored is a list
+ of integers, as suggested by the UBJSON documentation. In particular,
+          this means that serializing a JSON value containing binary values
+          to UBJSON and deserializing it back will result in a different
+          JSON object.
+
@param[in] j JSON value to serialize
@param[in] use_size whether to add size annotations to container types
@param[in] use_type whether to add type annotations to container types
@@ -19042,6 +23791,7 @@ class basic_json
string | *any value* | string | 0x02
array | *any value* | document | 0x04
object | *any value* | document | 0x03
+ binary | *any value* | binary | 0x05
@warning The mapping is **incomplete**, since only JSON-objects (and things
contained therein) can be serialized to BSON.
@@ -19123,7 +23873,11 @@ class basic_json
Negative integer | number_integer | 0x39
Negative integer | number_integer | 0x3A
Negative integer | number_integer | 0x3B
- Negative integer | number_integer | 0x40..0x57
+ Byte string | binary | 0x40..0x57
+ Byte string | binary | 0x58
+ Byte string | binary | 0x59
+ Byte string | binary | 0x5A
+ Byte string | binary | 0x5B
UTF-8 string | string | 0x60..0x77
UTF-8 string | string | 0x78
UTF-8 string | string | 0x79
@@ -19152,12 +23906,10 @@ class basic_json
@warning The mapping is **incomplete** in the sense that not all CBOR
types can be converted to a JSON value. The following CBOR types
are not supported and will yield parse errors (parse_error.112):
- - byte strings (0x40..0x5F)
- date/time (0xC0..0xC1)
- bignum (0xC2..0xC3)
- decimal fraction (0xC4)
- bigfloat (0xC5)
- - tagged items (0xC6..0xD4, 0xD8..0xDB)
- expected conversions (0xD5..0xD7)
- simple values (0xE0..0xF3, 0xF8)
- undefined (0xF7)
@@ -19174,8 +23926,11 @@ class basic_json
(true by default)
@param[in] allow_exceptions whether to throw exceptions in case of a
parse error (optional, true by default)
+ @param[in] tag_handler how to treat CBOR tags (optional, error by default)
- @return deserialized JSON value
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
@throw parse_error.110 if the given input ends prematurely or the end of
file was not reached when @a strict was set to true
@@ -19198,30 +23953,62 @@ class basic_json
@since version 2.0.9; parameter @a start_index since 2.1.1; changed to
consume input adapters, removed start_index parameter, and added
@a strict parameter since 3.0.0; added @a allow_exceptions parameter
- since 3.2.0
+ since 3.2.0; added @a tag_handler parameter since 3.9.0.
*/
- static basic_json from_cbor(detail::input_adapter&& i,
+ template<typename InputType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_cbor(InputType&& i,
const bool strict = true,
- const bool allow_exceptions = true)
+ const bool allow_exceptions = true,
+ const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error)
{
basic_json result;
detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
- const bool res = binary_reader(detail::input_adapter(i)).sax_parse(input_format_t::cbor, &sdp, strict);
+ auto ia = detail::input_adapter(std::forward<InputType>(i));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler);
return res ? result : basic_json(value_t::discarded);
}
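
A round-trip sketch for the new CBOR overloads, including the binary type
added in this update (illustrative only):

    #include <cassert>
    #include <cstdint>
    #include <vector>
    #include <nlohmann/json.hpp>

    int main()
    {
        nlohmann::json j = {{"pi", 3.141},
                            {"blob", nlohmann::json::binary({0xCA, 0xFE})}};
        std::vector<std::uint8_t> v = nlohmann::json::to_cbor(j);
        assert(nlohmann::json::from_cbor(v) == j);                   // round trip
        assert(nlohmann::json::from_cbor(v.begin(), v.end()) == j);  // iterator form
    }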
/*!
- @copydoc from_cbor(detail::input_adapter&&, const bool, const bool)
+ @copydoc from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t)
*/
- template<typename A1, typename A2,
- detail::enable_if_t<std::is_constructible<detail::input_adapter, A1, A2>::value, int> = 0>
- static basic_json from_cbor(A1 && a1, A2 && a2,
+ template<typename IteratorType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_cbor(IteratorType first, IteratorType last,
const bool strict = true,
- const bool allow_exceptions = true)
+ const bool allow_exceptions = true,
+ const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error)
{
basic_json result;
detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
- const bool res = binary_reader(detail::input_adapter(std::forward<A1>(a1), std::forward<A2>(a2))).sax_parse(input_format_t::cbor, &sdp, strict);
+ auto ia = detail::input_adapter(std::move(first), std::move(last));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler);
+ return res ? result : basic_json(value_t::discarded);
+ }
+
+ template<typename T>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_cbor(ptr, ptr + len))
+ static basic_json from_cbor(const T* ptr, std::size_t len,
+ const bool strict = true,
+ const bool allow_exceptions = true,
+ const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error)
+ {
+ return from_cbor(ptr, ptr + len, strict, allow_exceptions, tag_handler);
+ }
+
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_cbor(ptr, ptr + len))
+ static basic_json from_cbor(detail::span_input_adapter&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true,
+ const cbor_tag_handler_t tag_handler = cbor_tag_handler_t::error)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = i.get();
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::cbor, &sdp, strict, tag_handler);
return res ? result : basic_json(value_t::discarded);
}
@@ -19259,15 +24046,19 @@ class basic_json
array 32 | array | 0xDD
map 16 | object | 0xDE
map 32 | object | 0xDF
+ bin 8 | binary | 0xC4
+ bin 16 | binary | 0xC5
+ bin 32 | binary | 0xC6
+ ext 8 | binary | 0xC7
+ ext 16 | binary | 0xC8
+ ext 32 | binary | 0xC9
+ fixext 1 | binary | 0xD4
+ fixext 2 | binary | 0xD5
+ fixext 4 | binary | 0xD6
+ fixext 8 | binary | 0xD7
+ fixext 16 | binary | 0xD8
negative fixint | number_integer | 0xE0-0xFF
- @warning The mapping is **incomplete** in the sense that not all
- MessagePack types can be converted to a JSON value. The following
- MessagePack types are not supported and will yield parse errors:
- - bin 8 - bin 32 (0xC4..0xC6)
- - ext 8 - ext 32 (0xC7..0xC9)
- - fixext 1 - fixext 16 (0xD4..0xD8)
-
    @note Any MessagePack output created by @ref to_msgpack can be successfully
parsed by @ref from_msgpack.
@@ -19278,7 +24069,9 @@ class basic_json
@param[in] allow_exceptions whether to throw exceptions in case of a
parse error (optional, true by default)
- @return deserialized JSON value
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
@throw parse_error.110 if the given input ends prematurely or the end of
file was not reached when @a strict was set to true
@@ -19293,7 +24086,7 @@ class basic_json
@sa http://msgpack.org
@sa @ref to_msgpack(const basic_json&) for the analogous serialization
- @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool) for the
+ @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
related CBOR format
@sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for
the related UBJSON format
@@ -19305,31 +24098,60 @@ class basic_json
@a strict parameter since 3.0.0; added @a allow_exceptions parameter
since 3.2.0
*/
- static basic_json from_msgpack(detail::input_adapter&& i,
+ template<typename InputType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_msgpack(InputType&& i,
const bool strict = true,
const bool allow_exceptions = true)
{
basic_json result;
detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
- const bool res = binary_reader(detail::input_adapter(i)).sax_parse(input_format_t::msgpack, &sdp, strict);
+ auto ia = detail::input_adapter(std::forward<InputType>(i));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict);
return res ? result : basic_json(value_t::discarded);
}
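
A MessagePack round-trip sketch (illustrative only):

    #include <cassert>
    #include <nlohmann/json.hpp>

    int main()
    {
        nlohmann::json j = {{"compact", true}, {"schema", 0}};
        auto v = nlohmann::json::to_msgpack(j);        // std::vector<std::uint8_t>
        assert(nlohmann::json::from_msgpack(v) == j);  // round trip
    }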
/*!
@copydoc from_msgpack(detail::input_adapter&&, const bool, const bool)
*/
- template<typename A1, typename A2,
- detail::enable_if_t<std::is_constructible<detail::input_adapter, A1, A2>::value, int> = 0>
- static basic_json from_msgpack(A1 && a1, A2 && a2,
+ template<typename IteratorType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_msgpack(IteratorType first, IteratorType last,
const bool strict = true,
const bool allow_exceptions = true)
{
basic_json result;
detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
- const bool res = binary_reader(detail::input_adapter(std::forward<A1>(a1), std::forward<A2>(a2))).sax_parse(input_format_t::msgpack, &sdp, strict);
+ auto ia = detail::input_adapter(std::move(first), std::move(last));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict);
return res ? result : basic_json(value_t::discarded);
}
+
+ template<typename T>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_msgpack(ptr, ptr + len))
+ static basic_json from_msgpack(const T* ptr, std::size_t len,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ return from_msgpack(ptr, ptr + len, strict, allow_exceptions);
+ }
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_msgpack(ptr, ptr + len))
+ static basic_json from_msgpack(detail::span_input_adapter&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = i.get();
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::msgpack, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
+
+
/*!
@brief create a JSON value from an input in UBJSON format
@@ -19351,6 +24173,7 @@ class basic_json
int16 | number_integer | `I`
int32 | number_integer | `l`
int64 | number_integer | `L`
+    high-precision number | number_integer, number_unsigned, or number_float (depending on the number string) | `H`
string | string | `S`
char | string | `C`
array | array (optimized values are supported) | `[`
@@ -19365,7 +24188,9 @@ class basic_json
@param[in] allow_exceptions whether to throw exceptions in case of a
parse error (optional, true by default)
- @return deserialized JSON value
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
@throw parse_error.110 if the given input ends prematurely or the end of
file was not reached when @a strict was set to true
@@ -19380,7 +24205,7 @@ class basic_json
@sa http://ubjson.org
@sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
analogous serialization
- @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool) for the
+ @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
related CBOR format
@sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
the related MessagePack format
@@ -19389,31 +24214,59 @@ class basic_json
@since version 3.1.0; added @a allow_exceptions parameter since 3.2.0
*/
- static basic_json from_ubjson(detail::input_adapter&& i,
+ template<typename InputType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_ubjson(InputType&& i,
const bool strict = true,
const bool allow_exceptions = true)
{
basic_json result;
detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
- const bool res = binary_reader(detail::input_adapter(i)).sax_parse(input_format_t::ubjson, &sdp, strict);
+ auto ia = detail::input_adapter(std::forward<InputType>(i));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict);
return res ? result : basic_json(value_t::discarded);
}
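
A UBJSON round-trip sketch covering the optional size/type annotations
(illustrative only):

    #include <cassert>
    #include <nlohmann/json.hpp>

    int main()
    {
        nlohmann::json j = {{"list", {1, 2, 3}}};
        // annotations are optional; both encodings decode to the same value
        auto plain     = nlohmann::json::to_ubjson(j);
        auto annotated = nlohmann::json::to_ubjson(j, /*use_size*/ true,
                                                      /*use_type*/ true);
        assert(nlohmann::json::from_ubjson(plain) == j);
        assert(nlohmann::json::from_ubjson(annotated) == j);
    }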
/*!
@copydoc from_ubjson(detail::input_adapter&&, const bool, const bool)
*/
- template<typename A1, typename A2,
- detail::enable_if_t<std::is_constructible<detail::input_adapter, A1, A2>::value, int> = 0>
- static basic_json from_ubjson(A1 && a1, A2 && a2,
+ template<typename IteratorType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_ubjson(IteratorType first, IteratorType last,
const bool strict = true,
const bool allow_exceptions = true)
{
basic_json result;
detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
- const bool res = binary_reader(detail::input_adapter(std::forward<A1>(a1), std::forward<A2>(a2))).sax_parse(input_format_t::ubjson, &sdp, strict);
+ auto ia = detail::input_adapter(std::move(first), std::move(last));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict);
return res ? result : basic_json(value_t::discarded);
}
+ template<typename T>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_ubjson(ptr, ptr + len))
+ static basic_json from_ubjson(const T* ptr, std::size_t len,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ return from_ubjson(ptr, ptr + len, strict, allow_exceptions);
+ }
+
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_ubjson(ptr, ptr + len))
+ static basic_json from_ubjson(detail::span_input_adapter&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = i.get();
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::ubjson, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
+
+
/*!
@brief Create a JSON value from an input in BSON format
@@ -19454,7 +24307,9 @@ class basic_json
@param[in] allow_exceptions whether to throw exceptions in case of a
parse error (optional, true by default)
- @return deserialized JSON value
+ @return deserialized JSON value; in case of a parse error and
+ @a allow_exceptions set to `false`, the return value will be
+ value_t::discarded.
@throw parse_error.114 if an unsupported BSON record type is encountered
@@ -19465,40 +24320,64 @@ class basic_json
@sa http://bsonspec.org/spec.html
@sa @ref to_bson(const basic_json&) for the analogous serialization
- @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool) for the
+ @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool, const cbor_tag_handler_t) for the
related CBOR format
@sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
the related MessagePack format
@sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
related UBJSON format
*/
- static basic_json from_bson(detail::input_adapter&& i,
+ template<typename InputType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_bson(InputType&& i,
const bool strict = true,
const bool allow_exceptions = true)
{
basic_json result;
detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
- const bool res = binary_reader(detail::input_adapter(i)).sax_parse(input_format_t::bson, &sdp, strict);
+ auto ia = detail::input_adapter(std::forward<InputType>(i));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict);
return res ? result : basic_json(value_t::discarded);
}
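
A BSON round-trip sketch; note that BSON requires an object at the top level
(illustrative only):

    #include <cassert>
    #include <nlohmann/json.hpp>

    int main()
    {
        nlohmann::json j = {{"entry", "value"}};   // top level must be an object
        auto v = nlohmann::json::to_bson(j);
        assert(nlohmann::json::from_bson(v) == j);
    }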
/*!
@copydoc from_bson(detail::input_adapter&&, const bool, const bool)
*/
- template<typename A1, typename A2,
- detail::enable_if_t<std::is_constructible<detail::input_adapter, A1, A2>::value, int> = 0>
- static basic_json from_bson(A1 && a1, A2 && a2,
+ template<typename IteratorType>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ static basic_json from_bson(IteratorType first, IteratorType last,
const bool strict = true,
const bool allow_exceptions = true)
{
basic_json result;
detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
- const bool res = binary_reader(detail::input_adapter(std::forward<A1>(a1), std::forward<A2>(a2))).sax_parse(input_format_t::bson, &sdp, strict);
+ auto ia = detail::input_adapter(std::move(first), std::move(last));
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict);
return res ? result : basic_json(value_t::discarded);
}
+ template<typename T>
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_bson(ptr, ptr + len))
+ static basic_json from_bson(const T* ptr, std::size_t len,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ return from_bson(ptr, ptr + len, strict, allow_exceptions);
+ }
-
+ JSON_HEDLEY_WARN_UNUSED_RESULT
+ JSON_HEDLEY_DEPRECATED_FOR(3.8.0, from_bson(ptr, ptr + len))
+ static basic_json from_bson(detail::span_input_adapter&& i,
+ const bool strict = true,
+ const bool allow_exceptions = true)
+ {
+ basic_json result;
+ detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+ auto ia = i.get();
+ const bool res = binary_reader<decltype(ia)>(std::move(ia)).sax_parse(input_format_t::bson, &sdp, strict);
+ return res ? result : basic_json(value_t::discarded);
+ }
/// @}
//////////////////////////
@@ -19551,7 +24430,7 @@ class basic_json
Uses a JSON pointer to retrieve a reference to the respective JSON value.
No bound checking is performed. The function does not change the JSON
- value; no `null` values are created. In particular, the the special value
+ value; no `null` values are created. In particular, the special value
`-` yields an exception.
@param[in] ptr JSON pointer to the desired element
@@ -19822,63 +24701,59 @@ class basic_json
const auto operation_add = [&result](json_pointer & ptr, basic_json val)
{
// adding to the root of the target document means replacing it
- if (ptr.is_root())
+ if (ptr.empty())
{
result = val;
+ return;
}
- else
+
+ // make sure the top element of the pointer exists
+ json_pointer top_pointer = ptr.top();
+ if (top_pointer != ptr)
+ {
+ result.at(top_pointer);
+ }
+
+ // get reference to parent of JSON pointer ptr
+ const auto last_path = ptr.back();
+ ptr.pop_back();
+ basic_json& parent = result[ptr];
+
+ switch (parent.m_type)
{
- // make sure the top element of the pointer exists
- json_pointer top_pointer = ptr.top();
- if (top_pointer != ptr)
+ case value_t::null:
+ case value_t::object:
{
- result.at(top_pointer);
+ // use operator[] to add value
+ parent[last_path] = val;
+ break;
}
- // get reference to parent of JSON pointer ptr
- const auto last_path = ptr.pop_back();
- basic_json& parent = result[ptr];
-
- switch (parent.m_type)
+ case value_t::array:
{
- case value_t::null:
- case value_t::object:
+ if (last_path == "-")
{
- // use operator[] to add value
- parent[last_path] = val;
- break;
+ // special case: append to back
+ parent.push_back(val);
}
-
- case value_t::array:
+ else
{
- if (last_path == "-")
+ const auto idx = json_pointer::array_index(last_path);
+ if (JSON_HEDLEY_UNLIKELY(idx > parent.size()))
{
- // special case: append to back
- parent.push_back(val);
+ // avoid undefined behavior
+ JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
}
- else
- {
- const auto idx = json_pointer::array_index(last_path);
- if (JSON_UNLIKELY(static_cast<size_type>(idx) > parent.size()))
- {
- // avoid undefined behavior
- JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
- }
-
- // default case: insert add offset
- parent.insert(parent.begin() + static_cast<difference_type>(idx), val);
- }
- break;
- }
- // LCOV_EXCL_START
- default:
- {
- // if there exists a parent it cannot be primitive
- assert(false);
+                        // default case: insert at the given offset
+ parent.insert(parent.begin() + static_cast<difference_type>(idx), val);
}
- // LCOV_EXCL_STOP
+ break;
}
+
+ // if there exists a parent it cannot be primitive
+ default: // LCOV_EXCL_LINE
+ JSON_ASSERT(false); // LCOV_EXCL_LINE
}
};
@@ -19886,7 +24761,8 @@ class basic_json
const auto operation_remove = [&result](json_pointer & ptr)
{
// get reference to parent of JSON pointer ptr
- const auto last_path = ptr.pop_back();
+ const auto last_path = ptr.back();
+ ptr.pop_back();
basic_json& parent = result.at(ptr);
// remove child
@@ -19894,7 +24770,7 @@ class basic_json
{
// perform range check
auto it = parent.find(last_path);
- if (JSON_LIKELY(it != parent.end()))
+ if (JSON_HEDLEY_LIKELY(it != parent.end()))
{
parent.erase(it);
}
@@ -19906,12 +24782,12 @@ class basic_json
else if (parent.is_array())
{
// note erase performs range check
- parent.erase(static_cast<size_type>(json_pointer::array_index(last_path)));
+ parent.erase(json_pointer::array_index(last_path));
}
};
// type check: top level value must be an array
- if (JSON_UNLIKELY(not json_patch.is_array()))
+ if (JSON_HEDLEY_UNLIKELY(!json_patch.is_array()))
{
JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects"));
}
@@ -19931,13 +24807,13 @@ class basic_json
const auto error_msg = (op == "op") ? "operation" : "operation '" + op + "'";
// check if desired value is present
- if (JSON_UNLIKELY(it == val.m_value.object->end()))
+ if (JSON_HEDLEY_UNLIKELY(it == val.m_value.object->end()))
{
JSON_THROW(parse_error::create(105, 0, error_msg + " must have member '" + member + "'"));
}
// check if result is of type string
- if (JSON_UNLIKELY(string_type and not it->second.is_string()))
+ if (JSON_HEDLEY_UNLIKELY(string_type && !it->second.is_string()))
{
JSON_THROW(parse_error::create(105, 0, error_msg + " must have string member '" + member + "'"));
}
@@ -19947,14 +24823,14 @@ class basic_json
};
// type check: every element of the array must be an object
- if (JSON_UNLIKELY(not val.is_object()))
+ if (JSON_HEDLEY_UNLIKELY(!val.is_object()))
{
JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects"));
}
// collect mandatory members
- const std::string op = get_value("op", "op", true);
- const std::string path = get_value(op, "path", true);
+ const auto op = get_value("op", "op", true).template get<std::string>();
+ const auto path = get_value(op, "path", true).template get<std::string>();
json_pointer ptr(path);
switch (get_op(op))
@@ -19980,7 +24856,7 @@ class basic_json
case patch_operations::move:
{
- const std::string from_path = get_value("move", "from", true);
+ const auto from_path = get_value("move", "from", true).template get<std::string>();
json_pointer from_ptr(from_path);
// the "from" location must exist - use at()
@@ -19997,7 +24873,7 @@ class basic_json
case patch_operations::copy:
{
- const std::string from_path = get_value("copy", "from", true);
+ const auto from_path = get_value("copy", "from", true).template get<std::string>();
const json_pointer from_ptr(from_path);
// the "from" location must exist - use at()
@@ -20025,7 +24901,7 @@ class basic_json
}
// throw an exception if test fails
- if (JSON_UNLIKELY(not success))
+ if (JSON_HEDLEY_UNLIKELY(!success))
{
JSON_THROW(other_error::create(501, "unsuccessful: " + val.dump()));
}
@@ -20033,7 +24909,7 @@ class basic_json
break;
}
- case patch_operations::invalid:
+ default:
{
// op must be "add", "remove", "replace", "move", "copy", or
// "test"
@@ -20078,6 +24954,7 @@ class basic_json
@since version 2.0.0
*/
+ JSON_HEDLEY_WARN_UNUSED_RESULT
static basic_json diff(const basic_json& source, const basic_json& target,
const std::string& path = "")
{
@@ -20097,106 +24974,105 @@ class basic_json
{
{"op", "replace"}, {"path", path}, {"value", target}
});
+ return result;
}
- else
+
+ switch (source.type())
{
- switch (source.type())
+ case value_t::array:
{
- case value_t::array:
+ // first pass: traverse common elements
+ std::size_t i = 0;
+ while (i < source.size() && i < target.size())
{
- // first pass: traverse common elements
- std::size_t i = 0;
- while (i < source.size() and i < target.size())
- {
- // recursive call to compare array values at index i
- auto temp_diff = diff(source[i], target[i], path + "/" + std::to_string(i));
- result.insert(result.end(), temp_diff.begin(), temp_diff.end());
- ++i;
- }
+ // recursive call to compare array values at index i
+ auto temp_diff = diff(source[i], target[i], path + "/" + std::to_string(i));
+ result.insert(result.end(), temp_diff.begin(), temp_diff.end());
+ ++i;
+ }
- // i now reached the end of at least one array
- // in a second pass, traverse the remaining elements
+ // i now reached the end of at least one array
+ // in a second pass, traverse the remaining elements
- // remove my remaining elements
- const auto end_index = static_cast<difference_type>(result.size());
- while (i < source.size())
+ // remove my remaining elements
+ const auto end_index = static_cast<difference_type>(result.size());
+ while (i < source.size())
+ {
+ // add operations in reverse order to avoid invalid
+ // indices
+ result.insert(result.begin() + end_index, object(
{
- // add operations in reverse order to avoid invalid
- // indices
- result.insert(result.begin() + end_index, object(
- {
- {"op", "remove"},
- {"path", path + "/" + std::to_string(i)}
- }));
- ++i;
- }
+ {"op", "remove"},
+ {"path", path + "/" + std::to_string(i)}
+ }));
+ ++i;
+ }
- // add other remaining elements
- while (i < target.size())
+ // add other remaining elements
+ while (i < target.size())
+ {
+ result.push_back(
{
- result.push_back(
- {
- {"op", "add"},
- {"path", path + "/" + std::to_string(i)},
- {"value", target[i]}
- });
- ++i;
- }
-
- break;
+ {"op", "add"},
+ {"path", path + "/-"},
+ {"value", target[i]}
+ });
+ ++i;
}
- case value_t::object:
+ break;
+ }
+
+ case value_t::object:
+ {
+ // first pass: traverse this object's elements
+ for (auto it = source.cbegin(); it != source.cend(); ++it)
{
- // first pass: traverse this object's elements
- for (auto it = source.cbegin(); it != source.cend(); ++it)
- {
- // escape the key name to be used in a JSON patch
- const auto key = json_pointer::escape(it.key());
+ // escape the key name to be used in a JSON patch
+ const auto key = json_pointer::escape(it.key());
- if (target.find(it.key()) != target.end())
- {
- // recursive call to compare object values at key it
- auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key);
- result.insert(result.end(), temp_diff.begin(), temp_diff.end());
- }
- else
- {
- // found a key that is not in o -> remove it
- result.push_back(object(
- {
- {"op", "remove"}, {"path", path + "/" + key}
- }));
- }
+ if (target.find(it.key()) != target.end())
+ {
+ // recursive call to compare object values at key it
+ auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key);
+ result.insert(result.end(), temp_diff.begin(), temp_diff.end());
}
-
- // second pass: traverse other object's elements
- for (auto it = target.cbegin(); it != target.cend(); ++it)
+ else
{
- if (source.find(it.key()) == source.end())
+ // found a key that is not in o -> remove it
+ result.push_back(object(
{
- // found a key that is not in this -> add it
- const auto key = json_pointer::escape(it.key());
- result.push_back(
- {
- {"op", "add"}, {"path", path + "/" + key},
- {"value", it.value()}
- });
- }
+ {"op", "remove"}, {"path", path + "/" + key}
+ }));
}
-
- break;
}
- default:
+ // second pass: traverse other object's elements
+ for (auto it = target.cbegin(); it != target.cend(); ++it)
{
- // both primitive type: replace value
- result.push_back(
+ if (source.find(it.key()) == source.end())
{
- {"op", "replace"}, {"path", path}, {"value", target}
- });
- break;
+ // found a key that is not in this -> add it
+ const auto key = json_pointer::escape(it.key());
+ result.push_back(
+ {
+ {"op", "add"}, {"path", path + "/" + key},
+ {"value", it.value()}
+ });
+ }
}
+
+ break;
+ }
+
+ default:
+ {
+ // both primitive type: replace value
+ result.push_back(
+ {
+ {"op", "replace"}, {"path", path}, {"value", target}
+ });
+ break;
}
}
@@ -20258,7 +25134,7 @@ class basic_json
{
if (apply_patch.is_object())
{
- if (not is_object())
+ if (!is_object())
{
*this = object();
}
@@ -20282,6 +25158,21 @@ class basic_json
/// @}
};
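
A sketch tying together the refactored patch()/diff() code paths above; note
that diff() now emits `-` paths when appending array elements (illustrative
only, not part of the patch):

    #include <cassert>
    #include <nlohmann/json.hpp>

    int main()
    {
        nlohmann::json source = {{"name", "alpha"}, {"tags", {"a"}}};
        nlohmann::json target = {{"name", "beta"},  {"tags", {"a", "b"}}};

        nlohmann::json p = nlohmann::json::diff(source, target);  // RFC 6902 ops
        assert(source.patch(p) == target);                        // round trip
    }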
+
+/*!
+@brief user-defined to_string function for JSON values
+
+This function implements a user-defined to_string for JSON objects.
+
+@param[in] j a JSON object
+@return a std::string object
+*/
+
+NLOHMANN_BASIC_JSON_TPL_DECLARATION
+std::string to_string(const NLOHMANN_BASIC_JSON_TPL& j)
+{
+ return j.dump();
+}
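
A usage sketch: the free function is found via argument-dependent lookup and
is equivalent to dump() (illustrative only):

    #include <cassert>
    #include <nlohmann/json.hpp>

    int main()
    {
        nlohmann::json j = {{"x", 1}};
        assert(to_string(j) == j.dump());   // ADL finds nlohmann::to_string
    }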
} // namespace nlohmann
///////////////////////
@@ -20303,9 +25194,7 @@ struct hash<nlohmann::json>
*/
std::size_t operator()(const nlohmann::json& j) const
{
- // a naive hashing via the string representation
- const auto& h = hash<nlohmann::json::string_t>();
- return h(j.dump());
+ return nlohmann::detail::hash(j);
}
};
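
A sketch of the updated specialization, which now hashes the value
structurally instead of hashing its dump() string (illustrative only):

    #include <cassert>
    #include <functional>
    #include <nlohmann/json.hpp>

    int main()
    {
        const std::hash<nlohmann::json> h;
        // equal values hash equally, however they were constructed
        assert(h(nlohmann::json::parse("[1,2,3]")) ==
               h(nlohmann::json::array({1, 2, 3})));
    }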
@@ -20313,7 +25202,7 @@ struct hash<nlohmann::json>
/// @note: do not remove the space after '<',
/// see https://github.com/nlohmann/json/pull/679
template<>
-struct less< ::nlohmann::detail::value_t>
+struct less<::nlohmann::detail::value_t>
{
/*!
@brief compare two value_t enum values
@@ -20326,6 +25215,9 @@ struct less< ::nlohmann::detail::value_t>
}
};
+// C++20 prohibits function specializations in the std namespace.
+#ifndef JSON_HAS_CPP_20
+
/*!
@brief exchanges the values of two JSON objects
@@ -20333,13 +25225,15 @@ struct less< ::nlohmann::detail::value_t>
*/
template<>
inline void swap<nlohmann::json>(nlohmann::json& j1, nlohmann::json& j2) noexcept(
- is_nothrow_move_constructible<nlohmann::json>::value and
+ is_nothrow_move_constructible<nlohmann::json>::value&&
is_nothrow_move_assignable<nlohmann::json>::value
-)
+ )
{
j1.swap(j2);
}
+#endif
+
} // namespace std
/*!
@@ -20355,6 +25249,7 @@ if no parse error occurred.
@since version 1.0.0
*/
+JSON_HEDLEY_NON_NULL(1)
inline nlohmann::json operator "" _json(const char* s, std::size_t n)
{
return nlohmann::json::parse(s, s + n);
@@ -20373,6 +25268,7 @@ object if no parse error occurred.
@since version 2.0.0
*/
+JSON_HEDLEY_NON_NULL(1)
inline nlohmann::json::json_pointer operator "" _json_pointer(const char* s, std::size_t n)
{
return nlohmann::json::json_pointer(std::string(s, n));
@@ -20390,17 +25286,162 @@ inline nlohmann::json::json_pointer operator "" _json_pointer(const char* s, std
#endif
// clean up
+#undef JSON_ASSERT
#undef JSON_INTERNAL_CATCH
#undef JSON_CATCH
#undef JSON_THROW
#undef JSON_TRY
-#undef JSON_LIKELY
-#undef JSON_UNLIKELY
-#undef JSON_DEPRECATED
#undef JSON_HAS_CPP_14
#undef JSON_HAS_CPP_17
#undef NLOHMANN_BASIC_JSON_TPL_DECLARATION
#undef NLOHMANN_BASIC_JSON_TPL
-
-
-#endif
+#undef JSON_EXPLICIT
+
+// #include <nlohmann/thirdparty/hedley/hedley_undef.hpp>
+#undef JSON_HEDLEY_ALWAYS_INLINE
+#undef JSON_HEDLEY_ARM_VERSION
+#undef JSON_HEDLEY_ARM_VERSION_CHECK
+#undef JSON_HEDLEY_ARRAY_PARAM
+#undef JSON_HEDLEY_ASSUME
+#undef JSON_HEDLEY_BEGIN_C_DECLS
+#undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_CLANG_HAS_BUILTIN
+#undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE
+#undef JSON_HEDLEY_CLANG_HAS_EXTENSION
+#undef JSON_HEDLEY_CLANG_HAS_FEATURE
+#undef JSON_HEDLEY_CLANG_HAS_WARNING
+#undef JSON_HEDLEY_COMPCERT_VERSION
+#undef JSON_HEDLEY_COMPCERT_VERSION_CHECK
+#undef JSON_HEDLEY_CONCAT
+#undef JSON_HEDLEY_CONCAT3
+#undef JSON_HEDLEY_CONCAT3_EX
+#undef JSON_HEDLEY_CONCAT_EX
+#undef JSON_HEDLEY_CONST
+#undef JSON_HEDLEY_CONSTEXPR
+#undef JSON_HEDLEY_CONST_CAST
+#undef JSON_HEDLEY_CPP_CAST
+#undef JSON_HEDLEY_CRAY_VERSION
+#undef JSON_HEDLEY_CRAY_VERSION_CHECK
+#undef JSON_HEDLEY_C_DECL
+#undef JSON_HEDLEY_DEPRECATED
+#undef JSON_HEDLEY_DEPRECATED_FOR
+#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL
+#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_
+#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
+#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES
+#undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS
+#undef JSON_HEDLEY_DIAGNOSTIC_POP
+#undef JSON_HEDLEY_DIAGNOSTIC_PUSH
+#undef JSON_HEDLEY_DMC_VERSION
+#undef JSON_HEDLEY_DMC_VERSION_CHECK
+#undef JSON_HEDLEY_EMPTY_BASES
+#undef JSON_HEDLEY_EMSCRIPTEN_VERSION
+#undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK
+#undef JSON_HEDLEY_END_C_DECLS
+#undef JSON_HEDLEY_FLAGS
+#undef JSON_HEDLEY_FLAGS_CAST
+#undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_GCC_HAS_BUILTIN
+#undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE
+#undef JSON_HEDLEY_GCC_HAS_EXTENSION
+#undef JSON_HEDLEY_GCC_HAS_FEATURE
+#undef JSON_HEDLEY_GCC_HAS_WARNING
+#undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK
+#undef JSON_HEDLEY_GCC_VERSION
+#undef JSON_HEDLEY_GCC_VERSION_CHECK
+#undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_GNUC_HAS_BUILTIN
+#undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE
+#undef JSON_HEDLEY_GNUC_HAS_EXTENSION
+#undef JSON_HEDLEY_GNUC_HAS_FEATURE
+#undef JSON_HEDLEY_GNUC_HAS_WARNING
+#undef JSON_HEDLEY_GNUC_VERSION
+#undef JSON_HEDLEY_GNUC_VERSION_CHECK
+#undef JSON_HEDLEY_HAS_ATTRIBUTE
+#undef JSON_HEDLEY_HAS_BUILTIN
+#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE
+#undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS
+#undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE
+#undef JSON_HEDLEY_HAS_EXTENSION
+#undef JSON_HEDLEY_HAS_FEATURE
+#undef JSON_HEDLEY_HAS_WARNING
+#undef JSON_HEDLEY_IAR_VERSION
+#undef JSON_HEDLEY_IAR_VERSION_CHECK
+#undef JSON_HEDLEY_IBM_VERSION
+#undef JSON_HEDLEY_IBM_VERSION_CHECK
+#undef JSON_HEDLEY_IMPORT
+#undef JSON_HEDLEY_INLINE
+#undef JSON_HEDLEY_INTEL_VERSION
+#undef JSON_HEDLEY_INTEL_VERSION_CHECK
+#undef JSON_HEDLEY_IS_CONSTANT
+#undef JSON_HEDLEY_IS_CONSTEXPR_
+#undef JSON_HEDLEY_LIKELY
+#undef JSON_HEDLEY_MALLOC
+#undef JSON_HEDLEY_MESSAGE
+#undef JSON_HEDLEY_MSVC_VERSION
+#undef JSON_HEDLEY_MSVC_VERSION_CHECK
+#undef JSON_HEDLEY_NEVER_INLINE
+#undef JSON_HEDLEY_NON_NULL
+#undef JSON_HEDLEY_NO_ESCAPE
+#undef JSON_HEDLEY_NO_RETURN
+#undef JSON_HEDLEY_NO_THROW
+#undef JSON_HEDLEY_NULL
+#undef JSON_HEDLEY_PELLES_VERSION
+#undef JSON_HEDLEY_PELLES_VERSION_CHECK
+#undef JSON_HEDLEY_PGI_VERSION
+#undef JSON_HEDLEY_PGI_VERSION_CHECK
+#undef JSON_HEDLEY_PREDICT
+#undef JSON_HEDLEY_PRINTF_FORMAT
+#undef JSON_HEDLEY_PRIVATE
+#undef JSON_HEDLEY_PUBLIC
+#undef JSON_HEDLEY_PURE
+#undef JSON_HEDLEY_REINTERPRET_CAST
+#undef JSON_HEDLEY_REQUIRE
+#undef JSON_HEDLEY_REQUIRE_CONSTEXPR
+#undef JSON_HEDLEY_REQUIRE_MSG
+#undef JSON_HEDLEY_RESTRICT
+#undef JSON_HEDLEY_RETURNS_NON_NULL
+#undef JSON_HEDLEY_SENTINEL
+#undef JSON_HEDLEY_STATIC_ASSERT
+#undef JSON_HEDLEY_STATIC_CAST
+#undef JSON_HEDLEY_STRINGIFY
+#undef JSON_HEDLEY_STRINGIFY_EX
+#undef JSON_HEDLEY_SUNPRO_VERSION
+#undef JSON_HEDLEY_SUNPRO_VERSION_CHECK
+#undef JSON_HEDLEY_TINYC_VERSION
+#undef JSON_HEDLEY_TINYC_VERSION_CHECK
+#undef JSON_HEDLEY_TI_ARMCL_VERSION
+#undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK
+#undef JSON_HEDLEY_TI_CL2000_VERSION
+#undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK
+#undef JSON_HEDLEY_TI_CL430_VERSION
+#undef JSON_HEDLEY_TI_CL430_VERSION_CHECK
+#undef JSON_HEDLEY_TI_CL6X_VERSION
+#undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK
+#undef JSON_HEDLEY_TI_CL7X_VERSION
+#undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK
+#undef JSON_HEDLEY_TI_CLPRU_VERSION
+#undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK
+#undef JSON_HEDLEY_TI_VERSION
+#undef JSON_HEDLEY_TI_VERSION_CHECK
+#undef JSON_HEDLEY_UNAVAILABLE
+#undef JSON_HEDLEY_UNLIKELY
+#undef JSON_HEDLEY_UNPREDICTABLE
+#undef JSON_HEDLEY_UNREACHABLE
+#undef JSON_HEDLEY_UNREACHABLE_RETURN
+#undef JSON_HEDLEY_VERSION
+#undef JSON_HEDLEY_VERSION_DECODE_MAJOR
+#undef JSON_HEDLEY_VERSION_DECODE_MINOR
+#undef JSON_HEDLEY_VERSION_DECODE_REVISION
+#undef JSON_HEDLEY_VERSION_ENCODE
+#undef JSON_HEDLEY_WARNING
+#undef JSON_HEDLEY_WARN_UNUSED_RESULT
+#undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG
+#undef JSON_HEDLEY_FALL_THROUGH
+
+
+
+#endif // INCLUDE_NLOHMANN_JSON_HPP_
diff --git a/src/nlohmann/json_fwd.hpp b/src/nlohmann/json_fwd.hpp
new file mode 100644
index 000000000..332227c1b
--- /dev/null
+++ b/src/nlohmann/json_fwd.hpp
@@ -0,0 +1,78 @@
+#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_
+#define INCLUDE_NLOHMANN_JSON_FWD_HPP_
+
+#include <cstdint> // int64_t, uint64_t
+#include <map> // map
+#include <memory> // allocator
+#include <string> // string
+#include <vector> // vector
+
+/*!
+@brief namespace for Niels Lohmann
+@see https://github.com/nlohmann
+@since version 1.0.0
+*/
+namespace nlohmann
+{
+/*!
+@brief default JSONSerializer template argument
+
+This serializer ignores the template arguments and uses ADL
+([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl))
+for serialization.
+*/
+template<typename T = void, typename SFINAE = void>
+struct adl_serializer;
+
+template<template<typename U, typename V, typename... Args> class ObjectType =
+ std::map,
+ template<typename U, typename... Args> class ArrayType = std::vector,
+ class StringType = std::string, class BooleanType = bool,
+ class NumberIntegerType = std::int64_t,
+ class NumberUnsignedType = std::uint64_t,
+ class NumberFloatType = double,
+ template<typename U> class AllocatorType = std::allocator,
+ template<typename T, typename SFINAE = void> class JSONSerializer =
+ adl_serializer,
+ class BinaryType = std::vector<std::uint8_t>>
+class basic_json;
+
+/*!
+@brief JSON Pointer
+
+A JSON pointer defines a string syntax for identifying a specific value
+within a JSON document. It can be used with functions `at` and
+`operator[]`. Furthermore, JSON pointers are the base for JSON patches.
+
+@sa [RFC 6901](https://tools.ietf.org/html/rfc6901)
+
+@since version 2.0.0
+*/
+template<typename BasicJsonType>
+class json_pointer;
+
+/*!
+@brief default JSON class
+
+This type is the default specialization of the @ref basic_json class which
+uses the standard template types.
+
+@since version 1.0.0
+*/
+using json = basic_json<>;
+
+template<class Key, class T, class IgnoredLess, class Allocator>
+struct ordered_map;
+
+/*!
+@brief ordered JSON class
+
+This type preserves the insertion order of object keys.
+
+@since version 3.9.0
+*/
+using ordered_json = basic_json<nlohmann::ordered_map>;
+
+} // namespace nlohmann
+
+#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_
diff --git a/src/resolve-system-dependencies/local.mk b/src/resolve-system-dependencies/local.mk
index 054ae01cb..fc48a8417 100644
--- a/src/resolve-system-dependencies/local.mk
+++ b/src/resolve-system-dependencies/local.mk
@@ -1,4 +1,4 @@
-ifeq ($(OS), Darwin)
+ifdef HOST_DARWIN
programs += resolve-system-dependencies
endif
diff --git a/tests/add.sh b/tests/add.sh
index e26e05843..5c3eed793 100644
--- a/tests/add.sh
+++ b/tests/add.sh
@@ -9,7 +9,7 @@ echo $path2
if test "$path1" != "$path2"; then
echo "nix-store --add and --add-fixed mismatch"
exit 1
-fi
+fi
path3=$(nix-store --add-fixed sha256 ./dummy)
echo $path3
diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh
index 6697ce236..d7bc1507b 100644
--- a/tests/binary-cache.sh
+++ b/tests/binary-cache.sh
@@ -1,5 +1,7 @@
source common.sh
+needLocalStore "“--no-require-sigs” can’t be used with the daemon"
+
# We can produce drvs directly into the binary cache
clearStore
clearCacheCache
diff --git a/tests/build-remote-content-addressed-floating.sh b/tests/build-remote-content-addressed-floating.sh
index 7447d92bd..13ef47d2d 100644
--- a/tests/build-remote-content-addressed-floating.sh
+++ b/tests/build-remote-content-addressed-floating.sh
@@ -4,4 +4,6 @@ file=build-hook-ca-floating.nix
sed -i 's/experimental-features .*/& ca-derivations/' "$NIX_CONF_DIR"/nix.conf
+CONTENT_ADDRESSED=true
+
source build-remote.sh
diff --git a/tests/build-remote.sh b/tests/build-remote.sh
index 70f82e939..806c6d261 100644
--- a/tests/build-remote.sh
+++ b/tests/build-remote.sh
@@ -6,12 +6,17 @@ unset NIX_STATE_DIR
function join_by { local d=$1; shift; echo -n "$1"; shift; printf "%s" "${@/#/$d}"; }
+EXTRA_SYSTEM_FEATURES=()
+if [[ -n "$CONTENT_ADDRESSED" ]]; then
+ EXTRA_SYSTEM_FEATURES=("ca-derivations")
+fi
+
builders=(
# system-features will automatically be added to the outer URL, but not inner
# remote-store URL.
- "ssh://localhost?remote-store=$TEST_ROOT/machine1?system-features=foo - - 1 1 foo"
- "$TEST_ROOT/machine2 - - 1 1 bar"
- "ssh-ng://localhost?remote-store=$TEST_ROOT/machine3?system-features=baz - - 1 1 baz"
+ "ssh://localhost?remote-store=$TEST_ROOT/machine1?system-features=$(join_by "%20" foo ${EXTRA_SYSTEM_FEATURES[@]}) - - 1 1 $(join_by "," foo ${EXTRA_SYSTEM_FEATURES[@]})"
+ "$TEST_ROOT/machine2 - - 1 1 $(join_by "," bar ${EXTRA_SYSTEM_FEATURES[@]})"
+ "ssh-ng://localhost?remote-store=$TEST_ROOT/machine3?system-features=$(join_by "%20" baz ${EXTRA_SYSTEM_FEATURES[@]}) - - 1 1 $(join_by "," baz ${EXTRA_SYSTEM_FEATURES[@]})"
)
chmod -R +w $TEST_ROOT/machine* || true
@@ -48,3 +53,16 @@ nix path-info --store $TEST_ROOT/machine3 --all \
| grep -v builder-build-remote-input-1.sh \
| grep -v builder-build-remote-input-2.sh \
| grep builder-build-remote-input-3.sh
+
+# Behavior of keep-failed
+out="$(nix-build 2>&1 failing.nix \
+ --builders "$(join_by '; ' "${builders[@]}")" \
+ --keep-failed \
+ --store $TEST_ROOT/machine0 \
+ -j0 \
+ --arg busybox $busybox)" || true
+
+[[ "$out" =~ .*"note: keeping build directory".* ]]
+
+build_dir="$(grep "note: keeping build" <<< "$out" | sed -E "s/^(.*)note: keeping build directory '(.*)'(.*)$/\2/")"
+[[ "foo" = $(<"$build_dir"/bar) ]]
diff --git a/tests/build.sh b/tests/build.sh
index aa54b88eb..c77f620f7 100644
--- a/tests/build.sh
+++ b/tests/build.sh
@@ -1,7 +1,7 @@
source common.sh
expectedJSONRegex='\[\{"drvPath":".*multiple-outputs-a.drv","outputs":\{"first":".*multiple-outputs-a-first","second":".*multiple-outputs-a-second"}},\{"drvPath":".*multiple-outputs-b.drv","outputs":\{"out":".*multiple-outputs-b"}}]'
-nix build -f multiple-outputs.nix --json a.all b.all | jq --exit-status '
+nix build -f multiple-outputs.nix --json a.all b.all --no-link | jq --exit-status '
(.[0] |
(.drvPath | match(".*multiple-outputs-a.drv")) and
(.outputs.first | match(".*multiple-outputs-a-first")) and
@@ -10,3 +10,10 @@ nix build -f multiple-outputs.nix --json a.all b.all | jq --exit-status '
(.drvPath | match(".*multiple-outputs-b.drv")) and
(.outputs.out | match(".*multiple-outputs-b")))
'
+testNormalization () {
+ clearStore
+ outPath=$(nix-build ./simple.nix --no-out-link)
+ test "$(stat -c %Y $outPath)" -eq 1
+}
+
+testNormalization
diff --git a/tests/ca/build-with-garbage-path.sh b/tests/ca/build-with-garbage-path.sh
new file mode 100755
index 000000000..884cd2802
--- /dev/null
+++ b/tests/ca/build-with-garbage-path.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+
+# Regression test for https://github.com/NixOS/nix/issues/4858
+
+source common.sh
+
+requireDaemonNewerThan "2.4pre20210621"
+
+# Get the output path of `rootCA`, and put some garbage instead
+outPath="$(nix-build ./content-addressed.nix -A rootCA --no-out-link)"
+nix-store --delete $(nix-store -q --referrers-closure "$outPath")
+touch "$outPath"
+
+# The build should correctly remove the garbage and put the expected path instead
+nix-build ./content-addressed.nix -A rootCA --no-out-link
+
+# Rebuild it. This shouldn’t overwrite the existing path
+oldInode=$(stat -c '%i' "$outPath")
+nix-build ./content-addressed.nix -A rootCA --no-out-link --arg seed 2
+newInode=$(stat -c '%i' "$outPath")
+[[ "$oldInode" == "$newInode" ]]
diff --git a/tests/ca/build.sh b/tests/ca/build.sh
index 35bf1dcf7..c8877f87f 100644
--- a/tests/ca/build.sh
+++ b/tests/ca/build.sh
@@ -59,9 +59,17 @@ testNixCommand () {
nix build --experimental-features 'nix-command ca-derivations' --file ./content-addressed.nix --no-link
}
+# Regression test for https://github.com/NixOS/nix/issues/4775
+testNormalization () {
+ clearStore
+ outPath=$(buildAttr rootCA 1)
+ test "$(stat -c %Y $outPath)" -eq 1
+}
+
# Disabled until we have it properly working
# testRemoteCache
clearStore
+testNormalization
testDeterministicCA
clearStore
testCutoff
diff --git a/tests/ca/common.sh b/tests/ca/common.sh
index e083d873c..c5aa34334 100644
--- a/tests/ca/common.sh
+++ b/tests/ca/common.sh
@@ -1 +1,5 @@
source ../common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf
+
+restartDaemon
diff --git a/tests/ca/concurrent-builds.sh b/tests/ca/concurrent-builds.sh
new file mode 100755
index 000000000..b442619e2
--- /dev/null
+++ b/tests/ca/concurrent-builds.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+# Ensure that the same derivation can’t be built twice concurrently.
+# Regression test for https://github.com/NixOS/nix/issues/5029
+
+source common.sh
+
+buggyNeedLocalStore "For some reason, this deadlocks with the daemon"
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+
+clearStore
+
+for i in {0..5}; do
+ nix build --no-link --file ./racy.nix &
+done
+
+wait
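
One caveat with this pattern: a bare `wait` always returns zero, so a failure in one of the backgrounded builds would go unnoticed. A sketch of a stricter variant (not what the test above does) that collects the PIDs and waits on each:

    pids=()
    for i in {0..5}; do
        nix build --no-link --file ./racy.nix &
        pids+=($!)
    done
    for pid in "${pids[@]}"; do
        wait "$pid"    # under `set -e`, a failed build now aborts the test
    done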
diff --git a/tests/ca/config.nix.in b/tests/ca/config.nix.in
new file mode 120000
index 000000000..af24ddb30
--- /dev/null
+++ b/tests/ca/config.nix.in
@@ -0,0 +1 @@
+../config.nix.in
\ No newline at end of file
diff --git a/tests/ca/content-addressed.nix b/tests/ca/content-addressed.nix
index e5b1c4de3..d328fc92c 100644
--- a/tests/ca/content-addressed.nix
+++ b/tests/ca/content-addressed.nix
@@ -1,4 +1,11 @@
-with import ../config.nix;
+with import ./config.nix;
+
+let mkCADerivation = args: mkDerivation ({
+ __contentAddressed = true;
+ outputHashMode = "recursive";
+ outputHashAlgo = "sha256";
+} // args);
+in
{ seed ? 0 }:
# A simple content-addressed derivation.
@@ -14,7 +21,7 @@ rec {
echo "Hello World" > $out/hello
'';
};
- rootCA = mkDerivation {
+ rootCA = mkCADerivation {
name = "rootCA";
outputs = [ "out" "dev" "foo"];
buildCommand = ''
@@ -27,11 +34,8 @@ rec {
ln -s $out $dev
ln -s $out $foo
'';
- __contentAddressed = true;
- outputHashMode = "recursive";
- outputHashAlgo = "sha256";
};
- dependentCA = mkDerivation {
+ dependentCA = mkCADerivation {
name = "dependent";
buildCommand = ''
echo "building a dependent derivation"
@@ -39,20 +43,14 @@ rec {
cat ${rootCA}/self/dep
echo ${rootCA}/self/dep > $out/dep
'';
- __contentAddressed = true;
- outputHashMode = "recursive";
- outputHashAlgo = "sha256";
};
- transitivelyDependentCA = mkDerivation {
+ transitivelyDependentCA = mkCADerivation {
name = "transitively-dependent";
buildCommand = ''
echo "building transitively-dependent"
cat ${dependentCA}/dep
echo ${dependentCA} > $out
'';
- __contentAddressed = true;
- outputHashMode = "recursive";
- outputHashAlgo = "sha256";
};
dependentNonCA = mkDerivation {
name = "dependent-non-ca";
@@ -72,6 +70,14 @@ rec {
cat ${dependentCA}/dep
echo foo > $out
'';
-
+ };
+ runnable = mkCADerivation rec {
+ name = "runnable-thing";
+ buildCommand = ''
+ mkdir -p $out/bin
+ echo ${rootCA} # Just to make it depend on it
+ echo "" > $out/bin/${name}
+ chmod +x $out/bin/${name}
+ '';
};
}
diff --git a/tests/ca/duplicate-realisation-in-closure.sh b/tests/ca/duplicate-realisation-in-closure.sh
new file mode 100644
index 000000000..74c5d25fd
--- /dev/null
+++ b/tests/ca/duplicate-realisation-in-closure.sh
@@ -0,0 +1,28 @@
+source ./common.sh
+
+requireDaemonNewerThan "2.4pre20210625"
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf
+
+export REMOTE_STORE_DIR="$TEST_ROOT/remote_store"
+export REMOTE_STORE="file://$REMOTE_STORE_DIR"
+
+rm -rf $REMOTE_STORE_DIR
+clearStore
+
+# Build dep1 and push that to the binary cache.
+# This entails building (and pushing) current-time.
+nix copy --to "$REMOTE_STORE" -f nondeterministic.nix dep1
+clearStore
+sleep 2 # To make sure that `$(date)` will be different
+# Build dep2.
+# As we’ve cleared the cache, we’ll have to rebuild current-time. And because
+# the current time isn’t the same as before, this will yield a new (different)
+# realisation
+nix build -f nondeterministic.nix dep2 --no-link
+
+# Build something that depends both on dep1 and dep2.
+# If everything goes right, we should rebuild dep2 rather than fetch it from
+# the cache (because that would mean duplicating `current-time` in the closure),
+# and have `dep1 == dep2`.
+nix build --substituters "$REMOTE_STORE" -f nondeterministic.nix toplevel --no-require-sigs --no-link
diff --git a/tests/ca/flake.nix b/tests/ca/flake.nix
new file mode 100644
index 000000000..332c92a67
--- /dev/null
+++ b/tests/ca/flake.nix
@@ -0,0 +1,3 @@
+{
+ outputs = { self }: import ./content-addressed.nix {};
+}
diff --git a/tests/ca/gc.sh b/tests/ca/gc.sh
new file mode 100755
index 000000000..e9b6c5ab5
--- /dev/null
+++ b/tests/ca/gc.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+# Ensure that garbage collection works properly with ca derivations
+
+source common.sh
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+
+cd ..
+source gc.sh
diff --git a/tests/ca/nix-run.sh b/tests/ca/nix-run.sh
new file mode 100755
index 000000000..81402af10
--- /dev/null
+++ b/tests/ca/nix-run.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf
+
+FLAKE_PATH=path:$PWD
+
+nix run --no-write-lock-file $FLAKE_PATH#runnable
diff --git a/tests/ca/nix-shell.sh b/tests/ca/nix-shell.sh
new file mode 100755
index 000000000..7f1a3a73e
--- /dev/null
+++ b/tests/ca/nix-shell.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf
+
+CONTENT_ADDRESSED=true
+cd ..
+source ./nix-shell.sh
+
diff --git a/tests/ca/nondeterministic.nix b/tests/ca/nondeterministic.nix
new file mode 100644
index 000000000..d6d099a3e
--- /dev/null
+++ b/tests/ca/nondeterministic.nix
@@ -0,0 +1,35 @@
+with import ./config.nix;
+
+let mkCADerivation = args: mkDerivation ({
+ __contentAddressed = true;
+ outputHashMode = "recursive";
+ outputHashAlgo = "sha256";
+} // args);
+in
+
+rec {
+ currentTime = mkCADerivation {
+ name = "current-time";
+ buildCommand = ''
+ mkdir $out
+ echo $(date) > $out/current-time
+ '';
+ };
+ dep = seed: mkCADerivation {
+ name = "dep";
+ inherit seed;
+ buildCommand = ''
+ echo ${currentTime} > $out
+ '';
+ };
+ dep1 = dep 1;
+ dep2 = dep 2;
+ toplevel = mkCADerivation {
+ name = "toplevel";
+ buildCommand = ''
+ test ${dep1} == ${dep2}
+ touch $out
+ '';
+ };
+}
+
diff --git a/tests/ca/post-hook.sh b/tests/ca/post-hook.sh
new file mode 100755
index 000000000..1c9d4f700
--- /dev/null
+++ b/tests/ca/post-hook.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+requireDaemonNewerThan "2.4pre20210626"
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+cd ..
+source ./post-hook.sh
+
+
diff --git a/tests/ca/racy.nix b/tests/ca/racy.nix
new file mode 100644
index 000000000..555a15484
--- /dev/null
+++ b/tests/ca/racy.nix
@@ -0,0 +1,15 @@
+# A derivation that would certainly fail if several builders tried to
+# build it at once.
+
+
+with import ./config.nix;
+
+mkDerivation {
+ name = "simple";
+ buildCommand = ''
+ mkdir $out
+ echo bar >> $out/foo
+ sleep 3
+ [[ "$(cat $out/foo)" == bar ]]
+ '';
+}
diff --git a/tests/ca/recursive.sh b/tests/ca/recursive.sh
new file mode 100755
index 000000000..648bf0a91
--- /dev/null
+++ b/tests/ca/recursive.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+requireDaemonNewerThan "2.4pre20210623"
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+cd ..
+source ./recursive.sh
+
+
diff --git a/tests/ca/repl.sh b/tests/ca/repl.sh
new file mode 100644
index 000000000..3808c7cb2
--- /dev/null
+++ b/tests/ca/repl.sh
@@ -0,0 +1,5 @@
+source common.sh
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+
+cd .. && source repl.sh
diff --git a/tests/ca/signatures.sh b/tests/ca/signatures.sh
index 4b4e468f7..0c7d974ea 100644
--- a/tests/ca/signatures.sh
+++ b/tests/ca/signatures.sh
@@ -22,8 +22,8 @@ testOneCopy () {
rm -rf "$REMOTE_STORE_DIR"
attrPath="$1"
- nix copy --to $REMOTE_STORE "$attrPath" --file ./content-addressed.nix \
- --secret-key-files "$TEST_ROOT/sk1"
+ nix copy -vvvv --to $REMOTE_STORE "$attrPath" --file ./content-addressed.nix \
+ --secret-key-files "$TEST_ROOT/sk1" --show-trace
ensureCorrectlyCopied "$attrPath"
diff --git a/tests/ca/substitute.sh b/tests/ca/substitute.sh
index 737c851a5..3d9001bb8 100644
--- a/tests/ca/substitute.sh
+++ b/tests/ca/substitute.sh
@@ -4,7 +4,7 @@
source common.sh
-sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf
+needLocalStore "“--no-require-sigs” can’t be used with the daemon"
rm -rf $TEST_ROOT/binary_cache
@@ -17,11 +17,15 @@ buildDrvs () {
# Populate the remote cache
clearStore
-buildDrvs --post-build-hook ../push-to-store.sh
+nix copy --to $REMOTE_STORE --file ./content-addressed.nix
# Restart the build on an empty store, ensuring that we don't build
clearStore
-buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0
+buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0 transitivelyDependentCA
+# Check that the thing we’ve just substituted has its realisation stored
+nix realisation info --file ./content-addressed.nix transitivelyDependentCA
+# Check that its dependencies have it too
+nix realisation info --file ./content-addressed.nix dependentCA rootCA
# Same thing, but
# 1. With non-ca derivations
@@ -45,3 +49,16 @@ if [[ -z "$(ls "$REMOTE_STORE_DIR/realisations")" ]]; then
echo "Realisations not rebuilt"
exit 1
fi
+
+# Test the local realisation disk cache
+buildDrvs --post-build-hook ../push-to-store.sh
+clearStore
+# Add the realisations of rootCA to the cachecache
+clearCacheCache
+export _NIX_FORCE_HTTP=1
+buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0
+# Try rebuilding, but remove the realisations from the remote cache to force
+# using the cachecache
+clearStore
+rm $REMOTE_STORE_DIR/realisations/*
+buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0
diff --git a/tests/check.nix b/tests/check.nix
index bca04fdaf..ec455ae2d 100644
--- a/tests/check.nix
+++ b/tests/check.nix
@@ -44,7 +44,7 @@ with import ./config.nix;
};
hashmismatch = import <nix/fetchurl.nix> {
- url = "file://" + toString ./dummy;
+ url = "file://" + builtins.getEnv "TMPDIR" + "/dummy";
sha256 = "0mdqa9w1p6cmli6976v4wi0sw9r4p5prkj7lzfd1877wk11c9c73";
};
diff --git a/tests/check.sh b/tests/check.sh
index 5f4997e28..ab48ff865 100644
--- a/tests/check.sh
+++ b/tests/check.sh
@@ -1,5 +1,8 @@
source common.sh
+# XXX: This shouldn’t be necessary, but #4813 causes this test to fail
+buggyNeedLocalStore "see #4813"
+
checkBuildTempDirRemoved ()
{
buildDir=$(sed -n 's/CHECK_TMPDIR=//p' $1 | head -1)
@@ -74,12 +77,13 @@ nix-build check.nix -A fetchurl --no-out-link --check
nix-build check.nix -A fetchurl --no-out-link --repair
[[ $(cat $path) != foo ]]
+echo 'Hello World' > $TMPDIR/dummy
nix-build check.nix -A hashmismatch --no-out-link || status=$?
[ "$status" = "102" ]
-echo -n > ./dummy
+echo -n > $TMPDIR/dummy
nix-build check.nix -A hashmismatch --no-out-link
-echo 'Hello World' > ./dummy
+echo 'Hello World' > $TMPDIR/dummy
nix-build check.nix -A hashmismatch --no-out-link --check || status=$?
[ "$status" = "102" ]
diff --git a/tests/common.sh.in b/tests/common.sh.in
index d31d3fbb8..61abab1d7 100644
--- a/tests/common.sh.in
+++ b/tests/common.sh.in
@@ -1,5 +1,9 @@
set -e
+if [[ -z "$COMMON_SH_SOURCED" ]]; then
+
+COMMON_SH_SOURCED=1
+
export TEST_ROOT=$(realpath ${TMPDIR:-/tmp}/nix-test)/${TEST_NAME:-default}
export NIX_STORE_DIR
if ! NIX_STORE_DIR=$(readlink -f $TEST_ROOT/store 2> /dev/null); then
@@ -32,8 +36,9 @@ export PATH=@bindir@:$PATH
if [[ -n "${NIX_CLIENT_PACKAGE:-}" ]]; then
export PATH="$NIX_CLIENT_PACKAGE/bin":$PATH
fi
+DAEMON_PATH="$PATH"
if [[ -n "${NIX_DAEMON_PACKAGE:-}" ]]; then
- export NIX_DAEMON_COMMAND="$NIX_DAEMON_PACKAGE/bin/nix-daemon"
+ DAEMON_PATH="${NIX_DAEMON_PACKAGE}/bin:$DAEMON_PATH"
fi
coreutils=@coreutils@
@@ -45,6 +50,9 @@ export busybox="@sandbox_shell@"
export version=@PACKAGE_VERSION@
export system=@system@
+export IMPURE_VAR1=foo
+export IMPURE_VAR2=bar
+
cacheDir=$TEST_ROOT/binary-cache
readLink() {
@@ -75,29 +83,57 @@ clearCacheCache() {
}
startDaemon() {
+ # Don’t start the daemon twice, as this would just make it loop indefinitely
+ if [[ "$NIX_REMOTE" == daemon ]]; then
+ return
+ fi
# Start the daemon, wait for the socket to appear. !!!
# ‘nix-daemon’ should have an option to fork into the background.
- rm -f $NIX_STATE_DIR/daemon-socket/socket
- ${NIX_DAEMON_COMMAND:-nix daemon} &
+ rm -f $NIX_DAEMON_SOCKET_PATH
+ PATH=$DAEMON_PATH nix daemon &
for ((i = 0; i < 30; i++)); do
- if [ -e $NIX_DAEMON_SOCKET_PATH ]; then break; fi
+ if [[ -S $NIX_DAEMON_SOCKET_PATH ]]; then break; fi
sleep 1
done
pidDaemon=$!
- trap "kill -9 $pidDaemon" EXIT
+ trap "killDaemon" EXIT
export NIX_REMOTE=daemon
}
killDaemon() {
- kill -9 $pidDaemon
+ kill $pidDaemon
+ for i in {0..10}; do
+ kill -0 $pidDaemon || break
+ sleep 1
+ done
+ kill -9 $pidDaemon || true
wait $pidDaemon || true
trap "" EXIT
}
+restartDaemon() {
+ [[ -z "${pidDaemon:-}" ]] && return 0
+
+ killDaemon
+ unset NIX_REMOTE
+ startDaemon
+}
+
if [[ $(uname) == Linux ]] && [[ -L /proc/self/ns/user ]] && unshare --user true; then
_canUseSandbox=1
fi
+isDaemonNewer () {
+ [[ -n "${NIX_DAEMON_PACKAGE:-}" ]] || return 0
+ local requiredVersion="$1"
+ local daemonVersion=$($NIX_DAEMON_PACKAGE/bin/nix-daemon --version | cut -d' ' -f3)
+ [[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]]
+}
+
+requireDaemonNewerThan () {
+ isDaemonNewer "$1" || exit 99
+}
+
canUseSandbox() {
if [[ ! $_canUseSandbox ]]; then
echo "Sandboxing not supported, skipping this test..."
@@ -123,4 +159,22 @@ expect() {
[[ $res -eq $expected ]]
}
+needLocalStore() {
+ if [[ "$NIX_REMOTE" == "daemon" ]]; then
+ echo "Can’t run through the daemon ($1), skipping this test..."
+ exit 99
+ fi
+}
+
+# Just to make it easy to find which tests should be fixed
+buggyNeedLocalStore () {
+ needLocalStore "$1"
+}
+
set -x
+
+if [[ -n "${NIX_DAEMON_PACKAGE:-}" ]]; then
+ startDaemon
+fi
+
+fi # COMMON_SH_SOURCED
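
The shutdown sequence in `killDaemon` is the classic graceful-then-forceful pattern: SIGTERM first, a liveness poll with `kill -0` (which sends no signal), then SIGKILL as a last resort. Reduced to its core (sketch, with a hypothetical `$pid`):

    kill "$pid"                               # ask politely (SIGTERM)
    for _ in {0..10}; do
        kill -0 "$pid" 2>/dev/null || break   # signal 0 only probes for liveness
        sleep 1
    done
    kill -9 "$pid" 2>/dev/null || true        # force it if still alive
    wait "$pid" || true                       # reap the child, ignore its exit status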
diff --git a/tests/compression-levels.sh b/tests/compression-levels.sh
new file mode 100644
index 000000000..85f12974a
--- /dev/null
+++ b/tests/compression-levels.sh
@@ -0,0 +1,22 @@
+source common.sh
+
+clearStore
+clearCache
+
+outPath=$(nix-build dependencies.nix --no-out-link)
+
+cacheURI="file://$cacheDir?compression=xz&compression-level=0"
+
+nix copy --to $cacheURI $outPath
+
+FILESIZES=$(cat ${cacheDir}/*.narinfo | awk '/FileSize: /{sum+=$2}END{print sum}')
+
+clearCache
+
+cacheURI="file://$cacheDir?compression=xz&compression-level=5"
+
+nix copy --to $cacheURI $outPath
+
+FILESIZES2=$(cat ${cacheDir}/*.narinfo | awk '/FileSize: /{sum+=$2}END{print sum}')
+
+[[ $FILESIZES -gt $FILESIZES2 ]]
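
The `awk` one-liner sums every `FileSize:` field across the `.narinfo` files, and the final assertion checks that compression level 5 yields a smaller total than level 0. The summation on its own, with made-up input (sketch):

    printf 'FileSize: 100\nNarSize: 5\nFileSize: 250\n' \
        | awk '/FileSize: /{sum+=$2}END{print sum}'   # prints 350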
diff --git a/tests/config.nix.in b/tests/config.nix.in
index a57a8c596..7facbdcbc 100644
--- a/tests/config.nix.in
+++ b/tests/config.nix.in
@@ -1,3 +1,12 @@
+let
+ contentAddressedByDefault = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT" == "1";
+ caArgs = if contentAddressedByDefault then {
+ __contentAddressed = true;
+ outputHashMode = "recursive";
+ outputHashAlgo = "sha256";
+ } else {};
+in
+
rec {
shell = "@bash@";
@@ -13,6 +22,6 @@ rec {
builder = shell;
args = ["-e" args.builder or (builtins.toFile "builder-${args.name}.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")];
PATH = path;
- } // removeAttrs args ["builder" "meta"])
+ } // caArgs // removeAttrs args ["builder" "meta"])
// { meta = args.meta or {}; };
}
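
With this hook in place, an existing test can be switched to content-addressed derivations wholesale by exporting a single variable before the expressions are evaluated, which is exactly what the `ca/*.sh` wrappers above rely on. Hypothetical one-off usage:

    # Sketch: assumes the ca-derivations feature is enabled, as ca/common.sh arranges.
    export NIX_TESTS_CA_BY_DEFAULT=1           # every mkDerivation from config.nix is now CA
    nix-build dependencies.nix --no-out-link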
diff --git a/tests/config.sh b/tests/config.sh
index 01c78f2c3..3d0da3cef 100644
--- a/tests/config.sh
+++ b/tests/config.sh
@@ -50,4 +50,4 @@ exp_cores=$(nix show-config | grep '^cores' | cut -d '=' -f 2 | xargs)
exp_features=$(nix show-config | grep '^experimental-features' | cut -d '=' -f 2 | xargs)
[[ $prev != $exp_cores ]]
[[ $exp_cores == "4242" ]]
-[[ $exp_features == "nix-command flakes" ]]
+[[ $exp_features == "flakes nix-command" ]]
diff --git a/tests/db-migration.sh b/tests/db-migration.sh
index e0ff7d311..3f9dc8972 100644
--- a/tests/db-migration.sh
+++ b/tests/db-migration.sh
@@ -3,11 +3,14 @@
# Only run this if we have an older Nix available
# XXX: This assumes that the `daemon` package is older than the `client` one
if [[ -z "$NIX_DAEMON_PACKAGE" ]]; then
- exit 0
+ exit 99
fi
source common.sh
+killDaemon
+unset NIX_REMOTE
+
# Fill the db using the older Nix
PATH_WITH_NEW_NIX="$PATH"
export PATH="$NIX_DAEMON_PACKAGE/bin:$PATH"
diff --git a/tests/dummy b/tests/dummy
new file mode 100644
index 000000000..557db03de
--- /dev/null
+++ b/tests/dummy
@@ -0,0 +1 @@
+Hello World
diff --git a/tests/dump-db.sh b/tests/dump-db.sh
index d6eea42aa..48647f403 100644
--- a/tests/dump-db.sh
+++ b/tests/dump-db.sh
@@ -1,5 +1,7 @@
source common.sh
+needLocalStore "--dump-db requires a local store"
+
clearStore
path=$(nix-build dependencies.nix -o $TEST_ROOT/result)
diff --git a/tests/eval-store.sh b/tests/eval-store.sh
new file mode 100644
index 000000000..679da5741
--- /dev/null
+++ b/tests/eval-store.sh
@@ -0,0 +1,30 @@
+source common.sh
+
+# Using `--eval-store` with the daemon will eventually copy everything
+# to the build store, invalidating most of the tests here
+needLocalStore
+
+eval_store=$TEST_ROOT/eval-store
+
+clearStore
+rm -rf "$eval_store"
+
+nix build -f dependencies.nix --eval-store "$eval_store" -o "$TEST_ROOT/result"
+[[ -e $TEST_ROOT/result/foobar ]]
+(! ls $NIX_STORE_DIR/*.drv)
+ls $eval_store/nix/store/*.drv
+
+clearStore
+rm -rf "$eval_store"
+
+nix-instantiate dependencies.nix --eval-store "$eval_store"
+(! ls $NIX_STORE_DIR/*.drv)
+ls $eval_store/nix/store/*.drv
+
+clearStore
+rm -rf "$eval_store"
+
+nix-build dependencies.nix --eval-store "$eval_store" -o "$TEST_ROOT/result"
+[[ -e $TEST_ROOT/result/foobar ]]
+(! ls $NIX_STORE_DIR/*.drv)
+ls $eval_store/nix/store/*.drv
diff --git a/tests/failing.nix b/tests/failing.nix
new file mode 100644
index 000000000..2a0350d4d
--- /dev/null
+++ b/tests/failing.nix
@@ -0,0 +1,22 @@
+{ busybox }:
+with import ./config.nix;
+let
+
+ mkDerivation = args:
+ derivation ({
+ inherit system;
+ builder = busybox;
+ args = ["sh" "-e" args.builder or (builtins.toFile "builder-${args.name}.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")];
+ } // removeAttrs args ["builder" "meta"])
+ // { meta = args.meta or {}; };
+in
+{
+
+ failing = mkDerivation {
+ name = "failing";
+ buildCommand = ''
+ echo foo > bar
+ exit 1
+ '';
+ };
+}
diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh
index 88744ee7f..89294d8d2 100644
--- a/tests/fetchGit.sh
+++ b/tests/fetchGit.sh
@@ -189,3 +189,7 @@ path8=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$rep
rev4=$(git -C $repo rev-parse HEAD)
rev4_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; }).rev")
[[ $rev4 = $rev4_nix ]]
+
+# The name argument should be handled
+path9=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; name = \"foo\"; }).outPath")
+[[ $path9 =~ -foo$ ]]
diff --git a/tests/fetchMercurial.sh b/tests/fetchMercurial.sh
index d8a4e09d2..726840664 100644
--- a/tests/fetchMercurial.sh
+++ b/tests/fetchMercurial.sh
@@ -94,3 +94,8 @@ hg commit --cwd $repo -m 'Bla3'
path4=$(nix eval --impure --refresh --raw --expr "(builtins.fetchMercurial file://$repo).outPath")
[[ $path2 = $path4 ]]
+
+echo paris > $repo/hello
+# A passed `name` argument should be reflected in the output path
+path5=$(nix eval -vvvvv --impure --refresh --raw --expr "(builtins.fetchMercurial { url = \"file://$repo\"; name = \"foo\"; } ).outPath")
+[[ $path5 =~ -foo$ ]]
diff --git a/tests/fetchurl.sh b/tests/fetchurl.sh
index cd84e9a4c..3d1685f43 100644
--- a/tests/fetchurl.sh
+++ b/tests/fetchurl.sh
@@ -5,7 +5,7 @@ clearStore
# Test fetching a flat file.
hash=$(nix-hash --flat --type sha256 ./fetchurl.sh)
-outPath=$(nix-build --expr 'import <nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr sha256 $hash --no-out-link)
+outPath=$(nix-build -vvvvv --expr 'import <nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr sha256 $hash --no-out-link)
cmp $outPath fetchurl.sh
@@ -14,7 +14,7 @@ clearStore
hash=$(nix hash file --type sha512 --base64 ./fetchurl.sh)
-outPath=$(nix-build --expr 'import <nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr sha512 $hash --no-out-link)
+outPath=$(nix-build -vvvvv --expr 'import <nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr sha512 $hash --no-out-link)
cmp $outPath fetchurl.sh
@@ -25,7 +25,7 @@ hash=$(nix hash file ./fetchurl.sh)
[[ $hash =~ ^sha256- ]]
-outPath=$(nix-build --expr 'import <nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr hash $hash --no-out-link)
+outPath=$(nix-build -vvvvv --expr 'import <nix/fetchurl.nix>' --argstr url file://$(pwd)/fetchurl.sh --argstr hash $hash --no-out-link)
cmp $outPath fetchurl.sh
@@ -38,10 +38,10 @@ hash=$(nix hash file --type sha256 --base16 ./fetchurl.sh)
storePath=$(nix --store $other_store store add-file ./fetchurl.sh)
-outPath=$(nix-build --expr 'import <nix/fetchurl.nix>' --argstr url file:///no-such-dir/fetchurl.sh --argstr sha256 $hash --no-out-link --substituters $other_store)
+outPath=$(nix-build -vvvvv --expr 'import <nix/fetchurl.nix>' --argstr url file:///no-such-dir/fetchurl.sh --argstr sha256 $hash --no-out-link --substituters $other_store)
# Test hashed mirrors with an SRI hash.
-nix-build --expr 'import <nix/fetchurl.nix>' --argstr url file:///no-such-dir/fetchurl.sh --argstr hash $(nix hash to-sri --type sha256 $hash) \
+nix-build -vvvvv --expr 'import <nix/fetchurl.nix>' --argstr url file:///no-such-dir/fetchurl.sh --argstr hash $(nix hash to-sri --type sha256 $hash) \
--no-out-link --substituters $other_store
# Test unpacking a NAR.
@@ -55,7 +55,7 @@ nix-store --dump $TEST_ROOT/archive > $nar
hash=$(nix-hash --flat --type sha256 $nar)
-outPath=$(nix-build --expr 'import <nix/fetchurl.nix>' --argstr url file://$nar --argstr sha256 $hash \
+outPath=$(nix-build -vvvvv --expr 'import <nix/fetchurl.nix>' --argstr url file://$nar --argstr sha256 $hash \
--arg unpack true --argstr name xyzzy --no-out-link)
echo $outPath | grep -q 'xyzzy'
@@ -69,7 +69,7 @@ nix-store --delete $outPath
narxz=$TEST_ROOT/archive.nar.xz
rm -f $narxz
xz --keep $nar
-outPath=$(nix-build --expr 'import <nix/fetchurl.nix>' --argstr url file://$narxz --argstr sha256 $hash \
+outPath=$(nix-build -vvvvv --expr 'import <nix/fetchurl.nix>' --argstr url file://$narxz --argstr sha256 $hash \
--arg unpack true --argstr name xyzzy --no-out-link)
test -x $outPath/fetchurl.sh
diff --git a/tests/fixed.nix b/tests/fixed.nix
index 76580ffa1..babe71504 100644
--- a/tests/fixed.nix
+++ b/tests/fixed.nix
@@ -21,6 +21,14 @@ rec {
(f ./fixed.builder2.sh "recursive" "sha1" "vw46m23bizj4n8afrc0fj19wrp7mj3c0")
];
+ # Expression to test that `nix-build --check` also throws an error if the hash of a
+ # fixed-output derivation has changed even if the hash exists in the store (in this
+ # case the hash exists because of `fixed.builder2.sh`, but building a derivation
+ # with the same hash and a different result must throw an error).
+ check = [
+ (f ./fixed.builder1.sh "recursive" "md5" "3670af73070fa14077ad74e0f5ea4e42")
+ ];
+
good2 = [
# Yes, this looks fscked up: builder2 doesn't have that result.
# But Nix sees that an output with the desired hash already
diff --git a/tests/fixed.sh b/tests/fixed.sh
index 8f51403a7..f1e1ce420 100644
--- a/tests/fixed.sh
+++ b/tests/fixed.sh
@@ -2,9 +2,6 @@ source common.sh
clearStore
-export IMPURE_VAR1=foo
-export IMPURE_VAR2=bar
-
path=$(nix-store -q $(nix-instantiate fixed.nix -A good.0))
echo 'testing bad...'
@@ -18,6 +15,11 @@ nix path-info --json $path | grep fixed:md5:2qk15sxzzjlnpjk9brn7j8ppcd
echo 'testing good...'
nix-build fixed.nix -A good --no-out-link
+if isDaemonNewer "2.4pre20210927"; then
+ echo 'testing --check...'
+ nix-build fixed.nix -A check --check && fail "should fail"
+fi
+
echo 'testing good2...'
nix-build fixed.nix -A good2 --no-out-link
diff --git a/tests/flake-local-settings.sh b/tests/flake-local-settings.sh
new file mode 100644
index 000000000..09f6b4ca8
--- /dev/null
+++ b/tests/flake-local-settings.sh
@@ -0,0 +1,34 @@
+source common.sh
+
+clearStore
+rm -rf $TEST_HOME/.cache $TEST_HOME/.config $TEST_HOME/.local
+
+cp ./simple.nix ./simple.builder.sh ./config.nix $TEST_HOME
+
+cd $TEST_HOME
+
+rm -f post-hook-ran
+cat <<EOF > echoing-post-hook.sh
+#!/bin/sh
+
+echo "ThePostHookRan" > $PWD/post-hook-ran
+EOF
+chmod +x echoing-post-hook.sh
+
+cat <<EOF > flake.nix
+{
+ nixConfig.post-build-hook = "$PWD/echoing-post-hook.sh";
+
+ outputs = a: {
+ defaultPackage.$system = import ./simple.nix;
+ };
+}
+EOF
+
+# Without --accept-flake-config, the post hook should not run.
+nix build < /dev/null
+(! [[ -f post-hook-ran ]])
+clearStore
+
+nix build --accept-flake-config
+test -f post-hook-ran || fail "The post hook should have run"
diff --git a/tests/flakes.sh b/tests/flakes.sh
index e78e4a39d..9e10322b9 100644
--- a/tests/flakes.sh
+++ b/tests/flakes.sh
@@ -5,11 +5,6 @@ if [[ -z $(type -p git) ]]; then
exit 99
fi
-if [[ -z $(type -p hg) ]]; then
- echo "Mercurial not installed; skipping flake tests"
- exit 99
-fi
-
clearStore
rm -rf $TEST_HOME/.cache $TEST_HOME/.config
@@ -23,13 +18,19 @@ flake6Dir=$TEST_ROOT/flake6
flake7Dir=$TEST_ROOT/flake7
templatesDir=$TEST_ROOT/templates
nonFlakeDir=$TEST_ROOT/nonFlake
+badFlakeDir=$TEST_ROOT/badFlake
flakeA=$TEST_ROOT/flakeA
flakeB=$TEST_ROOT/flakeB
flakeGitBare=$TEST_ROOT/flakeGitBare
+flakeFollowsA=$TEST_ROOT/follows/flakeA
+flakeFollowsB=$TEST_ROOT/follows/flakeA/flakeB
+flakeFollowsC=$TEST_ROOT/follows/flakeA/flakeB/flakeC
+flakeFollowsD=$TEST_ROOT/follows/flakeA/flakeD
+flakeFollowsE=$TEST_ROOT/follows/flakeA/flakeE
-for repo in $flake1Dir $flake2Dir $flake3Dir $flake7Dir $templatesDir $nonFlakeDir $flakeA $flakeB; do
+for repo in $flake1Dir $flake2Dir $flake3Dir $flake7Dir $templatesDir $nonFlakeDir $flakeA $flakeB $flakeFollowsA; do
rm -rf $repo $repo.tmp
- mkdir $repo
+ mkdir -p $repo
git -C $repo init
git -C $repo config user.email "foobar@example.com"
git -C $repo config user.name "Foobar"
@@ -90,76 +91,14 @@ EOF
git -C $nonFlakeDir add README.md
git -C $nonFlakeDir commit -m 'Initial'
-cat > $registry <<EOF
-{
- "version": 2,
- "flakes": [
- { "from": {
- "type": "indirect",
- "id": "flake1"
- },
- "to": {
- "type": "git",
- "url": "file://$flake1Dir"
- }
- },
- { "from": {
- "type": "indirect",
- "id": "flake2"
- },
- "to": {
- "type": "git",
- "url": "file://$flake2Dir"
- }
- },
- { "from": {
- "type": "indirect",
- "id": "flake3"
- },
- "to": {
- "type": "git",
- "url": "file://$flake3Dir"
- }
- },
- { "from": {
- "type": "indirect",
- "id": "flake4"
- },
- "to": {
- "type": "indirect",
- "id": "flake3"
- }
- },
- { "from": {
- "type": "indirect",
- "id": "flake5"
- },
- "to": {
- "type": "hg",
- "url": "file://$flake5Dir"
- }
- },
- { "from": {
- "type": "indirect",
- "id": "nixpkgs"
- },
- "to": {
- "type": "indirect",
- "id": "flake1"
- }
- },
- { "from": {
- "type": "indirect",
- "id": "templates"
- },
- "to": {
- "type": "git",
- "url": "file://$templatesDir"
- }
- }
- ]
-}
-EOF
+# Construct a custom registry, additionally testing the --registry flag
+nix registry add --registry $registry flake1 git+file://$flake1Dir
+nix registry add --registry $registry flake2 git+file://$flake2Dir
+nix registry add --registry $registry flake3 git+file://$flake3Dir
+nix registry add --registry $registry flake4 flake3
+nix registry add --registry $registry flake5 hg+file://$flake5Dir
+nix registry add --registry $registry nixpkgs flake1
+nix registry add --registry $registry templates git+file://$templatesDir
# Test 'nix registry list'.
[[ $(nix registry list | wc -l) == 7 ]]
@@ -209,6 +148,7 @@ nix build -o $TEST_ROOT/result --expr "(builtins.getFlake \"git+file://$flake1Di
# Building a flake with an unlocked dependency should fail in pure mode.
(! nix build -o $TEST_ROOT/result flake2#bar --no-registries)
+(! nix build -o $TEST_ROOT/result flake2#bar --no-use-registries)
(! nix eval --expr "builtins.getFlake \"$flake2Dir\"")
# But should succeed in impure mode.
@@ -232,6 +172,7 @@ nix build -o $TEST_ROOT/result $flake2Dir#bar
# Building with a lockfile should not require a fetch of the registry.
nix build -o $TEST_ROOT/result --flake-registry file:///no-registry.json $flake2Dir#bar --refresh
nix build -o $TEST_ROOT/result --no-registries $flake2Dir#bar --refresh
+nix build -o $TEST_ROOT/result --no-use-registries $flake2Dir#bar --refresh
# Updating the flake should not change the lockfile.
nix flake lock $flake2Dir
@@ -242,6 +183,7 @@ nix build -o $TEST_ROOT/result flake2#bar
# Or without a registry.
nix build -o $TEST_ROOT/result --no-registries git+file://$flake2Dir#bar --refresh
+nix build -o $TEST_ROOT/result --no-use-registries git+file://$flake2Dir#bar --refresh
# Test whether indirect dependencies work.
nix build -o $TEST_ROOT/result $flake3Dir#xyzzy
@@ -319,6 +261,8 @@ cat > $flake3Dir/flake.nix <<EOF
mkDerivation {
inherit system;
name = "fnord";
+ dummy = builtins.readFile (builtins.path { name = "source"; path = ./.; filter = path: type: baseNameOf path == "config.nix"; } + "/config.nix");
+ dummy2 = builtins.readFile (builtins.path { name = "source"; path = inputs.flake1; filter = path: type: baseNameOf path == "simple.nix"; } + "/simple.nix");
buildCommand = ''
cat \${inputs.nonFlake}/README.md > \$out
'';
@@ -405,6 +349,8 @@ nix registry add flake1 flake3
[[ $(nix registry list | wc -l) == 8 ]]
nix registry pin flake1
[[ $(nix registry list | wc -l) == 8 ]]
+nix registry pin flake1 flake3
+[[ $(nix registry list | wc -l) == 8 ]]
nix registry remove flake1
[[ $(nix registry list | wc -l) == 7 ]]
@@ -443,12 +389,14 @@ git -C $templatesDir commit -m 'Initial'
nix flake check templates
nix flake show templates
+nix flake show templates --json | jq
(cd $flake7Dir && nix flake init)
(cd $flake7Dir && nix flake init) # check idempotence
git -C $flake7Dir add flake.nix
nix flake check $flake7Dir
nix flake show $flake7Dir
+nix flake show $flake7Dir --json | jq
git -C $flake7Dir commit -a -m 'Initial'
# Test 'nix flake new'.
@@ -535,6 +483,21 @@ EOF
(! nix flake check $flake3Dir)
+cat > $flake3Dir/flake.nix <<EOF
+{
+ outputs = { flake1, self }: {
+ defaultPackage = {
+ system-1 = "foo";
+ system-2 = "bar";
+ };
+ };
+}
+EOF
+
+checkRes=$(nix flake check --keep-going $flake3Dir 2>&1 && fail "nix flake check should have failed" || true)
+echo "$checkRes" | grep -q "defaultPackage.system-1"
+echo "$checkRes" | grep -q "defaultPackage.system-2"
+
# Test 'follows' inputs.
cat > $flake3Dir/flake.nix <<EOF
{
@@ -613,44 +576,52 @@ nix build -o $TEST_ROOT/result git+file://$flakeGitBare
# Test Mercurial flakes.
rm -rf $flake5Dir
-hg init $flake5Dir
+mkdir $flake5Dir
cat > $flake5Dir/flake.nix <<EOF
{
outputs = { self, flake1 }: {
defaultPackage.$system = flake1.defaultPackage.$system;
-
expr = assert builtins.pathExists ./flake.lock; 123;
};
}
EOF
-hg add $flake5Dir/flake.nix
-hg commit --config ui.username=foobar@example.org $flake5Dir -m 'Initial commit'
+if [[ -n $(type -p hg) ]]; then
+ hg init $flake5Dir
-nix build -o $TEST_ROOT/result hg+file://$flake5Dir
-[[ -e $TEST_ROOT/result/hello ]]
+ hg add $flake5Dir/flake.nix
+ hg commit --config ui.username=foobar@example.org $flake5Dir -m 'Initial commit'
+
+ nix build -o $TEST_ROOT/result hg+file://$flake5Dir
+ [[ -e $TEST_ROOT/result/hello ]]
-(! nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revision)
+ (! nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revision)
-nix eval hg+file://$flake5Dir#expr
+ nix eval hg+file://$flake5Dir#expr
-nix eval hg+file://$flake5Dir#expr
+ nix eval hg+file://$flake5Dir#expr
-(! nix eval hg+file://$flake5Dir#expr --no-allow-dirty)
+ (! nix eval hg+file://$flake5Dir#expr --no-allow-dirty)
-(! nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revision)
+ (! nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revision)
-hg commit --config ui.username=foobar@example.org $flake5Dir -m 'Add lock file'
+ hg commit --config ui.username=foobar@example.org $flake5Dir -m 'Add lock file'
-nix flake metadata --json hg+file://$flake5Dir --refresh | jq -e -r .revision
-nix flake metadata --json hg+file://$flake5Dir
-[[ $(nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revCount) = 1 ]]
+ nix flake metadata --json hg+file://$flake5Dir --refresh | jq -e -r .revision
+ nix flake metadata --json hg+file://$flake5Dir
+ [[ $(nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revCount) = 1 ]]
+
+ nix build -o $TEST_ROOT/result hg+file://$flake5Dir --no-registries --no-allow-dirty
+ nix build -o $TEST_ROOT/result hg+file://$flake5Dir --no-use-registries --no-allow-dirty
+fi
-nix build -o $TEST_ROOT/result hg+file://$flake5Dir --no-registries --no-allow-dirty
+# Test path flakes.
+rm -rf $flake5Dir/.hg $flake5Dir/flake.lock
+nix flake lock path://$flake5Dir
-# Test tarball flakes
-tar cfz $TEST_ROOT/flake.tar.gz -C $TEST_ROOT --exclude .hg flake5
+# Test tarball flakes.
+tar cfz $TEST_ROOT/flake.tar.gz -C $TEST_ROOT flake5
nix build -o $TEST_ROOT/result file://$TEST_ROOT/flake.tar.gz
@@ -665,8 +636,8 @@ nix build -o $TEST_ROOT/result "file://$TEST_ROOT/flake.tar.gz?narHash=sha256-qQ
# Test --override-input.
git -C $flake3Dir reset --hard
-nix flake lock $flake3Dir --override-input flake2/flake1 flake5 -vvvvv
-[[ $(jq .nodes.flake1_2.locked.url $flake3Dir/flake.lock) =~ flake5 ]]
+nix flake lock $flake3Dir --override-input flake2/flake1 file://$TEST_ROOT/flake.tar.gz -vvvvv
+[[ $(jq .nodes.flake1_2.locked.url $flake3Dir/flake.lock) =~ flake.tar.gz ]]
nix flake lock $flake3Dir --override-input flake2/flake1 flake1
[[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
@@ -722,3 +693,120 @@ git -C $flakeB commit -a -m 'Foo'
# Test list-inputs with circular dependencies
nix flake metadata $flakeA
+
+# Test flake follow paths
+mkdir -p $flakeFollowsB
+mkdir -p $flakeFollowsC
+mkdir -p $flakeFollowsD
+mkdir -p $flakeFollowsE
+
+cat > $flakeFollowsA/flake.nix <<EOF
+{
+ description = "Flake A";
+ inputs = {
+ B = {
+ url = "path:./flakeB";
+ inputs.foobar.follows = "D";
+ inputs.nonFlake.follows = "D";
+ };
+
+ D.url = "path:./flakeD";
+ };
+ outputs = { ... }: {};
+}
+EOF
+
+cat > $flakeFollowsB/flake.nix <<EOF
+{
+ description = "Flake B";
+ inputs = {
+ foobar.url = "path:$flakeFollowsA/flakeE";
+ nonFlake.url = "path:$nonFlakeDir";
+ C = {
+ url = "path:./flakeC";
+ inputs.foobar.follows = "foobar";
+ };
+ };
+ outputs = { ... }: {};
+}
+EOF
+
+cat > $flakeFollowsC/flake.nix <<EOF
+{
+ description = "Flake C";
+ inputs = {
+ foobar.url = "path:$flakeFollowsA/flakeE";
+ };
+ outputs = { ... }: {};
+}
+EOF
+
+cat > $flakeFollowsD/flake.nix <<EOF
+{
+ description = "Flake D";
+ inputs = {};
+ outputs = { ... }: {};
+}
+EOF
+
+cat > $flakeFollowsE/flake.nix <<EOF
+{
+ description = "Flake D";
+ inputs = {};
+ outputs = { ... }: {};
+}
+EOF
+
+git -C $flakeFollowsA add flake.nix flakeB/flake.nix \
+ flakeB/flakeC/flake.nix flakeD/flake.nix flakeE/flake.nix
+
+nix flake lock $flakeFollowsA
+
+[[ $(jq -c .nodes.B.inputs.C $flakeFollowsA/flake.lock) = '"C"' ]]
+[[ $(jq -c .nodes.B.inputs.foobar $flakeFollowsA/flake.lock) = '["D"]' ]]
+[[ $(jq -c .nodes.C.inputs.foobar $flakeFollowsA/flake.lock) = '["B","foobar"]' ]]
+
+# Ensure removing follows from flake.nix removes them from the lockfile
+
+cat > $flakeFollowsA/flake.nix <<EOF
+{
+ description = "Flake A";
+ inputs = {
+ B = {
+ url = "path:./flakeB";
+ inputs.nonFlake.follows = "D";
+ };
+ D.url = "path:./flakeD";
+ };
+ outputs = { ... }: {};
+}
+EOF
+
+nix flake lock $flakeFollowsA
+
+[[ $(jq -c .nodes.B.inputs.foobar $flakeFollowsA/flake.lock) = '"foobar"' ]]
+jq -r -c '.nodes | keys | .[]' $flakeFollowsA/flake.lock | grep "^foobar$"
+
+# Ensure a relative path is not allowed to go outside the store path
+cat > $flakeFollowsA/flake.nix <<EOF
+{
+ description = "Flake A";
+ inputs = {
+ B.url = "path:../flakeB";
+ };
+ outputs = { ... }: {};
+}
+EOF
+
+git -C $flakeFollowsA add flake.nix
+
+nix flake lock $flakeFollowsA 2>&1 | grep 'points outside'
+
+# Test flake in store does not evaluate
+rm -rf $badFlakeDir
+mkdir $badFlakeDir
+echo INVALID > $badFlakeDir/flake.nix
+nix store delete $(nix store add-path $badFlakeDir)
+
+[[ $(nix path-info $(nix store add-path $flake1Dir)) =~ flake1 ]]
+[[ $(nix path-info path:$(nix store add-path $flake1Dir)) =~ simple ]]
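
In the lock file, an ordinary input is recorded as a node name (a string) while a `follows` is recorded as a path through the node graph (an array); that is what the `jq` assertions above distinguish. Inspecting a lock file by hand (sketch; the annotations are not jq output):

    jq '.nodes.B.inputs' flake.lock
    # "C": "C"          -> ordinary input: the name of a locked node
    # "foobar": ["D"]   -> follows: a path starting from the root node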
diff --git a/tests/function-trace.sh b/tests/function-trace.sh
index 3b7f364e3..0b7f49d82 100755
--- a/tests/function-trace.sh
+++ b/tests/function-trace.sh
@@ -60,8 +60,6 @@ function-trace exited (string):1:1 at
expect_trace '(x: x) 1 2' "
function-trace entered (string):1:1 at
function-trace exited (string):1:1 at
-function-trace entered (string):1:1 at
-function-trace exited (string):1:1 at
"
# Not a function
diff --git a/tests/gc-auto.sh b/tests/gc-auto.sh
index 6867f2eb4..521d9e539 100644
--- a/tests/gc-auto.sh
+++ b/tests/gc-auto.sh
@@ -1,5 +1,7 @@
source common.sh
+needLocalStore "“min-free” and “max-free” are daemon options"
+
clearStore
garbage1=$(nix store add-path --name garbage1 ./nar-access.sh)
diff --git a/tests/gc-non-blocking.sh b/tests/gc-non-blocking.sh
new file mode 100644
index 000000000..8b21c6f1c
--- /dev/null
+++ b/tests/gc-non-blocking.sh
@@ -0,0 +1,33 @@
+# Test whether the collector is non-blocking, i.e. a build can run in
+# parallel with it.
+source common.sh
+
+needLocalStore "the GC test needs a synchronisation point"
+
+clearStore
+
+fifo=$TEST_ROOT/test.fifo
+mkfifo "$fifo"
+
+dummy=$(nix store add-path ./simple.nix)
+
+running=$TEST_ROOT/running
+touch $running
+
+(_NIX_TEST_GC_SYNC=$fifo nix-store --gc -vvvvv; rm $running) &
+pid=$!
+
+sleep 2
+
+outPath=$(nix-build -o "$TEST_ROOT/result" -E "
+ with import ./config.nix;
+ mkDerivation {
+ name = \"non-blocking\";
+ buildCommand = \"set -x; test -e $running; mkdir \$out; echo > $fifo\";
+ }")
+
+wait $pid
+
+(! test -e $running)
+(! test -e $dummy)
+test -e $outPath
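
The FIFO is the synchronisation point: the collector (pointed at it via `_NIX_TEST_GC_SYNC`) blocks until the build writes to it, so the build provably runs while the GC is in flight. The rendezvous idiom in isolation (sketch, with a throwaway name):

    mkfifo sync
    (echo "reader saw: $(cat sync)") &   # blocks until a writer opens the fifo
    echo go > sync                       # rendezvous: both sides proceed together
    wait
    rm sync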
diff --git a/tests/gc.sh b/tests/gc.sh
index 8b4f8d282..a736b63db 100644
--- a/tests/gc.sh
+++ b/tests/gc.sh
@@ -1,5 +1,7 @@
source common.sh
+clearStore
+
drvPath=$(nix-instantiate dependencies.nix)
outPath=$(nix-store -rvv "$drvPath")
@@ -12,7 +14,7 @@ ln -sf $outPath "$NIX_STATE_DIR"/gcroots/foo
nix-store --gc --print-roots | grep $outPath
nix-store --gc --print-live | grep $outPath
nix-store --gc --print-dead | grep $drvPath
-if nix-store --gc --print-dead | grep $outPath; then false; fi
+if nix-store --gc --print-dead | grep -E $outPath$; then false; fi
nix-store --gc --print-dead
@@ -23,6 +25,12 @@ test -e $inUse
if nix-store --delete $outPath; then false; fi
test -e $outPath
+for i in $NIX_STORE_DIR/*; do
+ if [[ $i =~ /trash ]]; then continue; fi # compat with old daemon
+ touch $i.lock
+ touch $i.chroot
+done
+
nix-collect-garbage
# Check that the root and its dependencies haven't been deleted.
@@ -38,3 +46,7 @@ nix-collect-garbage
# Check that the output has been GC'd.
if test -e $outPath/foobar; then false; fi
+
+# Check that the store is empty.
+rmdir $NIX_STORE_DIR/.links
+rmdir $NIX_STORE_DIR
diff --git a/tests/init.sh b/tests/init.sh
index 1a6ccb6fe..3c6d5917d 100644
--- a/tests/init.sh
+++ b/tests/init.sh
@@ -23,6 +23,7 @@ substituters =
flake-registry = $TEST_ROOT/registry.json
show-trace = true
include nix.conf.extra
+trusted-users = $(whoami)
EOF
cat > "$NIX_CONF_DIR"/nix.conf.extra <<EOF
@@ -35,5 +36,3 @@ nix-store --init
# Did anything happen?
test -e "$NIX_STATE_DIR"/db/db.sqlite
-
-echo 'Hello World' > ./dummy
diff --git a/tests/lang/eval-fail-antiquoted-path.nix b/tests/lang/eval-fail-nonexist-path.nix
index f2f08107b..f2f08107b 100644
--- a/tests/lang/eval-fail-antiquoted-path.nix
+++ b/tests/lang/eval-fail-nonexist-path.nix
diff --git a/tests/lang/eval-okay-floor-ceil.exp b/tests/lang/eval-okay-floor-ceil.exp
new file mode 100644
index 000000000..81f80420b
--- /dev/null
+++ b/tests/lang/eval-okay-floor-ceil.exp
@@ -0,0 +1 @@
+"23;24;23;23"
diff --git a/tests/lang/eval-okay-floor-ceil.nix b/tests/lang/eval-okay-floor-ceil.nix
new file mode 100644
index 000000000..d76a0d86e
--- /dev/null
+++ b/tests/lang/eval-okay-floor-ceil.nix
@@ -0,0 +1,9 @@
+with import ./lib.nix;
+
+let
+ n1 = builtins.floor 23.5;
+ n2 = builtins.ceil 23.5;
+ n3 = builtins.floor 23;
+ n4 = builtins.ceil 23;
+in
+ builtins.concatStringsSep ";" (map toString [ n1 n2 n3 n4 ])
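
The expected output "23;24;23;23" follows from `floor`/`ceil` rounding down/up on floats and acting as the identity on integers. A single case can be checked from the shell against the builtins this test introduces (sketch):

    nix-instantiate --eval --expr 'builtins.floor 23.5'   # 23
    nix-instantiate --eval --expr 'builtins.ceil 23.5'    # 24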
diff --git a/tests/lang/eval-okay-path-antiquotation.nix b/tests/lang/eval-okay-path-antiquotation.nix
new file mode 100644
index 000000000..497d7c1c7
--- /dev/null
+++ b/tests/lang/eval-okay-path-antiquotation.nix
@@ -0,0 +1,12 @@
+let
+ foo = "foo";
+in
+{
+ simple = ./${foo};
+ surrounded = ./a-${foo}-b;
+ absolute = /${foo};
+ expr = ./${foo + "/bar"};
+ home = ~/${foo};
+ notfirst = ./bar/${foo};
+ slashes = /${foo}/${"bar"};
+}
diff --git a/tests/lang/eval-okay-sort.exp b/tests/lang/eval-okay-sort.exp
index 148b93516..899119e20 100644
--- a/tests/lang/eval-okay-sort.exp
+++ b/tests/lang/eval-okay-sort.exp
@@ -1 +1 @@
-[ [ 42 77 147 249 483 526 ] [ 526 483 249 147 77 42 ] [ "bar" "fnord" "foo" "xyzzy" ] [ { key = 1; value = "foo"; } { key = 1; value = "fnord"; } { key = 2; value = "bar"; } ] ]
+[ [ 42 77 147 249 483 526 ] [ 526 483 249 147 77 42 ] [ "bar" "fnord" "foo" "xyzzy" ] [ { key = 1; value = "foo"; } { key = 1; value = "fnord"; } { key = 2; value = "bar"; } ] [ [ ] [ ] [ 1 ] [ 1 4 ] [ 1 5 ] [ 1 6 ] [ 2 ] [ 2 3 ] [ 3 ] [ 3 ] ] ]
diff --git a/tests/lang/eval-okay-sort.nix b/tests/lang/eval-okay-sort.nix
index 8299c3a4a..50aa78e40 100644
--- a/tests/lang/eval-okay-sort.nix
+++ b/tests/lang/eval-okay-sort.nix
@@ -4,5 +4,17 @@ with builtins;
(sort (x: y: y < x) [ 483 249 526 147 42 77 ])
(sort lessThan [ "foo" "bar" "xyzzy" "fnord" ])
(sort (x: y: x.key < y.key)
- [ { key = 1; value = "foo"; } { key = 2; value = "bar"; } { key = 1; value = "fnord"; } ])
+ [ { key = 1; value = "foo"; } { key = 2; value = "bar"; } { key = 1; value = "fnord"; } ])
+ (sort lessThan [
+ [ 1 6 ]
+ [ ]
+ [ 2 3 ]
+ [ 3 ]
+ [ 1 5 ]
+ [ 2 ]
+ [ 1 ]
+ [ ]
+ [ 1 4 ]
+ [ 3 ]
+ ])
]
diff --git a/tests/lang/parse-okay-url.nix b/tests/lang/parse-okay-url.nix
index fce3b13ee..08de27d0a 100644
--- a/tests/lang/parse-okay-url.nix
+++ b/tests/lang/parse-okay-url.nix
@@ -3,5 +3,6 @@
http://www2.mplayerhq.hu/MPlayer/releases/fonts/font-arial-iso-8859-1.tar.bz2
http://losser.st-lab.cs.uu.nl/~armijn/.nix/gcc-3.3.4-static-nix.tar.gz
http://fpdownload.macromedia.com/get/shockwave/flash/english/linux/7.0r25/install_flash_player_7_linux.tar.gz
+ https://ftp5.gwdg.de/pub/linux/archlinux/extra/os/x86_64/unzip-6.0-14-x86_64.pkg.tar.zst
ftp://ftp.gtk.org/pub/gtk/v1.2/gtk+-1.2.10.tar.gz
]
diff --git a/tests/linux-sandbox.sh b/tests/linux-sandbox.sh
index eac62d461..3f304ac2f 100644
--- a/tests/linux-sandbox.sh
+++ b/tests/linux-sandbox.sh
@@ -1,5 +1,7 @@
source common.sh
+needLocalStore "the sandbox only runs on the builder side, so it makes no sense to test it with the daemon"
+
clearStore
if ! canUseSandbox; then exit 99; fi
diff --git a/tests/local-store.sh b/tests/local-store.sh
index 4ec3d64b0..0247346f1 100644
--- a/tests/local-store.sh
+++ b/tests/local-store.sh
@@ -15,6 +15,5 @@ PATH1=$(nix path-info --store ./x $CORRECT_PATH)
PATH2=$(nix path-info --store "$PWD/x" $CORRECT_PATH)
[ $CORRECT_PATH == $PATH2 ]
-# FIXME we could also test the query parameter version:
-# PATH3=$(nix path-info --store "local?store=$PWD/x" $CORRECT_PATH)
-# [ $CORRECT_PATH == $PATH3 ]
+PATH3=$(nix path-info --store "local?root=$PWD/x" $CORRECT_PATH)
+[ $CORRECT_PATH == $PATH3 ]
diff --git a/tests/local.mk b/tests/local.mk
index e2c94dde6..6f38853bc 100644
--- a/tests/local.mk
+++ b/tests/local.mk
@@ -2,7 +2,9 @@ nix_tests = \
hash.sh lang.sh add.sh simple.sh dependencies.sh \
config.sh \
gc.sh \
+ ca/gc.sh \
gc-concurrent.sh \
+ gc-non-blocking.sh \
gc-auto.sh \
referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
gc-runtime.sh check-refs.sh filter-source.sh \
@@ -11,6 +13,7 @@ nix_tests = \
timeout.sh secure-drv-outputs.sh nix-channel.sh \
multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \
binary-cache.sh \
+ substitute-with-invalid-ca.sh \
binary-cache-build-remote.sh \
nix-profile.sh repair.sh dump-db.sh case-hack.sh \
check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \
@@ -31,28 +34,39 @@ nix_tests = \
shell.sh \
brotli.sh \
zstd.sh \
+ compression-levels.sh \
pure-eval.sh \
check.sh \
plugins.sh \
search.sh \
nix-copy-ssh.sh \
post-hook.sh \
+ ca/post-hook.sh \
function-trace.sh \
recursive.sh \
describe-stores.sh \
flakes.sh \
+ flake-local-settings.sh \
build.sh \
compute-levels.sh \
+ repl.sh ca/repl.sh \
ca/build.sh \
+ ca/build-with-garbage-path.sh \
+ ca/duplicate-realisation-in-closure.sh \
ca/substitute.sh \
ca/signatures.sh \
- ca/nix-copy.sh
+ ca/nix-shell.sh \
+ ca/nix-run.sh \
+ ca/recursive.sh \
+ ca/concurrent-builds.sh \
+ ca/nix-copy.sh \
+ eval-store.sh
# parallel.sh
install-tests += $(foreach x, $(nix_tests), tests/$(x))
tests-environment = NIX_REMOTE= $(bash) -e
-clean-files += $(d)/common.sh $(d)/config.nix
+clean-files += $(d)/common.sh $(d)/config.nix $(d)/ca/config.nix
-test-deps += tests/common.sh tests/config.nix tests/plugins/libplugintest.$(SO_EXT)
+test-deps += tests/common.sh tests/config.nix tests/ca/config.nix tests/plugins/libplugintest.$(SO_EXT)
diff --git a/tests/multiple-outputs.sh b/tests/multiple-outputs.sh
index de573d4fa..0d45ad35b 100644
--- a/tests/multiple-outputs.sh
+++ b/tests/multiple-outputs.sh
@@ -58,7 +58,7 @@ outPath2=$(nix-build $(nix-instantiate multiple-outputs.nix -A a.second) --no-ou
# Delete one of the outputs and rebuild it. This will cause a hash
# rewrite.
-nix store delete $TEST_ROOT/result-second --ignore-liveness
+env -u NIX_REMOTE nix store delete $TEST_ROOT/result-second --ignore-liveness
nix-build multiple-outputs.nix -A a.all -o $TEST_ROOT/result
[ "$(cat $TEST_ROOT/result-second/file)" = "second" ]
[ "$(cat $TEST_ROOT/result-second/link/file)" = "first" ]
@@ -76,7 +76,10 @@ if nix-build multiple-outputs.nix -A cyclic --no-out-link; then
exit 1
fi
+# Do a GC. This should leave an empty store.
echo "collecting garbage..."
rm $TEST_ROOT/result*
nix-store --gc --keep-derivations --keep-outputs
nix-store --gc --print-roots
+rm -rf $NIX_STORE_DIR/.links
+rmdir $NIX_STORE_DIR
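
`rmdir` refuses to remove a non-empty directory, so the two lines above double as an assertion that the GC really emptied the store (`.links` goes first because `rmdir` only handles one level). The idiom in isolation, with a throwaway directory (sketch):

    mkdir -p d && touch d/f
    ! rmdir d 2>/dev/null   # fails while d still has contents
    rm d/f
    rmdir d                 # succeeds only once d is empty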
diff --git a/tests/nix-shell.sh b/tests/nix-shell.sh
index 4775bafb9..a31d35887 100644
--- a/tests/nix-shell.sh
+++ b/tests/nix-shell.sh
@@ -2,6 +2,20 @@ source common.sh
clearStore
+if [[ -n ${CONTENT_ADDRESSED:-} ]]; then
+ nix-shell () {
+ command nix-shell --arg contentAddressed true "$@"
+ }
+
+ nix_develop() {
+ nix develop --arg contentAddressed true "$@"
+ }
+else
+ nix_develop() {
+ nix develop "$@"
+ }
+fi
+
# Test nix-shell -A
export IMPURE_VAR=foo
export SELECTED_IMPURE_VAR=baz
@@ -40,8 +54,12 @@ nix-instantiate shell.nix -A shellDrv --add-root $TEST_ROOT/shell
output=$(NIX_PATH=nixpkgs=shell.nix nix-shell --pure -p foo bar --run 'echo "$(foo) $(bar)"')
[ "$output" = "foo bar" ]
+# Test nix-shell -p --arg x y
+output=$(NIX_PATH=nixpkgs=shell.nix nix-shell --pure -p foo --argstr fooContents baz --run 'echo "$(foo)"')
+[ "$output" = "baz" ]
+
# Test nix-shell shebang mode
-sed -e "s|@ENV_PROG@|$(type -p env)|" shell.shebang.sh > $TEST_ROOT/shell.shebang.sh
+sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/shell.shebang.sh
chmod a+rx $TEST_ROOT/shell.shebang.sh
output=$($TEST_ROOT/shell.shebang.sh abc def)
@@ -49,7 +67,7 @@ output=$($TEST_ROOT/shell.shebang.sh abc def)
# Test nix-shell shebang mode again with metacharacters in the filename.
# First word of filename is chosen to not match any file in the test root.
-sed -e "s|@ENV_PROG@|$(type -p env)|" shell.shebang.sh > $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh
+sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh
chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh
output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.sh abc def)
@@ -58,7 +76,7 @@ output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.sh abc def)
# Test nix-shell shebang mode for ruby
# This uses a fake interpreter that returns the arguments passed
# This, in turn, verifies the `rc` script is valid and the `load()` script (given using `-e`) is as expected.
-sed -e "s|@SHELL_PROG@|$(type -p nix-shell)|" shell.shebang.rb > $TEST_ROOT/shell.shebang.rb
+sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/shell.shebang.rb
chmod a+rx $TEST_ROOT/shell.shebang.rb
output=$($TEST_ROOT/shell.shebang.rb abc ruby)
@@ -66,21 +84,27 @@ output=$($TEST_ROOT/shell.shebang.rb abc ruby)
# Test nix-shell shebang mode for ruby again with metacharacters in the filename.
# Note: fake interpreter only space-separates args without adding escapes to its output.
-sed -e "s|@SHELL_PROG@|$(type -p nix-shell)|" shell.shebang.rb > $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb
+sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb
chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb
output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.rb abc ruby)
[ "$output" = '-e load(ARGV.shift) -- '"$TEST_ROOT"'/spaced \'\''"shell.shebang.rb abc ruby' ]
# Test 'nix develop'.
-nix develop -f shell.nix shellDrv -c bash -c '[[ -n $stdenv ]]'
+nix_develop -f shell.nix shellDrv -c bash -c '[[ -n $stdenv ]]'
# Ensure `nix develop -c` preserves stdin
echo foo | nix develop -f shell.nix shellDrv -c cat | grep -q foo
# Ensure `nix develop -c` actually executes the command if stdout isn't a terminal
-nix develop -f shell.nix shellDrv -c echo foo |& grep -q foo
+nix_develop -f shell.nix shellDrv -c echo foo |& grep -q foo
# Test 'nix print-dev-env'.
+[[ $(nix print-dev-env -f shell.nix shellDrv --json | jq -r .variables.arr1.value[2]) = '3 4' ]]
+
source <(nix print-dev-env -f shell.nix shellDrv)
[[ -n $stdenv ]]
+[[ ${arr1[2]} = "3 4" ]]
+[[ ${arr2[1]} = $'\n' ]]
+[[ ${arr2[2]} = $'x\ny' ]]
+[[ $(fun) = blabla ]]
diff --git a/tests/nss-preload.nix b/tests/nss-preload.nix
new file mode 100644
index 000000000..2610d2b30
--- /dev/null
+++ b/tests/nss-preload.nix
@@ -0,0 +1,123 @@
+{ nixpkgs, system, overlay }:
+
+with import (nixpkgs + "/nixos/lib/testing-python.nix") {
+ inherit system;
+ extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
+};
+
+makeTest (
+
+rec {
+ name = "nss-preload";
+
+ nodes = {
+ http_dns = { lib, pkgs, config, ... }: {
+ networking.firewall.enable = false;
+ networking.interfaces.eth1.ipv6.addresses = lib.mkForce [
+ { address = "fd21::1"; prefixLength = 64; }
+ ];
+ networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
+ { address = "192.168.0.1"; prefixLength = 24; }
+ ];
+
+ services.unbound = {
+ enable = true;
+ enableRootTrustAnchor = false;
+ settings = {
+ server = {
+ interface = [ "192.168.0.1" "fd21::1" "::1" "127.0.0.1" ];
+ access-control = [ "192.168.0.0/24 allow" "fd21::/64 allow" "::1 allow" "127.0.0.0/8 allow" ];
+ local-data = [
+ ''"example.com. IN A 192.168.0.1"''
+ ''"example.com. IN AAAA fd21::1"''
+ ''"tarballs.nixos.org. IN A 192.168.0.1"''
+ ''"tarballs.nixos.org. IN AAAA fd21::1"''
+ ];
+ };
+ };
+ };
+
+ services.nginx = {
+ enable = true;
+ virtualHosts."example.com" = {
+ root = pkgs.runCommand "testdir" {} ''
+ mkdir "$out"
+ echo hello world > "$out/index.html"
+ '';
+ };
+ };
+ };
+
+ # client consumes a remote resolver
+ client = { lib, nodes, pkgs, ... }: {
+ networking.useDHCP = false;
+ networking.nameservers = [
+ (lib.head nodes.http_dns.config.networking.interfaces.eth1.ipv6.addresses).address
+ (lib.head nodes.http_dns.config.networking.interfaces.eth1.ipv4.addresses).address
+ ];
+ networking.interfaces.eth1.ipv6.addresses = [
+ { address = "fd21::10"; prefixLength = 64; }
+ ];
+ networking.interfaces.eth1.ipv4.addresses = [
+ { address = "192.168.0.10"; prefixLength = 24; }
+ ];
+
+ nix.sandboxPaths = lib.mkForce [];
+ nix.binaryCaches = lib.mkForce [];
+ nix.useSandbox = lib.mkForce true;
+ };
+ };
+
+ nix-fetch = pkgs.writeText "fetch.nix" ''
+ derivation {
+ # This derivation is a copy of what is available at
+ # nix.git:corepkgs/fetchurl.nix
+ builder = "builtin:fetchurl";
+
+ # We're going to fetch data from the http_dns instance created above;
+ # we expect the content to be the same as the content available there.
+ # ```
+ # $ nix-hash --type sha256 --to-base32 $(echo "hello world" | sha256sum | cut -d " " -f 1)
+ # 0ix4jahrkll5zg01wandq78jw3ab30q4nscph67rniqg5x7r0j59
+ # ```
+ outputHash = "0ix4jahrkll5zg01wandq78jw3ab30q4nscph67rniqg5x7r0j59";
+ outputHashAlgo = "sha256";
+ outputHashMode = "flat";
+
+ name = "example.com";
+ url = "http://example.com";
+
+ unpack = false;
+ executable = false;
+
+ system = "builtin";
+
+ preferLocalBuild = true;
+
+ impureEnvVars = [
+ "http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
+ ];
+
+ urls = [ "http://example.com" ];
+ }
+ '';
+
+ testScript = { nodes, ... }: ''
+ http_dns.wait_for_unit("nginx")
+ http_dns.wait_for_open_port(80)
+ http_dns.wait_for_unit("unbound")
+ http_dns.wait_for_open_port(53)
+
+ client.start()
+ client.wait_for_unit('multi-user.target')
+
+ with subtest("can fetch data from a remote server outside sandbox"):
+ client.succeed("nix --version >&2")
+ client.succeed("curl -vvv http://example.com/index.html >&2")
+
+ with subtest("nix-build can lookup dns and fetch data"):
+ client.succeed("""
+ nix-build ${nix-fetch} >&2
+ """)
+ '';
+})
diff --git a/tests/optimise-store.sh b/tests/optimise-store.sh
index 61e3df2f9..8c2d05cd5 100644
--- a/tests/optimise-store.sh
+++ b/tests/optimise-store.sh
@@ -26,7 +26,8 @@ if [ "$inode1" = "$inode3" ]; then
exit 1
fi
-nix-store --optimise
+# XXX: This should work through the daemon too
+NIX_REMOTE="" nix-store --optimise
inode1="$(stat --format=%i $outPath1/foo)"
inode3="$(stat --format=%i $outPath3/foo)"
diff --git a/tests/post-hook.sh b/tests/post-hook.sh
index aa3e6a574..049e40749 100644
--- a/tests/post-hook.sh
+++ b/tests/post-hook.sh
@@ -4,7 +4,10 @@ clearStore
rm -f $TEST_ROOT/result
-export REMOTE_STORE=$TEST_ROOT/remote_store
+export REMOTE_STORE=file:$TEST_ROOT/remote_store
+echo 'require-sigs = false' >> $NIX_CONF_DIR/nix.conf
+
+restartDaemon
# Build the dependencies and push them to the remote store
nix-build -o $TEST_ROOT/result dependencies.nix --post-build-hook $PWD/push-to-store.sh
diff --git a/tests/recursive.sh b/tests/recursive.sh
index a55b061b5..91518d67d 100644
--- a/tests/recursive.sh
+++ b/tests/recursive.sh
@@ -1,5 +1,8 @@
source common.sh
+sed -i 's/experimental-features .*/& recursive-nix/' "$NIX_CONF_DIR"/nix.conf
+restartDaemon
+
# FIXME
if [[ $(uname) != Linux ]]; then exit 99; fi
@@ -9,9 +12,9 @@ rm -f $TEST_ROOT/result
export unreachable=$(nix store add-path ./recursive.sh)
-NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr '
+NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr '
with import ./config.nix;
- mkDerivation {
+ mkDerivation rec {
name = "recursive";
dummy = builtins.toFile "dummy" "bla bla";
SHELL = shell;
@@ -19,11 +22,13 @@ NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command r
# Note: this is a string without context.
unreachable = builtins.getEnv "unreachable";
+ NIX_TESTS_CA_BY_DEFAULT = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT";
+
requiredSystemFeatures = [ "recursive-nix" ];
buildCommand = '\'\''
mkdir $out
- opts="--experimental-features nix-command"
+ opts="--experimental-features nix-command ${if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else ""}"
PATH=${builtins.getEnv "NIX_BIN_DIR"}:$PATH
@@ -46,16 +51,15 @@ NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command r
# Add it to our closure.
ln -s $foobar $out/foobar
- [[ $(nix $opts path-info --all | wc -l) -eq 3 ]]
+ [[ $(nix $opts path-info --all | wc -l) -eq 4 ]]
# Build a derivation.
nix $opts build -L --impure --expr '\''
- derivation {
+ with import ${./config.nix};
+ mkDerivation {
name = "inner1";
- builder = builtins.getEnv "SHELL";
- system = builtins.getEnv "system";
+ buildCommand = "echo $fnord blaat > $out";
fnord = builtins.toFile "fnord" "fnord";
- args = [ "-c" "echo $fnord blaat > $out" ];
}
'\''
diff --git a/tests/referrers.sh b/tests/referrers.sh
index 614dd8f5b..81323c280 100644
--- a/tests/referrers.sh
+++ b/tests/referrers.sh
@@ -1,5 +1,7 @@
source common.sh
+needLocalStore "uses some low-level store manipulations that aren’t available through the daemon"
+
clearStore
max=500
diff --git a/tests/repair.sh b/tests/repair.sh
index ba019028d..1105b446b 100644
--- a/tests/repair.sh
+++ b/tests/repair.sh
@@ -1,5 +1,7 @@
source common.sh
+needLocalStore "--repair needs a local store"
+
clearStore
path=$(nix-build dependencies.nix -o $TEST_ROOT/result)
@@ -28,7 +30,7 @@ nix-store --verify-path $path2
chmod u+w $path2
touch $path2/bad
-nix-store --delete $(nix-store -qd $path2)
+nix-store --delete $(nix-store -q --referrers-closure $(nix-store -qd $path2))
(! nix-store --verify --check-contents --repair)
diff --git a/tests/repl.sh b/tests/repl.sh
new file mode 100644
index 000000000..d360821f2
--- /dev/null
+++ b/tests/repl.sh
@@ -0,0 +1,20 @@
+source common.sh
+
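+# Commands fed to nix repl: load a derivation, then build it with `:b`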
+replCmds="
+simple = import ./simple.nix
+:b simple
+"
+
+testRepl () {
+ local nixArgs=("$@")
+ local replOutput="$(nix repl "${nixArgs[@]}" <<< "$replCmds")"
+ echo "$replOutput"
+ local outPath=$(echo "$replOutput" |&
+ grep -o -E "$NIX_STORE_DIR/\w*-simple")
+ nix path-info "${nixArgs[@]}" "$outPath"
+}
+
+# Simple test: try building a drv
+testRepl
+# Same thing (kind of), but with a remote store.
+testRepl --store "$TEST_ROOT/store?real=$NIX_STORE_DIR"
diff --git a/tests/shell.nix b/tests/shell.nix
index 24ebcc04c..4912d295a 100644
--- a/tests/shell.nix
+++ b/tests/shell.nix
@@ -1,6 +1,18 @@
-{ inNixShell ? false }:
+{ inNixShell ? false, contentAddressed ? false, fooContents ? "foo" }:
-with import ./config.nix;
+let cfg = import ./config.nix; in
+with cfg;
+
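+# If requested, wrap mkDerivation so that every derivation in this file is
+# built content-addressed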
+let
+ mkDerivation =
+ if contentAddressed then
+ args: cfg.mkDerivation ({
+ __contentAddressed = true;
+ outputHashMode = "recursive";
+ outputHashAlgo = "sha256";
+ } // args)
+ else cfg.mkDerivation;
+in
let pkgs = rec {
setupSh = builtins.toFile "setup" ''
@@ -8,6 +20,20 @@ let pkgs = rec {
for pkg in $buildInputs; do
export PATH=$PATH:$pkg/bin
done
+
+ # Mimic the behavior of stdenv for `$out` etc. when structured attrs are used.
+ if [ -n "''${NIX_ATTRS_SH_FILE}" ]; then
+ for o in "''${!outputs[@]}"; do
+ eval "''${o}=''${outputs[$o]}"
+ export "''${o}"
+ done
+ fi
+
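+ # (Presumably here to exercise how arrays and shell functions survive
+ # environment serialisation.)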
+ declare -a arr1=(1 2 "3 4" 5)
+ declare -a arr2=(x $'\n' $'x\ny')
+ fun() {
+ echo blabla
+ }
'';
stdenv = mkDerivation {
@@ -22,6 +48,8 @@ let pkgs = rec {
name = "shellDrv";
builder = "/does/not/exist";
VAR_FROM_NIX = "bar";
+ ASCII_PERCENT = "%";
+ ASCII_AT = "@";
TEST_inNixShell = if inNixShell then "true" else "false";
inherit stdenv;
outputs = ["dev" "out"];
@@ -34,7 +62,7 @@ let pkgs = rec {
foo = runCommand "foo" {} ''
mkdir -p $out/bin
- echo 'echo foo' > $out/bin/foo
+ echo 'echo ${fooContents}' > $out/bin/foo
chmod a+rx $out/bin/foo
ln -s ${shell} $out/bin/bash
'';
diff --git a/tests/structured-attrs-shell.nix b/tests/structured-attrs-shell.nix
new file mode 100644
index 000000000..57c1e6bd2
--- /dev/null
+++ b/tests/structured-attrs-shell.nix
@@ -0,0 +1,21 @@
+with import ./config.nix;
+let
+ dep = mkDerivation {
+ name = "dep";
+ buildCommand = ''
+ mkdir $out; echo bla > $out/bla
+ '';
+ };
+ inherit (import ./shell.nix { inNixShell = true; }) stdenv;
+in
+mkDerivation {
+ name = "structured2";
+ __structuredAttrs = true;
+ inherit stdenv;
+ outputs = [ "out" "dev" ];
+ my.list = [ "a" "b" "c" ];
+ exportReferencesGraph.refs = [ dep ];
+ buildCommand = ''
+ touch ''${outputs[out]}; touch ''${outputs[dev]}
+ '';
+}
diff --git a/tests/structured-attrs.nix b/tests/structured-attrs.nix
index c39c3a346..e93139a44 100644
--- a/tests/structured-attrs.nix
+++ b/tests/structured-attrs.nix
@@ -36,7 +36,7 @@ mkDerivation {
echo bar > $dest
echo foo > $dest2
- json=$(cat .attrs.json)
+ json=$(cat $NIX_ATTRS_JSON_FILE)
[[ $json =~ '"narHash":"sha256:1r7yc43zqnzl5b0als5vnyp649gk17i37s7mj00xr8kc47rjcybk"' ]]
[[ $json =~ '"narSize":288' ]]
[[ $json =~ '"closureSize":288' ]]
diff --git a/tests/structured-attrs.sh b/tests/structured-attrs.sh
index dcfe6d580..378dbc735 100644
--- a/tests/structured-attrs.sh
+++ b/tests/structured-attrs.sh
@@ -1,5 +1,9 @@
source common.sh
+# 27ce722638 required some incompatible changes to the nix file, so skip this
+# test for older daemon versions
+requireDaemonNewerThan "2.4pre20210712"
+
clearStore
rm -f $TEST_ROOT/result
@@ -8,3 +12,12 @@ nix-build structured-attrs.nix -A all -o $TEST_ROOT/result
[[ $(cat $TEST_ROOT/result/foo) = bar ]]
[[ $(cat $TEST_ROOT/result-dev/foo) = foo ]]
+
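+# With structured attrs, the attributes are exposed both as .attrs.json in the
+# build directory and via $NIX_ATTRS_JSON_FILE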
+export NIX_BUILD_SHELL=$SHELL
+env NIX_PATH=nixpkgs=shell.nix nix-shell structured-attrs-shell.nix \
+ --run 'test -e .attrs.json; test "3" = "$(jq ".my.list|length" < $NIX_ATTRS_JSON_FILE)"'
+
+# `nix develop` handles environment variables in a slightly special way: it
+# parses them from a shell file exported from a derivation. In particular this
+# tests that `outputs` (an associative array in this case) comes through intact.
+nix develop -f structured-attrs-shell.nix -c bash -c 'test -n "$out"'
diff --git a/tests/substitute-with-invalid-ca.sh b/tests/substitute-with-invalid-ca.sh
new file mode 100644
index 000000000..4d0b01e0f
--- /dev/null
+++ b/tests/substitute-with-invalid-ca.sh
@@ -0,0 +1,38 @@
+source common.sh
+
+BINARY_CACHE=file://$cacheDir
+
+getHash() {
+ basename "$1" | cut -d '-' -f 1
+}
+getRemoteNarInfo () {
+ echo "$cacheDir/$(getHash "$1").narinfo"
+}
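+# For a store path like $NIX_STORE_DIR/<hash>-good.txt, getRemoteNarInfo
+# yields $cacheDir/<hash>.narinfo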
+
+cat <<EOF > $TEST_HOME/good.txt
+I’m a good path
+EOF
+
+cat <<EOF > $TEST_HOME/bad.txt
+I’m a bad path
+EOF
+
+good=$(nix-store --add $TEST_HOME/good.txt)
+bad=$(nix-store --add $TEST_HOME/bad.txt)
+nix copy --to "$BINARY_CACHE" "$good"
+nix copy --to "$BINARY_CACHE" "$bad"
+nix-collect-garbage >/dev/null 2>&1
+
+# Falsify the narinfo file for '$good' by replacing its URL and hash fields
+# with those of '$bad'
+goodPathNarInfo=$(getRemoteNarInfo "$good")
+badPathNarInfo=$(getRemoteNarInfo "$bad")
+for fieldName in URL FileHash FileSize NarHash NarSize; do
+ sed -i "/^$fieldName/d" "$goodPathNarInfo"
+ grep -E "^$fieldName" "$badPathNarInfo" >> "$goodPathNarInfo"
+done
+
+# Copy '$good' back from the binary cache. This should fail because its
+# narinfo has been corrupted
+if nix copy --from "$BINARY_CACHE" "$good"; then
+ fail "Importing a path with a wrong CA field should fail"
+fi
diff --git a/tests/tarball.sh b/tests/tarball.sh
index d53ec8cd9..1301922a5 100644
--- a/tests/tarball.sh
+++ b/tests/tarball.sh
@@ -40,6 +40,11 @@ test_tarball() {
(! nix-instantiate --eval -E '<fnord/xyzzy> 1' -I fnord=file://no-such-tarball$ext)
nix-instantiate --eval -E '<fnord/config.nix>' -I fnord=file://no-such-tarball$ext -I fnord=.
+
+ # Ensure that the `name` attribute isn’t accepted, as that would mess
+ # with the content-addressing
+ (! nix-instantiate --eval -E "fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; name = \"foo\"; }")
+
}
test_tarball '' cat
diff --git a/tests/timeout.sh b/tests/timeout.sh
index eea9b5731..e3fb3ebcc 100644
--- a/tests/timeout.sh
+++ b/tests/timeout.sh
@@ -2,6 +2,8 @@
source common.sh
+# XXX: This shouldn’t be needed, but #4813 causes this test to fail
+needLocalStore "see #4813"
set +e
messages=$(nix-build -Q timeout.nix -A infiniteLoop --timeout 2 2>&1)