Diffstat (limited to 'src')
-rw-r--r--  src/build-remote/build-remote.cc | 4
-rw-r--r--  src/libcmd/command.cc | 2
-rw-r--r--  src/libcmd/common-eval-args.cc | 95
-rw-r--r--  src/libcmd/installables.cc | 199
-rw-r--r--  src/libcmd/installables.hh | 11
-rw-r--r--  src/libcmd/local.mk | 2
-rw-r--r--  src/libcmd/repl.cc | 35
-rw-r--r--  src/libexpr/eval-cache.cc | 6
-rw-r--r--  src/libexpr/eval.cc | 254
-rw-r--r--  src/libexpr/eval.hh | 15
-rw-r--r--  src/libexpr/flake/call-flake.nix | 2
-rw-r--r--  src/libexpr/flake/config.cc | 2
-rw-r--r--  src/libexpr/flake/flake.cc | 53
-rw-r--r--  src/libexpr/flake/flake.hh | 6
-rw-r--r--  src/libexpr/flake/flakeref.hh | 2
-rw-r--r--  src/libexpr/flake/lockfile.cc | 48
-rw-r--r--  src/libexpr/flake/lockfile.hh | 9
-rw-r--r--  src/libexpr/get-drvs.cc | 2
-rw-r--r--  src/libexpr/nixexpr.cc | 73
-rw-r--r--  src/libexpr/nixexpr.hh | 29
-rw-r--r--  src/libexpr/parser.y | 65
-rw-r--r--  src/libexpr/primops.cc | 113
-rw-r--r--  src/libexpr/primops/fetchTree.cc | 2
-rw-r--r--  src/libexpr/tests/libexprtests.hh | 3
-rw-r--r--  src/libexpr/tests/primops.cc | 17
-rw-r--r--  src/libexpr/value-to-json.cc | 50
-rw-r--r--  src/libexpr/value-to-json.hh | 7
-rw-r--r--  src/libexpr/value-to-xml.cc | 3
-rw-r--r--  src/libexpr/value.hh | 6
-rw-r--r--  src/libfetchers/fetch-settings.hh | 7
-rw-r--r--  src/libfetchers/fetchers.cc | 6
-rw-r--r--  src/libfetchers/fetchers.hh | 13
-rw-r--r--  src/libfetchers/git.cc | 64
-rw-r--r--  src/libfetchers/github.cc | 70
-rw-r--r--  src/libfetchers/indirect.cc | 10
-rw-r--r--  src/libfetchers/mercurial.cc | 10
-rw-r--r--  src/libfetchers/path.cc | 8
-rw-r--r--  src/libfetchers/registry.cc | 3
-rw-r--r--  src/libfetchers/tarball.cc | 11
-rw-r--r--  src/libmain/progress-bar.cc | 12
-rw-r--r--  src/libmain/shared.cc | 2
-rw-r--r--  src/libstore/binary-cache-store.cc | 31
-rw-r--r--  src/libstore/binary-cache-store.hh | 3
-rw-r--r--  src/libstore/build-result.hh | 5
-rw-r--r--  src/libstore/build/derivation-goal.cc | 101
-rw-r--r--  src/libstore/build/derivation-goal.hh | 5
-rw-r--r--  src/libstore/build/entry-points.cc | 6
-rw-r--r--  src/libstore/build/hook-instance.cc | 2
-rw-r--r--  src/libstore/build/local-derivation-goal.cc | 369
-rw-r--r--  src/libstore/build/local-derivation-goal.hh | 11
-rw-r--r--  src/libstore/build/personality.cc | 44
-rw-r--r--  src/libstore/build/personality.hh | 11
-rw-r--r--  src/libstore/builtins/buildenv.cc | 2
-rw-r--r--  src/libstore/daemon.cc | 1
-rw-r--r--  src/libstore/derivations.cc | 2
-rw-r--r--  src/libstore/derivations.hh | 2
-rw-r--r--  src/libstore/derived-path.cc | 47
-rw-r--r--  src/libstore/derived-path.hh | 5
-rw-r--r--  src/libstore/filetransfer.cc | 24
-rw-r--r--  src/libstore/filetransfer.hh | 5
-rw-r--r--  src/libstore/gc.cc | 3
-rw-r--r--  src/libstore/globals.cc | 18
-rw-r--r--  src/libstore/globals.hh | 106
-rw-r--r--  src/libstore/legacy-ssh-store.cc | 4
-rw-r--r--  src/libstore/local-store.cc | 29
-rw-r--r--  src/libstore/local-store.hh | 15
-rw-r--r--  src/libstore/local.mk | 2
-rw-r--r--  src/libstore/lock.cc | 236
-rw-r--r--  src/libstore/lock.hh | 43
-rw-r--r--  src/libstore/nar-accessor.cc | 30
-rw-r--r--  src/libstore/nar-accessor.hh | 6
-rw-r--r--  src/libstore/nar-info-disk-cache.cc | 42
-rw-r--r--  src/libstore/parsed-derivations.cc | 22
-rw-r--r--  src/libstore/parsed-derivations.hh | 2
-rw-r--r--  src/libstore/references.cc | 53
-rw-r--r--  src/libstore/references.hh | 13
-rw-r--r--  src/libstore/remote-fs-accessor.cc | 8
-rw-r--r--  src/libstore/remote-store.cc | 2
-rw-r--r--  src/libstore/sqlite.cc | 15
-rw-r--r--  src/libstore/sqlite.hh | 11
-rw-r--r--  src/libstore/ssh.cc | 2
-rw-r--r--  src/libstore/store-api.cc | 57
-rw-r--r--  src/libstore/store-api.hh | 8
-rw-r--r--  src/libutil/archive.cc | 4
-rw-r--r--  src/libutil/archive.hh | 4
-rw-r--r--  src/libutil/canon-path.cc | 103
-rw-r--r--  src/libutil/canon-path.hh | 173
-rw-r--r--  src/libutil/cgroup.cc | 148
-rw-r--r--  src/libutil/cgroup.hh | 29
-rw-r--r--  src/libutil/error.cc | 193
-rw-r--r--  src/libutil/error.hh | 61
-rw-r--r--  src/libutil/experimental-features.cc | 2
-rw-r--r--  src/libutil/experimental-features.hh | 2
-rw-r--r--  src/libutil/filesystem.cc | 9
-rw-r--r--  src/libutil/fmt.hh | 2
-rw-r--r--  src/libutil/json.cc | 203
-rw-r--r--  src/libutil/json.hh | 185
-rw-r--r--  src/libutil/logging.cc | 44
-rw-r--r--  src/libutil/logging.hh | 8
-rw-r--r--  src/libutil/ref.hh | 5
-rw-r--r--  src/libutil/serialise.cc | 2
-rw-r--r--  src/libutil/serialise.hh | 14
-rw-r--r--  src/libutil/tarfile.cc | 8
-rw-r--r--  src/libutil/tests/canon-path.cc | 155
-rw-r--r--  src/libutil/tests/json.cc | 193
-rw-r--r--  src/libutil/util.cc | 76
-rw-r--r--  src/libutil/util.hh | 29
-rw-r--r--  src/nix-env/nix-env.cc | 41
-rw-r--r--  src/nix-store/nix-store.cc | 18
-rw-r--r--  src/nix/app.cc | 15
-rw-r--r--  src/nix/build.cc | 42
-rw-r--r--  src/nix/daemon.cc | 2
-rw-r--r--  src/nix/daemon.md | 2
-rw-r--r--  src/nix/develop.cc | 12
-rw-r--r--  src/nix/eval.cc | 7
-rw-r--r--  src/nix/flake-update.md | 2
-rw-r--r--  src/nix/flake.cc | 41
-rw-r--r--  src/nix/flake.md | 73
-rw-r--r--  src/nix/local.mk | 2
-rw-r--r--  src/nix/ls.cc | 5
-rw-r--r--  src/nix/main.cc | 3
-rw-r--r--  src/nix/make-content-addressed.cc | 14
-rw-r--r--  src/nix/nix.md | 29
-rw-r--r--  src/nix/path-from-hash-part.cc | 39
-rw-r--r--  src/nix/path-from-hash-part.md | 20
-rw-r--r--  src/nix/path-info.cc | 8
-rw-r--r--  src/nix/path-info.md | 4
-rw-r--r--  src/nix/profile-list.md | 6
-rw-r--r--  src/nix/profile-upgrade.md | 6
-rw-r--r--  src/nix/profile.cc | 4
-rw-r--r--  src/nix/profile.md | 3
-rw-r--r--  src/nix/registry.cc | 8
-rw-r--r--  src/nix/repl.md | 2
-rw-r--r--  src/nix/run.cc | 13
-rw-r--r--  src/nix/run.hh | 3
-rw-r--r--  src/nix/search.cc | 19
-rw-r--r--  src/nix/show-derivation.cc | 67
-rw-r--r--  src/nix/show-derivation.md | 6
-rw-r--r--  src/nix/store-copy-log.md | 4
-rw-r--r--  src/nix/why-depends.cc | 39
140 files changed, 2866 insertions, 2079 deletions
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc
index ff8ba2724..6b81ecc49 100644
--- a/src/build-remote/build-remote.cc
+++ b/src/build-remote/build-remote.cc
@@ -186,12 +186,12 @@ static int main_build_remote(int argc, char * * argv)
// build the hint template.
std::string errorText =
"Failed to find a machine for remote build!\n"
- "derivation: %s\nrequired (system, features): (%s, %s)";
+ "derivation: %s\nrequired (system, features): (%s, [%s])";
errorText += "\n%s available machines:";
errorText += "\n(systems, maxjobs, supportedFeatures, mandatoryFeatures)";
for (unsigned int i = 0; i < machines.size(); ++i)
- errorText += "\n(%s, %s, %s, %s)";
+ errorText += "\n([%s], %s, [%s], [%s])";
// add the template values.
std::string drvstr;
diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc
index 1fdd9e0bd..0740ea960 100644
--- a/src/libcmd/command.cc
+++ b/src/libcmd/command.cc
@@ -226,7 +226,7 @@ MixProfile::MixProfile()
{
addFlag({
.longName = "profile",
- .description = "The profile to update.",
+ .description = "The profile to operate on.",
.labels = {"path"},
.handler = {&profile},
.completer = completePath
diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc
index 140ed3b88..0e321e5e4 100644
--- a/src/libcmd/common-eval-args.cc
+++ b/src/libcmd/common-eval-args.cc
@@ -32,7 +32,77 @@ MixEvalArgs::MixEvalArgs()
addFlag({
.longName = "include",
.shortName = 'I',
- .description = "Add *path* to the list of locations used to look up `<...>` file names.",
+ .description = R"(
+ Add *path* to the Nix search path. The Nix search path is
+ initialized from the colon-separated [`NIX_PATH`](./env-common.md#env-NIX_PATH) environment
+ variable, and is used to look up the location of Nix expressions using [paths](../language/values.md#type-path) enclosed in angle
+ brackets (i.e., `<nixpkgs>`).
+
+ For instance, passing
+
+ ```
+ -I /home/eelco/Dev
+ -I /etc/nixos
+ ```
+
+ will cause Nix to look for paths relative to `/home/eelco/Dev` and
+ `/etc/nixos`, in that order. This is equivalent to setting the
+ `NIX_PATH` environment variable to
+
+ ```
+ /home/eelco/Dev:/etc/nixos
+ ```
+
+ It is also possible to match paths against a prefix. For example,
+ passing
+
+ ```
+ -I nixpkgs=/home/eelco/Dev/nixpkgs-branch
+ -I /etc/nixos
+ ```
+
+ will cause Nix to search for `<nixpkgs/path>` in
+ `/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
+
+ If a path in the Nix search path starts with `http://` or `https://`,
+ it is interpreted as the URL of a tarball that will be downloaded and
+ unpacked to a temporary location. The tarball must consist of a single
+ top-level directory. For example, passing
+
+ ```
+ -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
+ ```
+
+ tells Nix to download and use the current contents of the `master`
+ branch in the `nixpkgs` repository.
+
+ The URLs of the tarballs from the official `nixos.org` channels
+ (see [the manual page for `nix-channel`](../nix-channel.md)) can be
+ abbreviated as `channel:<channel-name>`. For instance, the
+ following two flags are equivalent:
+
+ ```
+ -I nixpkgs=channel:nixos-21.05
+ -I nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
+ ```
+
+ You can also fetch source trees using [flake URLs](./nix3-flake.md#url-like-syntax) and add them to the
+ search path. For instance,
+
+ ```
+ -I nixpkgs=flake:nixpkgs
+ ```
+
+ specifies that the prefix `nixpkgs` shall refer to the source tree
+ downloaded from the `nixpkgs` entry in the flake registry. Similarly,
+
+ ```
+ -I nixpkgs=flake:github:NixOS/nixpkgs/nixos-22.05
+ ```
+
+ makes `<nixpkgs>` refer to a particular branch of the
+ `NixOS/nixpkgs` repository on GitHub.
+ )",
.category = category,
.labels = {"path"},
.handler = {[&](std::string s) { searchPath.push_back(s); }}
@@ -89,14 +159,25 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
Path lookupFileArg(EvalState & state, std::string_view s)
{
- if (isUri(s)) {
- return state.store->toRealPath(
- fetchers::downloadTarball(
- state.store, resolveUri(s), "source", false).first.storePath);
- } else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
+ if (EvalSettings::isPseudoUrl(s)) {
+ auto storePath = fetchers::downloadTarball(
+ state.store, EvalSettings::resolvePseudoUrl(s), "source", false).first.storePath;
+ return state.store->toRealPath(storePath);
+ }
+
+ else if (hasPrefix(s, "flake:")) {
+ settings.requireExperimentalFeature(Xp::Flakes);
+ auto flakeRef = parseFlakeRef(std::string(s.substr(6)), {}, true, false);
+ auto storePath = flakeRef.resolve(state.store).fetchTree(state.store).first.storePath;
+ return state.store->toRealPath(storePath);
+ }
+
+ else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
Path p(s.substr(1, s.size() - 2));
return state.findFile(p);
- } else
+ }
+
+ else
return absPath(std::string(s));
}
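As context for the hunk above: lookupFileArg now tries four forms in order — a pseudo-URL (e.g. `channel:...` or a `scheme://` URL), a `flake:` reference, an angle-bracketed search-path lookup, and finally a plain path. A minimal standalone sketch of that ordering follows; the classifier is hypothetical and deliberately simpler than `EvalSettings::isPseudoUrl`, which also restricts the allowed schemes.

```cpp
#include <iostream>
#include <string>
#include <string_view>

// Hypothetical classifier mirroring the branch order in lookupFileArg above.
enum class FileArgKind { PseudoUrl, FlakeRef, SearchPath, PlainPath };

static bool looksLikePseudoUrl(std::string_view s)
{
    // Same idea as EvalSettings::isPseudoUrl: "channel:" or a "scheme://" URL.
    return s.rfind("channel:", 0) == 0 || s.find("://") != std::string_view::npos;
}

static FileArgKind classifyFileArg(std::string_view s)
{
    if (looksLikePseudoUrl(s)) return FileArgKind::PseudoUrl;
    if (s.rfind("flake:", 0) == 0) return FileArgKind::FlakeRef;
    if (s.size() > 2 && s.front() == '<' && s.back() == '>') return FileArgKind::SearchPath;
    return FileArgKind::PlainPath;
}

int main()
{
    for (auto arg : {"channel:nixos-21.05", "flake:nixpkgs", "<nixpkgs>", "./default.nix"})
        std::cout << arg << " -> branch " << static_cast<int>(classifyFileArg(arg)) << "\n";
}
```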
diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc
index 00a7a33b7..f481a41a0 100644
--- a/src/libcmd/installables.cc
+++ b/src/libcmd/installables.cc
@@ -168,7 +168,7 @@ SourceExprCommand::SourceExprCommand(bool supportReadOnlyMode)
addFlag({
.longName = "derivation",
- .description = "Operate on the store derivation rather than its outputs.",
+ .description = "Operate on the [store derivation](../../glossary.md#gloss-store-derivation) rather than its outputs.",
.category = installablesCategory,
.handler = {&operateOn, OperateOn::Derivation},
});
@@ -207,55 +207,59 @@ Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes()
void SourceExprCommand::completeInstallable(std::string_view prefix)
{
- if (file) {
- completionType = ctAttrs;
+ try {
+ if (file) {
+ completionType = ctAttrs;
- evalSettings.pureEval = false;
- auto state = getEvalState();
- Expr *e = state->parseExprFromFile(
- resolveExprPath(state->checkSourcePath(lookupFileArg(*state, *file)))
- );
+ evalSettings.pureEval = false;
+ auto state = getEvalState();
+ Expr *e = state->parseExprFromFile(
+ resolveExprPath(state->checkSourcePath(lookupFileArg(*state, *file)))
+ );
- Value root;
- state->eval(e, root);
+ Value root;
+ state->eval(e, root);
- auto autoArgs = getAutoArgs(*state);
+ auto autoArgs = getAutoArgs(*state);
- std::string prefix_ = std::string(prefix);
- auto sep = prefix_.rfind('.');
- std::string searchWord;
- if (sep != std::string::npos) {
- searchWord = prefix_.substr(sep + 1, std::string::npos);
- prefix_ = prefix_.substr(0, sep);
- } else {
- searchWord = prefix_;
- prefix_ = "";
- }
+ std::string prefix_ = std::string(prefix);
+ auto sep = prefix_.rfind('.');
+ std::string searchWord;
+ if (sep != std::string::npos) {
+ searchWord = prefix_.substr(sep + 1, std::string::npos);
+ prefix_ = prefix_.substr(0, sep);
+ } else {
+ searchWord = prefix_;
+ prefix_ = "";
+ }
- auto [v, pos] = findAlongAttrPath(*state, prefix_, *autoArgs, root);
- Value &v1(*v);
- state->forceValue(v1, pos);
- Value v2;
- state->autoCallFunction(*autoArgs, v1, v2);
-
- if (v2.type() == nAttrs) {
- for (auto & i : *v2.attrs) {
- std::string name = state->symbols[i.name];
- if (name.find(searchWord) == 0) {
- if (prefix_ == "")
- completions->add(name);
- else
- completions->add(prefix_ + "." + name);
+ auto [v, pos] = findAlongAttrPath(*state, prefix_, *autoArgs, root);
+ Value &v1(*v);
+ state->forceValue(v1, pos);
+ Value v2;
+ state->autoCallFunction(*autoArgs, v1, v2);
+
+ if (v2.type() == nAttrs) {
+ for (auto & i : *v2.attrs) {
+ std::string name = state->symbols[i.name];
+ if (name.find(searchWord) == 0) {
+ if (prefix_ == "")
+ completions->add(name);
+ else
+ completions->add(prefix_ + "." + name);
+ }
}
}
+ } else {
+ completeFlakeRefWithFragment(
+ getEvalState(),
+ lockFlags,
+ getDefaultFlakeAttrPathPrefixes(),
+ getDefaultFlakeAttrPaths(),
+ prefix);
}
- } else {
- completeFlakeRefWithFragment(
- getEvalState(),
- lockFlags,
- getDefaultFlakeAttrPathPrefixes(),
- getDefaultFlakeAttrPaths(),
- prefix);
+ } catch (EvalError&) {
+ // Don't want eval errors to mess up the completion engine, so let's just swallow them
}
}
@@ -395,44 +399,56 @@ static StorePath getDeriver(
struct InstallableStorePath : Installable
{
ref<Store> store;
- StorePath storePath;
+ DerivedPath req;
InstallableStorePath(ref<Store> store, StorePath && storePath)
- : store(store), storePath(std::move(storePath)) { }
+ : store(store),
+ req(storePath.isDerivation()
+ ? (DerivedPath) DerivedPath::Built {
+ .drvPath = std::move(storePath),
+ .outputs = {},
+ }
+ : (DerivedPath) DerivedPath::Opaque {
+ .path = std::move(storePath),
+ })
+ { }
- std::string what() const override { return store->printStorePath(storePath); }
+ InstallableStorePath(ref<Store> store, DerivedPath && req)
+ : store(store), req(std::move(req))
+ { }
+
+ std::string what() const override
+ {
+ return req.to_string(*store);
+ }
DerivedPaths toDerivedPaths() override
{
- if (storePath.isDerivation()) {
- auto drv = store->readDerivation(storePath);
- return {
- DerivedPath::Built {
- .drvPath = storePath,
- .outputs = drv.outputNames(),
- }
- };
- } else {
- return {
- DerivedPath::Opaque {
- .path = storePath,
- }
- };
- }
+ return { req };
}
StorePathSet toDrvPaths(ref<Store> store) override
{
- if (storePath.isDerivation()) {
- return {storePath};
- } else {
- return {getDeriver(store, *this, storePath)};
- }
+ return std::visit(overloaded {
+ [&](const DerivedPath::Built & bfd) -> StorePathSet {
+ return { bfd.drvPath };
+ },
+ [&](const DerivedPath::Opaque & bo) -> StorePathSet {
+ return { getDeriver(store, *this, bo.path) };
+ },
+ }, req.raw());
}
std::optional<StorePath> getStorePath() override
{
- return storePath;
+ return std::visit(overloaded {
+ [&](const DerivedPath::Built & bfd) {
+ return bfd.drvPath;
+ },
+ [&](const DerivedPath::Opaque & bo) {
+ return bo.path;
+ },
+ }, req.raw());
}
};
@@ -777,7 +793,8 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
if (file == "-") {
auto e = state->parseStdin();
state->eval(e, *vFile);
- } else if (file)
+ }
+ else if (file)
state->evalFile(lookupFileArg(*state, *file), *vFile);
else {
auto e = state->parseExprFromString(*expr, absPath("."));
@@ -798,7 +815,22 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
for (auto & s : ss) {
std::exception_ptr ex;
- if (s.find('/') != std::string::npos) {
+ auto found = s.rfind('^');
+ if (found != std::string::npos) {
+ try {
+ result.push_back(std::make_shared<InstallableStorePath>(
+ store,
+ DerivedPath::Built::parse(*store, s.substr(0, found), s.substr(found + 1))));
+ continue;
+ } catch (BadStorePath &) {
+ } catch (...) {
+ if (!ex)
+ ex = std::current_exception();
+ }
+ }
+
+ found = s.find('/');
+ if (found != std::string::npos) {
try {
result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
continue;
@@ -840,20 +872,20 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
return installables.front();
}
-BuiltPaths Installable::build(
+std::vector<BuiltPathWithResult> Installable::build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode)
{
- BuiltPaths res;
- for (auto & [_, builtPath] : build2(evalStore, store, mode, installables, bMode))
- res.push_back(builtPath);
+ std::vector<BuiltPathWithResult> res;
+ for (auto & [_, builtPathWithResult] : build2(evalStore, store, mode, installables, bMode))
+ res.push_back(builtPathWithResult);
return res;
}
-std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::build2(
+std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> Installable::build2(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
@@ -873,7 +905,7 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
}
}
- std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> res;
+ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> res;
switch (mode) {
@@ -914,10 +946,10 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
output, *drvOutput->second);
}
}
- res.push_back({installable, BuiltPath::Built { bfd.drvPath, outputs }});
+ res.push_back({installable, {.path = BuiltPath::Built { bfd.drvPath, outputs }}});
},
[&](const DerivedPath::Opaque & bo) {
- res.push_back({installable, BuiltPath::Opaque { bo.path }});
+ res.push_back({installable, {.path = BuiltPath::Opaque { bo.path }}});
},
}, path.raw());
}
@@ -927,7 +959,7 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
case Realise::Outputs: {
if (settings.printMissing)
- printMissing(store, pathsToBuild, lvlInfo);
+ printMissing(store, pathsToBuild, lvlInfo);
for (auto & buildResult : store->buildPathsWithResults(pathsToBuild, bMode, evalStore)) {
if (!buildResult.success())
@@ -939,10 +971,10 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
std::map<std::string, StorePath> outputs;
for (auto & path : buildResult.builtOutputs)
outputs.emplace(path.first.outputName, path.second.outPath);
- res.push_back({installable, BuiltPath::Built { bfd.drvPath, outputs }});
+ res.push_back({installable, {.path = BuiltPath::Built { bfd.drvPath, outputs }, .result = buildResult}});
},
[&](const DerivedPath::Opaque & bo) {
- res.push_back({installable, BuiltPath::Opaque { bo.path }});
+ res.push_back({installable, {.path = BuiltPath::Opaque { bo.path }, .result = buildResult}});
},
}, buildResult.path.raw());
}
@@ -965,9 +997,12 @@ BuiltPaths Installable::toBuiltPaths(
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables)
{
- if (operateOn == OperateOn::Output)
- return Installable::build(evalStore, store, mode, installables);
- else {
+ if (operateOn == OperateOn::Output) {
+ BuiltPaths res;
+ for (auto & p : Installable::build(evalStore, store, mode, installables))
+ res.push_back(p.path);
+ return res;
+ } else {
if (mode == Realise::Nothing)
settings.readOnlyMode = true;
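The `^` handling added to parseInstallables above splits an argument at its last `^` into a store path and an output spec before handing both halves to `DerivedPath::Built::parse`. Below is a self-contained sketch of just that split; the `OutputSpecSplit` struct is hypothetical, and the comma-separated output list in the example assumes the usual `drv^out,dev` installable syntax.

```cpp
#include <iostream>
#include <optional>
#include <string>

// Hypothetical helper: split "storePath^outputs" at the last '^',
// mirroring the s.rfind('^') logic in parseInstallables above.
struct OutputSpecSplit { std::string path; std::string outputs; };

static std::optional<OutputSpecSplit> splitOutputSpec(const std::string & s)
{
    auto found = s.rfind('^');
    if (found == std::string::npos) return std::nullopt;
    return OutputSpecSplit{s.substr(0, found), s.substr(found + 1)};
}

int main()
{
    if (auto split = splitOutputSpec("/nix/store/aaaa-hello.drv^out,dev"))
        std::cout << split->path << " with outputs [" << split->outputs << "]\n";
}
```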
diff --git a/src/libcmd/installables.hh b/src/libcmd/installables.hh
index 948f78919..02ea351d3 100644
--- a/src/libcmd/installables.hh
+++ b/src/libcmd/installables.hh
@@ -7,6 +7,7 @@
#include "eval.hh"
#include "store-api.hh"
#include "flake/flake.hh"
+#include "build-result.hh"
#include <optional>
@@ -51,6 +52,12 @@ enum class OperateOn {
Derivation
};
+struct BuiltPathWithResult
+{
+ BuiltPath path;
+ std::optional<BuildResult> result;
+};
+
struct Installable
{
virtual ~Installable() { }
@@ -91,14 +98,14 @@ struct Installable
return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}});
}
- static BuiltPaths build(
+ static std::vector<BuiltPathWithResult> build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode = bmNormal);
- static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> build2(
+ static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> build2(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
diff --git a/src/libcmd/local.mk b/src/libcmd/local.mk
index 3a4de6bcb..152bc388d 100644
--- a/src/libcmd/local.mk
+++ b/src/libcmd/local.mk
@@ -8,7 +8,7 @@ libcmd_SOURCES := $(wildcard $(d)/*.cc)
libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers -I src/nix
-libcmd_LDFLAGS = $(EDITLINE_LIBS) -llowdown -pthread
+libcmd_LDFLAGS = $(EDITLINE_LIBS) $(LOWDOWN_LIBS) -pthread
libcmd_LIBS = libstore libutil libexpr libmain libfetchers
diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc
index 4a89f3e28..71a7e079a 100644
--- a/src/libcmd/repl.cc
+++ b/src/libcmd/repl.cc
@@ -215,17 +215,15 @@ static std::ostream & showDebugTrace(std::ostream & out, const PosTable & positi
out << dt.hint.str() << "\n";
// prefer direct pos, but if noPos then try the expr.
- auto pos = *dt.pos
- ? *dt.pos
- : positions[dt.expr.getPos() ? dt.expr.getPos() : noPos];
+ auto pos = dt.pos
+ ? dt.pos
+ : static_cast<std::shared_ptr<AbstractPos>>(positions[dt.expr.getPos() ? dt.expr.getPos() : noPos]);
if (pos) {
- printAtPos(pos, out);
-
- auto loc = getCodeLines(pos);
- if (loc.has_value()) {
+ out << pos;
+ if (auto loc = pos->getCodeLines()) {
out << "\n";
- printCodeLines(out, "", pos, *loc);
+ printCodeLines(out, "", *pos, *loc);
out << "\n";
}
}
@@ -270,6 +268,7 @@ void NixRepl::mainLoop()
// ctrl-D should exit the debugger.
state->debugStop = false;
state->debugQuit = true;
+ logger->cout("");
break;
}
try {
@@ -384,6 +383,10 @@ StringSet NixRepl::completePrefix(const std::string & prefix)
i++;
}
} else {
+ /* Temporarily disable the debugger, to avoid re-entering readline. */
+ auto debug_repl = state->debugRepl;
+ state->debugRepl = nullptr;
+ Finally restoreDebug([&]() { state->debugRepl = debug_repl; });
try {
/* This is an expression that should evaluate to an
attribute set. Evaluate it to get the names of the
@@ -584,15 +587,17 @@ bool NixRepl::processLine(std::string line)
Value v;
evalString(arg, v);
- const auto [file, line] = [&] () -> std::pair<std::string, uint32_t> {
+ const auto [path, line] = [&] () -> std::pair<Path, uint32_t> {
if (v.type() == nPath || v.type() == nString) {
PathSet context;
- auto filename = state->coerceToString(noPos, v, context, "while evaluating the filename to edit").toOwned();
- state->symbols.create(filename);
- return {filename, 0};
+ auto path = state->coerceToPath(noPos, v, context, "while evaluating the filename to edit");
+ return {path, 0};
} else if (v.isLambda()) {
auto pos = state->positions[v.lambda.fun->pos];
- return {pos.file, pos.line};
+ if (auto path = std::get_if<Path>(&pos.origin))
+ return {*path, pos.line};
+ else
+ throw Error("'%s' cannot be shown in an editor", pos);
} else {
// assume it's a derivation
return findPackageFilename(*state, v, arg);
@@ -600,7 +605,7 @@ bool NixRepl::processLine(std::string line)
}();
// Open in EDITOR
- auto args = editorFor(file, line);
+ auto args = editorFor(path, line);
auto editor = args.front();
args.pop_front();
@@ -782,7 +787,7 @@ void NixRepl::loadFlake(const std::string & flakeRefS)
flake::LockFlags {
.updateLockFile = false,
.useRegistries = !evalSettings.pureEval,
- .allowMutable = !evalSettings.pureEval,
+ .allowUnlocked = !evalSettings.pureEval,
}),
v);
addAttrsToScope(v);
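The completePrefix hunk above temporarily clears `state->debugRepl` and relies on a `Finally` guard to restore it even if completion throws. A minimal standalone sketch of that pattern follows; this `Finally` is a simplified stand-in for the libutil utility of the same name.

```cpp
#include <functional>
#include <iostream>

// Simplified stand-in for the Finally guard used above: run a callback when
// the enclosing scope exits, so the saved state is restored even on throw.
class Finally
{
    std::function<void()> fun;
public:
    explicit Finally(std::function<void()> fun) : fun(std::move(fun)) { }
    ~Finally() { fun(); }
};

int main()
{
    const char * debugRepl = "enabled";
    {
        auto saved = debugRepl;
        debugRepl = nullptr;                          // temporarily disable
        Finally restore([&] { debugRepl = saved; });  // restored on scope exit
        std::cout << (debugRepl ? debugRepl : "disabled") << "\n";
    }
    std::cout << debugRepl << "\n"; // back to "enabled"
}
```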
diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc
index 9a8a7fe62..afe575fee 100644
--- a/src/libexpr/eval-cache.cc
+++ b/src/libexpr/eval-cache.cc
@@ -645,17 +645,17 @@ NixInt AttrCursor::getInt()
cachedValue = root->db->getAttr(getKey());
if (cachedValue && !std::get_if<placeholder_t>(&cachedValue->second)) {
if (auto i = std::get_if<int_t>(&cachedValue->second)) {
- debug("using cached Integer attribute '%s'", getAttrPathStr());
+ debug("using cached integer attribute '%s'", getAttrPathStr());
return i->x;
} else
- throw TypeError("'%s' is not an Integer", getAttrPathStr());
+ throw TypeError("'%s' is not an integer", getAttrPathStr());
}
}
auto & v = forceValue();
if (v.type() != nInt)
- throw TypeError("'%s' is not an Integer", getAttrPathStr());
+ throw TypeError("'%s' is not an integer", getAttrPathStr());
return v.integer;
}
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 0ff8bba4d..72c2b104f 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -7,7 +7,6 @@
#include "globals.hh"
#include "eval-inline.hh"
#include "filetransfer.hh"
-#include "json.hh"
#include "function-trace.hh"
#include <algorithm>
@@ -21,6 +20,7 @@
#include <functional>
#include <sys/resource.h>
+#include <nlohmann/json.hpp>
#if HAVE_BOEHMGC
@@ -35,6 +35,8 @@
#endif
+using json = nlohmann::json;
+
namespace nix {
static char * allocString(size_t size)
@@ -43,7 +45,7 @@ static char * allocString(size_t size)
#if HAVE_BOEHMGC
t = (char *) GC_MALLOC_ATOMIC(size);
#else
- t = malloc(size);
+ t = (char *) malloc(size);
#endif
if (!t) throw std::bad_alloc();
return t;
@@ -65,26 +67,19 @@ static char * dupString(const char * s)
// When there's no need to write to the string, we can optimize away empty
// string allocations.
-// This function handles makeImmutableStringWithLen(null, 0) by returning the
-// empty string.
-static const char * makeImmutableStringWithLen(const char * s, size_t size)
+// This function handles makeImmutableString(std::string_view()) by returning
+// the empty string.
+static const char * makeImmutableString(std::string_view s)
{
- char * t;
+ const size_t size = s.size();
if (size == 0)
return "";
-#if HAVE_BOEHMGC
- t = GC_STRNDUP(s, size);
-#else
- t = strndup(s, size);
-#endif
- if (!t) throw std::bad_alloc();
+ auto t = allocString(size + 1);
+ memcpy(t, s.data(), size);
+ t[size] = '\0';
return t;
}
-static inline const char * makeImmutableString(std::string_view s) {
- return makeImmutableStringWithLen(s.data(), s.size());
-}
-
RootValue allocRootValue(Value * v)
{
@@ -404,7 +399,8 @@ static Strings parseNixPath(const std::string & s)
}
if (*p == ':') {
- if (isUri(std::string(start2, s.end()))) {
+ auto prefix = std::string(start2, s.end());
+ if (EvalSettings::isPseudoUrl(prefix) || hasPrefix(prefix, "flake:")) {
++p;
while (p != s.end() && *p != ':') ++p;
}
@@ -436,21 +432,23 @@ ErrorBuilder & ErrorBuilder::withFrameTrace(PosIdx pos, const std::string_view t
return *this;
}
-ErrorBuilder & ErrorBuilder::withSuggestions(Suggestions & s) {
+ErrorBuilder & ErrorBuilder::withSuggestions(Suggestions & s)
+{
info.suggestions = s;
return *this;
}
-ErrorBuilder & ErrorBuilder::withFrame(const Env & env, const Expr & expr) {
+ErrorBuilder & ErrorBuilder::withFrame(const Env & env, const Expr & expr)
+{
// NOTE: This is abusing side-effects.
// TODO: check compatibility with nested debugger calls.
state.debugTraces.push_front(DebugTrace {
- .pos = std::nullopt,
- .expr = expr,
- .env = env,
- .hint = hintformat("Fake frame for debugg{ing,er} purposes"),
- .isError = true
- });
+ .pos = nullptr,
+ .expr = expr,
+ .env = env,
+ .hint = hintformat("Fake frame for debugging purposes"),
+ .isError = true
+ });
return *this;
}
@@ -508,9 +506,6 @@ EvalState::EvalState(
#if HAVE_BOEHMGC
, valueAllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
, env1AllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
-#else
- , valueAllocCache(std::make_shared<void *>(nullptr))
- , env1AllocCache(std::make_shared<void *>(nullptr))
#endif
, baseEnv(allocEnv(128))
, staticBaseEnv{std::make_shared<StaticEnv>(false, nullptr)}
@@ -842,7 +837,7 @@ void EvalState::runDebugRepl(const Error * error, const Env & env, const Expr &
? std::make_unique<DebugTraceStacker>(
*this,
DebugTrace {
- .pos = error->info().errPos ? *error->info().errPos : positions[expr.getPos()],
+ .pos = error->info().errPos ? error->info().errPos : static_cast<std::shared_ptr<AbstractPos>>(positions[expr.getPos()]),
.expr = expr,
.env = env,
.hint = error->info().msg,
@@ -869,7 +864,7 @@ void EvalState::runDebugRepl(const Error * error, const Env & env, const Expr &
void EvalState::addErrorTrace(Error & e, const char * s, const std::string & s2) const
{
- e.addTrace(std::nullopt, s, s2);
+ e.addTrace(nullptr, s, s2);
}
void EvalState::addErrorTrace(Error & e, const PosIdx pos, const char * s, const std::string & s2, bool frame) const
@@ -881,13 +876,13 @@ static std::unique_ptr<DebugTraceStacker> makeDebugTraceStacker(
EvalState & state,
Expr & expr,
Env & env,
- std::optional<ErrPos> pos,
+ std::shared_ptr<AbstractPos> && pos,
const char * s,
const std::string & s2)
{
return std::make_unique<DebugTraceStacker>(state,
DebugTrace {
- .pos = pos,
+ .pos = std::move(pos),
.expr = expr,
.env = env,
.hint = hintfmt(s, s2),
@@ -993,9 +988,9 @@ void EvalState::mkThunk_(Value & v, Expr * expr)
void EvalState::mkPos(Value & v, PosIdx p)
{
auto pos = positions[p];
- if (!pos.file.empty()) {
+ if (auto path = std::get_if<Path>(&pos.origin)) {
auto attrs = buildBindings(3);
- attrs.alloc(sFile).mkString(pos.file);
+ attrs.alloc(sFile).mkString(*path);
attrs.alloc(sLine).mkInt(pos.line);
attrs.alloc(sColumn).mkInt(pos.column);
v.mkAttrs(attrs);
@@ -1103,7 +1098,7 @@ void EvalState::cacheFile(
*this,
*e,
this->baseEnv,
- e->getPos() ? std::optional(ErrPos(positions[e->getPos()])) : std::nullopt,
+ e->getPos() ? static_cast<std::shared_ptr<AbstractPos>>(positions[e->getPos()]) : nullptr,
"while evaluating the file '%1%':", resolvedPath)
: nullptr;
@@ -1373,10 +1368,13 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v)
state.forceValue(*vAttrs, (pos2 ? pos2 : this->pos ) );
} catch (Error & e) {
- auto pos2r = state.positions[pos2];
- if (pos2 && pos2r.file != state.derivationNixPath)
- state.addErrorTrace(e, pos2, "while evaluating the attribute '%1%'",
- showAttrPath(state, env, attrPath));
+ if (pos2) {
+ auto pos2r = state.positions[pos2];
+ auto origin = std::get_if<Path>(&pos2r.origin);
+ if (!(origin && *origin == state.derivationNixPath))
+ state.addErrorTrace(e, pos2, "while evaluating the attribute '%1%'",
+ showAttrPath(state, env, attrPath));
+ }
throw;
}
@@ -1519,7 +1517,7 @@ void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value &
auto dts = debugRepl
? makeDebugTraceStacker(
*this, *lambda.body, env2, positions[lambda.pos],
- "while evaluating %s",
+ "while calling %s",
lambda.name
? concatStrings("'", symbols[lambda.name], "'")
: "anonymous lambda")
@@ -1528,13 +1526,14 @@ void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value &
lambda.body->eval(*this, env2, vCur);
} catch (Error & e) {
if (loggerSettings.showTrace.get()) {
- addErrorTrace(e,
- lambda.pos,
- "while evaluating %s",
- lambda.name
- ? concatStrings("'", symbols[lambda.name], "'")
- : "anonymous lambda",
- true);
+ addErrorTrace(
+ e,
+ lambda.pos,
+ "while calling %s",
+ lambda.name
+ ? concatStrings("'", symbols[lambda.name], "'")
+ : "anonymous lambda",
+ true);
if (pos) addErrorTrace(e, pos, "from call site%s", "", true);
}
throw;
@@ -1704,7 +1703,7 @@ void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
Nix attempted to evaluate a function as a top level expression; in
this case it must have its arguments supplied either by default
values, or passed explicitly with '--arg' or '--argstr'. See
-https://nixos.org/manual/nix/stable/expressions/language-constructs.html#functions.)", symbols[i.name])
+https://nixos.org/manual/nix/stable/language/constructs.html#functions.)", symbols[i.name])
.atPos(i.pos).withFrame(*fun.lambda.env, *fun.lambda.fun).debugThrow<MissingArgumentError>();
}
}
@@ -2357,97 +2356,99 @@ void EvalState::printStats()
std::fstream fs;
if (outPath != "-")
fs.open(outPath, std::fstream::out);
- JSONObject topObj(outPath == "-" ? std::cerr : fs, true);
- topObj.attr("cpuTime",cpuTime);
- {
- auto envs = topObj.object("envs");
- envs.attr("number", nrEnvs);
- envs.attr("elements", nrValuesInEnvs);
- envs.attr("bytes", bEnvs);
- }
- {
- auto lists = topObj.object("list");
- lists.attr("elements", nrListElems);
- lists.attr("bytes", bLists);
- lists.attr("concats", nrListConcats);
- }
- {
- auto values = topObj.object("values");
- values.attr("number", nrValues);
- values.attr("bytes", bValues);
- }
- {
- auto syms = topObj.object("symbols");
- syms.attr("number", symbols.size());
- syms.attr("bytes", symbols.totalSize());
- }
- {
- auto sets = topObj.object("sets");
- sets.attr("number", nrAttrsets);
- sets.attr("bytes", bAttrsets);
- sets.attr("elements", nrAttrsInAttrsets);
- }
- {
- auto sizes = topObj.object("sizes");
- sizes.attr("Env", sizeof(Env));
- sizes.attr("Value", sizeof(Value));
- sizes.attr("Bindings", sizeof(Bindings));
- sizes.attr("Attr", sizeof(Attr));
- }
- topObj.attr("nrOpUpdates", nrOpUpdates);
- topObj.attr("nrOpUpdateValuesCopied", nrOpUpdateValuesCopied);
- topObj.attr("nrThunks", nrThunks);
- topObj.attr("nrAvoided", nrAvoided);
- topObj.attr("nrLookups", nrLookups);
- topObj.attr("nrPrimOpCalls", nrPrimOpCalls);
- topObj.attr("nrFunctionCalls", nrFunctionCalls);
+ json topObj = json::object();
+ topObj["cpuTime"] = cpuTime;
+ topObj["envs"] = {
+ {"number", nrEnvs},
+ {"elements", nrValuesInEnvs},
+ {"bytes", bEnvs},
+ };
+ topObj["list"] = {
+ {"elements", nrListElems},
+ {"bytes", bLists},
+ {"concats", nrListConcats},
+ };
+ topObj["values"] = {
+ {"number", nrValues},
+ {"bytes", bValues},
+ };
+ topObj["symbols"] = {
+ {"number", symbols.size()},
+ {"bytes", symbols.totalSize()},
+ };
+ topObj["sets"] = {
+ {"number", nrAttrsets},
+ {"bytes", bAttrsets},
+ {"elements", nrAttrsInAttrsets},
+ };
+ topObj["sizes"] = {
+ {"Env", sizeof(Env)},
+ {"Value", sizeof(Value)},
+ {"Bindings", sizeof(Bindings)},
+ {"Attr", sizeof(Attr)},
+ };
+ topObj["nrOpUpdates"] = nrOpUpdates;
+ topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied;
+ topObj["nrThunks"] = nrThunks;
+ topObj["nrAvoided"] = nrAvoided;
+ topObj["nrLookups"] = nrLookups;
+ topObj["nrPrimOpCalls"] = nrPrimOpCalls;
+ topObj["nrFunctionCalls"] = nrFunctionCalls;
#if HAVE_BOEHMGC
- {
- auto gc = topObj.object("gc");
- gc.attr("heapSize", heapSize);
- gc.attr("totalBytes", totalBytes);
- }
+ topObj["gc"] = {
+ {"heapSize", heapSize},
+ {"totalBytes", totalBytes},
+ };
#endif
if (countCalls) {
+ topObj["primops"] = primOpCalls;
{
- auto obj = topObj.object("primops");
- for (auto & i : primOpCalls)
- obj.attr(i.first, i.second);
- }
- {
- auto list = topObj.list("functions");
+ auto& list = topObj["functions"];
+ list = json::array();
for (auto & [fun, count] : functionCalls) {
- auto obj = list.object();
+ json obj = json::object();
if (fun->name)
- obj.attr("name", (std::string_view) symbols[fun->name]);
+ obj["name"] = (std::string_view) symbols[fun->name];
else
- obj.attr("name", nullptr);
+ obj["name"] = nullptr;
if (auto pos = positions[fun->pos]) {
- obj.attr("file", (std::string_view) pos.file);
- obj.attr("line", pos.line);
- obj.attr("column", pos.column);
+ if (auto path = std::get_if<Path>(&pos.origin))
+ obj["file"] = *path;
+ obj["line"] = pos.line;
+ obj["column"] = pos.column;
}
- obj.attr("count", count);
+ obj["count"] = count;
+ list.push_back(obj);
}
}
{
- auto list = topObj.list("attributes");
+ auto list = topObj["attributes"];
+ list = json::array();
for (auto & i : attrSelects) {
- auto obj = list.object();
+ json obj = json::object();
if (auto pos = positions[i.first]) {
- obj.attr("file", (const std::string &) pos.file);
- obj.attr("line", pos.line);
- obj.attr("column", pos.column);
+ if (auto path = std::get_if<Path>(&pos.origin))
+ obj["file"] = *path;
+ obj["line"] = pos.line;
+ obj["column"] = pos.column;
}
- obj.attr("count", i.second);
+ obj["count"] = i.second;
+ list.push_back(obj);
}
}
}
if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") {
- auto list = topObj.list("symbols");
- symbols.dump([&](const std::string & s) { list.elem(s); });
+ // XXX: overrides earlier assignment
+ topObj["symbols"] = json::array();
+ auto &list = topObj["symbols"];
+ symbols.dump([&](const std::string & s) { list.emplace_back(s); });
+ }
+ if (outPath == "-") {
+ std::cerr << topObj.dump(2) << std::endl;
+ } else {
+ fs << topObj.dump(2) << std::endl;
}
}
}
@@ -2502,6 +2503,23 @@ Strings EvalSettings::getDefaultNixPath()
return res;
}
+bool EvalSettings::isPseudoUrl(std::string_view s)
+{
+ if (s.compare(0, 8, "channel:") == 0) return true;
+ size_t pos = s.find("://");
+ if (pos == std::string::npos) return false;
+ std::string scheme(s, 0, pos);
+ return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
+}
+
+std::string EvalSettings::resolvePseudoUrl(std::string_view url)
+{
+ if (hasPrefix(url, "channel:"))
+ return "https://nixos.org/channels/" + std::string(url.substr(8)) + "/nixexprs.tar.xz";
+ else
+ return std::string(url);
+}
+
EvalSettings evalSettings;
static GlobalConfig::Register rEvalSettings(&evalSettings);
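For reference, a standalone sketch of the `channel:` expansion performed by `EvalSettings::resolvePseudoUrl` above; it reproduces the equivalence already documented for the `-I` flag earlier in this diff (`channel:nixos-21.05` versus the full `nixexprs.tar.xz` URL).

```cpp
#include <cassert>
#include <string>
#include <string_view>

// Mirrors EvalSettings::resolvePseudoUrl: expand "channel:<name>" to the
// corresponding nixos.org tarball URL, and pass everything else through.
static std::string resolvePseudoUrl(std::string_view url)
{
    if (url.rfind("channel:", 0) == 0)
        return "https://nixos.org/channels/" + std::string(url.substr(8)) + "/nixexprs.tar.xz";
    return std::string(url);
}

int main()
{
    assert(resolvePseudoUrl("channel:nixos-21.05")
        == "https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz");
    assert(resolvePseudoUrl("https://example.org/x.tar.gz") == "https://example.org/x.tar.gz");
}
```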
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
index 1f610a02f..9b3d160ea 100644
--- a/src/libexpr/eval.hh
+++ b/src/libexpr/eval.hh
@@ -60,7 +60,6 @@ void copyContext(const Value & v, PathSet & context);
typedef std::map<Path, StorePath> SrcToStore;
-std::ostream & printValue(const EvalState & state, std::ostream & str, const Value & v);
std::string printValue(const EvalState & state, const Value & v);
std::ostream & operator << (std::ostream & os, const ValueType t);
@@ -78,7 +77,7 @@ struct RegexCache;
std::shared_ptr<RegexCache> makeRegexCache();
struct DebugTrace {
- std::optional<ErrPos> pos;
+ std::shared_ptr<AbstractPos> pos;
const Expr & expr;
const Env & env;
hintformat hint;
@@ -437,8 +436,12 @@ private:
friend struct ExprAttrs;
friend struct ExprLet;
- Expr * parse(char * text, size_t length, FileOrigin origin, const PathView path,
- const PathView basePath, std::shared_ptr<StaticEnv> & staticEnv);
+ Expr * parse(
+ char * text,
+ size_t length,
+ Pos::Origin origin,
+ Path basePath,
+ std::shared_ptr<StaticEnv> & staticEnv);
public:
@@ -570,6 +573,10 @@ struct EvalSettings : Config
static Strings getDefaultNixPath();
+ static bool isPseudoUrl(std::string_view s);
+
+ static std::string resolvePseudoUrl(std::string_view url);
+
Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
"Whether builtin functions that allow executing native code should be enabled."};
diff --git a/src/libexpr/flake/call-flake.nix b/src/libexpr/flake/call-flake.nix
index 932ac5e90..8061db3df 100644
--- a/src/libexpr/flake/call-flake.nix
+++ b/src/libexpr/flake/call-flake.nix
@@ -43,7 +43,7 @@ let
outputs = flake.outputs (inputs // { self = result; });
- result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; };
+ result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; _type = "flake"; };
in
if node.flake or true then
assert builtins.isFunction flake.outputs;
diff --git a/src/libexpr/flake/config.cc b/src/libexpr/flake/config.cc
index 6df95f1f0..89ddbde7e 100644
--- a/src/libexpr/flake/config.cc
+++ b/src/libexpr/flake/config.cc
@@ -56,7 +56,7 @@ void ConfigFile::apply()
auto tlname = get(trustedList, name);
if (auto saved = tlname ? get(*tlname, valueS) : nullptr) {
trusted = *saved;
- warn("Using saved setting for '%s = %s' from ~/.local/share/nix/trusted-settings.json.", name,valueS);
+ printInfo("Using saved setting for '%s = %s' from ~/.local/share/nix/trusted-settings.json.", name, valueS);
} else {
// FIXME: filter ANSI escapes, newlines, \r, etc.
if (std::tolower(logger->ask(fmt("do you want to allow configuration setting '%s' to be set to '" ANSI_RED "%s" ANSI_NORMAL "' (y/N)?", name, valueS)).value_or('n')) == 'y') {
diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc
index a9f15b0f6..fc4be5678 100644
--- a/src/libexpr/flake/flake.cc
+++ b/src/libexpr/flake/flake.cc
@@ -143,7 +143,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
} catch (Error & e) {
e.addTrace(
state.positions[attr.pos],
- hintfmt("in flake attribute '%s'", state.symbols[attr.name]));
+ hintfmt("while evaluating flake attribute '%s'", state.symbols[attr.name]));
throw;
}
}
@@ -152,7 +152,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
try {
input.ref = FlakeRef::fromAttrs(attrs);
} catch (Error & e) {
- e.addTrace(state.positions[pos], hintfmt("in flake input"));
+ e.addTrace(state.positions[pos], hintfmt("while evaluating flake input"));
throw;
}
else {
@@ -220,7 +220,7 @@ static Flake getFlake(
Value vInfo;
state.evalFile(flakeFile, vInfo, true); // FIXME: symlink attack
- expectType(state, nAttrs, vInfo, state.positions.add({flakeFile, foFile}, 0, 0));
+ expectType(state, nAttrs, vInfo, state.positions.add({flakeFile}, 1, 1));
if (auto description = vInfo.attrs->get(state.sDescription)) {
expectType(state, nString, *description->value, description->pos);
@@ -353,7 +353,7 @@ LockedFlake lockFlake(
std::function<void(
const FlakeInputs & flakeInputs,
- std::shared_ptr<Node> node,
+ ref<Node> node,
const InputPath & inputPathPrefix,
std::shared_ptr<const Node> oldNode,
const InputPath & lockRootPath,
@@ -362,9 +362,15 @@ LockedFlake lockFlake(
computeLocks;
computeLocks = [&](
+ /* The inputs of this node, either from flake.nix or
+ flake.lock. */
const FlakeInputs & flakeInputs,
- std::shared_ptr<Node> node,
+ /* The node whose locks are to be updated.*/
+ ref<Node> node,
+ /* The path to this node in the lock file graph. */
const InputPath & inputPathPrefix,
+ /* The old node, if any, from which locks can be
+ copied. */
std::shared_ptr<const Node> oldNode,
const InputPath & lockRootPath,
const Path & parentPath,
@@ -452,7 +458,7 @@ LockedFlake lockFlake(
/* Copy the input from the old lock since its flakeref
didn't change and there is no override from a
higher level flake. */
- auto childNode = std::make_shared<LockedNode>(
+ auto childNode = make_ref<LockedNode>(
oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake);
node->inputs.insert_or_assign(id, childNode);
@@ -481,7 +487,7 @@ LockedFlake lockFlake(
.isFlake = (*lockedNode)->isFlake,
});
} else if (auto follows = std::get_if<1>(&i.second)) {
- if (! trustLock) {
+ if (!trustLock) {
// It is possible that the flake has changed,
// so we must confirm all the follows that are in the lock file are also in the flake.
auto overridePath(inputPath);
@@ -521,8 +527,8 @@ LockedFlake lockFlake(
this input. */
debug("creating new input '%s'", inputPathS);
- if (!lockFlags.allowMutable && !input.ref->input.isLocked())
- throw Error("cannot update flake input '%s' in pure mode", inputPathS);
+ if (!lockFlags.allowUnlocked && !input.ref->input.isLocked())
+ throw Error("cannot update unlocked flake input '%s' in pure mode", inputPathS);
/* Note: in case of an --override-input, we use
the *original* ref (input2.ref) for the
@@ -544,7 +550,7 @@ LockedFlake lockFlake(
auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache, inputPath);
- auto childNode = std::make_shared<LockedNode>(inputFlake.lockedRef, ref);
+ auto childNode = make_ref<LockedNode>(inputFlake.lockedRef, ref);
node->inputs.insert_or_assign(id, childNode);
@@ -564,15 +570,19 @@ LockedFlake lockFlake(
oldLock
? std::dynamic_pointer_cast<const Node>(oldLock)
: LockFile::read(
- inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root,
- oldLock ? lockRootPath : inputPath, localPath, false);
+ inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root.get_ptr(),
+ oldLock ? lockRootPath : inputPath,
+ localPath,
+ false);
}
else {
auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
state, *input.ref, useRegistries, flakeCache);
- node->inputs.insert_or_assign(id,
- std::make_shared<LockedNode>(lockedRef, ref, false));
+
+ auto childNode = make_ref<LockedNode>(lockedRef, ref, false);
+
+ node->inputs.insert_or_assign(id, childNode);
}
}
@@ -587,8 +597,13 @@ LockedFlake lockFlake(
auto parentPath = canonPath(flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir, true);
computeLocks(
- flake.inputs, newLockFile.root, {},
- lockFlags.recreateLockFile ? nullptr : oldLockFile.root, {}, parentPath, false);
+ flake.inputs,
+ newLockFile.root,
+ {},
+ lockFlags.recreateLockFile ? nullptr : oldLockFile.root.get_ptr(),
+ {},
+ parentPath,
+ false);
for (auto & i : lockFlags.inputOverrides)
if (!overridesUsed.count(i.first))
@@ -611,9 +626,9 @@ LockedFlake lockFlake(
if (lockFlags.writeLockFile) {
if (auto sourcePath = topRef.input.getSourcePath()) {
- if (!newLockFile.isImmutable()) {
+ if (auto unlockedInput = newLockFile.isUnlocked()) {
if (fetchSettings.warnDirty)
- warn("will not write lock file of flake '%s' because it has a mutable input", topRef);
+ warn("will not write lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput);
} else {
if (!lockFlags.updateLockFile)
throw Error("flake '%s' requires lock file changes but they're not allowed due to '--no-update-lock-file'", topRef);
@@ -737,7 +752,7 @@ static void prim_getFlake(EvalState & state, const PosIdx pos, Value * * args, V
.updateLockFile = false,
.writeLockFile = false,
.useRegistries = !evalSettings.pureEval && fetchSettings.useRegistries,
- .allowMutable = !evalSettings.pureEval,
+ .allowUnlocked = !evalSettings.pureEval,
}),
v);
}
diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh
index 524b18af1..10301d8aa 100644
--- a/src/libexpr/flake/flake.hh
+++ b/src/libexpr/flake/flake.hh
@@ -108,11 +108,11 @@ struct LockFlags
bool applyNixConfig = false;
- /* Whether mutable flake references (i.e. those without a Git
+ /* Whether unlocked flake references (i.e. those without a Git
revision or similar) without a corresponding lock are
- allowed. Mutable flake references with a lock are always
+ allowed. Unlocked flake references with a lock are always
allowed. */
- bool allowMutable = true;
+ bool allowUnlocked = true;
/* Whether to commit changes to flake.lock. */
bool commitLockFile = false;
diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh
index fe4f67193..a36d852a8 100644
--- a/src/libexpr/flake/flakeref.hh
+++ b/src/libexpr/flake/flakeref.hh
@@ -35,7 +35,7 @@ typedef std::string FlakeId;
struct FlakeRef
{
- /* fetcher-specific representation of the input, sufficient to
+ /* Fetcher-specific representation of the input, sufficient to
perform the fetch operation. */
fetchers::Input input;
diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc
index 629d2e669..a3ed90e1f 100644
--- a/src/libexpr/flake/lockfile.cc
+++ b/src/libexpr/flake/lockfile.cc
@@ -31,7 +31,7 @@ FlakeRef getFlakeRef(
}
LockedNode::LockedNode(const nlohmann::json & json)
- : lockedRef(getFlakeRef(json, "locked", "info"))
+ : lockedRef(getFlakeRef(json, "locked", "info")) // FIXME: remove "info"
, originalRef(getFlakeRef(json, "original", nullptr))
, isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
{
@@ -49,15 +49,15 @@ std::shared_ptr<Node> LockFile::findInput(const InputPath & path)
{
auto pos = root;
- if (!pos) return {};
-
for (auto & elem : path) {
if (auto i = get(pos->inputs, elem)) {
if (auto node = std::get_if<0>(&*i))
pos = *node;
else if (auto follows = std::get_if<1>(&*i)) {
- pos = findInput(*follows);
- if (!pos) return {};
+ if (auto p = findInput(*follows))
+ pos = ref(p);
+ else
+ return {};
}
} else
return {};
@@ -72,7 +72,7 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
if (version < 5 || version > 7)
throw Error("lock file '%s' has unsupported version %d", path, version);
- std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap;
+ std::map<std::string, ref<Node>> nodeMap;
std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;
@@ -93,12 +93,12 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
auto jsonNode2 = nodes.find(inputKey);
if (jsonNode2 == nodes.end())
throw Error("lock file references missing node '%s'", inputKey);
- auto input = std::make_shared<LockedNode>(*jsonNode2);
+ auto input = make_ref<LockedNode>(*jsonNode2);
k = nodeMap.insert_or_assign(inputKey, input).first;
getInputs(*input, *jsonNode2);
}
- if (auto child = std::dynamic_pointer_cast<LockedNode>(k->second))
- node.inputs.insert_or_assign(i.key(), child);
+ if (auto child = k->second.dynamic_pointer_cast<LockedNode>())
+ node.inputs.insert_or_assign(i.key(), ref(child));
else
// FIXME: replace by follows node
throw Error("lock file contains cycle to root node");
@@ -122,9 +122,9 @@ nlohmann::json LockFile::toJSON() const
std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys;
std::unordered_set<std::string> keys;
- std::function<std::string(const std::string & key, std::shared_ptr<const Node> node)> dumpNode;
+ std::function<std::string(const std::string & key, ref<const Node> node)> dumpNode;
- dumpNode = [&](std::string key, std::shared_ptr<const Node> node) -> std::string
+ dumpNode = [&](std::string key, ref<const Node> node) -> std::string
{
auto k = nodeKeys.find(node);
if (k != nodeKeys.end())
@@ -159,10 +159,11 @@ nlohmann::json LockFile::toJSON() const
n["inputs"] = std::move(inputs);
}
- if (auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(node)) {
+ if (auto lockedNode = node.dynamic_pointer_cast<const LockedNode>()) {
n["original"] = fetchers::attrsToJSON(lockedNode->originalRef.toAttrs());
n["locked"] = fetchers::attrsToJSON(lockedNode->lockedRef.toAttrs());
- if (!lockedNode->isFlake) n["flake"] = false;
+ if (!lockedNode->isFlake)
+ n["flake"] = false;
}
nodes[key] = std::move(n);
@@ -201,13 +202,13 @@ void LockFile::write(const Path & path) const
writeFile(path, fmt("%s\n", *this));
}
-bool LockFile::isImmutable() const
+std::optional<FlakeRef> LockFile::isUnlocked() const
{
- std::unordered_set<std::shared_ptr<const Node>> nodes;
+ std::set<ref<const Node>> nodes;
- std::function<void(std::shared_ptr<const Node> node)> visit;
+ std::function<void(ref<const Node> node)> visit;
- visit = [&](std::shared_ptr<const Node> node)
+ visit = [&](ref<const Node> node)
{
if (!nodes.insert(node).second) return;
for (auto & i : node->inputs)
@@ -219,11 +220,12 @@ bool LockFile::isImmutable() const
for (auto & i : nodes) {
if (i == root) continue;
- auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(i);
- if (lockedNode && !lockedNode->lockedRef.input.isLocked()) return false;
+ auto node = i.dynamic_pointer_cast<const LockedNode>();
+ if (node && !node->lockedRef.input.isLocked())
+ return node->lockedRef;
}
- return true;
+ return {};
}
bool LockFile::operator ==(const LockFile & other) const
@@ -247,12 +249,12 @@ InputPath parseInputPath(std::string_view s)
std::map<InputPath, Node::Edge> LockFile::getAllInputs() const
{
- std::unordered_set<std::shared_ptr<Node>> done;
+ std::set<ref<Node>> done;
std::map<InputPath, Node::Edge> res;
- std::function<void(const InputPath & prefix, std::shared_ptr<Node> node)> recurse;
+ std::function<void(const InputPath & prefix, ref<Node> node)> recurse;
- recurse = [&](const InputPath & prefix, std::shared_ptr<Node> node)
+ recurse = [&](const InputPath & prefix, ref<Node> node)
{
if (!done.insert(node).second) return;
diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh
index 96f1edc76..02e9bdfbc 100644
--- a/src/libexpr/flake/lockfile.hh
+++ b/src/libexpr/flake/lockfile.hh
@@ -20,7 +20,7 @@ struct LockedNode;
type LockedNode. */
struct Node : std::enable_shared_from_this<Node>
{
- typedef std::variant<std::shared_ptr<LockedNode>, InputPath> Edge;
+ typedef std::variant<ref<LockedNode>, InputPath> Edge;
std::map<FlakeId, Edge> inputs;
@@ -47,11 +47,13 @@ struct LockedNode : Node
struct LockFile
{
- std::shared_ptr<Node> root = std::make_shared<Node>();
+ ref<Node> root = make_ref<Node>();
LockFile() {};
LockFile(const nlohmann::json & json, const Path & path);
+ typedef std::map<ref<const Node>, std::string> KeyMap;
+
nlohmann::json toJSON() const;
std::string to_string() const;
@@ -60,7 +62,8 @@ struct LockFile
void write(const Path & path) const;
- bool isImmutable() const;
+ /* Check whether this lock file has any unlocked inputs. */
+ std::optional<FlakeRef> isUnlocked() const;
bool operator ==(const LockFile & other) const;
diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc
index 544f8efc4..1602fbffb 100644
--- a/src/libexpr/get-drvs.cc
+++ b/src/libexpr/get-drvs.cc
@@ -150,7 +150,7 @@ DrvInfo::Outputs DrvInfo::queryOutputs(bool withPaths, bool onlyOutputsToInstall
/* Check for `meta.outputsToInstall` and return `outputs` reduced to that. */
const Value * outTI = queryMeta("outputsToInstall");
if (!outTI) return outputs;
- const auto errMsg = Error("this derivation has bad 'meta.outputsToInstall'");
+ auto errMsg = Error("this derivation has bad 'meta.outputsToInstall'");
/* ^ this shows during `nix-env -i` right under the bad derivation */
if (!outTI->isList()) throw errMsg;
Outputs result;
diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc
index 7c623a07d..eb6f062b4 100644
--- a/src/libexpr/nixexpr.cc
+++ b/src/libexpr/nixexpr.cc
@@ -8,6 +8,58 @@
namespace nix {
+struct PosAdapter : AbstractPos
+{
+ Pos::Origin origin;
+
+ PosAdapter(Pos::Origin origin)
+ : origin(std::move(origin))
+ {
+ }
+
+ std::optional<std::string> getSource() const override
+ {
+ return std::visit(overloaded {
+ [](const Pos::none_tag &) -> std::optional<std::string> {
+ return std::nullopt;
+ },
+ [](const Pos::Stdin & s) -> std::optional<std::string> {
+ // Get rid of the null terminators added by the parser.
+ return std::string(s.source->c_str());
+ },
+ [](const Pos::String & s) -> std::optional<std::string> {
+ // Get rid of the null terminators added by the parser.
+ return std::string(s.source->c_str());
+ },
+ [](const Path & path) -> std::optional<std::string> {
+ try {
+ return readFile(path);
+ } catch (Error &) {
+ return std::nullopt;
+ }
+ }
+ }, origin);
+ }
+
+ void print(std::ostream & out) const override
+ {
+ std::visit(overloaded {
+ [&](const Pos::none_tag &) { out << "«none»"; },
+ [&](const Pos::Stdin &) { out << "«stdin»"; },
+ [&](const Pos::String & s) { out << "«string»"; },
+ [&](const Path & path) { out << path; }
+ }, origin);
+ }
+};
+
+Pos::operator std::shared_ptr<AbstractPos>() const
+{
+ auto pos = std::make_shared<PosAdapter>(origin);
+ pos->line = line;
+ pos->column = column;
+ return pos;
+}
+
/* Displaying abstract syntax trees. */
static void showString(std::ostream & str, std::string_view s)
@@ -248,24 +300,10 @@ void ExprPos::show(const SymbolTable & symbols, std::ostream & str) const
std::ostream & operator << (std::ostream & str, const Pos & pos)
{
- if (!pos)
+ if (auto pos2 = (std::shared_ptr<AbstractPos>) pos) {
+ str << *pos2;
+ } else
str << "undefined position";
- else
- {
- auto f = format(ANSI_BOLD "%1%" ANSI_NORMAL ":%2%:%3%");
- switch (pos.origin) {
- case foFile:
- f % (const std::string &) pos.file;
- break;
- case foStdin:
- case foString:
- f % "(string)";
- break;
- default:
- throw Error("unhandled Pos origin!");
- }
- str << (f % pos.line % pos.column).str();
- }
return str;
}
@@ -289,7 +327,6 @@ std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath)
}
-
/* Computing levels/displacements for variables. */
void Expr::bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env)
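The new `Pos::Origin` introduced in this commit is a `std::variant` over the possible sources of a position, and `PosAdapter::print` dispatches on it with `std::visit`. A simplified, self-contained sketch of that dispatch follows; the type names below are stand-ins, not the real `Pos` members.

```cpp
#include <iostream>
#include <string>
#include <variant>

// Simplified stand-ins for Pos::none_tag / Pos::Stdin / Pos::String / Path.
struct NoneTag { };
struct StdinSource { std::string source; };
struct StringSource { std::string source; };
using Origin = std::variant<NoneTag, StdinSource, StringSource, std::string /* Path */>;

// Build an overload set for std::visit, as the `overloaded` utility does.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

static void printOrigin(std::ostream & out, const Origin & origin)
{
    std::visit(overloaded {
        [&](const NoneTag &) { out << "«none»"; },
        [&](const StdinSource &) { out << "«stdin»"; },
        [&](const StringSource &) { out << "«string»"; },
        [&](const std::string & path) { out << path; },
    }, origin);
}

int main()
{
    printOrigin(std::cout, Origin{std::string("/etc/nixos/flake.nix")});
    std::cout << "\n";
}
```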
diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh
index ea0ce1a7f..ffe67f97d 100644
--- a/src/libexpr/nixexpr.hh
+++ b/src/libexpr/nixexpr.hh
@@ -22,15 +22,22 @@ MakeError(MissingArgumentError, EvalError);
MakeError(RestrictedPathError, Error);
/* Position objects. */
-
struct Pos
{
- std::string file;
- FileOrigin origin;
uint32_t line;
uint32_t column;
+ struct none_tag { };
+ struct Stdin { ref<std::string> source; };
+ struct String { ref<std::string> source; };
+
+ typedef std::variant<none_tag, Stdin, String, Path> Origin;
+
+ Origin origin;
+
explicit operator bool() const { return line > 0; }
+
+ operator std::shared_ptr<AbstractPos>() const;
};
class PosIdx {
@@ -46,7 +53,11 @@ public:
explicit operator bool() const { return id > 0; }
- bool operator<(const PosIdx other) const { return id < other.id; }
+ bool operator <(const PosIdx other) const { return id < other.id; }
+
+ bool operator ==(const PosIdx other) const { return id == other.id; }
+
+ bool operator !=(const PosIdx other) const { return id != other.id; }
};
class PosTable
@@ -60,13 +71,13 @@ public:
// current origins.back() can be reused or not.
mutable uint32_t idx = std::numeric_limits<uint32_t>::max();
- explicit Origin(uint32_t idx): idx(idx), file{}, origin{} {}
+ // Used for searching in PosTable::[].
+ explicit Origin(uint32_t idx): idx(idx), origin{Pos::none_tag()} {}
public:
- const std::string file;
- const FileOrigin origin;
+ const Pos::Origin origin;
- Origin(std::string file, FileOrigin origin): file(std::move(file)), origin(origin) {}
+ Origin(Pos::Origin origin): origin(origin) {}
};
struct Offset {
@@ -106,7 +117,7 @@ public:
[] (const auto & a, const auto & b) { return a.idx < b.idx; });
const auto origin = *std::prev(pastOrigin);
const auto offset = offsets[idx];
- return {origin.file, origin.origin, offset.line, offset.column};
+ return {offset.line, offset.column, origin.origin};
}
};
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
index 0cf0d56f0..ffb364a90 100644
--- a/src/libexpr/parser.y
+++ b/src/libexpr/parser.y
@@ -34,11 +34,6 @@ namespace nix {
Path basePath;
PosTable::Origin origin;
std::optional<ErrorInfo> error;
- ParseData(EvalState & state, PosTable::Origin origin)
- : state(state)
- , symbols(state.symbols)
- , origin(std::move(origin))
- { };
};
struct ParserFormals {
@@ -643,29 +638,26 @@ formal
#include "filetransfer.hh"
#include "fetchers.hh"
#include "store-api.hh"
+#include "flake/flake.hh"
namespace nix {
-Expr * EvalState::parse(char * text, size_t length, FileOrigin origin,
- const PathView path, const PathView basePath, std::shared_ptr<StaticEnv> & staticEnv)
+Expr * EvalState::parse(
+ char * text,
+ size_t length,
+ Pos::Origin origin,
+ Path basePath,
+ std::shared_ptr<StaticEnv> & staticEnv)
{
yyscan_t scanner;
- std::string file;
- switch (origin) {
- case foFile:
- file = path;
- break;
- case foStdin:
- case foString:
- file = text;
- break;
- default:
- assert(false);
- }
- ParseData data(*this, {file, origin});
- data.basePath = basePath;
+ ParseData data {
+ .state = *this,
+ .symbols = symbols,
+ .basePath = std::move(basePath),
+ .origin = {origin},
+ };
yylex_init(&scanner);
yy_scan_buffer(text, length, scanner);
@@ -717,14 +709,15 @@ Expr * EvalState::parseExprFromFile(const Path & path, std::shared_ptr<StaticEnv
auto buffer = readFile(path);
// readFile should have left some extra space for terminators
buffer.append("\0\0", 2);
- return parse(buffer.data(), buffer.size(), foFile, path, dirOf(path), staticEnv);
+ return parse(buffer.data(), buffer.size(), path, dirOf(path), staticEnv);
}
-Expr * EvalState::parseExprFromString(std::string s, const Path & basePath, std::shared_ptr<StaticEnv> & staticEnv)
+Expr * EvalState::parseExprFromString(std::string s_, const Path & basePath, std::shared_ptr<StaticEnv> & staticEnv)
{
- s.append("\0\0", 2);
- return parse(s.data(), s.size(), foString, "", basePath, staticEnv);
+ auto s = make_ref<std::string>(std::move(s_));
+ s->append("\0\0", 2);
+ return parse(s->data(), s->size(), Pos::String{.source = s}, basePath, staticEnv);
}
@@ -740,7 +733,8 @@ Expr * EvalState::parseStdin()
auto buffer = drainFD(0);
// drainFD should have left some extra space for terminators
buffer.append("\0\0", 2);
- return parse(buffer.data(), buffer.size(), foStdin, "", absPath("."), staticBaseEnv);
+ auto s = make_ref<std::string>(std::move(buffer));
+ return parse(s->data(), s->size(), Pos::Stdin{.source = s}, absPath("."), staticBaseEnv);
}
@@ -805,17 +799,28 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
std::pair<bool, std::string> res;
- if (isUri(elem.second)) {
+ if (EvalSettings::isPseudoUrl(elem.second)) {
try {
- res = { true, store->toRealPath(fetchers::downloadTarball(
- store, resolveUri(elem.second), "source", false).first.storePath) };
+ auto storePath = fetchers::downloadTarball(
+ store, EvalSettings::resolvePseudoUrl(elem.second), "source", false).first.storePath;
+ res = { true, store->toRealPath(storePath) };
} catch (FileTransferError & e) {
logWarning({
.msg = hintfmt("Nix search path entry '%1%' cannot be downloaded, ignoring", elem.second)
});
res = { false, "" };
}
- } else {
+ }
+
+ else if (hasPrefix(elem.second, "flake:")) {
+ settings.requireExperimentalFeature(Xp::Flakes);
+ auto flakeRef = parseFlakeRef(elem.second.substr(6), {}, true, false);
+ debug("fetching flake search path element '%s''", elem.second);
+ auto storePath = flakeRef.resolve(store).fetchTree(store).first.storePath;
+ res = { true, store->toRealPath(storePath) };
+ }
+
+ else {
auto path = absPath(elem.second);
if (pathExists(path))
res = { true, path };
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index b95f54851..5142d79e8 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -5,14 +5,15 @@
#include "globals.hh"
#include "json-to-value.hh"
#include "names.hh"
+#include "references.hh"
#include "store-api.hh"
#include "util.hh"
-#include "json.hh"
#include "value-to-json.hh"
#include "value-to-xml.hh"
#include "primops.hh"
#include <boost/container/small_vector.hpp>
+#include <nlohmann/json.hpp>
#include <sys/types.h>
#include <sys/stat.h>
@@ -361,8 +362,7 @@ void prim_exec(EvalState & state, const PosIdx pos, Value * * args, Value & v)
auto output = runProgram(program, true, commandArgs);
Expr * parsed;
try {
- auto base = state.positions[pos];
- parsed = state.parseExprFromString(std::move(output), base.file);
+ parsed = state.parseExprFromString(std::move(output), "/");
} catch (Error & e) {
e.addTrace(state.positions[pos], "while parsing the output from '%1%'", program);
throw;
@@ -585,7 +585,7 @@ struct CompareValues
state.error("cannot compare %s with %s; values of that type are incomparable", showType(*v1), showType(*v2)).debugThrow<EvalError>();
}
} catch (Error & e) {
- e.addTrace(std::nullopt, errorCtx);
+ e.addTrace(nullptr, errorCtx);
throw;
}
}
@@ -788,8 +788,8 @@ static void prim_addErrorContext(EvalState & state, const PosIdx pos, Value * *
v = *args[1];
} catch (Error & e) {
PathSet context;
- e.addTrace(std::nullopt, state.coerceToString(pos, *args[0], context,
- "while evaluating the error message passed to builtins.addErrorContext").toOwned());
+ e.addTrace(nullptr, state.coerceToString(pos, *args[0], context,
+ "while evaluating the error message passed to builtins.addErrorContext").toOwned());
throw;
}
}
@@ -1003,6 +1003,7 @@ static void prim_second(EvalState & state, const PosIdx pos, Value * * args, Val
derivation. */
static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
+ using nlohmann::json;
state.forceAttrs(*args[0], pos, "while evaluating the argument passed to builtins.derivationStrict");
/* Figure out the name first (for stack backtraces). */
@@ -1018,11 +1019,10 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
}
/* Check whether attributes should be passed as a JSON file. */
- std::ostringstream jsonBuf;
- std::unique_ptr<JSONObject> jsonObject;
+ std::optional<json> jsonObject;
attr = args[0]->attrs->find(state.sStructuredAttrs);
if (attr != args[0]->attrs->end() && state.forceBool(*attr->value, pos, "while evaluating the `__structuredAttrs` attribute passed to builtins.derivationStrict"))
- jsonObject = std::make_unique<JSONObject>(jsonBuf);
+ jsonObject = json::object();
/* Check whether null attributes should be ignored. */
bool ignoreNulls = false;
@@ -1128,8 +1128,7 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
if (i->name == state.sStructuredAttrs) continue;
- auto placeholder(jsonObject->placeholder(key));
- printValueAsJSON(state, true, *i->value, pos, placeholder, context);
+ (*jsonObject)[key] = printValueAsJSON(state, true, *i->value, pos, context);
if (i->name == state.sBuilder)
drv.builder = state.forceString(*i->value, context, posDrvName, "while evaluating the `builder` attribute passed to builtins.derivationStrict");
@@ -1173,8 +1172,8 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
}
if (jsonObject) {
+ drv.env.emplace("__json", jsonObject->dump());
jsonObject.reset();
- drv.env.emplace("__json", jsonBuf.str());
}
/* Everything in the context of the strings in the derivation
@@ -1452,10 +1451,10 @@ static RegisterPrimOp primop_storePath({
static void prim_pathExists(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
/* We don’t check the path right now, because we don’t want to
- throw if the path isn’t allowed, but just return false (and we
- can’t just catch the exception here because we still want to
- throw if something in the evaluation of `*args[0]` tries to
- access an unauthorized path). */
+ throw if the path isn’t allowed, but just return false (and we
+ can’t just catch the exception here because we still want to
+ throw if something in the evaluation of `*args[0]` tries to
+ access an unauthorized path). */
auto path = realisePath(state, pos, *args[0], { .checkForPureEval = false });
try {
@@ -1533,6 +1532,10 @@ static void prim_readFile(EvalState & state, const PosIdx pos, Value * * args, V
refs = state.store->queryPathInfo(state.store->toStorePath(path).first)->references;
} catch (Error &) { // FIXME: should be InvalidPathError
}
+ // Re-scan references to filter down to just the ones that actually occur in the file.
+ auto refsSink = PathRefScanSink::fromPaths(refs);
+ refsSink << s;
+ refs = refsSink.getResultPaths();
}
auto context = state.store->printStorePathSet(refs);
v.mkString(s, context);
@@ -1913,8 +1916,8 @@ static RegisterPrimOp primop_toFile({
";
```
- Note that `${configFile}` is an
- [antiquotation](language-values.md), so the result of the
+ Note that `${configFile}` is a
+ [string interpolation](language/values.md#type-string), so the result of the
expression `configFile`
(i.e., a path like `/nix/store/m7p7jfny445k...-foo.conf`) will be
spliced into the resulting string.
@@ -2379,12 +2382,18 @@ static RegisterPrimOp primop_listToAttrs({
Construct a set from a list specifying the names and values of each
attribute. Each element of the list should be a set consisting of a
string-valued attribute `name` specifying the name of the attribute,
- and an attribute `value` specifying its value. Example:
+ and an attribute `value` specifying its value.
+
+ In case of duplicate occurrences of the same name, the first
+ takes precedence.
+
+ Example:
```nix
builtins.listToAttrs
[ { name = "foo"; value = 123; }
{ name = "bar"; value = 456; }
+ { name = "bar"; value = 420; }
]
```
@@ -2402,12 +2411,62 @@ static void prim_intersectAttrs(EvalState & state, const PosIdx pos, Value * * a
state.forceAttrs(*args[0], pos, "while evaluating the first argument passed to builtins.intersectAttrs");
state.forceAttrs(*args[1], pos, "while evaluating the second argument passed to builtins.intersectAttrs");
- auto attrs = state.buildBindings(std::min(args[0]->attrs->size(), args[1]->attrs->size()));
-
- for (auto & i : *args[0]->attrs) {
- Bindings::iterator j = args[1]->attrs->find(i.name);
- if (j != args[1]->attrs->end())
- attrs.insert(*j);
+ Bindings &left = *args[0]->attrs;
+ Bindings &right = *args[1]->attrs;
+
+ auto attrs = state.buildBindings(std::min(left.size(), right.size()));
+
+ // The current implementation has good asymptotic complexity and is reasonably
+ // simple. Further optimization may be possible, but does not seem productive,
+ // considering the state of eval performance in 2022.
+ //
+ // I have looked for reusable and/or standard solutions and these are my
+ // findings:
+ //
+ // STL
+ // ===
+ // std::set_intersection is not suitable, as it only performs a simultaneous
+ // linear scan, not taking advantage of random access. This is O(n + m), so
+ // linear in the largest set, which is not acceptable for callPackage in Nixpkgs.
+ //
+ // Simultaneous scan, with alternating simple binary search
+ // ===
+ // One alternative algorithm scans the attrsets simultaneously, jumping
+ // forward using `lower_bound` in case of inequality. This should perform
+ // well on very similar sets, having a local and predictable access pattern.
+ // On dissimilar sets, it seems to need more comparisons than the current
+ // algorithm, as few consecutive attrs match. `lower_bound` could take
+ // advantage of the decreasing remaining search space, but this causes
+ // the medians to move, which can mean that they don't stay in the cache
+ // like they would with the current naive `find`.
+ //
+ // Double binary search
+ // ===
+ // The optimal algorithm may be "Double binary search", which doesn't
+ // scan at all, but rather divides both sets simultaneously.
+ // See "Fast Intersection Algorithms for Sorted Sequences" by Baeza-Yates et al.
+ // https://cs.uwaterloo.ca/~ajsaling/papers/intersection_alg_app10.pdf
+ // The only downsides I can think of are not having a linear access pattern
+ // for similar sets, and having to maintain a more intricate algorithm.
+ //
+ // Adaptive
+ // ===
+ // Finally, one could try a simultaneous scan, count misses, and fall back
+ // to double binary search when the counter hits some threshold and/or ratio.
+
+ if (left.size() < right.size()) {
+ for (auto & l : left) {
+ Bindings::iterator r = right.find(l.name);
+ if (r != right.end())
+ attrs.insert(*r);
+ }
+ }
+ else {
+ for (auto & r : right) {
+ Bindings::iterator l = left.find(r.name);
+ if (l != left.end())
+ attrs.insert(r);
+ }
}
v.mkAttrs(attrs.alreadySorted());
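
The "double binary search" strategy discussed in the comment above is intentionally not implemented in this patch. Purely as a reference for that discussion, here is a rough, self-contained sketch of the idea over plain sorted arrays rather than Nix's `Bindings`, following the Baeza-Yates scheme of recursing around the median of the smaller range; treat it as an illustration, not as the code this primop uses:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Intersect two sorted ranges by repeatedly splitting around the median of the
// smaller range and binary-searching it in the larger one. Output stays sorted.
template<typename T>
void intersectDoubleBinarySearch(const T * a, size_t an, const T * b, size_t bn, std::vector<T> & out)
{
    if (an == 0 || bn == 0) return;
    if (an > bn) { intersectDoubleBinarySearch(b, bn, a, an, out); return; } // drive recursion by the smaller side
    size_t mid = an / 2;
    const T & pivot = a[mid];
    const T * split = std::lower_bound(b, b + bn, pivot);   // first element in b not less than pivot
    intersectDoubleBinarySearch(a, mid, b, split - b, out);  // elements below the pivot
    if (split != b + bn && !(pivot < *split)) {              // pivot occurs in both ranges
        out.push_back(pivot);
        ++split;
    }
    intersectDoubleBinarySearch(a + mid + 1, an - mid - 1, split, (b + bn) - split, out); // elements above the pivot
}

// Example: {1,3,5,7} ∩ {3,4,5,6,7,8} yields {3,5,7}.
// std::vector<int> out;
// std::vector<int> x {1,3,5,7}, y {3,4,5,6,7,8};
// intersectDoubleBinarySearch(x.data(), x.size(), y.data(), y.size(), out);
```

Compared to the loop above (one `find` over the whole larger set per element of the smaller set), this narrows the searched range as it recurses, at the cost of a less predictable access pattern, which is exactly the trade-off the comment describes.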
@@ -2419,6 +2478,8 @@ static RegisterPrimOp primop_intersectAttrs({
.doc = R"(
Return a set consisting of the attributes in the set *e2* which have the
same name as some attribute in *e1*.
+
+ Performs in O(*n* log *m*) where *n* is the size of the smaller set and *m* the larger set's size.
)",
.fun = prim_intersectAttrs,
});
@@ -3999,7 +4060,7 @@ void EvalState::createBaseEnv()
// the parser needs two NUL bytes as terminators; one of them
// is implied by being a C string.
"\0";
- eval(parse(code, sizeof(code), foFile, derivationNixPath, "/", staticBaseEnv), *vDerivation);
+ eval(parse(code, sizeof(code), derivationNixPath, "/", staticBaseEnv), *vDerivation);
}
diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc
index ddbaae449..1fb480089 100644
--- a/src/libexpr/primops/fetchTree.cc
+++ b/src/libexpr/primops/fetchTree.cc
@@ -218,8 +218,6 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v
} else
url = state.forceStringNoCtx(*args[0], pos, "while evaluating the url we should fetch");
- url = resolveUri(*url);
-
state.checkURI(*url);
if (name == "")
diff --git a/src/libexpr/tests/libexprtests.hh b/src/libexpr/tests/libexprtests.hh
index 4f6915882..03e468fbb 100644
--- a/src/libexpr/tests/libexprtests.hh
+++ b/src/libexpr/tests/libexprtests.hh
@@ -12,6 +12,7 @@ namespace nix {
class LibExprTest : public ::testing::Test {
public:
static void SetUpTestSuite() {
+ initLibStore();
initGC();
}
@@ -123,7 +124,7 @@ namespace nix {
MATCHER_P(IsAttrsOfSize, n, fmt("Is a set of size [%1%]", n)) {
if (arg.type() != nAttrs) {
- *result_listener << "Expexted set got " << arg.type();
+ *result_listener << "Expected set got " << arg.type();
return false;
} else if (arg.attrs->size() != (size_t)n) {
*result_listener << "Expected a set with " << n << " attributes but got " << arg.attrs->size();
diff --git a/src/libexpr/tests/primops.cc b/src/libexpr/tests/primops.cc
index a4102753c..9cdcf64a1 100644
--- a/src/libexpr/tests/primops.cc
+++ b/src/libexpr/tests/primops.cc
@@ -151,20 +151,7 @@ namespace nix {
// The `y` attribute is at position
const char* expr = "builtins.unsafeGetAttrPos \"y\" { y = \"x\"; }";
auto v = eval(expr);
- ASSERT_THAT(v, IsAttrsOfSize(3));
-
- auto file = v.attrs->find(createSymbol("file"));
- ASSERT_NE(file, nullptr);
- // FIXME: The file when running these tests is the input string?!?
- ASSERT_THAT(*file->value, IsStringEq(expr));
-
- auto line = v.attrs->find(createSymbol("line"));
- ASSERT_NE(line, nullptr);
- ASSERT_THAT(*line->value, IsIntEq(1));
-
- auto column = v.attrs->find(createSymbol("column"));
- ASSERT_NE(column, nullptr);
- ASSERT_THAT(*column->value, IsIntEq(33));
+ ASSERT_THAT(v, IsNull());
}
TEST_F(PrimOpTest, hasAttr) {
@@ -617,7 +604,7 @@ namespace nix {
TEST_F(PrimOpTest, storeDir) {
auto v = eval("builtins.storeDir");
- ASSERT_THAT(v, IsStringEq("/nix/store"));
+ ASSERT_THAT(v, IsStringEq(settings.nixStore));
}
TEST_F(PrimOpTest, nixVersion) {
diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc
index 4d63d8b49..5dc453b2e 100644
--- a/src/libexpr/value-to-json.cc
+++ b/src/libexpr/value-to-json.cc
@@ -1,84 +1,82 @@
#include "value-to-json.hh"
-#include "json.hh"
#include "eval-inline.hh"
#include "util.hh"
#include <cstdlib>
#include <iomanip>
+#include <nlohmann/json.hpp>
namespace nix {
-
-void printValueAsJSON(EvalState & state, bool strict,
- Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore)
+using json = nlohmann::json;
+json printValueAsJSON(EvalState & state, bool strict,
+ Value & v, const PosIdx pos, PathSet & context, bool copyToStore)
{
checkInterrupt();
if (strict) state.forceValue(v, pos);
+ json out;
+
switch (v.type()) {
case nInt:
- out.write(v.integer);
+ out = v.integer;
break;
case nBool:
- out.write(v.boolean);
+ out = v.boolean;
break;
case nString:
copyContext(v, context);
- out.write(v.string.s);
+ out = v.string.s;
break;
case nPath:
if (copyToStore)
- out.write(state.copyPathToStore(context, v.path));
+ out = state.copyPathToStore(context, v.path);
else
- out.write(v.path);
+ out = v.path;
break;
case nNull:
- out.write(nullptr);
break;
case nAttrs: {
auto maybeString = state.tryAttrsToString(pos, v, context, false, false);
if (maybeString) {
- out.write(*maybeString);
+ out = *maybeString;
break;
}
auto i = v.attrs->find(state.sOutPath);
if (i == v.attrs->end()) {
- auto obj(out.object());
+ out = json::object();
StringSet names;
for (auto & j : *v.attrs)
names.emplace(state.symbols[j.name]);
for (auto & j : names) {
Attr & a(*v.attrs->find(state.symbols.create(j)));
- auto placeholder(obj.placeholder(j));
- printValueAsJSON(state, strict, *a.value, a.pos, placeholder, context, copyToStore);
+ out[j] = printValueAsJSON(state, strict, *a.value, a.pos, context, copyToStore);
}
} else
- printValueAsJSON(state, strict, *i->value, i->pos, out, context, copyToStore);
+ return printValueAsJSON(state, strict, *i->value, i->pos, context, copyToStore);
break;
}
case nList: {
- auto list(out.list());
- for (auto elem : v.listItems()) {
- auto placeholder(list.placeholder());
- printValueAsJSON(state, strict, *elem, pos, placeholder, context, copyToStore);
- }
+ out = json::array();
+ for (auto elem : v.listItems())
+ out.push_back(printValueAsJSON(state, strict, *elem, pos, context, copyToStore));
break;
}
case nExternal:
- v.external->printValueAsJSON(state, strict, out, context, copyToStore);
+ return v.external->printValueAsJSON(state, strict, context, copyToStore);
break;
case nFloat:
- out.write(v.fpoint);
+ out = v.fpoint;
break;
case nThunk:
@@ -91,17 +89,17 @@ void printValueAsJSON(EvalState & state, bool strict,
state.debugThrowLastTrace(e);
throw e;
}
+ return out;
}
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore)
{
- JSONPlaceholder out(str);
- printValueAsJSON(state, strict, v, pos, out, context, copyToStore);
+ str << printValueAsJSON(state, strict, v, pos, context, copyToStore);
}
-void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
- JSONPlaceholder & out, PathSet & context, bool copyToStore) const
+json ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
+ PathSet & context, bool copyToStore) const
{
state.debugThrowLastTrace(TypeError("cannot convert %1% to JSON", showType()));
}
diff --git a/src/libexpr/value-to-json.hh b/src/libexpr/value-to-json.hh
index 7ddc8a5b1..22f26b790 100644
--- a/src/libexpr/value-to-json.hh
+++ b/src/libexpr/value-to-json.hh
@@ -5,13 +5,12 @@
#include <string>
#include <map>
+#include <nlohmann/json_fwd.hpp>
namespace nix {
-class JSONPlaceholder;
-
-void printValueAsJSON(EvalState & state, bool strict,
- Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore = true);
+nlohmann::json printValueAsJSON(EvalState & state, bool strict,
+ Value & v, const PosIdx pos, PathSet & context, bool copyToStore = true);
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore = true);
diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc
index 7c3bf9492..3f6222768 100644
--- a/src/libexpr/value-to-xml.cc
+++ b/src/libexpr/value-to-xml.cc
@@ -24,7 +24,8 @@ static void printValueAsXML(EvalState & state, bool strict, bool location,
static void posToXML(EvalState & state, XMLAttrs & xmlAttrs, const Pos & pos)
{
- xmlAttrs["path"] = pos.file;
+ if (auto path = std::get_if<Path>(&pos.origin))
+ xmlAttrs["path"] = *path;
xmlAttrs["line"] = (format("%1%") % pos.line).str();
xmlAttrs["column"] = (format("%1%") % pos.column).str();
}
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
index a428bc565..f57597cff 100644
--- a/src/libexpr/value.hh
+++ b/src/libexpr/value.hh
@@ -7,6 +7,7 @@
#if HAVE_BOEHMGC
#include <gc/gc_allocator.h>
#endif
+#include <nlohmann/json_fwd.hpp>
namespace nix {
@@ -62,7 +63,6 @@ class StorePath;
class Store;
class EvalState;
class XMLWriter;
-class JSONPlaceholder;
typedef int64_t NixInt;
@@ -98,8 +98,8 @@ class ExternalValueBase
virtual bool operator ==(const ExternalValueBase & b) const;
/* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */
- virtual void printValueAsJSON(EvalState & state, bool strict,
- JSONPlaceholder & out, PathSet & context, bool copyToStore = true) const;
+ virtual nlohmann::json printValueAsJSON(EvalState & state, bool strict,
+ PathSet & context, bool copyToStore = true) const;
/* Print the value as XML. Defaults to unevaluated */
virtual void printValueAsXML(EvalState & state, bool strict, bool location,
diff --git a/src/libfetchers/fetch-settings.hh b/src/libfetchers/fetch-settings.hh
index 6452143a1..f33cbdcfc 100644
--- a/src/libfetchers/fetch-settings.hh
+++ b/src/libfetchers/fetch-settings.hh
@@ -71,7 +71,12 @@ struct FetchSettings : public Config
"Whether to warn about dirty Git/Mercurial trees."};
Setting<std::string> flakeRegistry{this, "https://channels.nixos.org/flake-registry.json", "flake-registry",
- "Path or URI of the global flake registry."};
+ R"(
+ Path or URI of the global flake registry.
+
+ When empty, disables the global flake registry.
+ )"};
+
Setting<bool> useRegistries{this, true, "use-registries",
"Whether to use flake registries to resolve flake references."};
diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc
index 6957d2da4..c767e72e5 100644
--- a/src/libfetchers/fetchers.cc
+++ b/src/libfetchers/fetchers.cc
@@ -266,7 +266,7 @@ std::optional<time_t> Input::getLastModified() const
return {};
}
-ParsedURL InputScheme::toURL(const Input & input)
+ParsedURL InputScheme::toURL(const Input & input) const
{
throw Error("don't know how to convert input '%s' to a URL", attrsToJSON(input.attrs));
}
@@ -274,7 +274,7 @@ ParsedURL InputScheme::toURL(const Input & input)
Input InputScheme::applyOverrides(
const Input & input,
std::optional<std::string> ref,
- std::optional<Hash> rev)
+ std::optional<Hash> rev) const
{
if (ref)
throw Error("don't know how to set branch/tag name of input '%s' to '%s'", input.to_string(), *ref);
@@ -293,7 +293,7 @@ void InputScheme::markChangedFile(const Input & input, std::string_view file, st
assert(false);
}
-void InputScheme::clone(const Input & input, const Path & destDir)
+void InputScheme::clone(const Input & input, const Path & destDir) const
{
throw Error("do not know how to clone input '%s'", input.to_string());
}
diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh
index bc9a76b0b..17da37f47 100644
--- a/src/libfetchers/fetchers.hh
+++ b/src/libfetchers/fetchers.hh
@@ -107,26 +107,25 @@ public:
* recognized. The Input object contains the information the fetcher
* needs to actually perform the "fetch()" when called.
*/
-
struct InputScheme
{
virtual ~InputScheme()
{ }
- virtual std::optional<Input> inputFromURL(const ParsedURL & url) = 0;
+ virtual std::optional<Input> inputFromURL(const ParsedURL & url) const = 0;
- virtual std::optional<Input> inputFromAttrs(const Attrs & attrs) = 0;
+ virtual std::optional<Input> inputFromAttrs(const Attrs & attrs) const = 0;
- virtual ParsedURL toURL(const Input & input);
+ virtual ParsedURL toURL(const Input & input) const;
- virtual bool hasAllInfo(const Input & input) = 0;
+ virtual bool hasAllInfo(const Input & input) const = 0;
virtual Input applyOverrides(
const Input & input,
std::optional<std::string> ref,
- std::optional<Hash> rev);
+ std::optional<Hash> rev) const;
- virtual void clone(const Input & input, const Path & destDir);
+ virtual void clone(const Input & input, const Path & destDir) const;
virtual std::optional<Path> getSourcePath(const Input & input);
diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc
index c1a21e764..1f7d7c07d 100644
--- a/src/libfetchers/git.cc
+++ b/src/libfetchers/git.cc
@@ -18,6 +18,7 @@
using namespace std::string_literals;
namespace nix::fetchers {
+
namespace {
// Explicit initial branch of our bare repo to suppress warnings from new version of git.
@@ -26,23 +27,23 @@ namespace {
// old version of git, which will ignore unrecognized `-c` options.
const std::string gitInitialBranch = "__nix_dummy_branch";
-bool isCacheFileWithinTtl(const time_t now, const struct stat & st)
+bool isCacheFileWithinTtl(time_t now, const struct stat & st)
{
return st.st_mtime + settings.tarballTtl > now;
}
-bool touchCacheFile(const Path& path, const time_t& touch_time)
+bool touchCacheFile(const Path & path, time_t touch_time)
{
- struct timeval times[2];
- times[0].tv_sec = touch_time;
- times[0].tv_usec = 0;
- times[1].tv_sec = touch_time;
- times[1].tv_usec = 0;
+ struct timeval times[2];
+ times[0].tv_sec = touch_time;
+ times[0].tv_usec = 0;
+ times[1].tv_sec = touch_time;
+ times[1].tv_usec = 0;
- return lutimes(path.c_str(), times) == 0;
+ return lutimes(path.c_str(), times) == 0;
}
-Path getCachePath(std::string key)
+Path getCachePath(std::string_view key)
{
return getCacheDir() + "/nix/gitv3/" +
hashString(htSHA256, key).to_string(Base32, false);
@@ -57,13 +58,12 @@ Path getCachePath(std::string key)
// ...
std::optional<std::string> readHead(const Path & path)
{
- auto [exit_code, output] = runProgram(RunOptions {
+ auto [status, output] = runProgram(RunOptions {
.program = "git",
+ // FIXME: use 'HEAD' to avoid returning all refs
.args = {"ls-remote", "--symref", path},
});
- if (exit_code != 0) {
- return std::nullopt;
- }
+ if (status != 0) return std::nullopt;
std::string_view line = output;
line = line.substr(0, line.find("\n"));
@@ -82,12 +82,11 @@ std::optional<std::string> readHead(const Path & path)
}
// Persist the HEAD ref from the remote repo in the local cached repo.
-bool storeCachedHead(const std::string& actualUrl, const std::string& headRef)
+bool storeCachedHead(const std::string & actualUrl, const std::string & headRef)
{
Path cacheDir = getCachePath(actualUrl);
- auto gitDir = ".";
try {
- runProgram("git", true, { "-C", cacheDir, "--git-dir", gitDir, "symbolic-ref", "--", "HEAD", headRef });
+ runProgram("git", true, { "-C", cacheDir, "--git-dir", ".", "symbolic-ref", "--", "HEAD", headRef });
} catch (ExecError &e) {
if (!WIFEXITED(e.status)) throw;
return false;
@@ -96,7 +95,7 @@ bool storeCachedHead(const std::string& actualUrl, const std::string& headRef)
return true;
}
-std::optional<std::string> readHeadCached(const std::string& actualUrl)
+std::optional<std::string> readHeadCached(const std::string & actualUrl)
{
// Create a cache path to store the branch of the HEAD ref. Append something
// in front of the URL to prevent collision with the repository itself.
@@ -110,16 +109,15 @@ std::optional<std::string> readHeadCached(const std::string& actualUrl)
cachedRef = readHead(cacheDir);
if (cachedRef != std::nullopt &&
*cachedRef != gitInitialBranch &&
- isCacheFileWithinTtl(now, st)) {
+ isCacheFileWithinTtl(now, st))
+ {
debug("using cached HEAD ref '%s' for repo '%s'", *cachedRef, actualUrl);
return cachedRef;
}
}
auto ref = readHead(actualUrl);
- if (ref) {
- return ref;
- }
+ if (ref) return ref;
if (cachedRef) {
// If the cached git ref is expired in fetch() below, and the 'git fetch'
@@ -250,7 +248,7 @@ std::pair<StorePath, Input> fetchFromWorkdir(ref<Store> store, Input & input, co
struct GitInputScheme : InputScheme
{
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "git" &&
url.scheme != "git+http" &&
@@ -265,7 +263,7 @@ struct GitInputScheme : InputScheme
Attrs attrs;
attrs.emplace("type", "git");
- for (auto &[name, value] : url.query) {
+ for (auto & [name, value] : url.query) {
if (name == "rev" || name == "ref")
attrs.emplace(name, value);
else if (name == "shallow" || name == "submodules")
@@ -279,7 +277,7 @@ struct GitInputScheme : InputScheme
return inputFromAttrs(attrs);
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "git") return {};
@@ -302,7 +300,7 @@ struct GitInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
auto url = parseURL(getStrAttr(input.attrs, "url"));
if (url.scheme != "git") url.scheme = "git+" + url.scheme;
@@ -313,7 +311,7 @@ struct GitInputScheme : InputScheme
return url;
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
bool maybeDirty = !input.getRef();
bool shallow = maybeGetBoolAttr(input.attrs, "shallow").value_or(false);
@@ -325,7 +323,7 @@ struct GitInputScheme : InputScheme
Input applyOverrides(
const Input & input,
std::optional<std::string> ref,
- std::optional<Hash> rev) override
+ std::optional<Hash> rev) const override
{
auto res(input);
if (rev) res.attrs.insert_or_assign("rev", rev->gitRev());
@@ -335,7 +333,7 @@ struct GitInputScheme : InputScheme
return res;
}
- void clone(const Input & input, const Path & destDir) override
+ void clone(const Input & input, const Path & destDir) const override
{
auto [isLocal, actualUrl] = getActualUrl(input);
@@ -485,6 +483,10 @@ struct GitInputScheme : InputScheme
}
input.attrs.insert_or_assign("ref", *head);
unlockedAttrs.insert_or_assign("ref", *head);
+ } else {
+ if (!input.getRev()) {
+ unlockedAttrs.insert_or_assign("ref", input.getRef().value());
+ }
}
if (auto res = getCache()->lookup(store, unlockedAttrs)) {
@@ -599,9 +601,9 @@ struct GitInputScheme : InputScheme
{
throw Error(
"Cannot find Git revision '%s' in ref '%s' of repository '%s'! "
- "Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the "
- ANSI_BOLD "ref" ANSI_NORMAL " you've specified or add " ANSI_BOLD
- "allRefs = true;" ANSI_NORMAL " to " ANSI_BOLD "fetchGit" ANSI_NORMAL ".",
+ "Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the "
+ ANSI_BOLD "ref" ANSI_NORMAL " you've specified or add " ANSI_BOLD
+ "allRefs = true;" ANSI_NORMAL " to " ANSI_BOLD "fetchGit" ANSI_NORMAL ".",
input.getRev()->gitRev(),
*input.getRef(),
actualUrl
diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc
index a491d82a6..1ed09d30d 100644
--- a/src/libfetchers/github.cc
+++ b/src/libfetchers/github.cc
@@ -26,11 +26,11 @@ std::regex hostRegex(hostRegexS, std::regex::ECMAScript);
struct GitArchiveInputScheme : InputScheme
{
- virtual std::string type() = 0;
+ virtual std::string type() const = 0;
virtual std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const = 0;
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != type()) return {};
@@ -100,7 +100,7 @@ struct GitArchiveInputScheme : InputScheme
return input;
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != type()) return {};
@@ -116,7 +116,7 @@ struct GitArchiveInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
auto owner = getStrAttr(input.attrs, "owner");
auto repo = getStrAttr(input.attrs, "repo");
@@ -132,7 +132,7 @@ struct GitArchiveInputScheme : InputScheme
};
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
return input.getRev() && maybeGetIntAttr(input.attrs, "lastModified");
}
@@ -140,7 +140,7 @@ struct GitArchiveInputScheme : InputScheme
Input applyOverrides(
const Input & _input,
std::optional<std::string> ref,
- std::optional<Hash> rev) override
+ std::optional<Hash> rev) const override
{
auto input(_input);
if (rev && ref)
@@ -227,7 +227,7 @@ struct GitArchiveInputScheme : InputScheme
struct GitHubInputScheme : GitArchiveInputScheme
{
- std::string type() override { return "github"; }
+ std::string type() const override { return "github"; }
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{
@@ -240,14 +240,29 @@ struct GitHubInputScheme : GitArchiveInputScheme
return std::pair<std::string, std::string>("Authorization", fmt("token %s", token));
}
+ std::string getHost(const Input & input) const
+ {
+ return maybeGetStrAttr(input.attrs, "host").value_or("github.com");
+ }
+
+ std::string getOwner(const Input & input) const
+ {
+ return getStrAttr(input.attrs, "owner");
+ }
+
+ std::string getRepo(const Input & input) const
+ {
+ return getStrAttr(input.attrs, "repo");
+ }
+
Hash getRevFromRef(nix::ref<Store> store, const Input & input) const override
{
- auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
+ auto host = getHost(input);
auto url = fmt(
host == "github.com"
? "https://api.%s/repos/%s/%s/commits/%s"
: "https://%s/api/v3/repos/%s/%s/commits/%s",
- host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef());
+ host, getOwner(input), getRepo(input), *input.getRef());
Headers headers = makeHeadersWithAuthTokens(host);
@@ -262,25 +277,30 @@ struct GitHubInputScheme : GitArchiveInputScheme
DownloadUrl getDownloadUrl(const Input & input) const override
{
- // FIXME: use regular /archive URLs instead? api.github.com
- // might have stricter rate limits.
- auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
- auto url = fmt(
- host == "github.com"
- ? "https://api.%s/repos/%s/%s/tarball/%s"
- : "https://%s/api/v3/repos/%s/%s/tarball/%s",
- host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
- input.getRev()->to_string(Base16, false));
+ auto host = getHost(input);
Headers headers = makeHeadersWithAuthTokens(host);
+
+ // If we have no auth headers, we default to the public archive
+ // URLs so that we do not run into rate limits.
+ const auto urlFmt =
+ host != "github.com"
+ ? "https://%s/api/v3/repos/%s/%s/tarball/%s"
+ : headers.empty()
+ ? "https://%s/%s/%s/archive/%s.tar.gz"
+ : "https://api.%s/repos/%s/%s/tarball/%s";
+
+ const auto url = fmt(urlFmt, host, getOwner(input), getRepo(input),
+ input.getRev()->to_string(Base16, false));
+
return DownloadUrl { url, headers };
}
- void clone(const Input & input, const Path & destDir) override
+ void clone(const Input & input, const Path & destDir) const override
{
- auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
+ auto host = getHost(input);
Input::fromURL(fmt("git+https://%s/%s/%s.git",
- host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
+ host, getOwner(input), getRepo(input)))
.applyOverrides(input.getRef(), input.getRev())
.clone(destDir);
}
@@ -288,7 +308,7 @@ struct GitHubInputScheme : GitArchiveInputScheme
struct GitLabInputScheme : GitArchiveInputScheme
{
- std::string type() override { return "gitlab"; }
+ std::string type() const override { return "gitlab"; }
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{
@@ -343,7 +363,7 @@ struct GitLabInputScheme : GitArchiveInputScheme
return DownloadUrl { url, headers };
}
- void clone(const Input & input, const Path & destDir) override
+ void clone(const Input & input, const Path & destDir) const override
{
auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com");
// FIXME: get username somewhere
@@ -356,7 +376,7 @@ struct GitLabInputScheme : GitArchiveInputScheme
struct SourceHutInputScheme : GitArchiveInputScheme
{
- std::string type() override { return "sourcehut"; }
+ std::string type() const override { return "sourcehut"; }
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{
@@ -430,7 +450,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme
return DownloadUrl { url, headers };
}
- void clone(const Input & input, const Path & destDir) override
+ void clone(const Input & input, const Path & destDir) const override
{
auto host = maybeGetStrAttr(input.attrs, "host").value_or("git.sr.ht");
Input::fromURL(fmt("git+https://%s/%s/%s",
diff --git a/src/libfetchers/indirect.cc b/src/libfetchers/indirect.cc
index 9288fc6cf..b99504a16 100644
--- a/src/libfetchers/indirect.cc
+++ b/src/libfetchers/indirect.cc
@@ -7,7 +7,7 @@ std::regex flakeRegex("[a-zA-Z][a-zA-Z0-9_-]*", std::regex::ECMAScript);
struct IndirectInputScheme : InputScheme
{
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "flake") return {};
@@ -50,7 +50,7 @@ struct IndirectInputScheme : InputScheme
return input;
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "indirect") return {};
@@ -68,7 +68,7 @@ struct IndirectInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
ParsedURL url;
url.scheme = "flake";
@@ -78,7 +78,7 @@ struct IndirectInputScheme : InputScheme
return url;
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
return false;
}
@@ -86,7 +86,7 @@ struct IndirectInputScheme : InputScheme
Input applyOverrides(
const Input & _input,
std::optional<std::string> ref,
- std::optional<Hash> rev) override
+ std::optional<Hash> rev) const override
{
auto input(_input);
if (rev) input.attrs.insert_or_assign("rev", rev->gitRev());
diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc
index 5c5671681..86e8f81f4 100644
--- a/src/libfetchers/mercurial.cc
+++ b/src/libfetchers/mercurial.cc
@@ -43,7 +43,7 @@ static std::string runHg(const Strings & args, const std::optional<std::string>
struct MercurialInputScheme : InputScheme
{
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "hg+http" &&
url.scheme != "hg+https" &&
@@ -69,7 +69,7 @@ struct MercurialInputScheme : InputScheme
return inputFromAttrs(attrs);
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "hg") return {};
@@ -89,7 +89,7 @@ struct MercurialInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
auto url = parseURL(getStrAttr(input.attrs, "url"));
url.scheme = "hg+" + url.scheme;
@@ -98,7 +98,7 @@ struct MercurialInputScheme : InputScheme
return url;
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
// FIXME: ugly, need to distinguish between dirty and clean
// default trees.
@@ -108,7 +108,7 @@ struct MercurialInputScheme : InputScheme
Input applyOverrides(
const Input & input,
std::optional<std::string> ref,
- std::optional<Hash> rev) override
+ std::optional<Hash> rev) const override
{
auto res(input);
if (rev) res.attrs.insert_or_assign("rev", rev->gitRev());
diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc
index f0ef97da5..61541e69d 100644
--- a/src/libfetchers/path.cc
+++ b/src/libfetchers/path.cc
@@ -6,7 +6,7 @@ namespace nix::fetchers {
struct PathInputScheme : InputScheme
{
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "path") return {};
@@ -32,7 +32,7 @@ struct PathInputScheme : InputScheme
return input;
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "path") return {};
@@ -54,7 +54,7 @@ struct PathInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
auto query = attrsToQuery(input.attrs);
query.erase("path");
@@ -66,7 +66,7 @@ struct PathInputScheme : InputScheme
};
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
return true;
}
diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc
index acd1ff866..43c03beec 100644
--- a/src/libfetchers/registry.cc
+++ b/src/libfetchers/registry.cc
@@ -153,6 +153,9 @@ static std::shared_ptr<Registry> getGlobalRegistry(ref<Store> store)
{
static auto reg = [&]() {
auto path = fetchSettings.flakeRegistry.get();
+ if (path == "") {
+ return std::make_shared<Registry>(Registry::Global); // empty registry
+ }
if (!hasPrefix(path, "/")) {
auto storePath = downloadFile(store, path, "flake-registry.json", false).storePath;
diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc
index 6c551bd93..e9686262a 100644
--- a/src/libfetchers/tarball.cc
+++ b/src/libfetchers/tarball.cc
@@ -185,7 +185,7 @@ struct CurlInputScheme : InputScheme
virtual bool isValidURL(const ParsedURL & url) const = 0;
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (!isValidURL(url))
return std::nullopt;
@@ -203,7 +203,7 @@ struct CurlInputScheme : InputScheme
return input;
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
auto type = maybeGetStrAttr(attrs, "type");
if (type != inputType()) return {};
@@ -220,16 +220,17 @@ struct CurlInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
auto url = parseURL(getStrAttr(input.attrs, "url"));
- // NAR hashes are preferred over file hashes since tar/zip files // don't have a canonical representation.
+ // NAR hashes are preferred over file hashes since tar/zip
+ // files don't have a canonical representation.
if (auto narHash = input.getNarHash())
url.query.insert_or_assign("narHash", narHash->to_string(SRI, true));
return url;
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
return true;
}
diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc
index 961f4e18a..e9205a5e5 100644
--- a/src/libmain/progress-bar.cc
+++ b/src/libmain/progress-bar.cc
@@ -132,7 +132,7 @@ public:
log(*state, lvl, fs.s);
}
- void logEI(const ErrorInfo &ei) override
+ void logEI(const ErrorInfo & ei) override
{
auto state(state_.lock());
@@ -180,10 +180,12 @@ public:
auto machineName = getS(fields, 1);
if (machineName != "")
i->s += fmt(" on " ANSI_BOLD "%s" ANSI_NORMAL, machineName);
- auto curRound = getI(fields, 2);
- auto nrRounds = getI(fields, 3);
- if (nrRounds != 1)
- i->s += fmt(" (round %d/%d)", curRound, nrRounds);
+
+ // Used to be curRound and nrRounds, but the
+ // implementation was broken for a long time.
+ if (getI(fields, 2) != 1 || getI(fields, 3) != 1) {
+ throw Error("log message indicated repeating builds, but this is not currently implemented");
+ }
i->name = DrvName(name).name;
}
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index c1cf38565..99e3d6f56 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -33,6 +33,7 @@
namespace nix {
+char * * savedArgv;
static bool gcWarning = true;
@@ -234,6 +235,7 @@ void initNix()
#endif
preloadNSS();
+ initLibStore();
}
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 9226c4e19..149d414d3 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -9,7 +9,6 @@
#include "remote-fs-accessor.hh"
#include "nar-info-disk-cache.hh"
#include "nar-accessor.hh"
-#include "json.hh"
#include "thread-pool.hh"
#include "callback.hh"
@@ -194,19 +193,12 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
/* Optionally write a JSON file containing a listing of the
contents of the NAR. */
if (writeNARListing) {
- std::ostringstream jsonOut;
-
- {
- JSONObject jsonRoot(jsonOut);
- jsonRoot.attr("version", 1);
-
- {
- auto res = jsonRoot.placeholder("root");
- listNar(res, ref<FSAccessor>(narAccessor), "", true);
- }
- }
+ nlohmann::json j = {
+ {"version", 1},
+ {"root", listNar(ref<FSAccessor>(narAccessor), "", true)},
+ };
- upsertFile(std::string(info.path.hashPart()) + ".ls", jsonOut.str(), "application/json");
+ upsertFile(std::string(info.path.hashPart()) + ".ls", j.dump(), "application/json");
}
/* Optionally maintain an index of DWARF debug info files
@@ -331,6 +323,17 @@ bool BinaryCacheStore::isValidPathUncached(const StorePath & storePath)
return fileExists(narInfoFileFor(storePath));
}
+std::optional<StorePath> BinaryCacheStore::queryPathFromHashPart(const std::string & hashPart)
+{
+ auto pseudoPath = StorePath(hashPart + "-" + MissingName);
+ try {
+ auto info = queryPathInfo(pseudoPath);
+ return info->path;
+ } catch (InvalidPath &) {
+ return std::nullopt;
+ }
+}
+
void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
{
auto info = queryPathInfo(storePath).cast<const NarInfo>();
@@ -343,7 +346,7 @@ void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
try {
getFile(info->url, *decompressor);
} catch (NoSuchBinaryCacheFile & e) {
- throw SubstituteGone(e.info());
+ throw SubstituteGone(std::move(e.info()));
}
decompressor->finish();
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index ca538b3cb..8c82e2387 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -95,8 +95,7 @@ public:
void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept override;
- std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
- { unsupported("queryPathFromHashPart"); }
+ std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override;
void addToStore(const ValidPathInfo & info, Source & narSource,
RepairFlag repair, CheckSigsFlag checkSigs) override;
diff --git a/src/libstore/build-result.hh b/src/libstore/build-result.hh
index 24fb1f763..a5749cf33 100644
--- a/src/libstore/build-result.hh
+++ b/src/libstore/build-result.hh
@@ -5,7 +5,7 @@
#include <string>
#include <chrono>
-
+#include <optional>
namespace nix {
@@ -78,6 +78,9 @@ struct BuildResult
was repeated). */
time_t startTime = 0, stopTime = 0;
+ /* User and system CPU time the build took. */
+ std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
+
bool success()
{
return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;
diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc
index 41d2e2a1c..173058d1b 100644
--- a/src/libstore/build/derivation-goal.cc
+++ b/src/libstore/build/derivation-goal.cc
@@ -7,7 +7,6 @@
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
-#include "json.hh"
#include "compression.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
@@ -40,7 +39,6 @@
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/ip.h>
-#include <sys/personality.h>
#include <sys/mman.h>
#include <sched.h>
#include <sys/param.h>
@@ -135,7 +133,7 @@ void DerivationGoal::killChild()
void DerivationGoal::timedOut(Error && ex)
{
killChild();
- done(BuildResult::TimedOut, {}, ex);
+ done(BuildResult::TimedOut, {}, std::move(ex));
}
@@ -502,6 +500,14 @@ void DerivationGoal::inputsRealised()
now-known results of dependencies. If so, we become a
stub goal aliasing that resolved derivation goal. */
std::optional attempt = fullDrv.tryResolve(worker.store, inputDrvOutputs);
+ if (!attempt) {
+ /* TODO (impure derivations-induced tech debt) (see below):
+ The above attempt should have found it, but because we manage
+ inputDrvOutputs statefully, sometimes it gets out of sync with
+ the real source of truth (store). So we query the store
+ directly if there's a problem. */
+ attempt = fullDrv.tryResolve(worker.store);
+ }
assert(attempt);
Derivation drvResolved { *std::move(attempt) };
@@ -528,13 +534,31 @@ void DerivationGoal::inputsRealised()
/* Add the relevant output closures of the input derivation
`i' as input paths. Only add the closures of output paths
that are specified as inputs. */
- for (auto & j : wantedDepOutputs)
- if (auto outPath = get(inputDrvOutputs, { depDrvPath, j }))
+ for (auto & j : wantedDepOutputs) {
+ /* TODO (impure derivations-induced tech debt):
+ Tracking input derivation outputs statefully through the
+ goals is error-prone and has led to bugs.
+ For a robust Nix, we need to move towards the `else` branch,
+ which does not rely on goal state to match up with the
+ reality of the store, our real source of truth.
+ However, the impure derivations feature still relies on this
+ fragile way of doing things, because its builds do not have
+ a representation in the store, which is a usability problem
+ in itself. */
+ if (auto outPath = get(inputDrvOutputs, { depDrvPath, j })) {
worker.store.computeFSClosure(*outPath, inputPaths);
- else
- throw Error(
- "derivation '%s' requires non-existent output '%s' from input derivation '%s'",
- worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath));
+ }
+ else {
+ auto outMap = worker.evalStore.queryDerivationOutputMap(depDrvPath);
+ auto outMapPath = outMap.find(j);
+ if (outMapPath == outMap.end()) {
+ throw Error(
+ "derivation '%s' requires non-existent output '%s' from input derivation '%s'",
+ worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath));
+ }
+ worker.store.computeFSClosure(outMapPath->second, inputPaths);
+ }
+ }
}
}
@@ -546,10 +570,6 @@ void DerivationGoal::inputsRealised()
/* What type of derivation are we building? */
derivationType = drv->type();
- /* Don't repeat fixed-output derivations since they're already
- verified by their output hash.*/
- nrRounds = derivationType.isFixed() ? 1 : settings.buildRepeat + 1;
-
/* Okay, try to build. Note that here we don't wait for a build
slot to become available, since we don't need one if there is a
build hook. */
@@ -564,12 +584,11 @@ void DerivationGoal::started()
auto msg = fmt(
buildMode == bmRepair ? "repairing outputs of '%s'" :
buildMode == bmCheck ? "checking outputs of '%s'" :
- nrRounds > 1 ? "building '%s' (round %d/%d)" :
- "building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
+ "building '%s'", worker.store.printStorePath(drvPath));
fmt("building '%s'", worker.store.printStorePath(drvPath));
if (hook) msg += fmt(" on '%s'", machineName);
act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg,
- Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds});
+ Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", 1, 1});
mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds);
worker.updateProgress();
}
@@ -869,6 +888,14 @@ void DerivationGoal::buildDone()
cleanupPostChildKill();
+ if (buildResult.cpuUser && buildResult.cpuSystem) {
+ debug("builder for '%s' terminated with status %d, user CPU %.3fs, system CPU %.3fs",
+ worker.store.printStorePath(drvPath),
+ status,
+ ((double) buildResult.cpuUser->count()) / 1000000,
+ ((double) buildResult.cpuSystem->count()) / 1000000);
+ }
+
bool diskFull = false;
try {
@@ -915,14 +942,6 @@ void DerivationGoal::buildDone()
cleanupPostOutputsRegisteredModeNonCheck();
- /* Repeat the build if necessary. */
- if (curRound++ < nrRounds) {
- outputLocks.unlock();
- state = &DerivationGoal::tryToBuild;
- worker.wakeUp(shared_from_this());
- return;
- }
-
/* It is now safe to delete the lock files, since all future
lockers will see that the output paths are valid; they will
not create new lock files with the same names as the old
@@ -951,7 +970,7 @@ void DerivationGoal::buildDone()
BuildResult::PermanentFailure;
}
- done(st, {}, e);
+ done(st, {}, std::move(e));
return;
}
}
@@ -983,22 +1002,34 @@ void DerivationGoal::resolvedFinished()
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,resolve)",
worker.store.printStorePath(drvPath), wantedOutput);
- auto realisation = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
- if (!realisation)
- throw Error(
- "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
- worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
+
+ auto realisation = [&]{
+ auto take1 = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
+ if (take1) return *take1;
+
+ /* The above `get` should work. But because we track
+ outputs in resolvedResult statefully, it can get out of sync with the
+ store, which is our actual source of truth. For now we just
+ check the store directly if it fails. */
+ auto take2 = worker.evalStore.queryRealisation(DrvOutput { *resolvedHash, wantedOutput });
+ if (take2) return *take2;
+
+ throw Error(
+ "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
+ worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
+ }();
+
if (drv->type().isPure()) {
- auto newRealisation = *realisation;
+ auto newRealisation = realisation;
newRealisation.id = DrvOutput { initialOutput->outputHash, wantedOutput };
newRealisation.signatures.clear();
if (!drv->type().isFixed())
- newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath);
+ newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation.outPath);
signRealisation(newRealisation);
worker.store.registerDrvOutput(newRealisation);
}
- outputPaths.insert(realisation->outPath);
- builtOutputs.emplace(realisation->id, *realisation);
+ outputPaths.insert(realisation.outPath);
+ builtOutputs.emplace(realisation.id, realisation);
}
runPostBuildHook(
@@ -1402,7 +1433,7 @@ void DerivationGoal::done(
fs << worker.store.printStorePath(drvPath) << "\t" << buildResult.toString() << std::endl;
}
- amDone(buildResult.success() ? ecSuccess : ecFailed, ex);
+ amDone(buildResult.success() ? ecSuccess : ecFailed, std::move(ex));
}
diff --git a/src/libstore/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh
index 2d8bfd592..d33e04cbc 100644
--- a/src/libstore/build/derivation-goal.hh
+++ b/src/libstore/build/derivation-goal.hh
@@ -115,11 +115,6 @@ struct DerivationGoal : public Goal
BuildMode buildMode;
- /* The current round, if we're building multiple times. */
- size_t curRound = 1;
-
- size_t nrRounds;
-
std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;
std::unique_ptr<Activity> act;
diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc
index bea7363db..e1b80165e 100644
--- a/src/libstore/build/entry-points.cc
+++ b/src/libstore/build/entry-points.cc
@@ -30,7 +30,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
if (ex)
logError(i->ex->info());
else
- ex = i->ex;
+ ex = std::move(i->ex);
}
if (i->exitCode != Goal::ecSuccess) {
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath);
@@ -40,7 +40,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
if (failed.size() == 1 && ex) {
ex->status = worker.exitStatus();
- throw *ex;
+ throw std::move(*ex);
} else if (!failed.empty()) {
if (ex) logError(ex->info());
throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
@@ -109,7 +109,7 @@ void Store::ensurePath(const StorePath & path)
if (goal->exitCode != Goal::ecSuccess) {
if (goal->ex) {
goal->ex->status = worker.exitStatus();
- throw *goal->ex;
+ throw std::move(*goal->ex);
} else
throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
}
diff --git a/src/libstore/build/hook-instance.cc b/src/libstore/build/hook-instance.cc
index 1f19ddccc..cb58a1f02 100644
--- a/src/libstore/build/hook-instance.cc
+++ b/src/libstore/build/hook-instance.cc
@@ -16,11 +16,11 @@ HookInstance::HookInstance()
buildHookArgs.pop_front();
Strings args;
+ args.push_back(std::string(baseNameOf(buildHook)));
for (auto & arg : buildHookArgs)
args.push_back(arg);
- args.push_back(std::string(baseNameOf(settings.buildHook.get())));
args.push_back(std::to_string(verbosity));
/* Create a pipe to get the output of the child. */
diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc
index 5cea3b590..9d869d513 100644
--- a/src/libstore/build/local-derivation-goal.cc
+++ b/src/libstore/build/local-derivation-goal.cc
@@ -8,13 +8,14 @@
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
-#include "json.hh"
#include "compression.hh"
#include "daemon.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "json-utils.hh"
+#include "cgroup.hh"
+#include "personality.hh"
#include <regex>
#include <queue>
@@ -24,7 +25,6 @@
#include <termios.h>
#include <unistd.h>
#include <sys/mman.h>
-#include <sys/utsname.h>
#include <sys/resource.h>
#include <sys/socket.h>
@@ -37,7 +37,6 @@
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/ip.h>
-#include <sys/personality.h>
#include <sys/mman.h>
#include <sched.h>
#include <sys/param.h>
@@ -56,6 +55,7 @@
#include <pwd.h>
#include <grp.h>
+#include <iostream>
namespace nix {
@@ -129,26 +129,44 @@ void LocalDerivationGoal::killChild()
if (pid != -1) {
worker.childTerminated(this);
- if (buildUser) {
- /* If we're using a build user, then there is a tricky
- race condition: if we kill the build user before the
- child has done its setuid() to the build user uid, then
- it won't be killed, and we'll potentially lock up in
- pid.wait(). So also send a conventional kill to the
- child. */
- ::kill(-pid, SIGKILL); /* ignore the result */
- buildUser->kill();
- pid.wait();
- } else
- pid.kill();
+ /* If we're using a build user, then there is a tricky race
+ condition: if we kill the build user before the child has
+ done its setuid() to the build user uid, then it won't be
+ killed, and we'll potentially lock up in pid.wait(). So
+ also send a conventional kill to the child. */
+ ::kill(-pid, SIGKILL); /* ignore the result */
+
+ killSandbox(true);
- assert(pid == -1);
+ pid.wait();
}
DerivationGoal::killChild();
}
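+
+/* Kill all processes in the build's cgroup (optionally recording the
+   cgroup's CPU statistics in the build result), or, when no cgroup is
+   used, all processes running under the build user's UID. This is used
+   both when killing the child and before starting a build, to clean up
+   anything left over from a previous build using the same UID or
+   cgroup. */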
+void LocalDerivationGoal::killSandbox(bool getStats)
+{
+ if (cgroup) {
+ #if __linux__
+ auto stats = destroyCgroup(*cgroup);
+ if (getStats) {
+ buildResult.cpuUser = stats.cpuUser;
+ buildResult.cpuSystem = stats.cpuSystem;
+ }
+ #else
+ abort();
+ #endif
+ }
+
+ else if (buildUser) {
+ auto uid = buildUser->getUID();
+ assert(uid != 0);
+ killUser(uid);
+ }
+}
+
+
void LocalDerivationGoal::tryLocalBuild() {
unsigned int curBuilds = worker.getNrLocalBuilds();
if (curBuilds >= settings.maxBuildJobs) {
@@ -158,28 +176,46 @@ void LocalDerivationGoal::tryLocalBuild() {
return;
}
- /* If `build-users-group' is not empty, then we have to build as
- one of the members of that group. */
- if (settings.buildUsersGroup != "" && getuid() == 0) {
-#if defined(__linux__) || defined(__APPLE__)
- if (!buildUser) buildUser = std::make_unique<UserLock>();
+ /* Are we doing a chroot build? */
+ {
+ auto noChroot = parsedDrv->getBoolAttr("__noChroot");
+ if (settings.sandboxMode == smEnabled) {
+ if (noChroot)
+ throw Error("derivation '%s' has '__noChroot' set, "
+ "but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
+#if __APPLE__
+ if (additionalSandboxProfile != "")
+ throw Error("derivation '%s' specifies a sandbox profile, "
+ "but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
+#endif
+ useChroot = true;
+ }
+ else if (settings.sandboxMode == smDisabled)
+ useChroot = false;
+ else if (settings.sandboxMode == smRelaxed)
+ useChroot = derivationType.isSandboxed() && !noChroot;
+ }
+
+ auto & localStore = getLocalStore();
+ if (localStore.storeDir != localStore.realStoreDir.get()) {
+ #if __linux__
+ useChroot = true;
+ #else
+ throw Error("building using a diverted store is not supported on this platform");
+ #endif
+ }
- if (buildUser->findFreeUser()) {
- /* Make sure that no other processes are executing under this
- uid. */
- buildUser->kill();
- } else {
+ if (useBuildUsers()) {
+ if (!buildUser)
+ buildUser = acquireUserLock(parsedDrv->useUidRange() ? 65536 : 1, useChroot);
+
+ if (!buildUser) {
if (!actLock)
actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
fmt("waiting for UID to build '%s'", yellowtxt(worker.store.printStorePath(drvPath))));
worker.waitForAWhile(shared_from_this());
return;
}
-#else
- /* Don't know how to block the creation of setuid/setgid
- binaries on this platform. */
- throw Error("build users are not supported on this platform for security reasons");
-#endif
}
actLock.reset();
@@ -193,7 +229,7 @@ void LocalDerivationGoal::tryLocalBuild() {
outputLocks.unlock();
buildUser.reset();
worker.permanentFailure = true;
- done(BuildResult::InputRejected, {}, e);
+ done(BuildResult::InputRejected, {}, std::move(e));
return;
}
@@ -270,7 +306,7 @@ void LocalDerivationGoal::cleanupPostChildKill()
malicious user from leaving behind a process that keeps files
open and modifies them after they have been chown'ed to
root. */
- if (buildUser) buildUser->kill();
+ killSandbox(true);
/* Terminate the recursive Nix daemon. */
stopDaemon();
@@ -363,6 +399,64 @@ static void linkOrCopy(const Path & from, const Path & to)
void LocalDerivationGoal::startBuilder()
{
+ if ((buildUser && buildUser->getUIDCount() != 1)
+ #if __linux__
+ || settings.useCgroups
+ #endif
+ )
+ {
+ #if __linux__
+ settings.requireExperimentalFeature(Xp::Cgroups);
+
+ auto cgroupFS = getCgroupFS();
+ if (!cgroupFS)
+ throw Error("cannot determine the cgroups file system");
+
+ auto ourCgroups = getCgroups("/proc/self/cgroup");
+ auto ourCgroup = ourCgroups[""];
+ if (ourCgroup == "")
+ throw Error("cannot determine cgroup name from /proc/self/cgroup");
+
+ auto ourCgroupPath = canonPath(*cgroupFS + "/" + ourCgroup);
+
+ if (!pathExists(ourCgroupPath))
+ throw Error("expected cgroup directory '%s'", ourCgroupPath);
+
+ static std::atomic<unsigned int> counter{0};
+
+ cgroup = buildUser
+ ? fmt("%s/nix-build-uid-%d", ourCgroupPath, buildUser->getUID())
+ : fmt("%s/nix-build-pid-%d-%d", ourCgroupPath, getpid(), counter++);
+
+ debug("using cgroup '%s'", *cgroup);
+
+ /* When using a build user, record the cgroup we used for that
+ user so that if we got interrupted previously, we can kill
+ any left-over cgroup first. */
+ if (buildUser) {
+ auto cgroupsDir = settings.nixStateDir + "/cgroups";
+ createDirs(cgroupsDir);
+
+ auto cgroupFile = fmt("%s/%d", cgroupsDir, buildUser->getUID());
+
+ if (pathExists(cgroupFile)) {
+ auto prevCgroup = readFile(cgroupFile);
+ destroyCgroup(prevCgroup);
+ }
+
+ writeFile(cgroupFile, *cgroup);
+ }
+
+ #else
+ throw Error("cgroups are not supported on this platform");
+ #endif
+ }
+
+ /* Make sure that no other processes are executing under the
+ sandbox uids. This must be done before any chownToBuilder()
+ calls. */
+ killSandbox(false);
+
/* Right platform? */
if (!parsedDrv->canBuildLocally(worker.store))
throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
@@ -376,35 +470,6 @@ void LocalDerivationGoal::startBuilder()
additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
#endif
- /* Are we doing a chroot build? */
- {
- auto noChroot = parsedDrv->getBoolAttr("__noChroot");
- if (settings.sandboxMode == smEnabled) {
- if (noChroot)
- throw Error("derivation '%s' has '__noChroot' set, "
- "but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
-#if __APPLE__
- if (additionalSandboxProfile != "")
- throw Error("derivation '%s' specifies a sandbox profile, "
- "but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
-#endif
- useChroot = true;
- }
- else if (settings.sandboxMode == smDisabled)
- useChroot = false;
- else if (settings.sandboxMode == smRelaxed)
- useChroot = derivationType.isSandboxed() && !noChroot;
- }
-
- auto & localStore = getLocalStore();
- if (localStore.storeDir != localStore.realStoreDir.get()) {
- #if __linux__
- useChroot = true;
- #else
- throw Error("building using a diverted store is not supported on this platform");
- #endif
- }
-
/* Create a temporary directory where the build will take
place. */
tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700);
@@ -580,10 +645,11 @@ void LocalDerivationGoal::startBuilder()
printMsg(lvlChatty, format("setting up chroot environment in '%1%'") % chrootRootDir);
- if (mkdir(chrootRootDir.c_str(), 0750) == -1)
+ // FIXME: make this 0700
+ if (mkdir(chrootRootDir.c_str(), buildUser && buildUser->getUIDCount() != 1 ? 0755 : 0750) == -1)
throw SysError("cannot create '%1%'", chrootRootDir);
- if (buildUser && chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1)
+ if (buildUser && chown(chrootRootDir.c_str(), buildUser->getUIDCount() != 1 ? buildUser->getUID() : 0, buildUser->getGID()) == -1)
throw SysError("cannot change ownership of '%1%'", chrootRootDir);
/* Create a writable /tmp in the chroot. Many builders need
@@ -597,6 +663,10 @@ void LocalDerivationGoal::startBuilder()
nobody account. The latter is kind of a hack to support
Samba-in-QEMU. */
createDirs(chrootRootDir + "/etc");
+ chownToBuilder(chrootRootDir + "/etc");
+
+ if (parsedDrv->useUidRange() && (!buildUser || buildUser->getUIDCount() < 65536))
+ throw Error("feature 'uid-range' requires the setting '%s' to be enabled", settings.autoAllocateUids.name);
/* Declare the build user's group so that programs get a consistent
view of the system (e.g., "id -gn"). */
@@ -647,12 +717,28 @@ void LocalDerivationGoal::startBuilder()
dirsInChroot.erase(worker.store.printStorePath(*i.second.second));
}
-#elif __APPLE__
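+        /* Create the cgroup for this build and hand ownership of its
+           'cgroup.procs' and 'cgroup.threads' control files to the
+           build user. */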
- /* We don't really have any parent prep work to do (yet?)
- All work happens in the child, instead. */
+ if (cgroup) {
+ if (mkdir(cgroup->c_str(), 0755) != 0)
+ throw SysError("creating cgroup '%s'", *cgroup);
+ chownToBuilder(*cgroup);
+ chownToBuilder(*cgroup + "/cgroup.procs");
+ chownToBuilder(*cgroup + "/cgroup.threads");
+ //chownToBuilder(*cgroup + "/cgroup.subtree_control");
+ }
+
#else
- throw Error("sandboxing builds is not supported on this platform");
+ if (parsedDrv->useUidRange())
+ throw Error("feature 'uid-range' is not supported on this platform");
+ #if __APPLE__
+ /* We don't really have any parent prep work to do (yet?)
+ All work happens in the child, instead. */
+ #else
+ throw Error("sandboxing builds is not supported on this platform");
+ #endif
#endif
+ } else {
+ if (parsedDrv->useUidRange())
+ throw Error("feature 'uid-range' is only supported in sandboxed builds");
}
if (needsHashRewrite() && pathExists(homeDir))
@@ -913,14 +999,16 @@ void LocalDerivationGoal::startBuilder()
the calling user (if build users are disabled). */
uid_t hostUid = buildUser ? buildUser->getUID() : getuid();
uid_t hostGid = buildUser ? buildUser->getGID() : getgid();
+ uid_t nrIds = buildUser ? buildUser->getUIDCount() : 1;
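+        /* Each line of uid_map/gid_map has the form
+           "<id-inside-ns> <id-outside-ns> <count>": map 'nrIds'
+           consecutive IDs, starting at the sandbox UID/GID inside the
+           namespace, onto the host IDs starting at the build (or
+           calling) user's UID/GID. */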
writeFile("/proc/" + std::to_string(pid) + "/uid_map",
- fmt("%d %d 1", sandboxUid(), hostUid));
+ fmt("%d %d %d", sandboxUid(), hostUid, nrIds));
- writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
+ if (!buildUser || buildUser->getUIDCount() == 1)
+ writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
writeFile("/proc/" + std::to_string(pid) + "/gid_map",
- fmt("%d %d 1", sandboxGid(), hostGid));
+ fmt("%d %d %d", sandboxGid(), hostGid, nrIds));
} else {
debug("note: not using a user namespace");
if (!buildUser)
@@ -947,6 +1035,10 @@ void LocalDerivationGoal::startBuilder()
throw SysError("getting sandbox user namespace");
}
+ /* Move the child into its own cgroup. */
+ if (cgroup)
+ writeFile(*cgroup + "/cgroup.procs", fmt("%d", (pid_t) pid));
+
/* Signal the builder that we've updated its user namespace. */
writeFull(userNamespaceSync.writeSide.get(), "1");
@@ -1552,6 +1644,22 @@ void setupSeccomp()
seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0)
printError("unable to add ARM seccomp architecture; this may result in spurious build failures if running 32-bit ARM processes");
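+    /* Likewise for MIPS: when building for a 64-bit MIPS system, also
+       add the 32-bit (o32) and n32 ABIs, mirroring the i686/ARM
+       handling above. */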
+ if (nativeSystem == "mips64-linux" &&
+ seccomp_arch_add(ctx, SCMP_ARCH_MIPS) != 0)
+ printError("unable to add mips seccomp architecture");
+
+ if (nativeSystem == "mips64-linux" &&
+ seccomp_arch_add(ctx, SCMP_ARCH_MIPS64N32) != 0)
+ printError("unable to add mips64-*abin32 seccomp architecture");
+
+ if (nativeSystem == "mips64el-linux" &&
+ seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL) != 0)
+ printError("unable to add mipsel seccomp architecture");
+
+ if (nativeSystem == "mips64el-linux" &&
+ seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL64N32) != 0)
+ printError("unable to add mips64el-*abin32 seccomp architecture");
+
/* Prevent builders from creating setuid/setgid binaries. */
for (int perm : { S_ISUID, S_ISGID }) {
if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1,
@@ -1763,6 +1871,13 @@ void LocalDerivationGoal::runChild()
if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1)
throw SysError("mounting /proc");
+ /* Mount sysfs on /sys. */
+ if (buildUser && buildUser->getUIDCount() != 1) {
+ createDirs(chrootRootDir + "/sys");
+ if (mount("none", (chrootRootDir + "/sys").c_str(), "sysfs", 0, 0) == -1)
+ throw SysError("mounting /sys");
+ }
+
/* Mount a new tmpfs on /dev/shm to ensure that whatever
the builder puts in /dev/shm is cleaned up automatically. */
if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0,
@@ -1805,6 +1920,12 @@ void LocalDerivationGoal::runChild()
if (unshare(CLONE_NEWNS) == -1)
throw SysError("unsharing mount namespace");
+ /* Unshare the cgroup namespace. This means
+ /proc/self/cgroup will show the child's cgroup as '/'
+ rather than whatever it is in the parent. */
+ if (cgroup && unshare(CLONE_NEWCGROUP) == -1)
+ throw SysError("unsharing cgroup namespace");
+
/* Do the chroot(). */
if (chdir(chrootRootDir.c_str()) == -1)
throw SysError("cannot change directory to '%1%'", chrootRootDir);
@@ -1842,33 +1963,7 @@ void LocalDerivationGoal::runChild()
/* Close all other file descriptors. */
closeMostFDs({STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO});
-#if __linux__
- /* Change the personality to 32-bit if we're doing an
- i686-linux build on an x86_64-linux machine. */
- struct utsname utsbuf;
- uname(&utsbuf);
- if ((drv->platform == "i686-linux"
- && (settings.thisSystem == "x86_64-linux"
- || (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
- || drv->platform == "armv7l-linux"
- || drv->platform == "armv6l-linux")
- {
- if (personality(PER_LINUX32) == -1)
- throw SysError("cannot set 32-bit personality");
- }
-
- /* Impersonate a Linux 2.6 machine to get some determinism in
- builds that depend on the kernel version. */
- if ((drv->platform == "i686-linux" || drv->platform == "x86_64-linux") && settings.impersonateLinux26) {
- int cur = personality(0xffffffff);
- if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
- }
-
- /* Disable address space randomization for improved
- determinism. */
- int cur = personality(0xffffffff);
- if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
-#endif
+ setPersonality(drv->platform);
/* Disable core dumps by default. */
struct rlimit limit = { 0, RLIM_INFINITY };
@@ -1890,9 +1985,8 @@ void LocalDerivationGoal::runChild()
if (setUser && buildUser) {
/* Preserve supplementary groups of the build user, to allow
admins to specify groups such as "kvm". */
- if (!buildUser->getSupplementaryGIDs().empty() &&
- setgroups(buildUser->getSupplementaryGIDs().size(),
- buildUser->getSupplementaryGIDs().data()) == -1)
+ auto gids = buildUser->getSupplementaryGIDs();
+ if (setgroups(gids.size(), gids.data()) == -1)
throw SysError("cannot set supplementary groups of build user");
if (setgid(buildUser->getGID()) == -1 ||
@@ -2139,7 +2233,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
InodesSeen inodesSeen;
Path checkSuffix = ".check";
- bool keepPreviousRound = settings.keepFailed || settings.runDiffHook;
std::exception_ptr delayedException;
@@ -2221,7 +2314,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Canonicalise first. This ensures that the path we're
rewriting doesn't contain a hard link to /etc/shadow or
something like that. */
- canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, inodesSeen);
+ canonicalisePathMetaData(
+ actualPath,
+ buildUser ? std::optional(buildUser->getUIDRange()) : std::nullopt,
+ inodesSeen);
debug("scanning for references for output '%s' in temp location '%s'", outputName, actualPath);
@@ -2314,6 +2410,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
sink.s = rewriteStrings(sink.s, outputRewrites);
StringSource source(sink.s);
restorePath(actualPath, source);
+
+ /* FIXME: set proper permissions in restorePath() so
+ we don't have to do another traversal. */
+ canonicalisePathMetaData(actualPath, {}, inodesSeen);
}
};
@@ -2476,7 +2576,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
- canonicalisePathMetaData(actualPath, -1, inodesSeen);
+ canonicalisePathMetaData(actualPath, {}, inodesSeen);
/* Calculate where we'll move the output files. In the checking case we
        will leave them where they are, for now, rather than move to
@@ -2560,10 +2660,8 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
debug("unreferenced input: '%1%'", worker.store.printStorePath(i));
}
- if (curRound == nrRounds) {
- localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
- worker.markContentsGood(newInfo.path);
- }
+ localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
+ worker.markContentsGood(newInfo.path);
newInfo.deriver = drvPath;
newInfo.ultimate = true;
@@ -2592,61 +2690,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Apply output checks. */
checkOutputs(infos);
- /* Compare the result with the previous round, and report which
- path is different, if any.*/
- if (curRound > 1 && prevInfos != infos) {
- assert(prevInfos.size() == infos.size());
- for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
- if (!(*i == *j)) {
- buildResult.isNonDeterministic = true;
- Path prev = worker.store.printStorePath(i->second.path) + checkSuffix;
- bool prevExists = keepPreviousRound && pathExists(prev);
- hintformat hint = prevExists
- ? hintfmt("output '%s' of '%s' differs from '%s' from previous round",
- worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath), prev)
- : hintfmt("output '%s' of '%s' differs from previous round",
- worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath));
-
- handleDiffHook(
- buildUser ? buildUser->getUID() : getuid(),
- buildUser ? buildUser->getGID() : getgid(),
- prev, worker.store.printStorePath(i->second.path),
- worker.store.printStorePath(drvPath), tmpDir);
-
- if (settings.enforceDeterminism)
- throw NotDeterministic(hint);
-
- printError(hint);
-
- curRound = nrRounds; // we know enough, bail out early
- }
- }
-
- /* If this is the first round of several, then move the output out of the way. */
- if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && keepPreviousRound) {
- for (auto & [_, outputStorePath] : finalOutputs) {
- auto path = worker.store.printStorePath(outputStorePath);
- Path prev = path + checkSuffix;
- deletePath(prev);
- Path dst = path + checkSuffix;
- renameFile(path, dst);
- }
- }
-
- if (curRound < nrRounds) {
- prevInfos = std::move(infos);
- return {};
- }
-
- /* Remove the .check directories if we're done. FIXME: keep them
- if the result was not determistic? */
- if (curRound == nrRounds) {
- for (auto & [_, outputStorePath] : finalOutputs) {
- Path prev = worker.store.printStorePath(outputStorePath) + checkSuffix;
- deletePath(prev);
- }
- }
-
/* Register each output path as valid, and register the sets of
paths referenced by each of them. If there are cycles in the
outputs, this will fail. */
diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh
index d456e9cae..34c4e9187 100644
--- a/src/libstore/build/local-derivation-goal.hh
+++ b/src/libstore/build/local-derivation-goal.hh
@@ -15,6 +15,9 @@ struct LocalDerivationGoal : public DerivationGoal
/* The process ID of the builder. */
Pid pid;
+ /* The cgroup of the builder, if any. */
+ std::optional<Path> cgroup;
+
/* The temporary directory. */
Path tmpDir;
@@ -92,8 +95,8 @@ struct LocalDerivationGoal : public DerivationGoal
result. */
std::map<Path, ValidPathInfo> prevInfos;
- uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
- gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); }
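+    /* The UID/GID to use inside the sandbox: root (0/0) when a whole
+       UID range is mapped into the user namespace, the traditional
+       1000/100 for a single-UID user-namespace build, and the build
+       user's real UID/GID when no user namespace is used. */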
+ uid_t sandboxUid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 1000 : 0) : buildUser->getUID(); }
+ gid_t sandboxGid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 100 : 0) : buildUser->getGID(); }
const static Path homeDir;
@@ -197,6 +200,10 @@ struct LocalDerivationGoal : public DerivationGoal
/* Forcibly kill the child process, if any. */
void killChild() override;
+ /* Kill any processes running under the build user UID or in the
+ cgroup of the build. */
+ void killSandbox(bool getStats);
+
/* Create alternative path calculated from but distinct from the
input, so we can avoid overwriting outputs (or other store paths)
that already exist. */
diff --git a/src/libstore/build/personality.cc b/src/libstore/build/personality.cc
new file mode 100644
index 000000000..4ad477869
--- /dev/null
+++ b/src/libstore/build/personality.cc
@@ -0,0 +1,44 @@
+#include "personality.hh"
+#include "globals.hh"
+
+#if __linux__
+#include <sys/utsname.h>
+#include <sys/personality.h>
+#endif
+
+#include <cstring>
+
+namespace nix {
+
+void setPersonality(std::string_view system)
+{
+#if __linux__
+ /* Change the personality to 32-bit if we're doing an
+ i686-linux build on an x86_64-linux machine. */
+ struct utsname utsbuf;
+ uname(&utsbuf);
+ if ((system == "i686-linux"
+ && (std::string_view(SYSTEM) == "x86_64-linux"
+ || (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
+ || system == "armv7l-linux"
+ || system == "armv6l-linux")
+ {
+ if (personality(PER_LINUX32) == -1)
+ throw SysError("cannot set 32-bit personality");
+ }
+
+ /* Impersonate a Linux 2.6 machine to get some determinism in
+ builds that depend on the kernel version. */
+ if ((system == "i686-linux" || system == "x86_64-linux") && settings.impersonateLinux26) {
+ int cur = personality(0xffffffff);
+ if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
+ }
+
+ /* Disable address space randomization for improved
+ determinism. */
+ int cur = personality(0xffffffff);
+ if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
+#endif
+}
+
+}
diff --git a/src/libstore/build/personality.hh b/src/libstore/build/personality.hh
new file mode 100644
index 000000000..30e4f4062
--- /dev/null
+++ b/src/libstore/build/personality.hh
@@ -0,0 +1,11 @@
+#pragma once
+
+#include <string>
+
+namespace nix {
+
+void setPersonality(std::string_view system);
+
+}
+
+
diff --git a/src/libstore/builtins/buildenv.cc b/src/libstore/builtins/buildenv.cc
index 47458a388..b1fbda13d 100644
--- a/src/libstore/builtins/buildenv.cc
+++ b/src/libstore/builtins/buildenv.cc
@@ -95,7 +95,7 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir,
throw Error(
"files '%1%' and '%2%' have the same priority %3%; "
"use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' "
- "or type 'nix profile install --help' if using 'nix profile' to find out how"
+ "or type 'nix profile install --help' if using 'nix profile' to find out how "
"to change the priority of one of the conflicting packages"
" (0 being the highest priority)",
srcFile, readLink(dstFile), priority);
diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc
index 48dd5c247..12596ba49 100644
--- a/src/libstore/daemon.cc
+++ b/src/libstore/daemon.cc
@@ -238,7 +238,6 @@ struct ClientSettings
}
else if (trusted
|| name == settings.buildTimeout.name
- || name == settings.buildRepeat.name
|| name == settings.maxSilentTime.name
|| name == settings.pollInterval.name
|| name == "connect-timeout"
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index fe99c3c5e..42a53912e 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -448,7 +448,7 @@ std::string Derivation::unparse(const Store & store, bool maskOutputs,
// FIXME: remove
-bool isDerivation(const std::string & fileName)
+bool isDerivation(std::string_view fileName)
{
return hasSuffix(fileName, drvExtension);
}
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index af198a767..f3cd87fb1 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -224,7 +224,7 @@ StorePath writeDerivation(Store & store,
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name);
// FIXME: remove
-bool isDerivation(const std::string & fileName);
+bool isDerivation(std::string_view fileName);
/* Calculate the name that will be used for the store path for this
output.
diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc
index 44587ae78..3fa5ae4f7 100644
--- a/src/libstore/derived-path.cc
+++ b/src/libstore/derived-path.cc
@@ -20,11 +20,12 @@ nlohmann::json DerivedPath::Built::toJSON(ref<Store> store) const {
// Fallback for the input-addressed derivation case: We expect to always be
// able to print the output paths, so let’s do it
const auto knownOutputs = store->queryPartialDerivationOutputMap(drvPath);
- for (const auto& output : outputs) {
+ for (const auto & output : outputs) {
auto knownOutput = get(knownOutputs, output);
- res["outputs"][output] = (knownOutput && *knownOutput)
- ? store->printStorePath(**knownOutput)
- : nullptr;
+ if (knownOutput && *knownOutput)
+ res["outputs"][output] = store->printStorePath(**knownOutput);
+ else
+ res["outputs"][output] = nullptr;
}
return res;
}
@@ -53,28 +54,13 @@ StorePathSet BuiltPath::outPaths() const
);
}
-template<typename T>
-nlohmann::json stuffToJSON(const std::vector<T> & ts, ref<Store> store) {
- auto res = nlohmann::json::array();
- for (const T & t : ts) {
- std::visit([&res, store](const auto & t) {
- res.push_back(t.toJSON(store));
- }, t.raw());
- }
- return res;
-}
-
-nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store)
-{ return stuffToJSON<BuiltPath>(buildables, store); }
-nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
-{ return stuffToJSON<DerivedPath>(paths, store); }
-
-
-std::string DerivedPath::Opaque::to_string(const Store & store) const {
+std::string DerivedPath::Opaque::to_string(const Store & store) const
+{
return store.printStorePath(path);
}
-std::string DerivedPath::Built::to_string(const Store & store) const {
+std::string DerivedPath::Built::to_string(const Store & store) const
+{
return store.printStorePath(drvPath)
+ "!"
+ (outputs.empty() ? std::string { "*" } : concatStringsSep(",", outputs));
@@ -93,15 +79,16 @@ DerivedPath::Opaque DerivedPath::Opaque::parse(const Store & store, std::string_
return {store.parseStorePath(s)};
}
-DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view s)
+DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view drvS, std::string_view outputsS)
{
- size_t n = s.find("!");
- assert(n != s.npos);
- auto drvPath = store.parseStorePath(s.substr(0, n));
- auto outputsS = s.substr(n + 1);
+ auto drvPath = store.parseStorePath(drvS);
std::set<std::string> outputs;
- if (outputsS != "*")
+ if (outputsS != "*") {
outputs = tokenizeString<std::set<std::string>>(outputsS, ",");
+ if (outputs.empty())
+ throw Error(
+ "Explicit list of wanted outputs '%s' must not be empty. Consider using '*' as a wildcard meaning all outputs if no output in particular is wanted.", outputsS);
+ }
return {drvPath, outputs};
}
@@ -110,7 +97,7 @@ DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
size_t n = s.find("!");
return n == s.npos
? (DerivedPath) DerivedPath::Opaque::parse(store, s)
- : (DerivedPath) DerivedPath::Built::parse(store, s);
+ : (DerivedPath) DerivedPath::Built::parse(store, s.substr(0, n), s.substr(n + 1));
}
RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
diff --git a/src/libstore/derived-path.hh b/src/libstore/derived-path.hh
index 24a0ae773..706e5dcb4 100644
--- a/src/libstore/derived-path.hh
+++ b/src/libstore/derived-path.hh
@@ -47,7 +47,7 @@ struct DerivedPathBuilt {
std::set<std::string> outputs;
std::string to_string(const Store & store) const;
- static DerivedPathBuilt parse(const Store & store, std::string_view);
+ static DerivedPathBuilt parse(const Store & store, std::string_view, std::string_view);
nlohmann::json toJSON(ref<Store> store) const;
bool operator < (const DerivedPathBuilt & b) const
@@ -125,7 +125,4 @@ struct BuiltPath : _BuiltPathRaw {
typedef std::vector<DerivedPath> DerivedPaths;
typedef std::vector<BuiltPath> BuiltPaths;
-nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store);
-nlohmann::json derivedPathsToJSON(const DerivedPaths & , ref<Store> store);
-
}
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index 5746c32a3..756bd4423 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -33,14 +33,6 @@ FileTransferSettings fileTransferSettings;
static GlobalConfig::Register rFileTransferSettings(&fileTransferSettings);
-std::string resolveUri(std::string_view uri)
-{
- if (uri.compare(0, 8, "channel:") == 0)
- return "https://nixos.org/channels/" + std::string(uri.substr(8)) + "/nixexprs.tar.xz";
- else
- return std::string(uri);
-}
-
struct curlFileTransfer : public FileTransfer
{
CURLM * curlm = 0;
@@ -142,9 +134,9 @@ struct curlFileTransfer : public FileTransfer
}
template<class T>
- void fail(const T & e)
+ void fail(T && e)
{
- failEx(std::make_exception_ptr(e));
+ failEx(std::make_exception_ptr(std::move(e)));
}
LambdaSink finalSink;
@@ -472,7 +464,7 @@ struct curlFileTransfer : public FileTransfer
fileTransfer.enqueueItem(shared_from_this());
}
else
- fail(exc);
+ fail(std::move(exc));
}
}
};
@@ -873,14 +865,4 @@ FileTransferError::FileTransferError(FileTransfer::Error error, std::optional<st
err.msg = hf;
}
-bool isUri(std::string_view s)
-{
- if (s.compare(0, 8, "channel:") == 0) return true;
- size_t pos = s.find("://");
- if (pos == std::string::npos) return false;
- std::string scheme(s, 0, pos);
- return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
-}
-
-
}
diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh
index 40e7cf52c..07d58f53a 100644
--- a/src/libstore/filetransfer.hh
+++ b/src/libstore/filetransfer.hh
@@ -125,9 +125,4 @@ public:
FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args);
};
-bool isUri(std::string_view s);
-
-/* Resolve deprecated 'channel:<foo>' URLs. */
-std::string resolveUri(std::string_view uri);
-
}
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index 9ef8972f3..5d91829f1 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -147,7 +147,7 @@ void LocalStore::addTempRoot(const StorePath & path)
} catch (SysError & e) {
/* The garbage collector may have exited, so we need to
restart. */
- if (e.errNo == EPIPE) {
+ if (e.errNo == EPIPE || e.errNo == ECONNRESET) {
debug("GC socket disconnected");
state->fdRootsSocket.close();
goto restart;
@@ -506,6 +506,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
Finally cleanup([&]() {
debug("GC roots server shutting down");
+ fdServer.close();
while (true) {
auto item = remove_begin(*connections.lock());
if (!item) break;
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index ff658c428..130c5b670 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -131,6 +131,10 @@ StringSet Settings::getDefaultSystemFeatures()
StringSet features{"nixos-test", "benchmark", "big-parallel"};
#if __linux__
+ features.insert("uid-range");
+ #endif
+
+ #if __linux__
if (access("/dev/kvm", R_OK | W_OK) == 0)
features.insert("kvm");
#endif
@@ -287,4 +291,18 @@ void initPlugins()
settings.pluginFiles.pluginsLoaded = true;
}
+static bool initLibStoreDone = false;
+
+void assertLibStoreInitialized() {
+ if (!initLibStoreDone) {
+ printError("The program must call nix::initNix() before calling any libstore library functions.");
+ abort();
+    }
+}
+
+void initLibStore() {
+ initLibStoreDone = true;
+}
+
+
}
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 3dcf3d479..f026c8808 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -46,6 +46,14 @@ struct PluginFilesSetting : public BaseSetting<Paths>
void set(const std::string & str, bool append = false) override;
};
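+
+/* The number of UIDs/GIDs reserved per build when IDs are allocated
+   dynamically: a full 16-bit range on Linux, so that a build can map a
+   complete UID range into its user namespace, and a single ID on other
+   platforms. */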
+const uint32_t maxIdsPerBuild =
+ #if __linux__
+ 1 << 16
+ #else
+ 1
+ #endif
+ ;
+
class Settings : public Config {
unsigned int getDefaultCores();
@@ -273,7 +281,68 @@ public:
`NIX_REMOTE` is empty, the uid under which the Nix daemon runs if
`NIX_REMOTE` is `daemon`). Obviously, this should not be used in
multi-user settings with untrusted users.
+
+ Defaults to `nixbld` when running as root, *empty* otherwise.
+ )",
+ {}, false};
+
+ Setting<bool> autoAllocateUids{this, false, "auto-allocate-uids",
+ R"(
+ Whether to select UIDs for builds automatically, instead of using the
+ users in `build-users-group`.
+
+ UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS.
+
+ > **Warning**
+ > This is an experimental feature.
+
+ To enable it, add the following to [`nix.conf`](#):
+
+ ```
+ extra-experimental-features = auto-allocate-uids
+ auto-allocate-uids = true
+ ```
+ )"};
+
+ Setting<uint32_t> startId{this,
+ #if __linux__
+ 0x34000000,
+ #else
+ 56930,
+ #endif
+ "start-id",
+ "The first UID and GID to use for dynamic ID allocation."};
+
+ Setting<uint32_t> uidCount{this,
+ #if __linux__
+ maxIdsPerBuild * 128,
+ #else
+ 128,
+ #endif
+ "id-count",
+ "The number of UIDs/GIDs to use for dynamic ID allocation."};
+
+ #if __linux__
+ Setting<bool> useCgroups{
+ this, false, "use-cgroups",
+ R"(
+ Whether to execute builds inside cgroups.
+ This is only supported on Linux.
+
+ Cgroups are required and enabled automatically for derivations
+ that require the `uid-range` system feature.
+
+ > **Warning**
+ > This is an experimental feature.
+
+ To enable it, add the following to [`nix.conf`](#):
+
+ ```
+ extra-experimental-features = cgroups
+ use-cgroups = true
+ ```
)"};
+ #endif
Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
"Whether to impersonate a Linux 2.6 machine on newer kernels.",
@@ -307,11 +376,6 @@ public:
)",
{"build-max-log-size"}};
- /* When buildRepeat > 0 and verboseBuild == true, whether to print
- repeated builds (i.e. builds other than the first one) to
- stderr. Hack to prevent Hydra logs from being polluted. */
- bool printRepeatedBuilds = true;
-
Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
"How often (in seconds) to poll for locks."};
@@ -427,6 +491,9 @@ public:
for example, `/dev/nvidiactl?` specifies that `/dev/nvidiactl` will
only be mounted in the sandbox if it exists in the host filesystem.
+ If the source is in the Nix store, then its closure will be added to
+ the sandbox as well.
+
Depending on how Nix was built, the default value for this option
may be empty or provide `/bin/sh` as a bind-mount of `bash`.
)",
@@ -435,19 +502,6 @@ public:
Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
"Whether to disable sandboxing when the kernel doesn't allow it."};
- Setting<size_t> buildRepeat{
- this, 0, "repeat",
- R"(
- How many times to repeat builds to check whether they are
- deterministic. The default value is 0. If the value is non-zero,
- every build is repeated the specified number of times. If the
- contents of any of the runs differs from the previous ones and
- `enforce-determinism` is true, the build is rejected and the
- resulting store paths are not registered as “valid” in Nix’s
- database.
- )",
- {"build-repeat"}};
-
#if __linux__
Setting<std::string> sandboxShmSize{
this, "50%", "sandbox-dev-shm-size",
@@ -511,10 +565,6 @@ public:
configuration file, and cannot be passed at the command line.
)"};
- Setting<bool> enforceDeterminism{
- this, true, "enforce-determinism",
- "Whether to fail if repeated builds produce different output. See `repeat`."};
-
Setting<Strings> trustedPublicKeys{
this,
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
@@ -563,10 +613,10 @@ public:
cache) must have a signature by a trusted key. A trusted key is one
listed in `trusted-public-keys`, or a public key counterpart to a
private key stored in a file listed in `secret-key-files`.
-
+
Set to `false` to disable signature checking and trust all
non-content-addressed paths unconditionally.
-
+
(Content-addressed paths are inherently trustworthy and thus
unaffected by this configuration option.)
)"};
@@ -937,4 +987,12 @@ std::vector<Path> getUserConfigFiles();
extern const std::string nixVersion;
+/* NB: This is not sufficient. You need to call initNix() */
+void initLibStore();
+
+/* It's important to initialize before doing _anything_, which is why we
+ call upon the programmer to handle this correctly. However, we only add
+   this in a few key locations, so as not to litter the code. */
+void assertLibStoreInitialized();
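+
+/* A minimal usage sketch (assuming a front-end linked against libmain,
+   where initNix() is expected to take care of this initialization):
+
+       int main(int argc, char * * argv)
+       {
+           nix::initNix();              // expected to call initLibStore()
+           auto store = nix::openStore();
+           ...
+       }
+*/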
+
}
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index dd34b19c6..4d398b21d 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -255,8 +255,8 @@ private:
<< settings.maxLogSize;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3)
conn.to
- << settings.buildRepeat
- << settings.enforceDeterminism;
+ << 0 // buildRepeat hasn't worked for ages anyway
+ << 0;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 7) {
conn.to << ((int) settings.keepFailed);
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index d374d4558..3bab10af9 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -91,6 +91,7 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
if (!lockFile(lockFd.get(), ltWrite, false)) {
printInfo("waiting for exclusive access to the Nix store for ca drvs...");
+ lockFile(lockFd.get(), ltNone, false); // We have acquired a shared lock; release it to prevent deadlocks
lockFile(lockFd.get(), ltWrite, true);
}
@@ -299,6 +300,7 @@ LocalStore::LocalStore(const Params & params)
if (!lockFile(globalLock.get(), ltWrite, false)) {
printInfo("waiting for exclusive access to the Nix store...");
+ lockFile(globalLock.get(), ltNone, false); // We have acquired a shared lock; release it to prevent deadlocks
lockFile(globalLock.get(), ltWrite, true);
}
@@ -583,7 +585,10 @@ void canonicaliseTimestampAndPermissions(const Path & path)
}
-static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
+static void canonicalisePathMetaData_(
+ const Path & path,
+ std::optional<std::pair<uid_t, uid_t>> uidRange,
+ InodesSeen & inodesSeen)
{
checkInterrupt();
@@ -630,7 +635,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
However, ignore files that we chown'ed ourselves previously to
ensure that we don't fail on hard links within the same build
(i.e. "touch $out/foo; ln $out/foo $out/bar"). */
- if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
+ if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) {
if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino)))
throw BuildError("invalid ownership on file '%1%'", path);
mode_t mode = st.st_mode & ~S_IFMT;
@@ -663,14 +668,17 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
if (S_ISDIR(st.st_mode)) {
DirEntries entries = readDirectory(path);
for (auto & i : entries)
- canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen);
+ canonicalisePathMetaData_(path + "/" + i.name, uidRange, inodesSeen);
}
}
-void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
+void canonicalisePathMetaData(
+ const Path & path,
+ std::optional<std::pair<uid_t, uid_t>> uidRange,
+ InodesSeen & inodesSeen)
{
- canonicalisePathMetaData_(path, fromUid, inodesSeen);
+ canonicalisePathMetaData_(path, uidRange, inodesSeen);
/* On platforms that don't have lchown(), the top-level path can't
be a symlink, since we can't change its ownership. */
@@ -683,10 +691,11 @@ void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & ino
}
-void canonicalisePathMetaData(const Path & path, uid_t fromUid)
+void canonicalisePathMetaData(const Path & path,
+ std::optional<std::pair<uid_t, uid_t>> uidRange)
{
InodesSeen inodesSeen;
- canonicalisePathMetaData(path, fromUid, inodesSeen);
+ canonicalisePathMetaData(path, uidRange, inodesSeen);
}
@@ -1331,7 +1340,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
autoGC();
- canonicalisePathMetaData(realPath, -1);
+ canonicalisePathMetaData(realPath, {});
optimisePath(realPath, repair); // FIXME: combine with hashPath()
@@ -1444,7 +1453,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
narHash = narSink.finish();
}
- canonicalisePathMetaData(realPath, -1); // FIXME: merge into restorePath
+ canonicalisePathMetaData(realPath, {}); // FIXME: merge into restorePath
optimisePath(realPath, repair);
@@ -1486,7 +1495,7 @@ StorePath LocalStore::addTextToStore(
writeFile(realPath, s);
- canonicalisePathMetaData(realPath, -1);
+ canonicalisePathMetaData(realPath, {});
StringSink sink;
dumpString(s, sink);
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index bd0ce1fe6..4579c2f62 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -310,9 +310,18 @@ typedef std::set<Inode> InodesSeen;
   - the permissions are set to 444 or 555 (i.e., read-only with or
without execute permission; setuid bits etc. are cleared)
- the owner and group are set to the Nix user and group, if we're
- running as root. */
-void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
-void canonicalisePathMetaData(const Path & path, uid_t fromUid);
+ running as root.
+ If uidRange is not empty, this function will throw an error if it
+ encounters files owned by a user outside of the closed interval
+ [uidRange->first, uidRange->second].
+*/
+void canonicalisePathMetaData(
+ const Path & path,
+ std::optional<std::pair<uid_t, uid_t>> uidRange,
+ InodesSeen & inodesSeen);
+void canonicalisePathMetaData(
+ const Path & path,
+ std::optional<std::pair<uid_t, uid_t>> uidRange);
void canonicaliseTimestampAndPermissions(const Path & path);
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
index 1d26ac918..8f28bec6c 100644
--- a/src/libstore/local.mk
+++ b/src/libstore/local.mk
@@ -20,7 +20,7 @@ endif
$(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox)))
ifeq ($(ENABLE_S3), 1)
- libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core
+ libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core -laws-crt-cpp
endif
ifdef HOST_SOLARIS
diff --git a/src/libstore/lock.cc b/src/libstore/lock.cc
index fa718f55d..d02d20b4c 100644
--- a/src/libstore/lock.cc
+++ b/src/libstore/lock.cc
@@ -2,105 +2,197 @@
#include "globals.hh"
#include "pathlocks.hh"
-#include <grp.h>
#include <pwd.h>
-
-#include <fcntl.h>
-#include <unistd.h>
+#include <grp.h>
namespace nix {
-UserLock::UserLock()
+struct SimpleUserLock : UserLock
{
- assert(settings.buildUsersGroup != "");
- createDirs(settings.nixStateDir + "/userpool");
-}
+ AutoCloseFD fdUserLock;
+ uid_t uid;
+ gid_t gid;
+ std::vector<gid_t> supplementaryGIDs;
+
+ uid_t getUID() override { assert(uid); return uid; }
+ uid_t getUIDCount() override { return 1; }
+ gid_t getGID() override { assert(gid); return gid; }
+
+ std::vector<gid_t> getSupplementaryGIDs() override { return supplementaryGIDs; }
+
+ static std::unique_ptr<UserLock> acquire()
+ {
+ assert(settings.buildUsersGroup != "");
+ createDirs(settings.nixStateDir + "/userpool");
+
+ /* Get the members of the build-users-group. */
+ struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
+ if (!gr)
+ throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
+
+ /* Copy the result of getgrnam. */
+ Strings users;
+ for (char * * p = gr->gr_mem; *p; ++p) {
+ debug("found build user '%s'", *p);
+ users.push_back(*p);
+ }
-bool UserLock::findFreeUser() {
- if (enabled()) return true;
-
- /* Get the members of the build-users-group. */
- struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
- if (!gr)
- throw Error("the group '%1%' specified in 'build-users-group' does not exist",
- settings.buildUsersGroup);
- gid = gr->gr_gid;
-
- /* Copy the result of getgrnam. */
- Strings users;
- for (char * * p = gr->gr_mem; *p; ++p) {
- debug("found build user '%1%'", *p);
- users.push_back(*p);
+ if (users.empty())
+ throw Error("the build users group '%s' has no members", settings.buildUsersGroup);
+
+ /* Find a user account that isn't currently in use for another
+ build. */
+ for (auto & i : users) {
+ debug("trying user '%s'", i);
+
+ struct passwd * pw = getpwnam(i.c_str());
+ if (!pw)
+ throw Error("the user '%s' in the group '%s' does not exist", i, settings.buildUsersGroup);
+
+ auto fnUserLock = fmt("%s/userpool/%s", settings.nixStateDir,pw->pw_uid);
+
+ AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
+ if (!fd)
+ throw SysError("opening user lock '%s'", fnUserLock);
+
+ if (lockFile(fd.get(), ltWrite, false)) {
+ auto lock = std::make_unique<SimpleUserLock>();
+
+ lock->fdUserLock = std::move(fd);
+ lock->uid = pw->pw_uid;
+ lock->gid = gr->gr_gid;
+
+ /* Sanity check... */
+ if (lock->uid == getuid() || lock->uid == geteuid())
+ throw Error("the Nix user should not be a member of '%s'", settings.buildUsersGroup);
+
+ #if __linux__
+ /* Get the list of supplementary groups of this build
+ user. This is usually either empty or contains a
+ group such as "kvm". */
+ int ngroups = 32; // arbitrary initial guess
+ std::vector<gid_t> gids;
+ gids.resize(ngroups);
+
+ int err = getgrouplist(
+ pw->pw_name, pw->pw_gid,
+ gids.data(),
+ &ngroups);
+
+                /* If our initial size of 32 wasn't sufficient,
+                   getgrouplist() has stored the correct size in
+                   ngroups, so we try again. */
+ if (err == -1) {
+ gids.resize(ngroups);
+ err = getgrouplist(
+ pw->pw_name, pw->pw_gid,
+ gids.data(),
+ &ngroups);
+ }
+
+ // If it failed once more, then something must be broken.
+ if (err == -1)
+ throw Error("failed to get list of supplementary groups for '%s'", pw->pw_name);
+
+ // Finally, trim back the GID list to its real size.
+ for (auto i = 0; i < ngroups; i++)
+ if (gids[i] != lock->gid)
+ lock->supplementaryGIDs.push_back(gids[i]);
+ #endif
+
+ return lock;
+ }
+ }
+
+ return nullptr;
}
+};
- if (users.empty())
- throw Error("the build users group '%1%' has no members",
- settings.buildUsersGroup);
+struct AutoUserLock : UserLock
+{
+ AutoCloseFD fdUserLock;
+ uid_t firstUid = 0;
+ gid_t firstGid = 0;
+ uid_t nrIds = 1;
- /* Find a user account that isn't currently in use for another
- build. */
- for (auto & i : users) {
- debug("trying user '%1%'", i);
+ uid_t getUID() override { assert(firstUid); return firstUid; }
- struct passwd * pw = getpwnam(i.c_str());
- if (!pw)
- throw Error("the user '%1%' in the group '%2%' does not exist",
- i, settings.buildUsersGroup);
+ gid_t getUIDCount() override { return nrIds; }
+ gid_t getGID() override { assert(firstGid); return firstGid; }
- fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
+ std::vector<gid_t> getSupplementaryGIDs() override { return {}; }
- AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
- if (!fd)
- throw SysError("opening user lock '%1%'", fnUserLock);
+ static std::unique_ptr<UserLock> acquire(uid_t nrIds, bool useChroot)
+ {
+ settings.requireExperimentalFeature(Xp::AutoAllocateUids);
+ assert(settings.startId > 0);
+ assert(settings.uidCount % maxIdsPerBuild == 0);
+ assert((uint64_t) settings.startId + (uint64_t) settings.uidCount <= std::numeric_limits<uid_t>::max());
+ assert(nrIds <= maxIdsPerBuild);
- if (lockFile(fd.get(), ltWrite, false)) {
- fdUserLock = std::move(fd);
- user = i;
- uid = pw->pw_uid;
+ createDirs(settings.nixStateDir + "/userpool2");
- /* Sanity check... */
- if (uid == getuid() || uid == geteuid())
- throw Error("the Nix user should not be a member of '%1%'",
- settings.buildUsersGroup);
+ size_t nrSlots = settings.uidCount / maxIdsPerBuild;
-#if __linux__
- /* Get the list of supplementary groups of this build user. This
- is usually either empty or contains a group such as "kvm". */
- int ngroups = 32; // arbitrary initial guess
- supplementaryGIDs.resize(ngroups);
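+        /* The configured ID range is divided into 'nrSlots' slots of
+           maxIdsPerBuild IDs each; each slot is guarded by a lock file
+           under 'userpool2' so that concurrent builds get disjoint
+           ranges. */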
+ for (size_t i = 0; i < nrSlots; i++) {
+ debug("trying user slot '%d'", i);
- int err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
- &ngroups);
+ createDirs(settings.nixStateDir + "/userpool2");
- // Our initial size of 32 wasn't sufficient, the correct size has
- // been stored in ngroups, so we try again.
- if (err == -1) {
- supplementaryGIDs.resize(ngroups);
- err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
- &ngroups);
- }
+ auto fnUserLock = fmt("%s/userpool2/slot-%d", settings.nixStateDir, i);
+
+ AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
+ if (!fd)
+ throw SysError("opening user lock '%s'", fnUserLock);
- // If it failed once more, then something must be broken.
- if (err == -1)
- throw Error("failed to get list of supplementary groups for '%1%'",
- pw->pw_name);
+ if (lockFile(fd.get(), ltWrite, false)) {
- // Finally, trim back the GID list to its real size
- supplementaryGIDs.resize(ngroups);
-#endif
+ auto firstUid = settings.startId + i * maxIdsPerBuild;
- isEnabled = true;
- return true;
+ auto pw = getpwuid(firstUid);
+ if (pw)
+ throw Error("auto-allocated UID %d clashes with existing user account '%s'", firstUid, pw->pw_name);
+
+ auto lock = std::make_unique<AutoUserLock>();
+ lock->fdUserLock = std::move(fd);
+ lock->firstUid = firstUid;
+ if (useChroot)
+ lock->firstGid = firstUid;
+ else {
+ struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
+ if (!gr)
+ throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
+ lock->firstGid = gr->gr_gid;
+ }
+ lock->nrIds = nrIds;
+ return lock;
+ }
}
+
+ return nullptr;
}
+};
- return false;
+std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot)
+{
+ if (settings.autoAllocateUids)
+ return AutoUserLock::acquire(nrIds, useChroot);
+ else
+ return SimpleUserLock::acquire();
}
-void UserLock::kill()
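+/* Whether builds should run under dedicated build users: on Linux, when
+   'build-users-group' or 'auto-allocate-uids' is set and we are running
+   as root; on macOS, only when 'build-users-group' is set and we are
+   running as root; never on other platforms. */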
+bool useBuildUsers()
{
- killUser(uid);
+ #if __linux__
+ static bool b = (settings.buildUsersGroup != "" || settings.autoAllocateUids) && getuid() == 0;
+ return b;
+ #elif __APPLE__
+ static bool b = settings.buildUsersGroup != "" && getuid() == 0;
+ return b;
+ #else
+ return false;
+ #endif
}
}
diff --git a/src/libstore/lock.hh b/src/libstore/lock.hh
index 3d29a7b5b..49ad86de7 100644
--- a/src/libstore/lock.hh
+++ b/src/libstore/lock.hh
@@ -1,37 +1,38 @@
#pragma once
-#include "sync.hh"
#include "types.hh"
-#include "util.hh"
+
+#include <optional>
+
+#include <sys/types.h>
namespace nix {
-class UserLock
+struct UserLock
{
-private:
- Path fnUserLock;
- AutoCloseFD fdUserLock;
+ virtual ~UserLock() { }
- bool isEnabled = false;
- std::string user;
- uid_t uid = 0;
- gid_t gid = 0;
- std::vector<gid_t> supplementaryGIDs;
+ /* Get the first and last UID. */
+ std::pair<uid_t, uid_t> getUIDRange()
+ {
+ auto first = getUID();
+ return {first, first + getUIDCount() - 1};
+ }
-public:
- UserLock();
+ /* Get the first UID. */
+ virtual uid_t getUID() = 0;
- void kill();
+ virtual uid_t getUIDCount() = 0;
- std::string getUser() { return user; }
- uid_t getUID() { assert(uid); return uid; }
- uid_t getGID() { assert(gid); return gid; }
- std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }
+ virtual gid_t getGID() = 0;
- bool findFreeUser();
+ virtual std::vector<gid_t> getSupplementaryGIDs() = 0;
+};
- bool enabled() { return isEnabled; }
+/* Acquire a user lock for a UID range of size `nrIds`. Note that this
+ may return nullptr if no user is available. */
+std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot);
-};
+bool useBuildUsers();
}
diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc
index 398147fc3..9a0003588 100644
--- a/src/libstore/nar-accessor.cc
+++ b/src/libstore/nar-accessor.cc
@@ -1,6 +1,5 @@
#include "nar-accessor.hh"
#include "archive.hh"
-#include "json.hh"
#include <map>
#include <stack>
@@ -243,42 +242,43 @@ ref<FSAccessor> makeLazyNarAccessor(const std::string & listing,
return make_ref<NarAccessor>(listing, getNarBytes);
}
-void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
- const Path & path, bool recurse)
+using nlohmann::json;
+json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse)
{
auto st = accessor->stat(path);
- auto obj = res.object();
+ json obj = json::object();
switch (st.type) {
case FSAccessor::Type::tRegular:
- obj.attr("type", "regular");
- obj.attr("size", st.fileSize);
+ obj["type"] = "regular";
+ obj["size"] = st.fileSize;
if (st.isExecutable)
- obj.attr("executable", true);
+ obj["executable"] = true;
if (st.narOffset)
- obj.attr("narOffset", st.narOffset);
+ obj["narOffset"] = st.narOffset;
break;
case FSAccessor::Type::tDirectory:
- obj.attr("type", "directory");
+ obj["type"] = "directory";
{
- auto res2 = obj.object("entries");
+ obj["entries"] = json::object();
+ json &res2 = obj["entries"];
for (auto & name : accessor->readDirectory(path)) {
if (recurse) {
- auto res3 = res2.placeholder(name);
- listNar(res3, accessor, path + "/" + name, true);
+ res2[name] = listNar(accessor, path + "/" + name, true);
} else
- res2.object(name);
+ res2[name] = json::object();
}
}
break;
case FSAccessor::Type::tSymlink:
- obj.attr("type", "symlink");
- obj.attr("target", accessor->readLink(path));
+ obj["type"] = "symlink";
+ obj["target"] = accessor->readLink(path);
break;
default:
throw Error("path '%s' does not exist in NAR", path);
}
+ return obj;
}
}
diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh
index c2241a04c..7d998ae0b 100644
--- a/src/libstore/nar-accessor.hh
+++ b/src/libstore/nar-accessor.hh
@@ -2,6 +2,7 @@
#include <functional>
+#include <nlohmann/json_fwd.hpp>
#include "fs-accessor.hh"
namespace nix {
@@ -24,11 +25,8 @@ ref<FSAccessor> makeLazyNarAccessor(
const std::string & listing,
GetNarBytes getNarBytes);
-class JSONPlaceholder;
-
/* Write a JSON representation of the contents of a NAR (except file
contents). */
-void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
- const Path & path, bool recurse);
+nlohmann::json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse);
}
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
index f4ea739b0..3e0689534 100644
--- a/src/libstore/nar-info-disk-cache.cc
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -166,16 +166,37 @@ public:
return i->second;
}
+ std::optional<Cache> queryCacheRaw(State & state, const std::string & uri)
+ {
+ auto i = state.caches.find(uri);
+ if (i == state.caches.end()) {
+ auto queryCache(state.queryCache.use()(uri)(time(0) - cacheInfoTtl));
+ if (!queryCache.next())
+ return std::nullopt;
+ state.caches.emplace(uri,
+ Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
+ }
+ return getCache(state, uri);
+ }
+
void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
{
retrySQLite<void>([&]() {
auto state(_state.lock());
+ SQLiteTxn txn(state->db);
+
+            // To avoid a race, check whether another process has already
+            // created the cache for this URI in the meantime.
+ auto cache(queryCacheRaw(*state, uri));
- // FIXME: race
+ if (cache)
+ return;
state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
assert(sqlite3_changes(state->db) == 1);
state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+
+ txn.commit();
});
}
@@ -183,21 +204,12 @@ public:
{
return retrySQLite<std::optional<CacheInfo>>([&]() -> std::optional<CacheInfo> {
auto state(_state.lock());
-
- auto i = state->caches.find(uri);
- if (i == state->caches.end()) {
- auto queryCache(state->queryCache.use()(uri)(time(0) - cacheInfoTtl));
- if (!queryCache.next())
- return std::nullopt;
- state->caches.emplace(uri,
- Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
- }
-
- auto & cache(getCache(*state, uri));
-
+ auto cache(queryCacheRaw(*state, uri));
+ if (!cache)
+ return std::nullopt;
return CacheInfo {
- .wantMassQuery = cache.wantMassQuery,
- .priority = cache.priority
+ .wantMassQuery = cache->wantMassQuery,
+ .priority = cache->priority
};
});
}
diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc
index f2288a04e..cc4a94fab 100644
--- a/src/libstore/parsed-derivations.cc
+++ b/src/libstore/parsed-derivations.cc
@@ -2,7 +2,6 @@
#include <nlohmann/json.hpp>
#include <regex>
-#include "json.hh"
namespace nix {
@@ -90,6 +89,7 @@ std::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name
StringSet ParsedDerivation::getRequiredSystemFeatures() const
{
+ // FIXME: cache this?
StringSet res;
for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
res.insert(i);
@@ -125,6 +125,11 @@ bool ParsedDerivation::substitutesAllowed() const
return getBoolAttr("allowSubstitutes", true);
}
+bool ParsedDerivation::useUidRange() const
+{
+ return getRequiredSystemFeatures().count("uid-range");
+}
+
static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths)
@@ -144,16 +149,11 @@ std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & s
auto e = json.find("exportReferencesGraph");
if (e != json.end() && e->is_object()) {
for (auto i = e->begin(); i != e->end(); ++i) {
- std::ostringstream str;
- {
- JSONPlaceholder jsonRoot(str, true);
- StorePathSet storePaths;
- for (auto & p : *i)
- storePaths.insert(store.parseStorePath(p.get<std::string>()));
- store.pathInfoToJSON(jsonRoot,
- store.exportReferences(storePaths, inputPaths), false, true);
- }
- json[i.key()] = nlohmann::json::parse(str.str()); // urgh
+ StorePathSet storePaths;
+ for (auto & p : *i)
+ storePaths.insert(store.parseStorePath(p.get<std::string>()));
+ json[i.key()] = store.pathInfoToJSON(
+ store.exportReferences(storePaths, inputPaths), false, true);
}
}
diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh
index 95bec21e8..bfb3857c0 100644
--- a/src/libstore/parsed-derivations.hh
+++ b/src/libstore/parsed-derivations.hh
@@ -38,6 +38,8 @@ public:
bool substitutesAllowed() const;
+ bool useUidRange() const;
+
std::optional<nlohmann::json> prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths);
};
diff --git a/src/libstore/references.cc b/src/libstore/references.cc
index 34dce092c..3bb297fc8 100644
--- a/src/libstore/references.cc
+++ b/src/libstore/references.cc
@@ -67,20 +67,12 @@ void RefScanSink::operator () (std::string_view data)
}
-std::pair<StorePathSet, HashResult> scanForReferences(
- const std::string & path,
- const StorePathSet & refs)
-{
- HashSink hashSink { htSHA256 };
- auto found = scanForReferences(hashSink, path, refs);
- auto hash = hashSink.finish();
- return std::pair<StorePathSet, HashResult>(found, hash);
-}
+PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
+ : RefScanSink(std::move(hashes))
+ , backMap(std::move(backMap))
+{ }
-StorePathSet scanForReferences(
- Sink & toTee,
- const Path & path,
- const StorePathSet & refs)
+PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
{
StringSet hashes;
std::map<std::string, StorePath> backMap;
@@ -92,14 +84,14 @@ StorePathSet scanForReferences(
hashes.insert(hashPart);
}
- /* Look for the hashes in the NAR dump of the path. */
- RefScanSink refsSink(std::move(hashes));
- TeeSink sink { refsSink, toTee };
- dumpPath(path, sink);
+ return PathRefScanSink(std::move(hashes), std::move(backMap));
+}
+StorePathSet PathRefScanSink::getResultPaths()
+{
/* Map the hashes found back to their store paths. */
StorePathSet found;
- for (auto & i : refsSink.getResult()) {
+ for (auto & i : getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
@@ -109,6 +101,31 @@ StorePathSet scanForReferences(
}
+std::pair<StorePathSet, HashResult> scanForReferences(
+ const std::string & path,
+ const StorePathSet & refs)
+{
+ HashSink hashSink { htSHA256 };
+ auto found = scanForReferences(hashSink, path, refs);
+ auto hash = hashSink.finish();
+ return std::pair<StorePathSet, HashResult>(found, hash);
+}
+
+StorePathSet scanForReferences(
+ Sink & toTee,
+ const Path & path,
+ const StorePathSet & refs)
+{
+ PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
+ TeeSink sink { refsSink, toTee };
+
+ /* Look for the hashes in the NAR dump of the path. */
+ dumpPath(path, sink);
+
+ return refsSink.getResultPaths();
+}
+
+
RewritingSink::RewritingSink(const std::string & from, const std::string & to, Sink & nextSink)
: from(from), to(to), nextSink(nextSink)
{
diff --git a/src/libstore/references.hh b/src/libstore/references.hh
index a6119c861..6f381f96c 100644
--- a/src/libstore/references.hh
+++ b/src/libstore/references.hh
@@ -27,6 +27,19 @@ public:
void operator () (std::string_view data) override;
};
+class PathRefScanSink : public RefScanSink
+{
+ std::map<std::string, StorePath> backMap;
+
+ PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
+
+public:
+
+ static PathRefScanSink fromPaths(const StorePathSet & refs);
+
+ StorePathSet getResultPaths();
+};
+
struct RewritingSink : Sink
{
std::string from, to, prev;
diff --git a/src/libstore/remote-fs-accessor.cc b/src/libstore/remote-fs-accessor.cc
index 0ce335646..fcfb527f5 100644
--- a/src/libstore/remote-fs-accessor.cc
+++ b/src/libstore/remote-fs-accessor.cc
@@ -1,6 +1,6 @@
+#include <nlohmann/json.hpp>
#include "remote-fs-accessor.hh"
#include "nar-accessor.hh"
-#include "json.hh"
#include <sys/types.h>
#include <sys/stat.h>
@@ -38,10 +38,8 @@ ref<FSAccessor> RemoteFSAccessor::addToCache(std::string_view hashPart, std::str
if (cacheDir != "") {
try {
- std::ostringstream str;
- JSONPlaceholder jsonRoot(str);
- listNar(jsonRoot, narAccessor, "", true);
- writeFile(makeCacheFile(hashPart, "ls"), str.str());
+ nlohmann::json j = listNar(narAccessor, "", true);
+ writeFile(makeCacheFile(hashPart, "ls"), j.dump());
} catch (...) {
ignoreException();
}
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 96a29155c..48cf731a8 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -447,7 +447,7 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path,
} catch (Error & e) {
// Ugly backwards compatibility hack.
if (e.msg().find("is not valid") != std::string::npos)
- throw InvalidPath(e.info());
+ throw InvalidPath(std::move(e.info()));
throw;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 2090beabd..6c350888f 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -8,12 +8,15 @@
namespace nix {
-SQLiteError::SQLiteError(const char *path, int errNo, int extendedErrNo, hintformat && hf)
- : Error(""), path(path), errNo(errNo), extendedErrNo(extendedErrNo)
+SQLiteError::SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, hintformat && hf)
+ : Error(""), path(path), errMsg(errMsg), errNo(errNo), extendedErrNo(extendedErrNo), offset(offset)
{
- err.msg = hintfmt("%s: %s (in '%s')",
+ auto offsetStr = (offset == -1) ? "" : "at offset " + std::to_string(offset) + ": ";
+ err.msg = hintfmt("%s: %s%s, %s (in '%s')",
normaltxt(hf.str()),
+ offsetStr,
sqlite3_errstr(extendedErrNo),
+ errMsg,
path ? path : "(in-memory)");
}
@@ -21,11 +24,13 @@ SQLiteError::SQLiteError(const char *path, int errNo, int extendedErrNo, hintfor
{
int err = sqlite3_errcode(db);
int exterr = sqlite3_extended_errcode(db);
+ int offset = sqlite3_error_offset(db);
auto path = sqlite3_db_filename(db, nullptr);
+ auto errMsg = sqlite3_errmsg(db);
if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
- auto exp = SQLiteBusy(path, err, exterr, std::move(hf));
+ auto exp = SQLiteBusy(path, errMsg, err, exterr, offset, std::move(hf));
exp.err.msg = hintfmt(
err == SQLITE_PROTOCOL
? "SQLite database '%s' is busy (SQLITE_PROTOCOL)"
@@ -33,7 +38,7 @@ SQLiteError::SQLiteError(const char *path, int errNo, int extendedErrNo, hintfor
path ? path : "(in-memory)");
throw exp;
} else
- throw SQLiteError(path, err, exterr, std::move(hf));
+ throw SQLiteError(path, errMsg, err, exterr, offset, std::move(hf));
}
SQLite::SQLite(const Path & path, bool create)
diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh
index 1d1c553ea..1853731a2 100644
--- a/src/libstore/sqlite.hh
+++ b/src/libstore/sqlite.hh
@@ -98,21 +98,22 @@ struct SQLiteTxn
struct SQLiteError : Error
{
- const char *path;
- int errNo, extendedErrNo;
+ std::string path;
+ std::string errMsg;
+ int errNo, extendedErrNo, offset;
template<typename... Args>
[[noreturn]] static void throw_(sqlite3 * db, const std::string & fs, const Args & ... args) {
throw_(db, hintfmt(fs, args...));
}
- SQLiteError(const char *path, int errNo, int extendedErrNo, hintformat && hf);
+ SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, hintformat && hf);
protected:
template<typename... Args>
- SQLiteError(const char *path, int errNo, int extendedErrNo, const std::string & fs, const Args & ... args)
- : SQLiteError(path, errNo, extendedErrNo, hintfmt(fs, args...))
+ SQLiteError(const char *path, const char *errMsg, int errNo, int extendedErrNo, int offset, const std::string & fs, const Args & ... args)
+        : SQLiteError(path, errMsg, errNo, extendedErrNo, offset, hintfmt(fs, args...))
{ }
[[noreturn]] static void throw_(sqlite3 * db, hintformat && hf);
diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc
index 1bbad71f2..69bfe3418 100644
--- a/src/libstore/ssh.cc
+++ b/src/libstore/ssh.cc
@@ -67,7 +67,7 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
if (fakeSSH) {
args = { "bash", "-c" };
} else {
- args = { "ssh", host.c_str(), "-x", "-a" };
+ args = { "ssh", host.c_str(), "-x" };
addCommonSSHOpts(args);
if (socketPath != "")
args.insert(args.end(), {"-S", socketPath});
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 06a9758fc..426230ca5 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -6,32 +6,34 @@
#include "util.hh"
#include "nar-info-disk-cache.hh"
#include "thread-pool.hh"
-#include "json.hh"
#include "url.hh"
#include "archive.hh"
#include "callback.hh"
#include "remote-store.hh"
+#include <nlohmann/json.hpp>
#include <regex>
+using json = nlohmann::json;
+
namespace nix {
-bool Store::isInStore(const Path & path) const
+bool Store::isInStore(PathView path) const
{
return isInDir(path, storeDir);
}
-std::pair<StorePath, Path> Store::toStorePath(const Path & path) const
+std::pair<StorePath, Path> Store::toStorePath(PathView path) const
{
if (!isInStore(path))
throw Error("path '%1%' is not in the Nix store", path);
- Path::size_type slash = path.find('/', storeDir.size() + 1);
+ auto slash = path.find('/', storeDir.size() + 1);
if (slash == Path::npos)
return {parseStorePath(path), ""};
else
- return {parseStorePath(std::string_view(path).substr(0, slash)), path.substr(slash)};
+ return {parseStorePath(path.substr(0, slash)), (Path) path.substr(slash)};
}
@@ -456,6 +458,7 @@ Store::Store(const Params & params)
: StoreConfig(params)
, state({(size_t) pathInfoCacheSize})
{
+ assertLibStoreInitialized();
}
@@ -838,56 +841,53 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor
return paths;
}
-
-void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
+json Store::pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase,
AllowInvalidFlag allowInvalid)
{
- auto jsonList = jsonOut.list();
+ json::array_t jsonList = json::array();
for (auto & storePath : storePaths) {
- auto jsonPath = jsonList.object();
+ auto& jsonPath = jsonList.emplace_back(json::object());
try {
auto info = queryPathInfo(storePath);
- jsonPath.attr("path", printStorePath(info->path));
- jsonPath
- .attr("narHash", info->narHash.to_string(hashBase, true))
- .attr("narSize", info->narSize);
+ jsonPath["path"] = printStorePath(info->path);
+ jsonPath["narHash"] = info->narHash.to_string(hashBase, true);
+ jsonPath["narSize"] = info->narSize;
{
- auto jsonRefs = jsonPath.list("references");
+ auto& jsonRefs = (jsonPath["references"] = json::array());
for (auto & ref : info->references)
- jsonRefs.elem(printStorePath(ref));
+ jsonRefs.emplace_back(printStorePath(ref));
}
if (info->ca)
- jsonPath.attr("ca", renderContentAddress(info->ca));
+ jsonPath["ca"] = renderContentAddress(info->ca);
std::pair<uint64_t, uint64_t> closureSizes;
if (showClosureSize) {
closureSizes = getClosureSize(info->path);
- jsonPath.attr("closureSize", closureSizes.first);
+ jsonPath["closureSize"] = closureSizes.first;
}
if (includeImpureInfo) {
if (info->deriver)
- jsonPath.attr("deriver", printStorePath(*info->deriver));
+ jsonPath["deriver"] = printStorePath(*info->deriver);
if (info->registrationTime)
- jsonPath.attr("registrationTime", info->registrationTime);
+ jsonPath["registrationTime"] = info->registrationTime;
if (info->ultimate)
- jsonPath.attr("ultimate", info->ultimate);
+ jsonPath["ultimate"] = info->ultimate;
if (!info->sigs.empty()) {
- auto jsonSigs = jsonPath.list("signatures");
for (auto & sig : info->sigs)
- jsonSigs.elem(sig);
+ jsonPath["signatures"].push_back(sig);
}
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
@@ -895,21 +895,22 @@ void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & store
if (narInfo) {
if (!narInfo->url.empty())
- jsonPath.attr("url", narInfo->url);
+ jsonPath["url"] = narInfo->url;
if (narInfo->fileHash)
- jsonPath.attr("downloadHash", narInfo->fileHash->to_string(hashBase, true));
+ jsonPath["downloadHash"] = narInfo->fileHash->to_string(hashBase, true);
if (narInfo->fileSize)
- jsonPath.attr("downloadSize", narInfo->fileSize);
+ jsonPath["downloadSize"] = narInfo->fileSize;
if (showClosureSize)
- jsonPath.attr("closureDownloadSize", closureSizes.second);
+ jsonPath["closureDownloadSize"] = closureSizes.second;
}
}
} catch (InvalidPath &) {
- jsonPath.attr("path", printStorePath(storePath));
- jsonPath.attr("valid", false);
+ jsonPath["path"] = printStorePath(storePath);
+ jsonPath["valid"] = false;
}
}
+ return jsonList;
}
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index c8a667c6d..4a88d7216 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -14,6 +14,7 @@
#include "path-info.hh"
#include "repair-flag.hh"
+#include <nlohmann/json_fwd.hpp>
#include <atomic>
#include <limits>
#include <map>
@@ -68,7 +69,6 @@ struct Derivation;
class FSAccessor;
class NarInfoDiskCache;
class Store;
-class JSONPlaceholder;
enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true };
@@ -179,7 +179,7 @@ public:
/* Return true if ‘path’ is in the Nix store (but not the Nix
store itself). */
- bool isInStore(const Path & path) const;
+ bool isInStore(PathView path) const;
/* Return true if ‘path’ is a store path, i.e. a direct child of
the Nix store. */
@@ -187,7 +187,7 @@ public:
/* Split a path like /nix/store/<hash>-<name>/<bla> into
/nix/store/<hash>-<name> and /<bla>. */
- std::pair<StorePath, Path> toStorePath(const Path & path) const;
+ std::pair<StorePath, Path> toStorePath(PathView path) const;
/* Follow symlinks until we end up with a path in the Nix store. */
Path followLinksToStore(std::string_view path) const;
@@ -512,7 +512,7 @@ public:
variable elements such as the registration time are
included. If ‘showClosureSize’ is true, the closure size of
each path is included. */
- void pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
+ nlohmann::json pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase = Base32,
AllowInvalidFlag allowInvalid = DisallowInvalid);
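+    /* A sketch of typical use (an editorial assumption, not introduced by
+       this change):
+
+         auto json = store->pathInfoToJSON(storePaths, true, true);
+         std::cout << json.dump(2) << std::endl;
+    */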
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index 4b0636129..0e2b9d12c 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -35,10 +35,6 @@ static ArchiveSettings archiveSettings;
static GlobalConfig::Register rArchiveSettings(&archiveSettings);
-const std::string narVersionMagic1 = "nix-archive-1";
-
-static std::string caseHackSuffix = "~nix~case~hack~";
-
PathFilter defaultPathFilter = [](const Path &) { return true; };
diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh
index ac4183bf5..e42dea540 100644
--- a/src/libutil/archive.hh
+++ b/src/libutil/archive.hh
@@ -103,7 +103,9 @@ void copyNAR(Source & source, Sink & sink);
void copyPath(const Path & from, const Path & to);
-extern const std::string narVersionMagic1;
+inline constexpr std::string_view narVersionMagic1 = "nix-archive-1";
+
+inline constexpr std::string_view caseHackSuffix = "~nix~case~hack~";
}
diff --git a/src/libutil/canon-path.cc b/src/libutil/canon-path.cc
new file mode 100644
index 000000000..b132b4262
--- /dev/null
+++ b/src/libutil/canon-path.cc
@@ -0,0 +1,103 @@
+#include "canon-path.hh"
+#include "util.hh"
+
+namespace nix {
+
+CanonPath CanonPath::root = CanonPath("/");
+
+CanonPath::CanonPath(std::string_view raw)
+ : path(absPath((Path) raw, "/"))
+{ }
+
+CanonPath::CanonPath(std::string_view raw, const CanonPath & root)
+ : path(absPath((Path) raw, root.abs()))
+{ }
+
+std::optional<CanonPath> CanonPath::parent() const
+{
+ if (isRoot()) return std::nullopt;
+ return CanonPath(unchecked_t(), path.substr(0, std::max((size_t) 1, path.rfind('/'))));
+}
+
+void CanonPath::pop()
+{
+ assert(!isRoot());
+ path.resize(std::max((size_t) 1, path.rfind('/')));
+}
+
+bool CanonPath::isWithin(const CanonPath & parent) const
+{
+ return !(
+ path.size() < parent.path.size()
+ || path.substr(0, parent.path.size()) != parent.path
+ || (parent.path.size() > 1 && path.size() > parent.path.size()
+ && path[parent.path.size()] != '/'));
+}
+
+CanonPath CanonPath::removePrefix(const CanonPath & prefix) const
+{
+ assert(isWithin(prefix));
+ if (prefix.isRoot()) return *this;
+ if (path.size() == prefix.path.size()) return root;
+ return CanonPath(unchecked_t(), path.substr(prefix.path.size()));
+}
+
+void CanonPath::extend(const CanonPath & x)
+{
+ if (x.isRoot()) return;
+ if (isRoot())
+ path += x.rel();
+ else
+ path += x.abs();
+}
+
+CanonPath CanonPath::operator + (const CanonPath & x) const
+{
+ auto res = *this;
+ res.extend(x);
+ return res;
+}
+
+void CanonPath::push(std::string_view c)
+{
+ assert(c.find('/') == c.npos);
+ assert(c != "." && c != "..");
+ if (!isRoot()) path += '/';
+ path += c;
+}
+
+CanonPath CanonPath::operator + (std::string_view c) const
+{
+ auto res = *this;
+ res.push(c);
+ return res;
+}
+
+bool CanonPath::isAllowed(const std::set<CanonPath> & allowed) const
+{
+ /* Check if `this` is an exact match or the parent of an
+ allowed path. */
+ auto lb = allowed.lower_bound(*this);
+ if (lb != allowed.end()) {
+ if (lb->isWithin(*this))
+ return true;
+ }
+
+ /* Check if a parent of `this` is allowed. */
+ auto path = *this;
+ while (!path.isRoot()) {
+ path.pop();
+ if (allowed.count(path))
+ return true;
+ }
+
+ return false;
+}
+
+std::ostream & operator << (std::ostream & stream, const CanonPath & path)
+{
+ stream << path.abs();
+ return stream;
+}
+
+}
diff --git a/src/libutil/canon-path.hh b/src/libutil/canon-path.hh
new file mode 100644
index 000000000..9d5984584
--- /dev/null
+++ b/src/libutil/canon-path.hh
@@ -0,0 +1,173 @@
+#pragma once
+
+#include <string>
+#include <optional>
+#include <cassert>
+#include <iostream>
+#include <set>
+
+namespace nix {
+
+/* A canonical representation of a path. It ensures the following:
+
+ - It always starts with a slash.
+
+ - It never ends with a slash, except if the path is "/".
+
+ - A slash is never followed by a slash (i.e. no empty components).
+
+ - There are no components equal to '.' or '..'.
+
+ Note that the path does not need to correspond to an actually
+ existing path, and there is no guarantee that symlinks are
+ resolved.
+*/
+class CanonPath
+{
+ std::string path;
+
+public:
+
+ /* Construct a canon path from a non-canonical path. Any '.', '..'
+ or empty components are removed. */
+ CanonPath(std::string_view raw);
+
+ explicit CanonPath(const char * raw)
+ : CanonPath(std::string_view(raw))
+ { }
+
+ struct unchecked_t { };
+
+ CanonPath(unchecked_t _, std::string path)
+ : path(std::move(path))
+ { }
+
+ static CanonPath root;
+
+ /* If `raw` starts with a slash, return
+ `CanonPath(raw)`. Otherwise return a `CanonPath` representing
+ `root + "/" + raw`. */
+ CanonPath(std::string_view raw, const CanonPath & root);
+
+ bool isRoot() const
+ { return path.size() <= 1; }
+
+ explicit operator std::string_view() const
+ { return path; }
+
+ const std::string & abs() const
+ { return path; }
+
+ /* Like abs(), but return an empty string if this path is
+ '/'. Thus the returned string never ends in a slash. */
+ const std::string & absOrEmpty() const
+ {
+ const static std::string epsilon;
+ return isRoot() ? epsilon : path;
+ }
+
+ const char * c_str() const
+ { return path.c_str(); }
+
+ std::string_view rel() const
+ { return ((std::string_view) path).substr(1); }
+
+ struct Iterator
+ {
+ std::string_view remaining;
+ size_t slash;
+
+ Iterator(std::string_view remaining)
+ : remaining(remaining)
+ , slash(remaining.find('/'))
+ { }
+
+ bool operator != (const Iterator & x) const
+ { return remaining.data() != x.remaining.data(); }
+
+ const std::string_view operator * () const
+ { return remaining.substr(0, slash); }
+
+ void operator ++ ()
+ {
+ if (slash == remaining.npos)
+ remaining = remaining.substr(remaining.size());
+ else {
+ remaining = remaining.substr(slash + 1);
+ slash = remaining.find('/');
+ }
+ }
+ };
+
+ Iterator begin() const { return Iterator(rel()); }
+ Iterator end() const { return Iterator(rel().substr(path.size() - 1)); }
+
+ std::optional<CanonPath> parent() const;
+
+ /* Remove the last component. Panics if this path is the root. */
+ void pop();
+
+ std::optional<std::string_view> dirOf() const
+ {
+ if (isRoot()) return std::nullopt;
+ return ((std::string_view) path).substr(0, path.rfind('/'));
+ }
+
+ std::optional<std::string_view> baseName() const
+ {
+ if (isRoot()) return std::nullopt;
+ return ((std::string_view) path).substr(path.rfind('/') + 1);
+ }
+
+ bool operator == (const CanonPath & x) const
+ { return path == x.path; }
+
+ bool operator != (const CanonPath & x) const
+ { return path != x.path; }
+
+ /* Compare paths lexicographically except that path separators
+ are sorted before any other character. That is, in the sorted order
+ a directory is always followed directly by its children. For
+ instance, 'foo' < 'foo/bar' < 'foo!'. */
+ bool operator < (const CanonPath & x) const
+ {
+ auto i = path.begin();
+ auto j = x.path.begin();
+ for ( ; i != path.end() && j != x.path.end(); ++i, ++j) {
+ auto c_i = *i;
+ if (c_i == '/') c_i = 0;
+ auto c_j = *j;
+ if (c_j == '/') c_j = 0;
+ if (c_i < c_j) return true;
+ if (c_i > c_j) return false;
+ }
+ return i == path.end() && j != x.path.end();
+ }
+
+ /* Return true if `this` is equal to `parent` or a child of
+ `parent`. */
+ bool isWithin(const CanonPath & parent) const;
+
+ CanonPath removePrefix(const CanonPath & prefix) const;
+
+ /* Append another path to this one. */
+ void extend(const CanonPath & x);
+
+ /* Concatenate two paths. */
+ CanonPath operator + (const CanonPath & x) const;
+
+ /* Add a path component to this one. It must not contain any slashes. */
+ void push(std::string_view c);
+
+ CanonPath operator + (std::string_view c) const;
+
+ /* Check whether access to this path is allowed, which is the case
+ if 1) `this` is within any of the `allowed` paths; or 2) any of
+ the `allowed` paths are within `this`. (The latter condition
+ ensures access to the parents of allowed paths.) */
+ bool isAllowed(const std::set<CanonPath> & allowed) const;
+};
+
+std::ostream & operator << (std::ostream & stream, const CanonPath & path);
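+
+/* Illustrative examples, taken from the unit tests added in this change:
+
+     CanonPath("foo//bar/").abs() == "/foo/bar"
+     (CanonPath("/") + "foo" + "bar").abs() == "/foo/bar"
+     CanonPath("foo/bar").isWithin(CanonPath("foo")) == true
+*/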
+
+}
diff --git a/src/libutil/cgroup.cc b/src/libutil/cgroup.cc
new file mode 100644
index 000000000..a008481ca
--- /dev/null
+++ b/src/libutil/cgroup.cc
@@ -0,0 +1,148 @@
+#if __linux__
+
+#include "cgroup.hh"
+#include "util.hh"
+#include "finally.hh"
+
+#include <chrono>
+#include <cmath>
+#include <regex>
+#include <unordered_set>
+#include <thread>
+
+#include <dirent.h>
+#include <mntent.h>
+
+namespace nix {
+
+std::optional<Path> getCgroupFS()
+{
+ static auto res = [&]() -> std::optional<Path> {
+ auto fp = fopen("/proc/mounts", "r");
+ if (!fp) return std::nullopt;
+ Finally delFP = [&]() { fclose(fp); };
+ while (auto ent = getmntent(fp))
+ if (std::string_view(ent->mnt_type) == "cgroup2")
+ return ent->mnt_dir;
+
+ return std::nullopt;
+ }();
+ return res;
+}
+
+// FIXME: obsolete, check for cgroup2
+std::map<std::string, std::string> getCgroups(const Path & cgroupFile)
+{
+ std::map<std::string, std::string> cgroups;
+
+ for (auto & line : tokenizeString<std::vector<std::string>>(readFile(cgroupFile), "\n")) {
+ static std::regex regex("([0-9]+):([^:]*):(.*)");
+ std::smatch match;
+ if (!std::regex_match(line, match, regex))
+ throw Error("invalid line '%s' in '%s'", line, cgroupFile);
+
+ std::string name = hasPrefix(std::string(match[2]), "name=") ? std::string(match[2], 5) : match[2];
+ cgroups.insert_or_assign(name, match[3]);
+ }
+
+ return cgroups;
+}
+
+static CgroupStats destroyCgroup(const Path & cgroup, bool returnStats)
+{
+ if (!pathExists(cgroup)) return {};
+
+ auto procsFile = cgroup + "/cgroup.procs";
+
+ if (!pathExists(procsFile))
+ throw Error("'%s' is not a cgroup", cgroup);
+
+ /* Use the fast way to kill every process in a cgroup, if
+ available. */
+ auto killFile = cgroup + "/cgroup.kill";
+ if (pathExists(killFile))
+ writeFile(killFile, "1");
+
+ /* Otherwise, manually kill every process in the subcgroups and
+ this cgroup. */
+ for (auto & entry : readDirectory(cgroup)) {
+ if (entry.type != DT_DIR) continue;
+ destroyCgroup(cgroup + "/" + entry.name, false);
+ }
+
+ int round = 1;
+
+ std::unordered_set<pid_t> pidsShown;
+
+ while (true) {
+ auto pids = tokenizeString<std::vector<std::string>>(readFile(procsFile));
+
+ if (pids.empty()) break;
+
+ if (round > 20)
+ throw Error("cannot kill cgroup '%s'", cgroup);
+
+ for (auto & pid_s : pids) {
+ pid_t pid;
+ if (auto o = string2Int<pid_t>(pid_s))
+ pid = *o;
+ else
+                throw Error("invalid pid '%s'", pid_s);
+ if (pidsShown.insert(pid).second) {
+ try {
+ auto cmdline = readFile(fmt("/proc/%d/cmdline", pid));
+ using namespace std::string_literals;
+ warn("killing stray builder process %d (%s)...",
+ pid, trim(replaceStrings(cmdline, "\0"s, " ")));
+ } catch (SysError &) {
+ }
+ }
+ // FIXME: pid wraparound
+ if (kill(pid, SIGKILL) == -1 && errno != ESRCH)
+ throw SysError("killing member %d of cgroup '%s'", pid, cgroup);
+ }
+
+ auto sleep = std::chrono::milliseconds((int) std::pow(2.0, std::min(round, 10)));
+ if (sleep.count() > 100)
+ printError("waiting for %d ms for cgroup '%s' to become empty", sleep.count(), cgroup);
+ std::this_thread::sleep_for(sleep);
+ round++;
+ }
+
+ CgroupStats stats;
+
+ if (returnStats) {
+ auto cpustatPath = cgroup + "/cpu.stat";
+
+ if (pathExists(cpustatPath)) {
+ for (auto & line : tokenizeString<std::vector<std::string>>(readFile(cpustatPath), "\n")) {
+ std::string_view userPrefix = "user_usec ";
+ if (hasPrefix(line, userPrefix)) {
+ auto n = string2Int<uint64_t>(line.substr(userPrefix.size()));
+ if (n) stats.cpuUser = std::chrono::microseconds(*n);
+ }
+
+ std::string_view systemPrefix = "system_usec ";
+ if (hasPrefix(line, systemPrefix)) {
+ auto n = string2Int<uint64_t>(line.substr(systemPrefix.size()));
+ if (n) stats.cpuSystem = std::chrono::microseconds(*n);
+ }
+ }
+ }
+
+ }
+
+ if (rmdir(cgroup.c_str()) == -1)
+ throw SysError("deleting cgroup '%s'", cgroup);
+
+ return stats;
+}
+
+CgroupStats destroyCgroup(const Path & cgroup)
+{
+ return destroyCgroup(cgroup, true);
+}
+
+}
+
+#endif
diff --git a/src/libutil/cgroup.hh b/src/libutil/cgroup.hh
new file mode 100644
index 000000000..d08c8ad29
--- /dev/null
+++ b/src/libutil/cgroup.hh
@@ -0,0 +1,29 @@
+#pragma once
+
+#if __linux__
+
+#include <chrono>
+#include <optional>
+
+#include "types.hh"
+
+namespace nix {
+
+std::optional<Path> getCgroupFS();
+
+std::map<std::string, std::string> getCgroups(const Path & cgroupFile);
+
+struct CgroupStats
+{
+ std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
+};
+
+/* Destroy the cgroup denoted by 'path'. The postcondition is that
+ 'path' does not exist, and thus any processes in the cgroup have
+ been killed. Also return statistics from the cgroup just before
+ destruction. */
+CgroupStats destroyCgroup(const Path & cgroup);
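+
+/* Illustrative sketch (the cgroup path and the logging call are editorial
+   assumptions, not part of this change):
+
+     CgroupStats stats = destroyCgroup("/sys/fs/cgroup/nix-build-1");
+     if (stats.cpuUser)
+         printMsg(lvlInfo, "user CPU time: %d µs", stats.cpuUser->count());
+*/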
+
+}
+
+#endif
diff --git a/src/libutil/error.cc b/src/libutil/error.cc
index cf4f4a56f..7f7c27267 100644
--- a/src/libutil/error.cc
+++ b/src/libutil/error.cc
@@ -9,9 +9,9 @@ namespace nix {
const std::string nativeSystem = SYSTEM;
-void BaseError::addTrace(std::optional<ErrPos> e, hintformat hint, bool frame)
+void BaseError::addTrace(std::shared_ptr<AbstractPos> && e, hintformat hint, bool frame)
{
- err.traces.push_front(Trace { .pos = e, .hint = hint, .frame = frame });
+ err.traces.push_front(Trace { .pos = std::move(e), .hint = hint, .frame = frame });
}
// c++ std::exception descendants must have a 'const char* what()' function.
@@ -30,91 +30,46 @@ const std::string & BaseError::calcWhat() const
std::optional<std::string> ErrorInfo::programName = std::nullopt;
-std::ostream & operator<<(std::ostream & os, const hintformat & hf)
+std::ostream & operator <<(std::ostream & os, const hintformat & hf)
{
return os << hf.str();
}
-std::string showErrPos(const ErrPos & errPos)
+std::ostream & operator <<(std::ostream & str, const AbstractPos & pos)
{
- if (errPos.line > 0) {
- if (errPos.column > 0) {
- return fmt("%d:%d", errPos.line, errPos.column);
- } else {
- return fmt("%d", errPos.line);
- }
- }
- else {
- return "";
- }
+ pos.print(str);
+ str << ":" << pos.line;
+ if (pos.column > 0)
+ str << ":" << pos.column;
+ return str;
}
-std::optional<LinesOfCode> getCodeLines(const ErrPos & errPos)
+std::optional<LinesOfCode> AbstractPos::getCodeLines() const
{
- if (errPos.line <= 0)
+ if (line == 0)
return std::nullopt;
- if (errPos.origin == foFile) {
- LinesOfCode loc;
- try {
- // FIXME: when running as the daemon, make sure we don't
- // open a file to which the client doesn't have access.
- AutoCloseFD fd = open(errPos.file.c_str(), O_RDONLY | O_CLOEXEC);
- if (!fd) return {};
-
- // count the newlines.
- int count = 0;
- std::string line;
- int pl = errPos.line - 1;
- do
- {
- line = readLine(fd.get());
- ++count;
- if (count < pl)
- ;
- else if (count == pl)
- loc.prevLineOfCode = line;
- else if (count == pl + 1)
- loc.errLineOfCode = line;
- else if (count == pl + 2) {
- loc.nextLineOfCode = line;
- break;
- }
- } while (true);
- return loc;
- }
- catch (EndOfFile & eof) {
- if (loc.errLineOfCode.has_value())
- return loc;
- else
- return std::nullopt;
- }
- catch (std::exception & e) {
- return std::nullopt;
- }
- } else {
- std::istringstream iss(errPos.file);
+ if (auto source = getSource()) {
+
+ std::istringstream iss(*source);
// count the newlines.
int count = 0;
- std::string line;
- int pl = errPos.line - 1;
+ std::string curLine;
+ int pl = line - 1;
LinesOfCode loc;
- do
- {
- std::getline(iss, line);
+ do {
+ std::getline(iss, curLine);
++count;
if (count < pl)
- {
;
- }
else if (count == pl) {
- loc.prevLineOfCode = line;
+ loc.prevLineOfCode = curLine;
} else if (count == pl + 1) {
- loc.errLineOfCode = line;
+ loc.errLineOfCode = curLine;
} else if (count == pl + 2) {
- loc.nextLineOfCode = line;
+ loc.nextLineOfCode = curLine;
break;
}
@@ -124,12 +79,14 @@ std::optional<LinesOfCode> getCodeLines(const ErrPos & errPos)
return loc;
}
+
+ return std::nullopt;
}
// print lines of code to the ostream, indicating the error column.
void printCodeLines(std::ostream & out,
const std::string & prefix,
- const ErrPos & errPos,
+ const AbstractPos & errPos,
const LinesOfCode & loc)
{
// previous line of code.
@@ -176,32 +133,6 @@ void printCodeLines(std::ostream & out,
}
}
-// Enough indent to align with with the `... `
-// prepended to each element of the trace
-#define ELLIPSIS_INDENT " "
-
-void printAtPos(const ErrPos & pos, std::ostream & out)
-{
- if (pos) {
- switch (pos.origin) {
- case foFile: {
- out << fmt(ELLIPSIS_INDENT "at " ANSI_WARNING "%s:%s" ANSI_NORMAL ":", pos.file, showErrPos(pos));
- break;
- }
- case foString: {
- out << fmt(ELLIPSIS_INDENT "at " ANSI_WARNING "«string»:%s" ANSI_NORMAL ":", showErrPos(pos));
- break;
- }
- case foStdin: {
- out << fmt(ELLIPSIS_INDENT "at " ANSI_WARNING "«stdin»:%s" ANSI_NORMAL ":", showErrPos(pos));
- break;
- }
- default:
- throw Error("invalid FileOrigin in errPos");
- }
- }
-}
-
static std::string indent(std::string_view indentFirst, std::string_view indentRest, std::string_view s)
{
std::string res;
@@ -266,27 +197,8 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
prefix += ":" ANSI_NORMAL " ";
std::ostringstream oss;
- oss << einfo.msg << "\n";
-
- if (einfo.errPos.has_value() && *einfo.errPos) {
- printAtPos(*einfo.errPos, oss);
-
- auto loc = getCodeLines(*einfo.errPos);
- // lines of code.
- if (loc.has_value()) {
- oss << "\n";
- printCodeLines(oss, "", *einfo.errPos, *loc);
- }
- oss << "\n";
- }
-
- auto suggestions = einfo.suggestions.trim();
- if (! suggestions.suggestions.empty()){
- oss << "Did you mean " <<
- suggestions.trim() <<
- "?" << std::endl;
- }
+ auto noSource = ANSI_ITALIC " (source not available)" ANSI_NORMAL "\n";
/*
* Traces
@@ -382,36 +294,61 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
*
*/
+    // Enough indent to align with the `... `
+ // prepended to each element of the trace
+ auto ellipsisIndent = " ";
+
bool frameOnly = false;
if (!einfo.traces.empty()) {
- unsigned int count = 0;
- for (auto iter = einfo.traces.rbegin(); iter != einfo.traces.rend(); ++iter) {
+ size_t count = 0;
+ for (const auto & trace : einfo.traces) {
if (!showTrace && count > 3) {
- oss << "\n" << "(truncated)" << "\n";
+ oss << "\n" << ANSI_ITALIC "(stack trace truncated)" ANSI_NORMAL << "\n";
break;
}
- if (iter->hint.str().empty()) continue;
- if (frameOnly && !iter->frame) continue;
+ if (trace.hint.str().empty()) continue;
+ if (frameOnly && !trace.frame) continue;
count++;
- frameOnly = iter->frame;
+ frameOnly = trace.frame;
- oss << "\n" << "… " << iter->hint.str() << "\n";
+ oss << "\n" << "… " << trace.hint.str() << "\n";
- if (iter->pos.has_value() && (*iter->pos)) {
+ if (trace.pos) {
count++;
- auto pos = iter->pos.value();
- printAtPos(pos, oss);
- auto loc = getCodeLines(pos);
- if (loc.has_value()) {
+ oss << "\n" << ellipsisIndent << ANSI_BLUE << "at " ANSI_WARNING << *trace.pos << ANSI_NORMAL << ":";
+
+ if (auto loc = trace.pos->getCodeLines()) {
+ oss << "\n";
+ printCodeLines(oss, "", *trace.pos, *loc);
oss << "\n";
- printCodeLines(oss, "", pos, *loc);
- }
- oss << "\n";
+ } else
+ oss << noSource;
}
}
+ oss << "\n" << prefix;
+ }
+
+ oss << einfo.msg << "\n";
+
+ if (einfo.errPos) {
+ oss << "\n" << ANSI_BLUE << "at " ANSI_WARNING << *einfo.errPos << ANSI_NORMAL << ":";
+
+ if (auto loc = einfo.errPos->getCodeLines()) {
+ oss << "\n";
+ printCodeLines(oss, "", *einfo.errPos, *loc);
+ oss << "\n";
+ } else
+ oss << noSource;
+ }
+
+ auto suggestions = einfo.suggestions.trim();
+ if (!suggestions.suggestions.empty()) {
+ oss << "Did you mean " <<
+ suggestions.trim() <<
+ "?" << std::endl;
}
out << indent(prefix, std::string(filterANSIEscapes(prefix, true).size(), ' '), chomp(oss.str()));
diff --git a/src/libutil/error.hh b/src/libutil/error.hh
index 6db77bcbf..7d236028c 100644
--- a/src/libutil/error.hh
+++ b/src/libutil/error.hh
@@ -54,13 +54,6 @@ typedef enum {
lvlVomit
} Verbosity;
-/* adjust Pos::origin bit width when adding stuff here */
-typedef enum {
- foFile,
- foStdin,
- foString
-} FileOrigin;
-
// the lines of code surrounding an error.
struct LinesOfCode {
std::optional<std::string> prevLineOfCode;
@@ -68,47 +61,30 @@ struct LinesOfCode {
std::optional<std::string> nextLineOfCode;
};
-// ErrPos indicates the location of an error in a nix file.
-struct ErrPos {
- int line = 0;
- int column = 0;
- std::string file;
- FileOrigin origin;
+/* An abstract type that represents a location in a source file. */
+struct AbstractPos
+{
+ uint32_t line = 0;
+ uint32_t column = 0;
- operator bool() const
- {
- return line != 0;
- }
+ /* Return the contents of the source file. */
+ virtual std::optional<std::string> getSource() const
+ { return std::nullopt; };
- // convert from the Pos struct, found in libexpr.
- template <class P>
- ErrPos & operator=(const P & pos)
- {
- origin = pos.origin;
- line = pos.line;
- column = pos.column;
- file = pos.file;
- return *this;
- }
+ virtual void print(std::ostream & out) const = 0;
- template <class P>
- ErrPos(const P & p)
- {
- *this = p;
- }
+ std::optional<LinesOfCode> getCodeLines() const;
};
-std::optional<LinesOfCode> getCodeLines(const ErrPos & errPos);
+std::ostream & operator << (std::ostream & str, const AbstractPos & pos);
void printCodeLines(std::ostream & out,
const std::string & prefix,
- const ErrPos & errPos,
+ const AbstractPos & errPos,
const LinesOfCode & loc);
-void printAtPos(const ErrPos & pos, std::ostream & out);
-
struct Trace {
- std::optional<ErrPos> pos;
+ std::shared_ptr<AbstractPos> pos;
hintformat hint;
bool frame;
};
@@ -116,7 +92,7 @@ struct Trace {
struct ErrorInfo {
Verbosity level;
hintformat msg;
- std::optional<ErrPos> errPos;
+ std::shared_ptr<AbstractPos> errPos;
std::list<Trace> traces;
Suggestions suggestions;
@@ -179,17 +155,18 @@ public:
const std::string & msg() const { return calcWhat(); }
const ErrorInfo & info() const { calcWhat(); return err; }
- void pushTrace(Trace trace) {
+ void pushTrace(Trace trace)
+ {
err.traces.push_front(trace);
}
template<typename... Args>
- void addTrace(std::optional<ErrPos> e, std::string_view fs, const Args & ... args)
+ void addTrace(std::shared_ptr<AbstractPos> && e, std::string_view fs, const Args & ... args)
{
- addTrace(e, hintfmt(std::string(fs), args...));
+ addTrace(std::move(e), hintfmt(std::string(fs), args...));
}
- void addTrace(std::optional<ErrPos> e, hintformat hint, bool frame = false);
+ void addTrace(std::shared_ptr<AbstractPos> && e, hintformat hint, bool frame = false);
bool hasTrace() const { return !err.traces.empty(); }
diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc
index fa79cca6b..e0902971e 100644
--- a/src/libutil/experimental-features.cc
+++ b/src/libutil/experimental-features.cc
@@ -14,6 +14,8 @@ std::map<ExperimentalFeature, std::string> stringifiedXpFeatures = {
{ Xp::NoUrlLiterals, "no-url-literals" },
{ Xp::FetchClosure, "fetch-closure" },
{ Xp::ReplFlake, "repl-flake" },
+ { Xp::AutoAllocateUids, "auto-allocate-uids" },
+ { Xp::Cgroups, "cgroups" },
};
const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::string_view & name)
diff --git a/src/libutil/experimental-features.hh b/src/libutil/experimental-features.hh
index d09ab025c..af775feb0 100644
--- a/src/libutil/experimental-features.hh
+++ b/src/libutil/experimental-features.hh
@@ -23,6 +23,8 @@ enum struct ExperimentalFeature
NoUrlLiterals,
FetchClosure,
ReplFlake,
+ AutoAllocateUids,
+ Cgroups,
};
/**
diff --git a/src/libutil/filesystem.cc b/src/libutil/filesystem.cc
index 403389e60..3a732cff8 100644
--- a/src/libutil/filesystem.cc
+++ b/src/libutil/filesystem.cc
@@ -1,5 +1,6 @@
#include <sys/time.h>
#include <filesystem>
+#include <atomic>
#include "finally.hh"
#include "util.hh"
@@ -10,7 +11,7 @@ namespace fs = std::filesystem;
namespace nix {
static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
- int & counter)
+ std::atomic<unsigned int> & counter)
{
tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true);
if (includePid)
@@ -22,9 +23,9 @@ static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
Path createTempDir(const Path & tmpRoot, const Path & prefix,
bool includePid, bool useGlobalCounter, mode_t mode)
{
- static int globalCounter = 0;
- int localCounter = 0;
- int & counter(useGlobalCounter ? globalCounter : localCounter);
+ static std::atomic<unsigned int> globalCounter = 0;
+ std::atomic<unsigned int> localCounter = 0;
+ auto & counter(useGlobalCounter ? globalCounter : localCounter);
while (1) {
checkInterrupt();
diff --git a/src/libutil/fmt.hh b/src/libutil/fmt.hh
index 7664e5c04..e879fd3b8 100644
--- a/src/libutil/fmt.hh
+++ b/src/libutil/fmt.hh
@@ -148,7 +148,7 @@ inline hintformat hintfmt(const std::string & fs, const Args & ... args)
return f;
}
-inline hintformat hintfmt(std::string plain_string)
+inline hintformat hintfmt(const std::string & plain_string)
{
// we won't be receiving any args in this case, so just print the original string
return hintfmt("%s", normaltxt(plain_string));
diff --git a/src/libutil/json.cc b/src/libutil/json.cc
deleted file mode 100644
index 2f9e97ff5..000000000
--- a/src/libutil/json.cc
+++ /dev/null
@@ -1,203 +0,0 @@
-#include "json.hh"
-
-#include <iomanip>
-#include <cstdint>
-#include <cstring>
-
-namespace nix {
-
-template<>
-void toJSON<std::string_view>(std::ostream & str, const std::string_view & s)
-{
- constexpr size_t BUF_SIZE = 4096;
- char buf[BUF_SIZE + 7]; // BUF_SIZE + largest single sequence of puts
- size_t bufPos = 0;
-
- const auto flush = [&] {
- str.write(buf, bufPos);
- bufPos = 0;
- };
- const auto put = [&] (char c) {
- buf[bufPos++] = c;
- };
-
- put('"');
- for (auto i = s.begin(); i != s.end(); i++) {
- if (bufPos >= BUF_SIZE) flush();
- if (*i == '\"' || *i == '\\') { put('\\'); put(*i); }
- else if (*i == '\n') { put('\\'); put('n'); }
- else if (*i == '\r') { put('\\'); put('r'); }
- else if (*i == '\t') { put('\\'); put('t'); }
- else if (*i >= 0 && *i < 32) {
- const char hex[17] = "0123456789abcdef";
- put('\\');
- put('u');
- put(hex[(uint16_t(*i) >> 12) & 0xf]);
- put(hex[(uint16_t(*i) >> 8) & 0xf]);
- put(hex[(uint16_t(*i) >> 4) & 0xf]);
- put(hex[(uint16_t(*i) >> 0) & 0xf]);
- }
- else put(*i);
- }
- put('"');
- flush();
-}
-
-void toJSON(std::ostream & str, const char * s)
-{
- if (!s) str << "null"; else toJSON(str, std::string_view(s));
-}
-
-template<> void toJSON<int>(std::ostream & str, const int & n) { str << n; }
-template<> void toJSON<unsigned int>(std::ostream & str, const unsigned int & n) { str << n; }
-template<> void toJSON<long>(std::ostream & str, const long & n) { str << n; }
-template<> void toJSON<unsigned long>(std::ostream & str, const unsigned long & n) { str << n; }
-template<> void toJSON<long long>(std::ostream & str, const long long & n) { str << n; }
-template<> void toJSON<unsigned long long>(std::ostream & str, const unsigned long long & n) { str << n; }
-template<> void toJSON<float>(std::ostream & str, const float & n) { str << n; }
-template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
-template<> void toJSON<std::string>(std::ostream & str, const std::string & s) { toJSON(str, (std::string_view) s); }
-
-template<> void toJSON<bool>(std::ostream & str, const bool & b)
-{
- str << (b ? "true" : "false");
-}
-
-template<> void toJSON<std::nullptr_t>(std::ostream & str, const std::nullptr_t & b)
-{
- str << "null";
-}
-
-JSONWriter::JSONWriter(std::ostream & str, bool indent)
- : state(new JSONState(str, indent))
-{
- state->stack++;
-}
-
-JSONWriter::JSONWriter(JSONState * state)
- : state(state)
-{
- state->stack++;
-}
-
-JSONWriter::~JSONWriter()
-{
- if (state) {
- assertActive();
- state->stack--;
- if (state->stack == 0) delete state;
- }
-}
-
-void JSONWriter::comma()
-{
- assertActive();
- if (first) {
- first = false;
- } else {
- state->str << ',';
- }
- if (state->indent) indent();
-}
-
-void JSONWriter::indent()
-{
- state->str << '\n' << std::string(state->depth * 2, ' ');
-}
-
-void JSONList::open()
-{
- state->depth++;
- state->str << '[';
-}
-
-JSONList::~JSONList()
-{
- state->depth--;
- if (state->indent && !first) indent();
- state->str << "]";
-}
-
-JSONList JSONList::list()
-{
- comma();
- return JSONList(state);
-}
-
-JSONObject JSONList::object()
-{
- comma();
- return JSONObject(state);
-}
-
-JSONPlaceholder JSONList::placeholder()
-{
- comma();
- return JSONPlaceholder(state);
-}
-
-void JSONObject::open()
-{
- state->depth++;
- state->str << '{';
-}
-
-JSONObject::~JSONObject()
-{
- if (state) {
- state->depth--;
- if (state->indent && !first) indent();
- state->str << "}";
- }
-}
-
-void JSONObject::attr(std::string_view s)
-{
- comma();
- toJSON(state->str, s);
- state->str << ':';
- if (state->indent) state->str << ' ';
-}
-
-JSONList JSONObject::list(std::string_view name)
-{
- attr(name);
- return JSONList(state);
-}
-
-JSONObject JSONObject::object(std::string_view name)
-{
- attr(name);
- return JSONObject(state);
-}
-
-JSONPlaceholder JSONObject::placeholder(std::string_view name)
-{
- attr(name);
- return JSONPlaceholder(state);
-}
-
-JSONList JSONPlaceholder::list()
-{
- assertValid();
- first = false;
- return JSONList(state);
-}
-
-JSONObject JSONPlaceholder::object()
-{
- assertValid();
- first = false;
- return JSONObject(state);
-}
-
-JSONPlaceholder::~JSONPlaceholder()
-{
- if (first) {
- assert(std::uncaught_exceptions());
- if (state->stack != 0)
- write(nullptr);
- }
-}
-
-}
diff --git a/src/libutil/json.hh b/src/libutil/json.hh
deleted file mode 100644
index 3790b1a2e..000000000
--- a/src/libutil/json.hh
+++ /dev/null
@@ -1,185 +0,0 @@
-#pragma once
-
-#include <iostream>
-#include <vector>
-#include <cassert>
-
-namespace nix {
-
-void toJSON(std::ostream & str, const char * s);
-
-template<typename T>
-void toJSON(std::ostream & str, const T & n);
-
-class JSONWriter
-{
-protected:
-
- struct JSONState
- {
- std::ostream & str;
- bool indent;
- size_t depth = 0;
- size_t stack = 0;
- JSONState(std::ostream & str, bool indent) : str(str), indent(indent) { }
- ~JSONState()
- {
- assert(stack == 0);
- }
- };
-
- JSONState * state;
-
- bool first = true;
-
- JSONWriter(std::ostream & str, bool indent);
-
- JSONWriter(JSONState * state);
-
- ~JSONWriter();
-
- void assertActive()
- {
- assert(state->stack != 0);
- }
-
- void comma();
-
- void indent();
-};
-
-class JSONObject;
-class JSONPlaceholder;
-
-class JSONList : JSONWriter
-{
-private:
-
- friend class JSONObject;
- friend class JSONPlaceholder;
-
- void open();
-
- JSONList(JSONState * state)
- : JSONWriter(state)
- {
- open();
- }
-
-public:
-
- JSONList(std::ostream & str, bool indent = false)
- : JSONWriter(str, indent)
- {
- open();
- }
-
- ~JSONList();
-
- template<typename T>
- JSONList & elem(const T & v)
- {
- comma();
- toJSON(state->str, v);
- return *this;
- }
-
- JSONList list();
-
- JSONObject object();
-
- JSONPlaceholder placeholder();
-};
-
-class JSONObject : JSONWriter
-{
-private:
-
- friend class JSONList;
- friend class JSONPlaceholder;
-
- void open();
-
- JSONObject(JSONState * state)
- : JSONWriter(state)
- {
- open();
- }
-
- void attr(std::string_view s);
-
-public:
-
- JSONObject(std::ostream & str, bool indent = false)
- : JSONWriter(str, indent)
- {
- open();
- }
-
- JSONObject(const JSONObject & obj) = delete;
-
- JSONObject(JSONObject && obj)
- : JSONWriter(obj.state)
- {
- obj.state = 0;
- }
-
- ~JSONObject();
-
- template<typename T>
- JSONObject & attr(std::string_view name, const T & v)
- {
- attr(name);
- toJSON(state->str, v);
- return *this;
- }
-
- JSONList list(std::string_view name);
-
- JSONObject object(std::string_view name);
-
- JSONPlaceholder placeholder(std::string_view name);
-};
-
-class JSONPlaceholder : JSONWriter
-{
-
-private:
-
- friend class JSONList;
- friend class JSONObject;
-
- JSONPlaceholder(JSONState * state)
- : JSONWriter(state)
- {
- }
-
- void assertValid()
- {
- assertActive();
- assert(first);
- }
-
-public:
-
- JSONPlaceholder(std::ostream & str, bool indent = false)
- : JSONWriter(str, indent)
- {
- }
-
- ~JSONPlaceholder();
-
- template<typename T>
- void write(const T & v)
- {
- assertValid();
- first = false;
- toJSON(state->str, v);
- }
-
- JSONList list();
-
- JSONObject object();
-};
-
-}
diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc
index cb2b15b41..904ba6ebe 100644
--- a/src/libutil/logging.cc
+++ b/src/libutil/logging.cc
@@ -105,14 +105,6 @@ public:
Verbosity verbosity = lvlInfo;
-void warnOnce(bool & haveWarned, const FormatOrString & fs)
-{
- if (!haveWarned) {
- warn(fs.s);
- haveWarned = true;
- }
-}
-
void writeToStderr(std::string_view s)
{
try {
@@ -130,15 +122,30 @@ Logger * makeSimpleLogger(bool printBuildLogs)
return new SimpleLogger(printBuildLogs);
}
-std::atomic<uint64_t> nextId{(uint64_t) getpid() << 32};
+std::atomic<uint64_t> nextId{0};
Activity::Activity(Logger & logger, Verbosity lvl, ActivityType type,
const std::string & s, const Logger::Fields & fields, ActivityId parent)
- : logger(logger), id(nextId++)
+ : logger(logger), id(nextId++ + (((uint64_t) getpid()) << 32))
{
logger.startActivity(id, lvl, type, s, fields, parent);
}
+void to_json(nlohmann::json & json, std::shared_ptr<AbstractPos> pos)
+{
+ if (pos) {
+ json["line"] = pos->line;
+ json["column"] = pos->column;
+ std::ostringstream str;
+ pos->print(str);
+ json["file"] = str.str();
+ } else {
+ json["line"] = nullptr;
+ json["column"] = nullptr;
+ json["file"] = nullptr;
+ }
+}
+
struct JSONLogger : Logger {
Logger & prevLogger;
@@ -185,27 +192,14 @@ struct JSONLogger : Logger {
json["level"] = ei.level;
json["msg"] = oss.str();
json["raw_msg"] = ei.msg.str();
-
- if (ei.errPos.has_value() && (*ei.errPos)) {
- json["line"] = ei.errPos->line;
- json["column"] = ei.errPos->column;
- json["file"] = ei.errPos->file;
- } else {
- json["line"] = nullptr;
- json["column"] = nullptr;
- json["file"] = nullptr;
- }
+ to_json(json, ei.errPos);
if (loggerSettings.showTrace.get() && !ei.traces.empty()) {
nlohmann::json traces = nlohmann::json::array();
for (auto iter = ei.traces.rbegin(); iter != ei.traces.rend(); ++iter) {
nlohmann::json stackFrame;
stackFrame["raw_msg"] = iter->hint.str();
- if (iter->pos.has_value() && (*iter->pos)) {
- stackFrame["line"] = iter->pos->line;
- stackFrame["column"] = iter->pos->column;
- stackFrame["file"] = iter->pos->file;
- }
+ to_json(stackFrame, iter->pos);
traces.push_back(stackFrame);
}
diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh
index d0817b4a9..4642c49f7 100644
--- a/src/libutil/logging.hh
+++ b/src/libutil/logging.hh
@@ -82,7 +82,7 @@ public:
log(lvlInfo, fs);
}
- virtual void logEI(const ErrorInfo &ei) = 0;
+ virtual void logEI(const ErrorInfo & ei) = 0;
void logEI(Verbosity lvl, ErrorInfo ei)
{
@@ -225,7 +225,11 @@ inline void warn(const std::string & fs, const Args & ... args)
logger->warn(f.str());
}
-void warnOnce(bool & haveWarned, const FormatOrString & fs);
+#define warnOnce(haveWarned, args...) \
+ if (!haveWarned) { \
+ haveWarned = true; \
+ warn(args); \
+ }
void writeToStderr(std::string_view s);
diff --git a/src/libutil/ref.hh b/src/libutil/ref.hh
index bf26321db..7d38b059c 100644
--- a/src/libutil/ref.hh
+++ b/src/libutil/ref.hh
@@ -83,6 +83,11 @@ public:
return p != other.p;
}
+ bool operator < (const ref<T> & other) const
+ {
+ return p < other.p;
+ }
+
private:
template<typename T2, typename... Args>
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index 2c3597775..c653db9d0 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -338,7 +338,7 @@ Sink & operator << (Sink & sink, const StringSet & s)
Sink & operator << (Sink & sink, const Error & ex)
{
- auto info = ex.info();
+ auto & info = ex.info();
sink
<< "Error"
<< info.level
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index 84847835a..7da5b07fd 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -331,17 +331,9 @@ T readNum(Source & source)
unsigned char buf[8];
source((char *) buf, sizeof(buf));
- uint64_t n =
- ((uint64_t) buf[0]) |
- ((uint64_t) buf[1] << 8) |
- ((uint64_t) buf[2] << 16) |
- ((uint64_t) buf[3] << 24) |
- ((uint64_t) buf[4] << 32) |
- ((uint64_t) buf[5] << 40) |
- ((uint64_t) buf[6] << 48) |
- ((uint64_t) buf[7] << 56);
-
- if (n > (uint64_t)std::numeric_limits<T>::max())
+ auto n = readLittleEndian<uint64_t>(buf);
+
+ if (n > (uint64_t) std::numeric_limits<T>::max())
throw SerialisationError("serialised integer %d is too large for type '%s'", n, typeid(T).name());
return (T) n;
diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc
index a7db58559..238d0a7a6 100644
--- a/src/libutil/tarfile.cc
+++ b/src/libutil/tarfile.cc
@@ -77,9 +77,7 @@ TarArchive::~TarArchive()
static void extract_archive(TarArchive & archive, const Path & destDir)
{
- int flags = ARCHIVE_EXTRACT_FFLAGS
- | ARCHIVE_EXTRACT_PERM
- | ARCHIVE_EXTRACT_TIME
+ int flags = ARCHIVE_EXTRACT_TIME
| ARCHIVE_EXTRACT_SECURE_SYMLINKS
| ARCHIVE_EXTRACT_SECURE_NODOTDOT;
@@ -98,6 +96,10 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
archive_entry_copy_pathname(entry,
(destDir + "/" + name).c_str());
+ // sources can and do contain dirs with no rx bits
+ if (archive_entry_filetype(entry) == AE_IFDIR && (archive_entry_mode(entry) & 0500) != 0500)
+ archive_entry_set_mode(entry, archive_entry_mode(entry) | 0500);
+
// Patch hardlink path
const char *original_hardlink = archive_entry_hardlink(entry);
if (original_hardlink) {
diff --git a/src/libutil/tests/canon-path.cc b/src/libutil/tests/canon-path.cc
new file mode 100644
index 000000000..c1c5adadf
--- /dev/null
+++ b/src/libutil/tests/canon-path.cc
@@ -0,0 +1,155 @@
+#include "canon-path.hh"
+
+#include <gtest/gtest.h>
+
+namespace nix {
+
+ TEST(CanonPath, basic) {
+ {
+ CanonPath p("/");
+ ASSERT_EQ(p.abs(), "/");
+ ASSERT_EQ(p.rel(), "");
+ ASSERT_EQ(p.baseName(), std::nullopt);
+ ASSERT_EQ(p.dirOf(), std::nullopt);
+ ASSERT_FALSE(p.parent());
+ }
+
+ {
+ CanonPath p("/foo//");
+ ASSERT_EQ(p.abs(), "/foo");
+ ASSERT_EQ(p.rel(), "foo");
+ ASSERT_EQ(*p.baseName(), "foo");
+ ASSERT_EQ(*p.dirOf(), ""); // FIXME: do we want this?
+ ASSERT_EQ(p.parent()->abs(), "/");
+ }
+
+ {
+ CanonPath p("foo/bar");
+ ASSERT_EQ(p.abs(), "/foo/bar");
+ ASSERT_EQ(p.rel(), "foo/bar");
+ ASSERT_EQ(*p.baseName(), "bar");
+ ASSERT_EQ(*p.dirOf(), "/foo");
+ ASSERT_EQ(p.parent()->abs(), "/foo");
+ }
+
+ {
+ CanonPath p("foo//bar/");
+ ASSERT_EQ(p.abs(), "/foo/bar");
+ ASSERT_EQ(p.rel(), "foo/bar");
+ ASSERT_EQ(*p.baseName(), "bar");
+ ASSERT_EQ(*p.dirOf(), "/foo");
+ }
+ }
+
+ TEST(CanonPath, pop) {
+ CanonPath p("foo/bar/x");
+ ASSERT_EQ(p.abs(), "/foo/bar/x");
+ p.pop();
+ ASSERT_EQ(p.abs(), "/foo/bar");
+ p.pop();
+ ASSERT_EQ(p.abs(), "/foo");
+ p.pop();
+ ASSERT_EQ(p.abs(), "/");
+ }
+
+ TEST(CanonPath, removePrefix) {
+ CanonPath p1("foo/bar");
+ CanonPath p2("foo/bar/a/b/c");
+ ASSERT_EQ(p2.removePrefix(p1).abs(), "/a/b/c");
+ ASSERT_EQ(p1.removePrefix(p1).abs(), "/");
+ ASSERT_EQ(p1.removePrefix(CanonPath("/")).abs(), "/foo/bar");
+ }
+
+ TEST(CanonPath, iter) {
+ {
+ CanonPath p("a//foo/bar//");
+ std::vector<std::string_view> ss;
+ for (auto & c : p) ss.push_back(c);
+ ASSERT_EQ(ss, std::vector<std::string_view>({"a", "foo", "bar"}));
+ }
+
+ {
+ CanonPath p("/");
+ std::vector<std::string_view> ss;
+ for (auto & c : p) ss.push_back(c);
+ ASSERT_EQ(ss, std::vector<std::string_view>());
+ }
+ }
+
+ TEST(CanonPath, concat) {
+ {
+ CanonPath p1("a//foo/bar//");
+ CanonPath p2("xyzzy/bla");
+ ASSERT_EQ((p1 + p2).abs(), "/a/foo/bar/xyzzy/bla");
+ }
+
+ {
+ CanonPath p1("/");
+ CanonPath p2("/a/b");
+ ASSERT_EQ((p1 + p2).abs(), "/a/b");
+ }
+
+ {
+ CanonPath p1("/a/b");
+ CanonPath p2("/");
+ ASSERT_EQ((p1 + p2).abs(), "/a/b");
+ }
+
+ {
+ CanonPath p("/foo/bar");
+ ASSERT_EQ((p + "x").abs(), "/foo/bar/x");
+ }
+
+ {
+ CanonPath p("/");
+ ASSERT_EQ((p + "foo" + "bar").abs(), "/foo/bar");
+ }
+ }
+
+ TEST(CanonPath, within) {
+ {
+ ASSERT_TRUE(CanonPath("foo").isWithin(CanonPath("foo")));
+ ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("bar")));
+ ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("fo")));
+ ASSERT_TRUE(CanonPath("foo/bar").isWithin(CanonPath("foo")));
+ ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("foo/bar")));
+ ASSERT_TRUE(CanonPath("/foo/bar/default.nix").isWithin(CanonPath("/")));
+ ASSERT_TRUE(CanonPath("/").isWithin(CanonPath("/")));
+ }
+ }
+
+ TEST(CanonPath, sort) {
+ ASSERT_FALSE(CanonPath("foo") < CanonPath("foo"));
+ ASSERT_TRUE (CanonPath("foo") < CanonPath("foo/bar"));
+ ASSERT_TRUE (CanonPath("foo/bar") < CanonPath("foo!"));
+ ASSERT_FALSE(CanonPath("foo!") < CanonPath("foo"));
+ ASSERT_TRUE (CanonPath("foo") < CanonPath("foo!"));
+ }
+
+ TEST(CanonPath, allowed) {
+ {
+ std::set<CanonPath> allowed {
+ CanonPath("foo/bar"),
+ CanonPath("foo!"),
+ CanonPath("xyzzy"),
+ CanonPath("a/b/c"),
+ };
+
+ ASSERT_TRUE (CanonPath("foo/bar").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("foo/bar/bla").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("foo").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("bar").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("bar/a").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("a").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("a/b").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("a/b/c").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("a/b/c/d").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("a/b/c/d/e").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("a/b/a").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("a/b/d").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("aaa").isAllowed(allowed));
+ ASSERT_FALSE(CanonPath("zzz").isAllowed(allowed));
+ ASSERT_TRUE (CanonPath("/").isAllowed(allowed));
+ }
+ }
+}
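
The `sort` test encodes the fact that `CanonPath` ordering is component-wise: as raw strings "/foo!" would sort before "/foo/bar" (ASCII '!' < '/'), yet the test expects the opposite. A simplified standalone sketch of that ordering, assuming a naive component split (the real implementation lives in canon-path.hh/cc):

```c++
#include <cassert>
#include <string>
#include <vector>

// Split an absolute canonical path into its components: "/a/b" -> {"a", "b"}.
static std::vector<std::string> components(const std::string & p)
{
    std::vector<std::string> cs;
    std::string cur;
    for (char c : p.substr(1)) {
        if (c == '/') { cs.push_back(cur); cur.clear(); }
        else cur += c;
    }
    if (!cur.empty()) cs.push_back(cur);
    return cs;
}

// Component-wise lexicographic comparison, matching the expectations in
// the sort test above.
static bool lessThan(const std::string & a, const std::string & b)
{
    return components(a) < components(b);
}

int main()
{
    // Plain string comparison would put "/foo!" first ('!' < '/'), but
    // comparing component by component gives the tested order.
    assert(lessThan("/foo", "/foo/bar"));
    assert(lessThan("/foo/bar", "/foo!"));
    assert(lessThan("/foo", "/foo!"));
    assert(!lessThan("/foo", "/foo"));
    return 0;
}
```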
diff --git a/src/libutil/tests/json.cc b/src/libutil/tests/json.cc
deleted file mode 100644
index 156286999..000000000
--- a/src/libutil/tests/json.cc
+++ /dev/null
@@ -1,193 +0,0 @@
-#include "json.hh"
-#include <gtest/gtest.h>
-#include <sstream>
-
-namespace nix {
-
- /* ----------------------------------------------------------------------------
- * toJSON
- * --------------------------------------------------------------------------*/
-
- TEST(toJSON, quotesCharPtr) {
- const char* input = "test";
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "\"test\"");
- }
-
- TEST(toJSON, quotesStdString) {
- std::string input = "test";
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "\"test\"");
- }
-
- TEST(toJSON, convertsNullptrtoNull) {
- auto input = nullptr;
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "null");
- }
-
- TEST(toJSON, convertsNullToNull) {
- const char* input = 0;
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "null");
- }
-
-
- TEST(toJSON, convertsFloat) {
- auto input = 1.024f;
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "1.024");
- }
-
- TEST(toJSON, convertsDouble) {
- const double input = 1.024;
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "1.024");
- }
-
- TEST(toJSON, convertsBool) {
- auto input = false;
- std::stringstream out;
- toJSON(out, input);
-
- ASSERT_EQ(out.str(), "false");
- }
-
- TEST(toJSON, quotesTab) {
- std::stringstream out;
- toJSON(out, "\t");
-
- ASSERT_EQ(out.str(), "\"\\t\"");
- }
-
- TEST(toJSON, quotesNewline) {
- std::stringstream out;
- toJSON(out, "\n");
-
- ASSERT_EQ(out.str(), "\"\\n\"");
- }
-
- TEST(toJSON, quotesCreturn) {
- std::stringstream out;
- toJSON(out, "\r");
-
- ASSERT_EQ(out.str(), "\"\\r\"");
- }
-
- TEST(toJSON, quotesCreturnNewLine) {
- std::stringstream out;
- toJSON(out, "\r\n");
-
- ASSERT_EQ(out.str(), "\"\\r\\n\"");
- }
-
- TEST(toJSON, quotesDoublequotes) {
- std::stringstream out;
- toJSON(out, "\"");
-
- ASSERT_EQ(out.str(), "\"\\\"\"");
- }
-
- TEST(toJSON, substringEscape) {
- std::stringstream out;
- std::string_view s = "foo\t";
- toJSON(out, s.substr(3));
-
- ASSERT_EQ(out.str(), "\"\\t\"");
- }
-
- /* ----------------------------------------------------------------------------
- * JSONObject
- * --------------------------------------------------------------------------*/
-
- TEST(JSONObject, emptyObject) {
- std::stringstream out;
- {
- JSONObject t(out);
- }
- ASSERT_EQ(out.str(), "{}");
- }
-
- TEST(JSONObject, objectWithList) {
- std::stringstream out;
- {
- JSONObject t(out);
- auto l = t.list("list");
- l.elem("element");
- }
- ASSERT_EQ(out.str(), R"#({"list":["element"]})#");
- }
-
- TEST(JSONObject, objectWithListIndent) {
- std::stringstream out;
- {
- JSONObject t(out, true);
- auto l = t.list("list");
- l.elem("element");
- }
- ASSERT_EQ(out.str(),
-R"#({
- "list": [
- "element"
- ]
-})#");
- }
-
- TEST(JSONObject, objectWithPlaceholderAndList) {
- std::stringstream out;
- {
- JSONObject t(out);
- auto l = t.placeholder("list");
- l.list().elem("element");
- }
-
- ASSERT_EQ(out.str(), R"#({"list":["element"]})#");
- }
-
- TEST(JSONObject, objectWithPlaceholderAndObject) {
- std::stringstream out;
- {
- JSONObject t(out);
- auto l = t.placeholder("object");
- l.object().attr("key", "value");
- }
-
- ASSERT_EQ(out.str(), R"#({"object":{"key":"value"}})#");
- }
-
- /* ----------------------------------------------------------------------------
- * JSONList
- * --------------------------------------------------------------------------*/
-
- TEST(JSONList, empty) {
- std::stringstream out;
- {
- JSONList l(out);
- }
- ASSERT_EQ(out.str(), R"#([])#");
- }
-
- TEST(JSONList, withElements) {
- std::stringstream out;
- {
- JSONList l(out);
- l.elem("one");
- l.object();
- l.placeholder().write("three");
- }
- ASSERT_EQ(out.str(), R"#(["one",{},"three"])#");
- }
-}
-
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 623b74bdd..993dc1cb6 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -2,6 +2,7 @@
#include "sync.hh"
#include "finally.hh"
#include "serialise.hh"
+#include "cgroup.hh"
#include <array>
#include <cctype>
@@ -36,7 +37,6 @@
#include <sys/prctl.h>
#include <sys/resource.h>
-#include <mntent.h>
#include <cmath>
#endif
@@ -727,45 +727,22 @@ unsigned int getMaxCPU()
{
#if __linux__
try {
- FILE *fp = fopen("/proc/mounts", "r");
- if (!fp)
- return 0;
-
- Strings cgPathParts;
-
- struct mntent *ent;
- while ((ent = getmntent(fp))) {
- std::string mountType, mountPath;
-
- mountType = ent->mnt_type;
- mountPath = ent->mnt_dir;
-
- if (mountType == "cgroup2") {
- cgPathParts.push_back(mountPath);
- break;
- }
- }
-
- fclose(fp);
-
- if (cgPathParts.size() > 0 && pathExists("/proc/self/cgroup")) {
- std::string currentCgroup = readFile("/proc/self/cgroup");
- Strings cgValues = tokenizeString<Strings>(currentCgroup, ":");
- cgPathParts.push_back(trim(cgValues.back(), "\n"));
- cgPathParts.push_back("cpu.max");
- std::string fullCgPath = canonPath(concatStringsSep("/", cgPathParts));
-
- if (pathExists(fullCgPath)) {
- std::string cpuMax = readFile(fullCgPath);
- std::vector<std::string> cpuMaxParts = tokenizeString<std::vector<std::string>>(cpuMax, " ");
- std::string quota = cpuMaxParts[0];
- std::string period = trim(cpuMaxParts[1], "\n");
-
- if (quota != "max")
- return std::ceil(std::stoi(quota) / std::stof(period));
- }
- }
- } catch (Error &) { ignoreException(); }
+ auto cgroupFS = getCgroupFS();
+ if (!cgroupFS) return 0;
+
+ auto cgroups = getCgroups("/proc/self/cgroup");
+ auto cgroup = cgroups[""];
+ if (cgroup == "") return 0;
+
+ auto cpuFile = *cgroupFS + "/" + cgroup + "/cpu.max";
+
+ auto cpuMax = readFile(cpuFile);
+ auto cpuMaxParts = tokenizeString<std::vector<std::string>>(cpuMax, " \n");
+ auto quota = cpuMaxParts[0];
+ auto period = cpuMaxParts[1];
+ if (quota != "max")
+ return std::ceil(std::stoi(quota) / std::stof(period));
+ } catch (Error &) { ignoreException(lvlDebug); }
#endif
return 0;
@@ -1427,7 +1404,7 @@ std::string shellEscape(const std::string_view s)
}
-void ignoreException()
+void ignoreException(Verbosity lvl)
{
/* Make sure no exceptions leave this function.
printError() also throws when remote is closed. */
@@ -1435,7 +1412,7 @@ void ignoreException()
try {
throw;
} catch (std::exception & e) {
- printError("error (ignored): %1%", e.what());
+ printMsg(lvl, "error (ignored): %1%", e.what());
}
} catch (...) { }
}
@@ -1617,6 +1594,21 @@ std::string stripIndentation(std::string_view s)
}
+std::pair<std::string_view, std::string_view> getLine(std::string_view s)
+{
+ auto newline = s.find('\n');
+
+ if (newline == s.npos) {
+ return {s, ""};
+ } else {
+ auto line = s.substr(0, newline);
+ if (!line.empty() && line[line.size() - 1] == '\r')
+ line = line.substr(0, line.size() - 1);
+ return {line, s.substr(newline + 1)};
+ }
+}
+
+
//////////////////////////////////////////////////////////////////////
static Sync<std::pair<unsigned short, unsigned short>> windowSize{{0, 0}};
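
The rewritten `getMaxCPU()` delegates cgroup discovery to `getCgroupFS()`/`getCgroups()` and then only parses `cpu.max`, which holds "&lt;quota&gt; &lt;period&gt;" (or "max &lt;period&gt;" when unlimited). A sketch of just that parsing step; `cpusFromCpuMax` is a hypothetical name, and the file contents are assumed to have been read already:

```c++
#include <cmath>
#include <iostream>
#include <sstream>
#include <string>

// Parse the contents of a cgroup v2 "cpu.max" file: "<quota> <period>" or
// "max <period>". Returns 0 when no CPU limit applies, mirroring
// getMaxCPU()'s fallback value.
unsigned int cpusFromCpuMax(const std::string & cpuMax)
{
    std::istringstream in(cpuMax);
    std::string quota, period;
    in >> quota >> period;
    if (quota == "max") return 0;
    return std::ceil(std::stoi(quota) / std::stof(period));
}

int main()
{
    std::cout << cpusFromCpuMax("200000 100000\n") << "\n"; // 2 CPUs
    std::cout << cpusFromCpuMax("150000 100000\n") << "\n"; // 1.5 rounds up to 2
    std::cout << cpusFromCpuMax("max 100000\n") << "\n";    // 0 = unlimited
    return 0;
}
```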
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index e5c678682..9b149de80 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -510,6 +510,18 @@ std::optional<N> string2Float(const std::string_view s)
}
+/* Convert a little-endian integer to host order. */
+template<typename T>
+T readLittleEndian(unsigned char * p)
+{
+ T x = 0;
+ for (size_t i = 0; i < sizeof(x); ++i, ++p) {
+ x |= ((T) *p) << (i * 8);
+ }
+ return x;
+}
+
+
/* Return true iff `s' starts with `prefix'. */
bool hasPrefix(std::string_view s, std::string_view prefix);
@@ -528,7 +540,7 @@ std::string shellEscape(const std::string_view s);
/* Exception handling in destructors: print an error message, then
ignore the exception. */
-void ignoreException();
+void ignoreException(Verbosity lvl = lvlError);
@@ -563,6 +575,12 @@ std::string base64Decode(std::string_view s);
std::string stripIndentation(std::string_view s);
+/* Get the prefix of 's' up to and excluding the next line break (LF
+ optionally preceded by CR), and the remainder following the line
+ break. */
+std::pair<std::string_view, std::string_view> getLine(std::string_view s);
+
+
/* Get a value for the specified key from an associate container. */
template <class T>
const typename T::mapped_type * get(const T & map, const typename T::key_type & key)
@@ -737,4 +755,13 @@ inline std::string operator + (std::string && s, std::string_view s2)
return std::move(s);
}
+inline std::string operator + (std::string_view s1, const char * s2)
+{
+ std::string s;
+ s.reserve(s1.size() + strlen(s2));
+ s.append(s1);
+ s.append(s2);
+ return s;
+}
+
}
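
The new `getLine()` helper splits off one line at a time, dropping an optional trailing CR. A self-contained sketch with the same semantics, plus a small loop showing the intended usage pattern:

```c++
#include <iostream>
#include <string_view>
#include <utility>

// Standalone copy of the getLine() semantics added in util.hh: the text up
// to (and excluding) the next LF, with an optional trailing CR stripped,
// plus the remainder following the line break.
std::pair<std::string_view, std::string_view> getLine(std::string_view s)
{
    auto newline = s.find('\n');
    if (newline == s.npos) return {s, ""};
    auto line = s.substr(0, newline);
    if (!line.empty() && line.back() == '\r')
        line = line.substr(0, line.size() - 1);
    return {line, s.substr(newline + 1)};
}

int main()
{
    std::string_view rest = "first\r\nsecond\nlast";
    while (!rest.empty()) {
        auto [line, remainder] = getLine(rest);
        std::cout << '[' << line << "]\n"; // [first] [second] [last]
        rest = remainder;
    }
    return 0;
}
```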
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index fdd66220a..31823a966 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -12,7 +12,6 @@
#include "local-fs-store.hh"
#include "user-env.hh"
#include "util.hh"
-#include "json.hh"
#include "value-to-json.hh"
#include "xml-writer.hh"
#include "legacy.hh"
@@ -26,6 +25,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
+#include <nlohmann/json.hpp>
using namespace nix;
using std::cout;
@@ -647,7 +647,7 @@ static void upgradeDerivations(Globals & globals,
} else newElems.push_back(i);
} catch (Error & e) {
- e.addTrace(std::nullopt, "while trying to find an upgrade for '%s'", i.queryName());
+ e.addTrace(nullptr, "while trying to find an upgrade for '%s'", i.queryName());
throw;
}
}
@@ -911,53 +911,58 @@ static VersionDiff compareVersionAgainstSet(
static void queryJSON(Globals & globals, std::vector<DrvInfo> & elems, bool printOutPath, bool printMeta)
{
- JSONObject topObj(cout, true);
+ using nlohmann::json;
+ json topObj = json::object();
for (auto & i : elems) {
try {
if (i.hasFailed()) continue;
- JSONObject pkgObj = topObj.object(i.attrPath);
auto drvName = DrvName(i.queryName());
- pkgObj.attr("name", drvName.fullName);
- pkgObj.attr("pname", drvName.name);
- pkgObj.attr("version", drvName.version);
- pkgObj.attr("system", i.querySystem());
- pkgObj.attr("outputName", i.queryOutputName());
+ json &pkgObj = topObj[i.attrPath];
+ pkgObj = {
+ {"name", drvName.fullName},
+ {"pname", drvName.name},
+ {"version", drvName.version},
+ {"system", i.querySystem()},
+ {"outputName", i.queryOutputName()},
+ };
{
DrvInfo::Outputs outputs = i.queryOutputs(printOutPath);
- JSONObject outputObj = pkgObj.object("outputs");
+ json &outputObj = pkgObj["outputs"];
+ outputObj = json::object();
for (auto & j : outputs) {
if (j.second)
- outputObj.attr(j.first, globals.state->store->printStorePath(*j.second));
+ outputObj[j.first] = globals.state->store->printStorePath(*j.second);
else
- outputObj.attr(j.first, nullptr);
+ outputObj[j.first] = nullptr;
}
}
if (printMeta) {
- JSONObject metaObj = pkgObj.object("meta");
+ json &metaObj = pkgObj["meta"];
+ metaObj = json::object();
StringSet metaNames = i.queryMetaNames();
for (auto & j : metaNames) {
Value * v = i.queryMeta(j);
if (!v) {
printError("derivation '%s' has invalid meta attribute '%s'", i.queryName(), j);
- metaObj.attr(j, nullptr);
+ metaObj[j] = nullptr;
} else {
- auto placeholder = metaObj.placeholder(j);
PathSet context;
- printValueAsJSON(*globals.state, true, *v, noPos, placeholder, context);
+ metaObj[j] = printValueAsJSON(*globals.state, true, *v, noPos, context);
}
}
}
} catch (AssertionError & e) {
printMsg(lvlTalkative, "skipping derivation named '%1%' which gives an assertion failure", i.queryName());
} catch (Error & e) {
- e.addTrace(std::nullopt, "while querying the derivation named '%1%'", i.queryName());
+ e.addTrace(nullptr, "while querying the derivation named '%1%'", i.queryName());
throw;
}
}
+ std::cout << topObj.dump(2);
}
@@ -1257,7 +1262,7 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs)
} catch (AssertionError & e) {
printMsg(lvlTalkative, "skipping derivation named '%1%' which gives an assertion failure", i.queryName());
} catch (Error & e) {
- e.addTrace(std::nullopt, "while querying the derivation named '%1%'", i.queryName());
+ e.addTrace(nullptr, "while querying the derivation named '%1%'", i.queryName());
throw;
}
}
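
The nix-env JSON changes rely on a convenient nlohmann::json idiom: `operator[]` on an object creates a null member, and binding a reference to it lets nested objects be filled in place, exactly as the rewritten `queryJSON()` does. A minimal sketch of that idiom (the attribute name and store path below are dummies, not real data):

```c++
#include <iostream>
#include <nlohmann/json.hpp>

using nlohmann::json;

int main()
{
    json topObj = json::object();

    // Indexing creates a null member; assigning through the reference
    // fills it in place.
    json & pkgObj = topObj["example.hello"]; // dummy attribute path
    pkgObj = {
        {"name", "hello-2.12"},
        {"pname", "hello"},
        {"version", "2.12"},
    };

    json & outputObj = pkgObj["outputs"];
    outputObj = json::object();
    outputObj["out"] = "/nix/store/dummy-hello-2.12"; // dummy store path

    std::cout << topObj.dump(2) << std::endl;
    return 0;
}
```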
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index 23f2ad3cf..3bbefedbe 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -516,7 +516,7 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise)
if (!store->isValidPath(info->path) || reregister) {
/* !!! races */
if (canonicalise)
- canonicalisePathMetaData(store->printStorePath(info->path), -1);
+ canonicalisePathMetaData(store->printStorePath(info->path), {});
if (!hashGiven) {
HashResult hash = hashPath(htSHA256, store->printStorePath(info->path));
info->narHash = hash.first;
@@ -808,14 +808,23 @@ static void opServe(Strings opFlags, Strings opArgs)
if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
settings.maxLogSize = readNum<unsigned long>(in);
if (GET_PROTOCOL_MINOR(clientVersion) >= 3) {
- settings.buildRepeat = readInt(in);
- settings.enforceDeterminism = readInt(in);
+ auto nrRepeats = readInt(in);
+ if (nrRepeats != 0) {
+ throw Error("client requested repeating builds, but this is not currently implemented");
+ }
+ // Ignore 'enforceDeterminism'. It used to be true by
+ // default, but it only ever had an effect when
+ // `nrRepeats != 0`. Since we have already asserted that
+ // `nrRepeats` is in fact 0, we can safely ignore it
+ // without doing anything other than what the client
+ // asked for.
+ readInt(in);
+
settings.runDiffHook = true;
}
if (GET_PROTOCOL_MINOR(clientVersion) >= 7) {
settings.keepFailed = (bool) readInt(in);
}
- settings.printRepeatedBuilds = false;
};
while (true) {
@@ -926,7 +935,6 @@ static void opServe(Strings opFlags, Strings opArgs)
worker_proto::write(*store, out, status.builtOutputs);
}
-
break;
}
diff --git a/src/nix/app.cc b/src/nix/app.cc
index 821964f86..5658f2a52 100644
--- a/src/nix/app.cc
+++ b/src/nix/app.cc
@@ -37,11 +37,13 @@ struct InstallableDerivedPath : Installable
* Return the rewrites that are needed to resolve a string whose context is
* included in `dependencies`.
*/
-StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies)
+StringPairs resolveRewrites(
+ Store & store,
+ const std::vector<BuiltPathWithResult> & dependencies)
{
StringPairs res;
for (auto & dep : dependencies)
- if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep))
+ if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep.path))
for (auto & [ outputName, outputPath ] : drvDep->outputs)
res.emplace(
downstreamPlaceholder(store, drvDep->drvPath, outputName),
@@ -53,7 +55,10 @@ StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies)
/**
* Resolve the given string assuming the given context.
*/
-std::string resolveString(Store & store, const std::string & toResolve, const BuiltPaths dependencies)
+std::string resolveString(
+ Store & store,
+ const std::string & toResolve,
+ const std::vector<BuiltPathWithResult> & dependencies)
{
auto rewrites = resolveRewrites(store, dependencies);
return rewriteStrings(toResolve, rewrites);
@@ -66,7 +71,9 @@ UnresolvedApp Installable::toApp(EvalState & state)
auto type = cursor->getAttr("type")->getString();
- std::string expected = !attrPath.empty() && state.symbols[attrPath[0]] == "apps" ? "app" : "derivation";
+ std::string expected = !attrPath.empty() &&
+ (state.symbols[attrPath[0]] == "apps" || state.symbols[attrPath[0]] == "defaultApp")
+ ? "app" : "derivation";
if (type != expected)
throw Error("attribute '%s' should have type '%s'", cursor->getAttrPathStr(), expected);
diff --git a/src/nix/build.cc b/src/nix/build.cc
index 9c648d28e..94b169167 100644
--- a/src/nix/build.cc
+++ b/src/nix/build.cc
@@ -10,6 +10,37 @@
using namespace nix;
+nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
+{
+ auto res = nlohmann::json::array();
+ for (auto & t : paths) {
+ std::visit([&res, store](const auto & t) {
+ res.push_back(t.toJSON(store));
+ }, t.raw());
+ }
+ return res;
+}
+
+nlohmann::json builtPathsWithResultToJSON(const std::vector<BuiltPathWithResult> & buildables, ref<Store> store)
+{
+ auto res = nlohmann::json::array();
+ for (auto & b : buildables) {
+ std::visit([&](const auto & t) {
+ auto j = t.toJSON(store);
+ if (b.result) {
+ j["startTime"] = b.result->startTime;
+ j["stopTime"] = b.result->stopTime;
+ if (b.result->cpuUser)
+ j["cpuUser"] = ((double) b.result->cpuUser->count()) / 1000000;
+ if (b.result->cpuSystem)
+ j["cpuSystem"] = ((double) b.result->cpuSystem->count()) / 1000000;
+ }
+ res.push_back(j);
+ }, b.path.raw());
+ }
+ return res;
+}
+
struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
{
Path outLink = "result";
@@ -78,7 +109,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
Realise::Outputs,
installables, buildMode);
- if (json) logger->cout("%s", derivedPathsWithHintsToJSON(buildables, store).dump());
+ if (json) logger->cout("%s", builtPathsWithResultToJSON(buildables, store).dump());
if (outLink != "")
if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>())
@@ -98,7 +129,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
store2->addPermRoot(output.second, absPath(symlink));
}
},
- }, buildable.raw());
+ }, buildable.path.raw());
}
if (printOutputPaths) {
@@ -113,11 +144,14 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
std::cout << store->printStorePath(output.second) << std::endl;
}
},
- }, buildable.raw());
+ }, buildable.path.raw());
}
}
- updateProfile(buildables);
+ BuiltPaths buildables2;
+ for (auto & b : buildables)
+ buildables2.push_back(b.path);
+ updateProfile(buildables2);
}
};
diff --git a/src/nix/daemon.cc b/src/nix/daemon.cc
index 940923d3b..c527fdb0a 100644
--- a/src/nix/daemon.cc
+++ b/src/nix/daemon.cc
@@ -257,7 +257,7 @@ static void daemonLoop()
} catch (Interrupted & e) {
return;
} catch (Error & error) {
- ErrorInfo ei = error.info();
+ auto ei = error.info();
// FIXME: add to trace?
ei.msg = hintfmt("error processing connection: %1%", ei.msg.str());
logError(ei);
diff --git a/src/nix/daemon.md b/src/nix/daemon.md
index e97016a94..d5cdadf08 100644
--- a/src/nix/daemon.md
+++ b/src/nix/daemon.md
@@ -11,7 +11,7 @@ R""(
# Description
This command runs the Nix daemon, which is a required component in
-multi-user Nix installations. It performs build actions and other
+multi-user Nix installations. It runs build tasks and other
operations on the Nix store on behalf of non-root users. Usually you
don't run the daemon directly; instead it's managed by a service
management framework such as `systemd`.
diff --git a/src/nix/develop.cc b/src/nix/develop.cc
index 4de109754..1d90d1dac 100644
--- a/src/nix/develop.cc
+++ b/src/nix/develop.cc
@@ -164,6 +164,14 @@ struct BuildEnvironment
{
return vars == other.vars && bashFunctions == other.bashFunctions;
}
+
+ std::string getSystem() const
+ {
+ if (auto v = get(vars, "system"))
+ return getString(*v);
+ else
+ return settings.thisSystem;
+ }
};
const static std::string getEnvSh =
@@ -192,10 +200,12 @@ static StorePath getDerivationEnvironment(ref<Store> store, ref<Store> evalStore
drv.env.erase("allowedRequisites");
drv.env.erase("disallowedReferences");
drv.env.erase("disallowedRequisites");
+ drv.env.erase("name");
/* Rehash and write the derivation. FIXME: would be nice to use
'buildDerivation', but that's privileged. */
drv.name += "-env";
+ drv.env.emplace("name", drv.name);
drv.inputSrcs.insert(std::move(getEnvShPath));
if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
for (auto & output : drv.outputs) {
@@ -568,7 +578,7 @@ struct CmdDevelop : Common, MixEnvironment
}
}
- runProgramInStore(store, shell, args);
+ runProgramInStore(store, shell, args, buildEnvironment.getSystem());
}
};
diff --git a/src/nix/eval.cc b/src/nix/eval.cc
index 23fef7c68..ccee074e9 100644
--- a/src/nix/eval.cc
+++ b/src/nix/eval.cc
@@ -4,10 +4,11 @@
#include "store-api.hh"
#include "eval.hh"
#include "eval-inline.hh"
-#include "json.hh"
#include "value-to-json.hh"
#include "progress-bar.hh"
+#include <nlohmann/json.hpp>
+
using namespace nix;
struct CmdEval : MixJSON, InstallableCommand
@@ -115,9 +116,7 @@ struct CmdEval : MixJSON, InstallableCommand
}
else if (json) {
- JSONPlaceholder jsonOut(std::cout);
- printValueAsJSON(*state, true, *v, pos, jsonOut, context, false);
- std::cout << std::endl;
+ std::cout << printValueAsJSON(*state, true, *v, pos, context, false).dump() << std::endl;
}
else {
diff --git a/src/nix/flake-update.md b/src/nix/flake-update.md
index 2ee8a707d..8c6042d94 100644
--- a/src/nix/flake-update.md
+++ b/src/nix/flake-update.md
@@ -16,7 +16,7 @@ R""(
# Description
This command recreates the lock file of a flake (`flake.lock`), thus
-updating the lock for every mutable input (like `nixpkgs`) to its
+updating the lock for every unlocked input (like `nixpkgs`) to its
current version. This is equivalent to passing `--recreate-lock-file`
to any command that operates on a flake. That is,
diff --git a/src/nix/flake.cc b/src/nix/flake.cc
index 3d90cfc05..06fd87ef2 100644
--- a/src/nix/flake.cc
+++ b/src/nix/flake.cc
@@ -11,7 +11,6 @@
#include "attr-path.hh"
#include "fetchers.hh"
#include "registry.hh"
-#include "json.hh"
#include "eval-cache.hh"
#include "markdown.hh"
@@ -21,6 +20,7 @@
using namespace nix;
using namespace nix::flake;
+using json = nlohmann::json;
class FlakeCommand : virtual Args, public MixFlakeOptions
{
@@ -215,7 +215,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
if (!lockedFlake.lockFile.root->inputs.empty())
logger->cout(ANSI_BOLD "Inputs:" ANSI_NORMAL);
- std::unordered_set<std::shared_ptr<Node>> visited;
+ std::set<ref<Node>> visited;
std::function<void(const Node & node, const std::string & prefix)> recurse;
@@ -227,7 +227,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
if (auto lockedNode = std::get_if<0>(&input.second)) {
logger->cout("%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s",
prefix + (last ? treeLast : treeConn), input.first,
- *lockedNode ? (*lockedNode)->lockedRef : flake.lockedRef);
+ (*lockedNode)->lockedRef);
bool firstVisit = visited.insert(*lockedNode).second;
@@ -917,35 +917,44 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun
{
auto flake = lockFlake();
- auto jsonRoot = json ? std::optional<JSONObject>(std::cout) : std::nullopt;
-
StorePathSet sources;
sources.insert(flake.flake.sourceInfo->storePath);
- if (jsonRoot)
- jsonRoot->attr("path", store->printStorePath(flake.flake.sourceInfo->storePath));
// FIXME: use graph output, handle cycles.
- std::function<void(const Node & node, std::optional<JSONObject> & jsonObj)> traverse;
- traverse = [&](const Node & node, std::optional<JSONObject> & jsonObj)
+ std::function<nlohmann::json(const Node & node)> traverse;
+ traverse = [&](const Node & node)
{
- auto jsonObj2 = jsonObj ? jsonObj->object("inputs") : std::optional<JSONObject>();
+ nlohmann::json jsonObj2 = json ? json::object() : nlohmann::json(nullptr);
for (auto & [inputName, input] : node.inputs) {
if (auto inputNode = std::get_if<0>(&input)) {
- auto jsonObj3 = jsonObj2 ? jsonObj2->object(inputName) : std::optional<JSONObject>();
auto storePath =
dryRun
? (*inputNode)->lockedRef.input.computeStorePath(*store)
: (*inputNode)->lockedRef.input.fetch(store).first.storePath;
- if (jsonObj3)
- jsonObj3->attr("path", store->printStorePath(storePath));
- sources.insert(std::move(storePath));
- traverse(**inputNode, jsonObj3);
+ if (json) {
+ auto& jsonObj3 = jsonObj2[inputName];
+ jsonObj3["path"] = store->printStorePath(storePath);
+ sources.insert(std::move(storePath));
+ jsonObj3["inputs"] = traverse(**inputNode);
+ } else {
+ sources.insert(std::move(storePath));
+ traverse(**inputNode);
+ }
}
}
+ return jsonObj2;
};
- traverse(*flake.lockFile.root, jsonRoot);
+ if (json) {
+ nlohmann::json jsonRoot = {
+ {"path", store->printStorePath(flake.flake.sourceInfo->storePath)},
+ {"inputs", traverse(*flake.lockFile.root)},
+ };
+ std::cout << jsonRoot.dump() << std::endl;
+ } else {
+ traverse(*flake.lockFile.root);
+ }
if (!dryRun && !dstUri.empty()) {
ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
diff --git a/src/nix/flake.md b/src/nix/flake.md
index a1ab43281..810e9ebea 100644
--- a/src/nix/flake.md
+++ b/src/nix/flake.md
@@ -18,51 +18,56 @@ values such as packages or NixOS modules provided by the flake).
Flake references (*flakerefs*) are a way to specify the location of a
flake. These have two different forms:
-* An attribute set representation, e.g.
- ```nix
- {
- type = "github";
- owner = "NixOS";
- repo = "nixpkgs";
- }
- ```
+## Attribute set representation
- The only required attribute is `type`. The supported types are
- listed below.
+Example:
-* A URL-like syntax, e.g.
+```nix
+{
+ type = "github";
+ owner = "NixOS";
+ repo = "nixpkgs";
+}
+```
- ```
- github:NixOS/nixpkgs
- ```
+The only required attribute is `type`. The supported types are
+listed below.
- These are used on the command line as a more convenient alternative
- to the attribute set representation. For instance, in the command
+## URL-like syntax
- ```console
- # nix build github:NixOS/nixpkgs#hello
- ```
+Example:
- `github:NixOS/nixpkgs` is a flake reference (while `hello` is an
- output attribute). They are also allowed in the `inputs` attribute
- of a flake, e.g.
+```
+github:NixOS/nixpkgs
+```
- ```nix
- inputs.nixpkgs.url = github:NixOS/nixpkgs;
- ```
+These are used on the command line as a more convenient alternative
+to the attribute set representation. For instance, in the command
- is equivalent to
+```console
+# nix build github:NixOS/nixpkgs#hello
+```
- ```nix
- inputs.nixpkgs = {
- type = "github";
- owner = "NixOS";
- repo = "nixpkgs";
- };
- ```
+`github:NixOS/nixpkgs` is a flake reference (while `hello` is an
+output attribute). They are also allowed in the `inputs` attribute
+of a flake, e.g.
+
+```nix
+inputs.nixpkgs.url = github:NixOS/nixpkgs;
+```
+
+is equivalent to
+
+```nix
+inputs.nixpkgs = {
+ type = "github";
+ owner = "NixOS";
+ repo = "nixpkgs";
+};
+```
-## Examples
+### Examples
Here are some examples of flake references in their URL-like representation:
diff --git a/src/nix/local.mk b/src/nix/local.mk
index e4ec7634d..0f2f016ec 100644
--- a/src/nix/local.mk
+++ b/src/nix/local.mk
@@ -18,7 +18,7 @@ nix_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/libexpr
nix_LIBS = libexpr libmain libfetchers libstore libutil libcmd
-nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) -llowdown
+nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) $(LOWDOWN_LIBS)
$(foreach name, \
nix-build nix-channel nix-collect-garbage nix-copy-closure nix-daemon nix-env nix-hash nix-instantiate nix-prefetch-url nix-shell nix-store, \
diff --git a/src/nix/ls.cc b/src/nix/ls.cc
index 07554994b..e964b01b3 100644
--- a/src/nix/ls.cc
+++ b/src/nix/ls.cc
@@ -3,7 +3,7 @@
#include "fs-accessor.hh"
#include "nar-accessor.hh"
#include "common-args.hh"
-#include "json.hh"
+#include <nlohmann/json.hpp>
using namespace nix;
@@ -91,10 +91,9 @@ struct MixLs : virtual Args, MixJSON
if (path == "/") path = "";
if (json) {
- JSONPlaceholder jsonRoot(std::cout);
if (showDirectory)
throw UsageError("'--directory' is useless with '--json'");
- listNar(jsonRoot, accessor, path, recursive);
+ std::cout << listNar(accessor, path, recursive);
} else
listText(accessor);
}
diff --git a/src/nix/main.cc b/src/nix/main.cc
index 7172b4bd2..d3d2f5b16 100644
--- a/src/nix/main.cc
+++ b/src/nix/main.cc
@@ -53,7 +53,6 @@ static bool haveInternet()
}
std::string programPath;
-char * * savedArgv;
struct HelpRequested { };
@@ -270,7 +269,7 @@ void mainWrapped(int argc, char * * argv)
programPath = argv[0];
auto programName = std::string(baseNameOf(programPath));
- if (argc > 0 && std::string_view(argv[0]) == "__build-remote") {
+ if (argc > 1 && std::string_view(argv[1]) == "__build-remote") {
programName = "build-remote";
argv++; argc--;
}
diff --git a/src/nix/make-content-addressed.cc b/src/nix/make-content-addressed.cc
index 34860c38f..d86b90fc7 100644
--- a/src/nix/make-content-addressed.cc
+++ b/src/nix/make-content-addressed.cc
@@ -2,10 +2,13 @@
#include "store-api.hh"
#include "make-content-addressed.hh"
#include "common-args.hh"
-#include "json.hh"
+
+#include <nlohmann/json.hpp>
using namespace nix;
+using nlohmann::json;
+
struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand, MixJSON
{
CmdMakeContentAddressed()
@@ -25,6 +28,7 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand,
;
}
+ using StorePathsCommand::run;
void run(ref<Store> srcStore, StorePaths && storePaths) override
{
auto dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
@@ -33,13 +37,15 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand,
StorePathSet(storePaths.begin(), storePaths.end()));
if (json) {
- JSONObject jsonRoot(std::cout);
- JSONObject jsonRewrites(jsonRoot.object("rewrites"));
+ auto jsonRewrites = json::object();
for (auto & path : storePaths) {
auto i = remappings.find(path);
assert(i != remappings.end());
- jsonRewrites.attr(srcStore->printStorePath(path), srcStore->printStorePath(i->second));
+ jsonRewrites[srcStore->printStorePath(path)] = srcStore->printStorePath(i->second);
}
+ auto json = json::object();
+ json["rewrites"] = jsonRewrites;
+ std::cout << json.dump();
} else {
for (auto & path : storePaths) {
auto i = remappings.find(path);
diff --git a/src/nix/nix.md b/src/nix/nix.md
index d48682a94..db60c59ff 100644
--- a/src/nix/nix.md
+++ b/src/nix/nix.md
@@ -115,12 +115,11 @@ the Nix store. Here are the recognised types of installables:
* **Store derivations**: `/nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv`
- Store derivations are store paths with extension `.drv` and are a
- low-level representation of a build-time dependency graph used
- internally by Nix. By default, if you pass a store derivation to a
- `nix` subcommand, it will operate on the *output paths* of the
- derivation. For example, `nix path-info` prints information about
- the output paths:
+ By default, if you pass a [store derivation] path to a `nix` subcommand, the command will operate on the [output path]s of the derivation.
+
+ [output path]: ../../glossary.md#gloss-output-path
+
+ For example, `nix path-info` prints information about the output paths:
```console
# nix path-info --json /nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv
@@ -164,6 +163,13 @@ operate are determined as follows:
```
+ and likewise, using a store path to a "drv" file to specify the derivation:
+
+ ```console
+ # nix build '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev,static'
+ …
+ ```
+
* You can also specify that *all* outputs should be used using the
syntax *installable*`^*`. For example, the following shows the size
of all outputs of the `glibc` package in the binary cache:
@@ -177,6 +183,12 @@ operate are determined as follows:
/nix/store/q6580lr01jpcsqs4r5arlh4ki2c1m9rv-glibc-2.33-123-dev 44200560
```
+ and likewise, using a store path to a "drv" file to specify the derivation:
+
+ ```console
+ # nix path-info -S '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^*'
+ …
+ ```
* If you didn't specify the desired outputs, but the derivation has an
attribute `meta.outputsToInstall`, Nix will use those outputs. For
example, since the package `nixpkgs#libxml2` has this attribute:
@@ -189,6 +201,11 @@ operate are determined as follows:
a command like `nix shell nixpkgs#libxml2` will provide only those
two outputs by default.
+ Note that a [store derivation] (given by its `.drv` file store path) doesn't have
+ any attributes like `meta`, and thus this case doesn't apply to it.
+
+ [store derivation]: ../../glossary.md#gloss-store-derivation
+
* Otherwise, Nix will use all outputs of the derivation.
# Nix stores
diff --git a/src/nix/path-from-hash-part.cc b/src/nix/path-from-hash-part.cc
new file mode 100644
index 000000000..7f7cda8d3
--- /dev/null
+++ b/src/nix/path-from-hash-part.cc
@@ -0,0 +1,39 @@
+#include "command.hh"
+#include "store-api.hh"
+
+using namespace nix;
+
+struct CmdPathFromHashPart : StoreCommand
+{
+ std::string hashPart;
+
+ CmdPathFromHashPart()
+ {
+ expectArgs({
+ .label = "hash-part",
+ .handler = {&hashPart},
+ });
+ }
+
+ std::string description() override
+ {
+ return "get a store path from its hash part";
+ }
+
+ std::string doc() override
+ {
+ return
+ #include "path-from-hash-part.md"
+ ;
+ }
+
+ void run(ref<Store> store) override
+ {
+ if (auto storePath = store->queryPathFromHashPart(hashPart))
+ logger->cout(store->printStorePath(*storePath));
+ else
+ throw Error("there is no store path corresponding to '%s'", hashPart);
+ }
+};
+
+static auto rCmdPathFromHashPart = registerCommand2<CmdPathFromHashPart>({"store", "path-from-hash-part"});
diff --git a/src/nix/path-from-hash-part.md b/src/nix/path-from-hash-part.md
new file mode 100644
index 000000000..788e13ab6
--- /dev/null
+++ b/src/nix/path-from-hash-part.md
@@ -0,0 +1,20 @@
+R""(
+
+# Examples
+
+* Return the full store path with the given hash part:
+
+ ```console
+ # nix store path-from-hash-part --store https://cache.nixos.org/ 0i2jd68mp5g6h2sa5k9c85rb80sn8hi9
+ /nix/store/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9-hello-2.10
+ ```
+
+# Description
+
+Given the hash part of a store path (that is, the 32 characters
+following `/nix/store/`), return the full store path. This is
+primarily useful in the implementation of binary caches, where a
+request for a `.narinfo` file only supplies the hash part
+(e.g. `https://cache.nixos.org/0i2jd68mp5g6h2sa5k9c85rb80sn8hi9.narinfo`).
+
+)""
diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc
index d690fe594..613c5b191 100644
--- a/src/nix/path-info.cc
+++ b/src/nix/path-info.cc
@@ -1,12 +1,13 @@
#include "command.hh"
#include "shared.hh"
#include "store-api.hh"
-#include "json.hh"
#include "common-args.hh"
#include <algorithm>
#include <array>
+#include <nlohmann/json.hpp>
+
using namespace nix;
struct CmdPathInfo : StorePathsCommand, MixJSON
@@ -86,11 +87,10 @@ struct CmdPathInfo : StorePathsCommand, MixJSON
pathLen = std::max(pathLen, store->printStorePath(storePath).size());
if (json) {
- JSONPlaceholder jsonRoot(std::cout);
- store->pathInfoToJSON(jsonRoot,
+ std::cout << store->pathInfoToJSON(
// FIXME: preserve order?
StorePathSet(storePaths.begin(), storePaths.end()),
- true, showClosureSize, SRI, AllowInvalid);
+ true, showClosureSize, SRI, AllowInvalid).dump();
}
else {
diff --git a/src/nix/path-info.md b/src/nix/path-info.md
index 7a1714ba4..b30898ac0 100644
--- a/src/nix/path-info.md
+++ b/src/nix/path-info.md
@@ -68,7 +68,9 @@ R""(
]
```
-* Print the path of the store derivation produced by `nixpkgs#hello`:
+* Print the path of the [store derivation] produced by `nixpkgs#hello`:
+
+ [store derivation]: ../../glossary.md#gloss-store-derivation
```console
# nix path-info --derivation nixpkgs#hello
diff --git a/src/nix/profile-list.md b/src/nix/profile-list.md
index bdab9a208..fa786162f 100644
--- a/src/nix/profile-list.md
+++ b/src/nix/profile-list.md
@@ -20,11 +20,11 @@ following fields:
* An integer that can be used to unambiguously identify the package in
invocations of `nix profile remove` and `nix profile upgrade`.
-* The original ("mutable") flake reference and output attribute path
+* The original ("unlocked") flake reference and output attribute path
used at installation time.
-* The immutable flake reference to which the mutable flake reference
- was resolved.
+* The locked flake reference to which the unlocked flake reference was
+ resolved.
* The store path(s) of the package.
diff --git a/src/nix/profile-upgrade.md b/src/nix/profile-upgrade.md
index e06e74abe..39cca428b 100644
--- a/src/nix/profile-upgrade.md
+++ b/src/nix/profile-upgrade.md
@@ -2,7 +2,7 @@ R""(
# Examples
-* Upgrade all packages that were installed using a mutable flake
+* Upgrade all packages that were installed using an unlocked flake
reference:
```console
@@ -32,9 +32,9 @@ the package was installed.
> **Warning**
>
-> This only works if you used a *mutable* flake reference at
+> This only works if you used an *unlocked* flake reference at
> installation time, e.g. `nixpkgs#hello`. It does not work if you
-> used an *immutable* flake reference
+> used a *locked* flake reference
> (e.g. `github:NixOS/nixpkgs/13d0c311e3ae923a00f734b43fd1d35b47d8943a#hello`),
> since in that case the "latest version" is always the same.
diff --git a/src/nix/profile.cc b/src/nix/profile.cc
index 3814e7d5a..11910523d 100644
--- a/src/nix/profile.cc
+++ b/src/nix/profile.cc
@@ -253,11 +253,11 @@ struct ProfileManifest
static std::map<Installable *, BuiltPaths>
builtPathsPerInstallable(
- const std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> & builtPaths)
+ const std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> & builtPaths)
{
std::map<Installable *, BuiltPaths> res;
for (auto & [installable, builtPath] : builtPaths)
- res[installable.get()].push_back(builtPath);
+ res[installable.get()].push_back(builtPath.path);
return res;
}
diff --git a/src/nix/profile.md b/src/nix/profile.md
index be3c5ba1a..273e02280 100644
--- a/src/nix/profile.md
+++ b/src/nix/profile.md
@@ -88,8 +88,7 @@ has the following fields:
the user at the time of installation (e.g. `nixpkgs`). This is also
the flake reference that will be used by `nix profile upgrade`.
-* `uri`: The immutable flake reference to which `originalUrl`
- resolved.
+* `uri`: The locked flake reference to which `originalUrl` resolved.
* `attrPath`: The flake output attribute that provided this
package. Note that this is not necessarily the attribute that the
diff --git a/src/nix/registry.cc b/src/nix/registry.cc
index c496f94f8..b5bdfba95 100644
--- a/src/nix/registry.cc
+++ b/src/nix/registry.cc
@@ -183,14 +183,12 @@ struct CmdRegistryPin : RegistryCommand, EvalCommand
void run(nix::ref<nix::Store> store) override
{
- if (locked.empty()) {
- locked = url;
- }
+ if (locked.empty()) locked = url;
auto registry = getRegistry();
auto ref = parseFlakeRef(url);
- auto locked_ref = parseFlakeRef(locked);
+ auto lockedRef = parseFlakeRef(locked);
registry->remove(ref.input);
- auto [tree, resolved] = locked_ref.resolve(store).input.fetch(store);
+ auto [tree, resolved] = lockedRef.resolve(store).input.fetch(store);
fetchers::Attrs extraAttrs;
if (ref.subdir != "") extraAttrs["dir"] = ref.subdir;
registry->add(ref.input, resolved, extraAttrs);
diff --git a/src/nix/repl.md b/src/nix/repl.md
index 23ef0f4e6..c5113be61 100644
--- a/src/nix/repl.md
+++ b/src/nix/repl.md
@@ -36,7 +36,7 @@ R""(
Loading Installable ''...
Added 1 variables.
- # nix repl --extra_experimental_features 'flakes repl-flake' nixpkgs
+ # nix repl --extra-experimental-features 'flakes repl-flake' nixpkgs
Loading Installable 'flake:nixpkgs#'...
Added 5 variables.
diff --git a/src/nix/run.cc b/src/nix/run.cc
index 45d2dfd0d..6fca68047 100644
--- a/src/nix/run.cc
+++ b/src/nix/run.cc
@@ -9,6 +9,7 @@
#include "fs-accessor.hh"
#include "progress-bar.hh"
#include "eval.hh"
+#include "build/personality.hh"
#if __linux__
#include <sys/mount.h>
@@ -24,7 +25,8 @@ namespace nix {
void runProgramInStore(ref<Store> store,
const std::string & program,
- const Strings & args)
+ const Strings & args,
+ std::optional<std::string_view> system)
{
stopProgressBar();
@@ -44,7 +46,7 @@ void runProgramInStore(ref<Store> store,
throw Error("store '%s' is not a local store so it does not support command execution", store->getUri());
if (store->storeDir != store2->getRealStoreDir()) {
- Strings helperArgs = { chrootHelperName, store->storeDir, store2->getRealStoreDir(), program };
+ Strings helperArgs = { chrootHelperName, store->storeDir, store2->getRealStoreDir(), std::string(system.value_or("")), program };
for (auto & arg : args) helperArgs.push_back(arg);
execv(getSelfExe().value_or("nix").c_str(), stringsToCharPtrs(helperArgs).data());
@@ -52,6 +54,9 @@ void runProgramInStore(ref<Store> store,
throw SysError("could not execute chroot helper");
}
+ if (system)
+ setPersonality(*system);
+
execvp(program.c_str(), stringsToCharPtrs(args).data());
throw SysError("unable to execute '%s'", program);
@@ -199,6 +204,7 @@ void chrootHelper(int argc, char * * argv)
int p = 1;
std::string storeDir = argv[p++];
std::string realStoreDir = argv[p++];
+ std::string system = argv[p++];
std::string cmd = argv[p++];
Strings args;
while (p < argc)
@@ -262,6 +268,9 @@ void chrootHelper(int argc, char * * argv)
writeFile("/proc/self/uid_map", fmt("%d %d %d", uid, uid, 1));
writeFile("/proc/self/gid_map", fmt("%d %d %d", gid, gid, 1));
+ if (system != "")
+ setPersonality(system);
+
execvp(cmd.c_str(), stringsToCharPtrs(args).data());
throw SysError("unable to exec '%s'", cmd);
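
`runProgramInStore()` now threads a `system` string through to `setPersonality()` (defined in src/libstore/build/personality.cc, which is not shown in this part of the diff). As a hedged illustration of the underlying mechanism only: on Linux, personality(2) can switch the reported architecture so that, for example, an `i686-linux` shell sees an i686 uname. The mapping below is an assumption for illustration, not the actual implementation:

```c++
#include <cstdio>
#include <iostream>
#include <string_view>
#include <sys/personality.h>
#include <sys/utsname.h>

// Hypothetical sketch (Linux only): pick a 32-bit personality for i686
// system strings before exec'ing a program; the real logic lives in
// src/libstore/build/personality.cc.
static void setPersonalitySketch(std::string_view system)
{
    if (system == "i686-linux")
        if (personality(PER_LINUX32) == -1)
            perror("personality");
}

int main()
{
    setPersonalitySketch("i686-linux");
    struct utsname buf;
    if (uname(&buf) == 0)
        std::cout << "uname -m now reports: " << buf.machine << std::endl;
    return 0;
}
```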
diff --git a/src/nix/run.hh b/src/nix/run.hh
index 6180a87dd..fed360158 100644
--- a/src/nix/run.hh
+++ b/src/nix/run.hh
@@ -6,6 +6,7 @@ namespace nix {
void runProgramInStore(ref<Store> store,
const std::string & program,
- const Strings & args);
+ const Strings & args,
+ std::optional<std::string_view> system = std::nullopt);
}
diff --git a/src/nix/search.cc b/src/nix/search.cc
index bdd45cbed..d2a31607d 100644
--- a/src/nix/search.cc
+++ b/src/nix/search.cc
@@ -5,7 +5,6 @@
#include "names.hh"
#include "get-drvs.hh"
#include "common-args.hh"
-#include "json.hh"
#include "shared.hh"
#include "eval-cache.hh"
#include "attr-path.hh"
@@ -13,8 +12,10 @@
#include <regex>
#include <fstream>
+#include <nlohmann/json.hpp>
using namespace nix;
+using json = nlohmann::json;
std::string wrap(std::string prefix, std::string s)
{
@@ -84,7 +85,8 @@ struct CmdSearch : InstallableCommand, MixJSON
auto state = getEvalState();
- auto jsonOut = json ? std::make_unique<JSONObject>(std::cout) : nullptr;
+ std::optional<nlohmann::json> jsonOut;
+ if (json) jsonOut = json::object();
uint64_t results = 0;
@@ -151,10 +153,11 @@ struct CmdSearch : InstallableCommand, MixJSON
{
results++;
if (json) {
- auto jsonElem = jsonOut->object(attrPath2);
- jsonElem.attr("pname", name.name);
- jsonElem.attr("version", name.version);
- jsonElem.attr("description", description);
+ (*jsonOut)[attrPath2] = {
+ {"pname", name.name},
+ {"version", name.version},
+ {"description", description},
+ };
} else {
auto name2 = hiliteMatches(name.name, nameMatches, ANSI_GREEN, "\e[0;2m");
if (results > 1) logger->cout("");
@@ -193,6 +196,10 @@ struct CmdSearch : InstallableCommand, MixJSON
for (auto & cursor : installable->getCursors(*state))
visit(*cursor, cursor->getAttrPath(), true);
+ if (json) {
+ std::cout << jsonOut->dump() << std::endl;
+ }
+
if (!json && !results)
throw Error("no results for the given search term(s)!");
}
diff --git a/src/nix/show-derivation.cc b/src/nix/show-derivation.cc
index fb46b4dbf..af2e676a4 100644
--- a/src/nix/show-derivation.cc
+++ b/src/nix/show-derivation.cc
@@ -5,10 +5,11 @@
#include "common-args.hh"
#include "store-api.hh"
#include "archive.hh"
-#include "json.hh"
#include "derivations.hh"
+#include <nlohmann/json.hpp>
using namespace nix;
+using json = nlohmann::json;
struct CmdShowDerivation : InstallablesCommand
{
@@ -48,77 +49,63 @@ struct CmdShowDerivation : InstallablesCommand
drvPaths = std::move(closure);
}
- {
-
- JSONObject jsonRoot(std::cout, true);
+ json jsonRoot = json::object();
for (auto & drvPath : drvPaths) {
if (!drvPath.isDerivation()) continue;
- auto drvObj(jsonRoot.object(store->printStorePath(drvPath)));
+ json& drvObj = jsonRoot[store->printStorePath(drvPath)];
auto drv = store->readDerivation(drvPath);
{
- auto outputsObj(drvObj.object("outputs"));
+ json& outputsObj = drvObj["outputs"];
+ outputsObj = json::object();
for (auto & [_outputName, output] : drv.outputs) {
auto & outputName = _outputName; // work around clang bug
- auto outputObj { outputsObj.object(outputName) };
+ auto& outputObj = outputsObj[outputName];
+ outputObj = json::object();
std::visit(overloaded {
[&](const DerivationOutput::InputAddressed & doi) {
- outputObj.attr("path", store->printStorePath(doi.path));
+ outputObj["path"] = store->printStorePath(doi.path);
},
[&](const DerivationOutput::CAFixed & dof) {
- outputObj.attr("path", store->printStorePath(dof.path(*store, drv.name, outputName)));
- outputObj.attr("hashAlgo", dof.hash.printMethodAlgo());
- outputObj.attr("hash", dof.hash.hash.to_string(Base16, false));
+ outputObj["path"] = store->printStorePath(dof.path(*store, drv.name, outputName));
+ outputObj["hashAlgo"] = dof.hash.printMethodAlgo();
+ outputObj["hash"] = dof.hash.hash.to_string(Base16, false);
},
[&](const DerivationOutput::CAFloating & dof) {
- outputObj.attr("hashAlgo", makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
+ outputObj["hashAlgo"] = makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType);
},
[&](const DerivationOutput::Deferred &) {},
[&](const DerivationOutput::Impure & doi) {
- outputObj.attr("hashAlgo", makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType));
- outputObj.attr("impure", true);
+ outputObj["hashAlgo"] = makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType);
+ outputObj["impure"] = true;
},
}, output.raw());
}
}
{
- auto inputsList(drvObj.list("inputSrcs"));
+ auto& inputsList = drvObj["inputSrcs"];
+ inputsList = json::array();
for (auto & input : drv.inputSrcs)
- inputsList.elem(store->printStorePath(input));
- }
-
- {
- auto inputDrvsObj(drvObj.object("inputDrvs"));
- for (auto & input : drv.inputDrvs) {
- auto inputList(inputDrvsObj.list(store->printStorePath(input.first)));
- for (auto & outputId : input.second)
- inputList.elem(outputId);
- }
+ inputsList.emplace_back(store->printStorePath(input));
}
- drvObj.attr("system", drv.platform);
- drvObj.attr("builder", drv.builder);
-
{
- auto argsList(drvObj.list("args"));
- for (auto & arg : drv.args)
- argsList.elem(arg);
+ auto& inputDrvsObj = drvObj["inputDrvs"];
+ inputDrvsObj = json::object();
+ for (auto & input : drv.inputDrvs)
+ inputDrvsObj[store->printStorePath(input.first)] = input.second;
}
- {
- auto envObj(drvObj.object("env"));
- for (auto & var : drv.env)
- envObj.attr(var.first, var.second);
- }
+ drvObj["system"] = drv.platform;
+ drvObj["builder"] = drv.builder;
+ drvObj["args"] = drv.args;
+ drvObj["env"] = drv.env;
}
-
- }
-
- std::cout << "\n";
+ std::cout << jsonRoot.dump(2) << std::endl;
}
};
diff --git a/src/nix/show-derivation.md b/src/nix/show-derivation.md
index aa863899c..2cd93aa62 100644
--- a/src/nix/show-derivation.md
+++ b/src/nix/show-derivation.md
@@ -2,9 +2,11 @@ R""(
# Examples
-* Show the store derivation that results from evaluating the Hello
+* Show the [store derivation] that results from evaluating the Hello
package:
+ [store derivation]: ../../glossary.md#gloss-store-derivation
+
```console
# nix show-derivation nixpkgs#hello
{
@@ -37,7 +39,7 @@ R""(
# Description
This command prints on standard output a JSON representation of the
-store derivations to which *installables* evaluate. Store derivations
+[store derivation]s to which *installables* evaluate. Store derivations
are used internally by Nix. They are store paths with extension `.drv`
that represent the build-time dependency graph to which a Nix
expression evaluates.
diff --git a/src/nix/store-copy-log.md b/src/nix/store-copy-log.md
index 19ae57079..0937250f2 100644
--- a/src/nix/store-copy-log.md
+++ b/src/nix/store-copy-log.md
@@ -18,7 +18,9 @@ R""(
(The flag `--substituters ''` avoids querying
`https://cache.nixos.org` for the log.)
-* To copy the log for a specific store derivation via SSH:
+* To copy the log for a specific [store derivation] via SSH:
+
+ [store derivation]: ../../glossary.md#gloss-store-derivation
```console
# nix store copy-log --to ssh-ng://machine /nix/store/ilgm50plpmcgjhcp33z6n4qbnpqfhxym-glibc-2.33-59.drv
diff --git a/src/nix/why-depends.cc b/src/nix/why-depends.cc
index 1d9ab28ba..723017497 100644
--- a/src/nix/why-depends.cc
+++ b/src/nix/why-depends.cc
@@ -83,20 +83,47 @@ struct CmdWhyDepends : SourceExprCommand
{
auto package = parseInstallable(store, _package);
auto packagePath = Installable::toStorePath(getEvalStore(), store, Realise::Outputs, operateOn, package);
+
+ /* We don't need to build `dependency`. We try to get the store
+ * path if it's already known, and if not, then it's not a dependency.
+ *
+     * Why? If `package` does depend on `dependency`, then getting the
+ * store path of `package` above necessitated having the store path
+ * of `dependency`. The contrapositive is, if the store path of
+ * `dependency` is not already known at this point (i.e. it's a CA
+ * derivation which hasn't been built), then `package` did not need it
+ * to build.
+ */
auto dependency = parseInstallable(store, _dependency);
- auto dependencyPath = Installable::toStorePath(getEvalStore(), store, Realise::Derivation, operateOn, dependency);
- auto dependencyPathHash = dependencyPath.hashPart();
+ auto derivedDependency = dependency->toDerivedPath();
+ auto optDependencyPath = std::visit(overloaded {
+ [](const DerivedPath::Opaque & nodrv) -> std::optional<StorePath> {
+ return { nodrv.path };
+ },
+ [&](const DerivedPath::Built & hasdrv) -> std::optional<StorePath> {
+ if (hasdrv.outputs.size() != 1) {
+ throw Error("argument '%s' should evaluate to one store path", dependency->what());
+ }
+ auto outputMap = store->queryPartialDerivationOutputMap(hasdrv.drvPath);
+ auto maybePath = outputMap.find(*hasdrv.outputs.begin());
+ if (maybePath == outputMap.end()) {
+ throw Error("unexpected end of iterator");
+ }
+ return maybePath->second;
+ },
+ }, derivedDependency.raw());
StorePathSet closure;
store->computeFSClosure({packagePath}, closure, false, false);
- if (!closure.count(dependencyPath)) {
- printError("'%s' does not depend on '%s'",
- store->printStorePath(packagePath),
- store->printStorePath(dependencyPath));
+ if (!optDependencyPath.has_value() || !closure.count(*optDependencyPath)) {
+ printError("'%s' does not depend on '%s'", package->what(), dependency->what());
return;
}
+ auto dependencyPath = *optDependencyPath;
+ auto dependencyPathHash = dependencyPath.hashPart();
+
stopProgressBar(); // FIXME
auto accessor = store->getFSAccessor();
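
The why-depends rewrite dispatches on `DerivedPath` with `std::visit(overloaded { ... }, ...)`. A small self-contained sketch of that visitor idiom over a stand-in variant; the types here are simplified placeholders, not Nix's real `DerivedPath`:

```c++
#include <iostream>
#include <optional>
#include <string>
#include <variant>

// The classic "overloaded" visitor helper used throughout the diff
// (Nix defines its own equivalent in util.hh).
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

// Stand-ins for DerivedPath::Opaque / DerivedPath::Built.
struct Opaque { std::string path; };
struct Built { std::string drvPath; };
using DerivedPathLike = std::variant<Opaque, Built>;

std::optional<std::string> knownPath(const DerivedPathLike & p)
{
    return std::visit(overloaded {
        [](const Opaque & o) -> std::optional<std::string> { return o.path; },
        [](const Built &) -> std::optional<std::string> {
            // In why-depends this is where the derivation's output map is
            // consulted; here we just report "not known yet".
            return std::nullopt;
        },
    }, p);
}

int main()
{
    std::cout << knownPath(Opaque{"/nix/store/dummy"}).value_or("(unknown)") << "\n";
    std::cout << knownPath(Built{"/nix/store/dummy.drv"}).value_or("(unknown)") << "\n";
    return 0;
}
```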