Diffstat (limited to 'src')
-rw-r--r--  src/build-remote/build-remote.cc | 2
-rw-r--r--  src/libcmd/command.cc | 4
-rw-r--r--  src/libcmd/common-eval-args.cc | 2
-rw-r--r--  src/libcmd/installables.cc | 6
-rw-r--r--  src/libexpr/eval.cc | 2
-rw-r--r--  src/libexpr/eval.hh | 11
-rw-r--r--  src/libexpr/parser.y | 2
-rw-r--r--  src/libexpr/primops.cc | 61
-rw-r--r--  src/libexpr/primops.hh | 5
-rw-r--r--  src/libexpr/primops/context.cc | 54
-rw-r--r--  src/libexpr/primops/fetchMercurial.cc | 6
-rw-r--r--  src/libexpr/primops/fetchTree.cc | 26
-rw-r--r--  src/libexpr/primops/fromTOML.cc | 36
-rw-r--r--  src/libexpr/tests/error_traces.cc | 2
-rw-r--r--  src/libfetchers/attrs.hh | 1
-rw-r--r--  src/libfetchers/fetchers.cc | 6
-rw-r--r--  src/libfetchers/fetchers.hh | 10
-rw-r--r--  src/libfetchers/github.cc | 10
-rw-r--r--  src/libfetchers/input-accessor.cc | 30
-rw-r--r--  src/libfetchers/tarball.cc | 68
-rw-r--r--  src/libstore/build/derivation-goal.cc | 7
-rw-r--r--  src/libstore/build/hook-instance.cc | 6
-rw-r--r--  src/libstore/build/local-derivation-goal.cc | 77
-rw-r--r--  src/libstore/build/personality.cc | 3
-rw-r--r--  src/libstore/daemon.cc | 163
-rw-r--r--  src/libstore/derivations.cc | 8
-rw-r--r--  src/libstore/export-import.cc | 8
-rw-r--r--  src/libstore/filetransfer.cc | 22
-rw-r--r--  src/libstore/filetransfer.hh | 4
-rw-r--r--  src/libstore/gc.cc | 7
-rw-r--r--  src/libstore/globals.cc | 30
-rw-r--r--  src/libstore/globals.hh | 46
-rw-r--r--  src/libstore/legacy-ssh-store.cc | 69
-rw-r--r--  src/libstore/local-fs-store.hh | 14
-rw-r--r--  src/libstore/local-store.cc | 45
-rw-r--r--  src/libstore/local-store.hh | 21
-rw-r--r--  src/libstore/path-info.cc | 8
-rw-r--r--  src/libstore/path-references.cc | 73
-rw-r--r--  src/libstore/path-references.hh | 25
-rw-r--r--  src/libstore/path.cc | 4
-rw-r--r--  src/libstore/profiles.cc | 123
-rw-r--r--  src/libstore/profiles.hh | 143
-rw-r--r--  src/libstore/remote-store-connection.hh | 97
-rw-r--r--  src/libstore/remote-store.cc | 153
-rw-r--r--  src/libstore/remote-store.hh | 16
-rw-r--r--  src/libstore/serve-protocol.hh | 58
-rw-r--r--  src/libstore/sqlite.cc | 11
-rw-r--r--  src/libstore/sqlite.hh | 23
-rw-r--r--  src/libstore/ssh-store.cc | 1
-rw-r--r--  src/libstore/store-api.hh | 2
-rw-r--r--  src/libstore/uds-remote-store.cc | 8
-rw-r--r--  src/libstore/uds-remote-store.hh | 8
-rw-r--r--  src/libstore/worker-protocol-impl.hh | 78
-rw-r--r--  src/libstore/worker-protocol.cc | 103
-rw-r--r--  src/libstore/worker-protocol.hh | 275
-rw-r--r--  src/libutil/abstract-setting-to-json.hh | 1
-rw-r--r--  src/libutil/args.cc | 15
-rw-r--r--  src/libutil/config-impl.hh | 64
-rw-r--r--  src/libutil/config.cc | 63
-rw-r--r--  src/libutil/config.hh | 31
-rw-r--r--  src/libutil/experimental-features.cc | 20
-rw-r--r--  src/libutil/experimental-features.hh | 10
-rw-r--r--  src/libutil/filesystem.cc | 17
-rw-r--r--  src/libutil/json-utils.cc | 19
-rw-r--r--  src/libutil/json-utils.hh | 78
-rw-r--r--  src/libutil/references.cc (renamed from src/libstore/references.cc) | 80
-rw-r--r--  src/libutil/references.hh (renamed from src/libstore/references.hh) | 23
-rw-r--r--  src/libutil/tests/references.cc | 46
-rw-r--r--  src/libutil/tests/tests.cc | 2
-rw-r--r--  src/libutil/util.cc | 30
-rw-r--r--  src/libutil/util.hh | 14
-rwxr-xr-x  src/nix-channel/nix-channel.cc | 8
-rw-r--r--  src/nix-collect-garbage/nix-collect-garbage.cc | 7
-rw-r--r--  src/nix-env/nix-env.cc | 7
-rw-r--r--  src/nix-env/user-env.cc | 2
-rw-r--r--  src/nix-store/nix-store.cc | 44
-rw-r--r--  src/nix/daemon.cc | 2
-rw-r--r--  src/nix/flake.cc | 121
-rw-r--r--  src/nix/nix.md | 1
-rw-r--r--  src/nix/profile.cc | 7
80 files changed, 1906 insertions, 889 deletions
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc
index 323e04fdb..2fb17d06f 100644
--- a/src/build-remote/build-remote.cc
+++ b/src/build-remote/build-remote.cc
@@ -299,7 +299,7 @@ connected:
!trusted || *trusted;
});
- // See the very large comment in `case wopBuildDerivation:` in
+ // See the very large comment in `case WorkerProto::Op::BuildDerivation:` in
// `src/libstore/daemon.cc` that explains the trust model here.
//
// This condition mirrors that: that code enforces the "rules" outlined there;
diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc
index 6c4648b34..4fc197956 100644
--- a/src/libcmd/command.cc
+++ b/src/libcmd/command.cc
@@ -239,9 +239,7 @@ void MixProfile::updateProfile(const StorePath & storePath)
if (!store) throw Error("'--profile' is not supported for this Nix store");
auto profile2 = absPath(*profile);
switchLink(profile2,
- createGeneration(
- ref<LocalFSStore>(store),
- profile2, storePath));
+ createGeneration(*store, profile2, storePath));
}
void MixProfile::updateProfile(const BuiltPaths & buildables)
diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc
index ff3abd534..7f97364a1 100644
--- a/src/libcmd/common-eval-args.cc
+++ b/src/libcmd/common-eval-args.cc
@@ -165,7 +165,7 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s)
{
if (EvalSettings::isPseudoUrl(s)) {
auto storePath = fetchers::downloadTarball(
- state.store, EvalSettings::resolvePseudoUrl(s), "source", false).first.storePath;
+ state.store, EvalSettings::resolvePseudoUrl(s), "source", false).tree.storePath;
return state.rootPath(CanonPath(state.store->toRealPath(storePath)));
}
diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc
index a2b882355..10b077fb5 100644
--- a/src/libcmd/installables.cc
+++ b/src/libcmd/installables.cc
@@ -701,7 +701,7 @@ RawInstallablesCommand::RawInstallablesCommand()
{
addFlag({
.longName = "stdin",
- .description = "Read installables from the standard input.",
+ .description = "Read installables from the standard input. No default installable applied.",
.handler = {&readFromStdIn, true}
});
@@ -730,9 +730,9 @@ void RawInstallablesCommand::run(ref<Store> store)
while (std::cin >> word) {
rawInstallables.emplace_back(std::move(word));
}
+ } else {
+ applyDefaultInstallables(rawInstallables);
}
-
- applyDefaultInstallables(rawInstallables);
run(store, std::move(rawInstallables));
}
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 585670e69..71fd6e6e4 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -2620,7 +2620,7 @@ Strings EvalSettings::getDefaultNixPath()
{
Strings res;
auto add = [&](const Path & p, const std::string & s = std::string()) {
- if (pathExists(p)) {
+ if (pathAccessible(p)) {
if (s.empty()) {
res.push_back(p);
} else {
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
index 62b380929..8e41bdbd0 100644
--- a/src/libexpr/eval.hh
+++ b/src/libexpr/eval.hh
@@ -741,11 +741,18 @@ struct EvalSettings : Config
If set to `true`, the Nix evaluator will not allow access to any
files outside of the Nix search path (as set via the `NIX_PATH`
environment variable or the `-I` option), or to URIs outside of
- `allowed-uri`. The default is `false`.
+ [`allowed-uris`](../command-ref/conf-file.md#conf-allowed-uris).
+ The default is `false`.
)"};
Setting<bool> pureEval{this, false, "pure-eval",
- "Whether to restrict file system and network access to files specified by cryptographic hash."};
+ R"(
+ Pure evaluation mode ensures that the result of Nix expressions is fully determined by explicitly declared inputs, and not influenced by external state:
+
+ - Restrict file system and network access to files specified by cryptographic hash
+ - Disable [`builtins.currentSystem`](@docroot@/language/builtin-constants.md#builtins-currentSystem) and [`builtins.currentTime`](@docroot@/language/builtin-constants.md#builtins-currentTime)
+ )"
+ };
Setting<bool> enableImportFromDerivation{
this, true, "allow-import-from-derivation",
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
index 4d981712a..3b545fd84 100644
--- a/src/libexpr/parser.y
+++ b/src/libexpr/parser.y
@@ -793,7 +793,7 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
if (EvalSettings::isPseudoUrl(elem.second)) {
try {
auto storePath = fetchers::downloadTarball(
- store, EvalSettings::resolvePseudoUrl(elem.second), "source", false).first.storePath;
+ store, EvalSettings::resolvePseudoUrl(elem.second), "source", false).tree.storePath;
res = { true, store->toRealPath(storePath) };
} catch (FileTransferError & e) {
logWarning({
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index cfae1e5f8..5b2f7e8b7 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -6,7 +6,7 @@
#include "globals.hh"
#include "json-to-value.hh"
#include "names.hh"
-#include "references.hh"
+#include "path-references.hh"
#include "store-api.hh"
#include "util.hh"
#include "value-to-json.hh"
@@ -1152,16 +1152,14 @@ drvName, Bindings * attrs, Value & v)
if (i->value->type() == nNull) continue;
}
- if (i->name == state.sContentAddressed) {
- contentAddressed = state.forceBool(*i->value, noPos, context_below);
- if (contentAddressed)
- experimentalFeatureSettings.require(Xp::CaDerivations);
+ if (i->name == state.sContentAddressed && state.forceBool(*i->value, noPos, context_below)) {
+ contentAddressed = true;
+ experimentalFeatureSettings.require(Xp::CaDerivations);
}
- else if (i->name == state.sImpure) {
- isImpure = state.forceBool(*i->value, noPos, context_below);
- if (isImpure)
- experimentalFeatureSettings.require(Xp::ImpureDerivations);
+ else if (i->name == state.sImpure && state.forceBool(*i->value, noPos, context_below)) {
+ isImpure = true;
+ experimentalFeatureSettings.require(Xp::ImpureDerivations);
}
/* The `args' attribute is special: it supplies the
@@ -1503,7 +1501,7 @@ static RegisterPrimOp primop_storePath({
causes the path to be *copied* again to the Nix store, resulting
in a new path (e.g. `/nix/store/ld01dnzc…-source-source`).
- This function is not available in pure evaluation mode.
+ Not available in [pure evaluation mode](@docroot@/command-ref/conf-file.md#conf-pure-eval).
)",
.fun = prim_storePath,
});
@@ -3910,13 +3908,8 @@ static void prim_replaceStrings(EvalState & state, const PosIdx pos, Value * * a
for (auto elem : args[0]->listItems())
from.emplace_back(state.forceString(*elem, pos, "while evaluating one of the strings to replace passed to builtins.replaceStrings"));
- std::vector<std::pair<std::string, NixStringContext>> to;
- to.reserve(args[1]->listSize());
- for (auto elem : args[1]->listItems()) {
- NixStringContext ctx;
- auto s = state.forceString(*elem, ctx, pos, "while evaluating one of the replacement strings passed to builtins.replaceStrings");
- to.emplace_back(s, std::move(ctx));
- }
+ std::unordered_map<size_t, std::string> cache;
+ auto to = args[1]->listItems();
NixStringContext context;
auto s = state.forceString(*args[2], context, pos, "while evaluating the third argument passed to builtins.replaceStrings");
@@ -3927,10 +3920,19 @@ static void prim_replaceStrings(EvalState & state, const PosIdx pos, Value * * a
bool found = false;
auto i = from.begin();
auto j = to.begin();
- for (; i != from.end(); ++i, ++j)
+ size_t j_index = 0;
+ for (; i != from.end(); ++i, ++j, ++j_index)
if (s.compare(p, i->size(), *i) == 0) {
found = true;
- res += j->first;
+ auto v = cache.find(j_index);
+ if (v == cache.end()) {
+ NixStringContext ctx;
+ auto ts = state.forceString(**j, ctx, pos, "while evaluating one of the replacement strings passed to builtins.replaceStrings");
+ v = (cache.emplace(j_index, ts)).first;
+ for (auto& path : ctx)
+ context.insert(path);
+ }
+ res += v->second;
if (i->empty()) {
if (p < s.size())
res += s[p];
@@ -3938,9 +3940,6 @@ static void prim_replaceStrings(EvalState & state, const PosIdx pos, Value * * a
} else {
p += i->size();
}
- for (auto& path : j->second)
- context.insert(path);
- j->second.clear();
break;
}
if (!found) {
@@ -3958,7 +3957,11 @@ static RegisterPrimOp primop_replaceStrings({
.args = {"from", "to", "s"},
.doc = R"(
Given string *s*, replace every occurrence of the strings in *from*
- with the corresponding string in *to*. For example,
+ with the corresponding string in *to*.
+
+ The argument *to* is lazy, that is, it is only evaluated when its corresponding pattern in *from* is matched in the string *s*.
+
+ Example:
```nix
builtins.replaceStrings ["oo" "a"] ["a" "i"] "foobar"
@@ -4055,18 +4058,6 @@ static RegisterPrimOp primop_splitVersion({
RegisterPrimOp::PrimOps * RegisterPrimOp::primOps;
-RegisterPrimOp::RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun)
-{
- if (!primOps) primOps = new PrimOps;
- primOps->push_back({
- .name = name,
- .args = {},
- .arity = arity,
- .fun = fun,
- });
-}
-
-
RegisterPrimOp::RegisterPrimOp(Info && info)
{
if (!primOps) primOps = new PrimOps;
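To illustrate the `builtins.replaceStrings` laziness documented above (a sketch of the new behaviour, not part of the patch): a replacement is only forced when its pattern actually occurs in *s*, which is also why the updated test in `error_traces.cc` below uses a pattern that matches.

```nix
# The second replacement is never forced, because "zz" does not occur in "foobar":
builtins.replaceStrings [ "oo" "zz" ] [ "a" (throw "never evaluated") ] "foobar"
# => "fabar"
```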
diff --git a/src/libexpr/primops.hh b/src/libexpr/primops.hh
index 4ae73fe1f..73b7b866c 100644
--- a/src/libexpr/primops.hh
+++ b/src/libexpr/primops.hh
@@ -28,11 +28,6 @@ struct RegisterPrimOp
* will get called during EvalState initialization, so there
* may be primops not yet added and builtins is not yet sorted.
*/
- RegisterPrimOp(
- std::string name,
- size_t arity,
- PrimOpFun fun);
-
RegisterPrimOp(Info && info);
};
diff --git a/src/libexpr/primops/context.cc b/src/libexpr/primops/context.cc
index 07bf400cf..8b3468009 100644
--- a/src/libexpr/primops/context.cc
+++ b/src/libexpr/primops/context.cc
@@ -12,7 +12,11 @@ static void prim_unsafeDiscardStringContext(EvalState & state, const PosIdx pos,
v.mkString(*s);
}
-static RegisterPrimOp primop_unsafeDiscardStringContext("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext);
+static RegisterPrimOp primop_unsafeDiscardStringContext({
+ .name = "__unsafeDiscardStringContext",
+ .arity = 1,
+ .fun = prim_unsafeDiscardStringContext
+});
static void prim_hasContext(EvalState & state, const PosIdx pos, Value * * args, Value & v)
@@ -22,7 +26,16 @@ static void prim_hasContext(EvalState & state, const PosIdx pos, Value * * args,
v.mkBool(!context.empty());
}
-static RegisterPrimOp primop_hasContext("__hasContext", 1, prim_hasContext);
+static RegisterPrimOp primop_hasContext({
+ .name = "__hasContext",
+ .args = {"s"},
+ .doc = R"(
+ Return `true` if string *s* has a non-empty context. The
+ context can be obtained with
+ [`getContext`](#builtins-getContext).
+ )",
+ .fun = prim_hasContext
+});
/* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a
@@ -51,7 +64,11 @@ static void prim_unsafeDiscardOutputDependency(EvalState & state, const PosIdx p
v.mkString(*s, context2);
}
-static RegisterPrimOp primop_unsafeDiscardOutputDependency("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency);
+static RegisterPrimOp primop_unsafeDiscardOutputDependency({
+ .name = "__unsafeDiscardOutputDependency",
+ .arity = 1,
+ .fun = prim_unsafeDiscardOutputDependency
+});
/* Extract the context of a string as a structured Nix value.
@@ -119,7 +136,30 @@ static void prim_getContext(EvalState & state, const PosIdx pos, Value * * args,
v.mkAttrs(attrs);
}
-static RegisterPrimOp primop_getContext("__getContext", 1, prim_getContext);
+static RegisterPrimOp primop_getContext({
+ .name = "__getContext",
+ .args = {"s"},
+ .doc = R"(
+ Return the string context of *s*.
+
+ The string context tracks references to derivations within a string.
+ It is represented as an attribute set of [store derivation](@docroot@/glossary.md#gloss-store-derivation) paths mapping to output names.
+
+ Using [string interpolation](@docroot@/language/string-interpolation.md) on a derivation will add that derivation to the string context.
+ For example,
+
+ ```nix
+ builtins.getContext "${derivation { name = "a"; builder = "b"; system = "c"; }}"
+ ```
+
+ evaluates to
+
+ ```
+ { "/nix/store/arhvjaf6zmlyn8vh8fgn55rpwnxq0n7l-a.drv" = { outputs = [ "out" ]; }; }
+ ```
+ )",
+ .fun = prim_getContext
+});
/* Append the given context to a given string.
@@ -192,6 +232,10 @@ static void prim_appendContext(EvalState & state, const PosIdx pos, Value * * ar
v.mkString(orig, context);
}
-static RegisterPrimOp primop_appendContext("__appendContext", 2, prim_appendContext);
+static RegisterPrimOp primop_appendContext({
+ .name = "__appendContext",
+ .arity = 2,
+ .fun = prim_appendContext
+});
}
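A small usage sketch for the string-context builtins documented above (not part of the patch; it reuses the placeholder derivation from the `getContext` example, and the store path in the comment is illustrative):

```nix
let
  drv = derivation { name = "a"; builder = "b"; system = "c"; };
in {
  plain   = builtins.hasContext "hello";   # => false
  tainted = builtins.hasContext "${drv}";  # => true
  ctx     = builtins.getContext "${drv}";  # => { "/nix/store/…-a.drv" = { outputs = [ "out" ]; }; }
}
```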
diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc
index 2c0d98e74..322692b52 100644
--- a/src/libexpr/primops/fetchMercurial.cc
+++ b/src/libexpr/primops/fetchMercurial.cc
@@ -88,6 +88,10 @@ static void prim_fetchMercurial(EvalState & state, const PosIdx pos, Value * * a
state.allowPath(tree.storePath);
}
-static RegisterPrimOp r_fetchMercurial("fetchMercurial", 1, prim_fetchMercurial);
+static RegisterPrimOp r_fetchMercurial({
+ .name = "fetchMercurial",
+ .arity = 1,
+ .fun = prim_fetchMercurial
+});
}
diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc
index cd7039025..1d23ef53b 100644
--- a/src/libexpr/primops/fetchTree.cc
+++ b/src/libexpr/primops/fetchTree.cc
@@ -194,7 +194,11 @@ static void prim_fetchTree(EvalState & state, const PosIdx pos, Value * * args,
}
// FIXME: document
-static RegisterPrimOp primop_fetchTree("fetchTree", 1, prim_fetchTree);
+static RegisterPrimOp primop_fetchTree({
+ .name = "fetchTree",
+ .arity = 1,
+ .fun = prim_fetchTree
+});
static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v,
const std::string & who, bool unpack, std::string name)
@@ -262,7 +266,7 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v
// https://github.com/NixOS/nix/issues/4313
auto storePath =
unpack
- ? fetchers::downloadTarball(state.store, *url, name, (bool) expectedHash).first.storePath
+ ? fetchers::downloadTarball(state.store, *url, name, (bool) expectedHash).tree.storePath
: fetchers::downloadFile(state.store, *url, name, (bool) expectedHash).storePath;
if (expectedHash) {
@@ -286,9 +290,9 @@ static RegisterPrimOp primop_fetchurl({
.name = "__fetchurl",
.args = {"url"},
.doc = R"(
- Download the specified URL and return the path of the downloaded
- file. This function is not available if [restricted evaluation
- mode](../command-ref/conf-file.md) is enabled.
+ Download the specified URL and return the path of the downloaded file.
+
+ Not available in [restricted evaluation mode](@docroot@/command-ref/conf-file.md#conf-restrict-eval).
)",
.fun = prim_fetchurl,
});
@@ -338,8 +342,7 @@ static RegisterPrimOp primop_fetchTarball({
stdenv.mkDerivation { … }
```
- This function is not available if [restricted evaluation
- mode](../command-ref/conf-file.md) is enabled.
+ Not available in [restricted evaluation mode](@docroot@/command-ref/conf-file.md#conf-restrict-eval).
)",
.fun = prim_fetchTarball,
});
@@ -470,14 +473,9 @@ static RegisterPrimOp primop_fetchGit({
}
```
- > **Note**
- >
- > Nix will refetch the branch in accordance with
- > the option `tarball-ttl`.
+ Nix will refetch the branch according to the [`tarball-ttl`](@docroot@/command-ref/conf-file.md#conf-tarball-ttl) setting.
- > **Note**
- >
- > This behavior is disabled in *Pure evaluation mode*.
+ This behavior is disabled in [pure evaluation mode](@docroot@/command-ref/conf-file.md#conf-pure-eval).
- To fetch the content of a checked-out work directory:
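The `fetchurl`/`fetchTarball` docs above cover restricted evaluation; relatedly, in pure evaluation mode these fetchers require an expected hash. A sketch, not part of the patch (URL and hash are placeholders):

```nix
# Impure form; rejected in pure evaluation mode because nothing pins the result:
#   builtins.fetchurl "https://example.org/hello.txt"

# Pinned form, accepted in pure evaluation mode:
builtins.fetchurl {
  url = "https://example.org/hello.txt";
  sha256 = "0000000000000000000000000000000000000000000000000000";
}
```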
diff --git a/src/libexpr/primops/fromTOML.cc b/src/libexpr/primops/fromTOML.cc
index 8a5231781..2f4d4022e 100644
--- a/src/libexpr/primops/fromTOML.cc
+++ b/src/libexpr/primops/fromTOML.cc
@@ -3,6 +3,8 @@
#include "../../toml11/toml.hpp"
+#include <sstream>
+
namespace nix {
static void prim_fromTOML(EvalState & state, const PosIdx pos, Value * * args, Value & val)
@@ -58,8 +60,18 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value * * args, V
case toml::value_t::offset_datetime:
case toml::value_t::local_date:
case toml::value_t::local_time:
- // We fail since Nix doesn't have date and time types
- throw std::runtime_error("Dates and times are not supported");
+ {
+ if (experimentalFeatureSettings.isEnabled(Xp::ParseTomlTimestamps)) {
+ auto attrs = state.buildBindings(2);
+ attrs.alloc("_type").mkString("timestamp");
+ std::ostringstream s;
+ s << t;
+ attrs.alloc("value").mkString(s.str());
+ v.mkAttrs(attrs);
+ } else {
+ throw std::runtime_error("Dates and times are not supported");
+ }
+ }
break;;
case toml::value_t::empty:
v.mkNull();
@@ -78,6 +90,24 @@ static void prim_fromTOML(EvalState & state, const PosIdx pos, Value * * args, V
}
}
-static RegisterPrimOp primop_fromTOML("fromTOML", 1, prim_fromTOML);
+static RegisterPrimOp primop_fromTOML({
+ .name = "fromTOML",
+ .args = {"e"},
+ .doc = R"(
+ Convert a TOML string to a Nix value. For example,
+
+ ```nix
+ builtins.fromTOML ''
+ x=1
+ s="a"
+ [table]
+ y=2
+ ''
+ ```
+
+ returns the value `{ s = "a"; table = { y = 2; }; x = 1; }`.
+ )",
+ .fun = prim_fromTOML
+});
}
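Based on the new timestamp branch above: with the `parse-toml-timestamps` experimental feature (`Xp::ParseTomlTimestamps`) enabled, TOML date/time values are returned as tagged attribute sets instead of failing. A sketch of the expected shape, not part of the patch:

```nix
builtins.fromTOML ''
  deadline = 1979-05-27T07:32:00Z
''
# With the feature enabled, roughly:
#   { deadline = { _type = "timestamp"; value = "1979-05-27T07:32:00Z"; }; }
# Without it, evaluation fails with "Dates and times are not supported".
```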
diff --git a/src/libexpr/tests/error_traces.cc b/src/libexpr/tests/error_traces.cc
index 24e95ac39..285651256 100644
--- a/src/libexpr/tests/error_traces.cc
+++ b/src/libexpr/tests/error_traces.cc
@@ -171,7 +171,7 @@ namespace nix {
hintfmt("value is %s while a string was expected", "an integer"),
hintfmt("while evaluating one of the strings to replace passed to builtins.replaceStrings"));
- ASSERT_TRACE2("replaceStrings [ \"old\" ] [ true ] {}",
+ ASSERT_TRACE2("replaceStrings [ \"oo\" ] [ true ] \"foo\"",
TypeError,
hintfmt("value is %s while a string was expected", "a Boolean"),
hintfmt("while evaluating one of the replacement strings passed to builtins.replaceStrings"));
diff --git a/src/libfetchers/attrs.hh b/src/libfetchers/attrs.hh
index 1a14bb023..9f885a793 100644
--- a/src/libfetchers/attrs.hh
+++ b/src/libfetchers/attrs.hh
@@ -2,6 +2,7 @@
///@file
#include "types.hh"
+#include "hash.hh"
#include <variant>
diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc
index 91db3a9eb..2860c1ceb 100644
--- a/src/libfetchers/fetchers.cc
+++ b/src/libfetchers/fetchers.cc
@@ -159,6 +159,12 @@ std::pair<Tree, Input> Input::fetch(ref<Store> store) const
input.to_string(), *prevLastModified);
}
+ if (auto prevRev = getRev()) {
+ if (input.getRev() != prevRev)
+ throw Error("'rev' attribute mismatch in input '%s', expected %s",
+ input.to_string(), prevRev->gitRev());
+ }
+
if (auto prevRevCount = getRevCount()) {
if (input.getRevCount() != prevRevCount)
throw Error("'revCount' attribute mismatch in input '%s', expected %d",
diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh
index 498ad7e4d..d0738f619 100644
--- a/src/libfetchers/fetchers.hh
+++ b/src/libfetchers/fetchers.hh
@@ -158,6 +158,7 @@ struct DownloadFileResult
StorePath storePath;
std::string etag;
std::string effectiveUrl;
+ std::optional<std::string> immutableUrl;
};
DownloadFileResult downloadFile(
@@ -167,7 +168,14 @@ DownloadFileResult downloadFile(
bool locked,
const Headers & headers = {});
-std::pair<Tree, time_t> downloadTarball(
+struct DownloadTarballResult
+{
+ Tree tree;
+ time_t lastModified;
+ std::optional<std::string> immutableUrl;
+};
+
+DownloadTarballResult downloadTarball(
ref<Store> store,
const std::string & url,
const std::string & name,
diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc
index 6c1d573ce..80598e7f8 100644
--- a/src/libfetchers/github.cc
+++ b/src/libfetchers/github.cc
@@ -207,21 +207,21 @@ struct GitArchiveInputScheme : InputScheme
auto url = getDownloadUrl(input);
- auto [tree, lastModified] = downloadTarball(store, url.url, input.getName(), true, url.headers);
+ auto result = downloadTarball(store, url.url, input.getName(), true, url.headers);
- input.attrs.insert_or_assign("lastModified", uint64_t(lastModified));
+ input.attrs.insert_or_assign("lastModified", uint64_t(result.lastModified));
getCache()->add(
store,
lockedAttrs,
{
{"rev", rev->gitRev()},
- {"lastModified", uint64_t(lastModified)}
+ {"lastModified", uint64_t(result.lastModified)}
},
- tree.storePath,
+ result.tree.storePath,
true);
- return {std::move(tree.storePath), input};
+ return {result.tree.storePath, input};
}
};
diff --git a/src/libfetchers/input-accessor.cc b/src/libfetchers/input-accessor.cc
index f9909c218..f37a8058b 100644
--- a/src/libfetchers/input-accessor.cc
+++ b/src/libfetchers/input-accessor.cc
@@ -75,22 +75,28 @@ SourcePath SourcePath::resolveSymlinks() const
int linksAllowed = 1024;
- for (auto & component : path) {
- res.path.push(component);
- while (true) {
- if (auto st = res.maybeLstat()) {
+ std::list<std::string> todo;
+ for (auto & c : path)
+ todo.push_back(std::string(c));
+
+ while (!todo.empty()) {
+ auto c = *todo.begin();
+ todo.pop_front();
+ if (c == "" || c == ".")
+ ;
+ else if (c == "..")
+ res.path.pop();
+ else {
+ res.path.push(c);
+ if (auto st = res.maybeLstat(); st && st->type == InputAccessor::tSymlink) {
if (!linksAllowed--)
throw Error("infinite symlink recursion in path '%s'", path);
- if (st->type != InputAccessor::tSymlink) break;
auto target = res.readLink();
+ res.path.pop();
if (hasPrefix(target, "/"))
- res = CanonPath(target);
- else {
- res.path.pop();
- res.path.extend(CanonPath(target));
- }
- } else
- break;
+ res.path = CanonPath::root;
+ todo.splice(todo.begin(), tokenizeString<std::list<std::string>>(target, "/"));
+ }
}
}
diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc
index 96fe5faca..e42aca6db 100644
--- a/src/libfetchers/tarball.cc
+++ b/src/libfetchers/tarball.cc
@@ -32,7 +32,8 @@ DownloadFileResult downloadFile(
return {
.storePath = std::move(cached->storePath),
.etag = getStrAttr(cached->infoAttrs, "etag"),
- .effectiveUrl = getStrAttr(cached->infoAttrs, "url")
+ .effectiveUrl = getStrAttr(cached->infoAttrs, "url"),
+ .immutableUrl = maybeGetStrAttr(cached->infoAttrs, "immutableUrl"),
};
};
@@ -55,12 +56,14 @@ DownloadFileResult downloadFile(
}
// FIXME: write to temporary file.
-
Attrs infoAttrs({
{"etag", res.etag},
{"url", res.effectiveUri},
});
+ if (res.immutableUrl)
+ infoAttrs.emplace("immutableUrl", *res.immutableUrl);
+
std::optional<StorePath> storePath;
if (res.cached) {
@@ -111,10 +114,11 @@ DownloadFileResult downloadFile(
.storePath = std::move(*storePath),
.etag = res.etag,
.effectiveUrl = res.effectiveUri,
+ .immutableUrl = res.immutableUrl,
};
}
-std::pair<Tree, time_t> downloadTarball(
+DownloadTarballResult downloadTarball(
ref<Store> store,
const std::string & url,
const std::string & name,
@@ -131,8 +135,9 @@ std::pair<Tree, time_t> downloadTarball(
if (cached && !cached->expired)
return {
- Tree { .actualPath = store->toRealPath(cached->storePath), .storePath = std::move(cached->storePath) },
- getIntAttr(cached->infoAttrs, "lastModified")
+ .tree = Tree { .actualPath = store->toRealPath(cached->storePath), .storePath = std::move(cached->storePath) },
+ .lastModified = (time_t) getIntAttr(cached->infoAttrs, "lastModified"),
+ .immutableUrl = maybeGetStrAttr(cached->infoAttrs, "immutableUrl"),
};
auto res = downloadFile(store, url, name, locked, headers);
@@ -160,6 +165,9 @@ std::pair<Tree, time_t> downloadTarball(
{"etag", res.etag},
});
+ if (res.immutableUrl)
+ infoAttrs.emplace("immutableUrl", *res.immutableUrl);
+
getCache()->add(
store,
inAttrs,
@@ -168,8 +176,9 @@ std::pair<Tree, time_t> downloadTarball(
locked);
return {
- Tree { .actualPath = store->toRealPath(*unpackedStorePath), .storePath = std::move(*unpackedStorePath) },
- lastModified,
+ .tree = Tree { .actualPath = store->toRealPath(*unpackedStorePath), .storePath = std::move(*unpackedStorePath) },
+ .lastModified = lastModified,
+ .immutableUrl = res.immutableUrl,
};
}
@@ -189,21 +198,33 @@ struct CurlInputScheme : InputScheme
virtual bool isValidURL(const ParsedURL & url) const = 0;
- std::optional<Input> inputFromURL(const ParsedURL & url) const override
+ std::optional<Input> inputFromURL(const ParsedURL & _url) const override
{
- if (!isValidURL(url))
+ if (!isValidURL(_url))
return std::nullopt;
Input input;
- auto urlWithoutApplicationScheme = url;
- urlWithoutApplicationScheme.scheme = parseUrlScheme(url.scheme).transport;
+ auto url = _url;
+
+ url.scheme = parseUrlScheme(url.scheme).transport;
- input.attrs.insert_or_assign("type", inputType());
- input.attrs.insert_or_assign("url", urlWithoutApplicationScheme.to_string());
auto narHash = url.query.find("narHash");
if (narHash != url.query.end())
input.attrs.insert_or_assign("narHash", narHash->second);
+
+ if (auto i = get(url.query, "rev"))
+ input.attrs.insert_or_assign("rev", *i);
+
+ if (auto i = get(url.query, "revCount"))
+ if (auto n = string2Int<uint64_t>(*i))
+ input.attrs.insert_or_assign("revCount", *n);
+
+ url.query.erase("rev");
+ url.query.erase("revCount");
+
+ input.attrs.insert_or_assign("type", inputType());
+ input.attrs.insert_or_assign("url", url.to_string());
return input;
}
@@ -212,7 +233,8 @@ struct CurlInputScheme : InputScheme
auto type = maybeGetStrAttr(attrs, "type");
if (type != inputType()) return {};
- std::set<std::string> allowedNames = {"type", "url", "narHash", "name", "unpack"};
+ // FIXME: some of these only apply to TarballInputScheme.
+ std::set<std::string> allowedNames = {"type", "url", "narHash", "name", "unpack", "rev", "revCount"};
for (auto & [name, value] : attrs)
if (!allowedNames.count(name))
throw Error("unsupported %s input attribute '%s'", *type, name);
@@ -275,10 +297,22 @@ struct TarballInputScheme : CurlInputScheme
: hasTarballExtension(url.path));
}
- std::pair<StorePath, Input> fetch(ref<Store> store, const Input & input) override
+ std::pair<StorePath, Input> fetch(ref<Store> store, const Input & _input) override
{
- auto tree = downloadTarball(store, getStrAttr(input.attrs, "url"), input.getName(), false).first;
- return {std::move(tree.storePath), input};
+ Input input(_input);
+ auto url = getStrAttr(input.attrs, "url");
+ auto result = downloadTarball(store, url, input.getName(), false);
+
+ if (result.immutableUrl) {
+ auto immutableInput = Input::fromURL(*result.immutableUrl);
+ // FIXME: would be nice to support arbitrary flakerefs
+ // here, e.g. git flakes.
+ if (immutableInput.getType() != "tarball")
+ throw Error("tarball 'Link' headers that redirect to non-tarball URLs are not supported");
+ input = immutableInput;
+ }
+
+ return {result.tree.storePath, std::move(input)};
}
};
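A usage sketch for the `CurlInputScheme` changes above (not part of the patch; the URL is hypothetical, and depending on the Nix version `fetchTree` may itself be gated behind an experimental feature): `rev` and `revCount` query parameters are now parsed off tarball/file URLs and stored as input attributes, and an immutable `Link` header returned by the server (see the `filetransfer.cc` changes below) can replace the input with a locked tarball URL.

```nix
# rev/revCount are stripped from the query string and recorded as input
# attributes (the URL and the all-zero rev are placeholders):
builtins.fetchTree
  "https://example.org/src.tar.gz?rev=0000000000000000000000000000000000000000&revCount=1234"
```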
diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc
index df7d21e54..5e37f7ecb 100644
--- a/src/libstore/build/derivation-goal.cc
+++ b/src/libstore/build/derivation-goal.cc
@@ -9,6 +9,7 @@
#include "archive.hh"
#include "compression.hh"
#include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "local-store.hh" // TODO remove, along with remaining downcasts
@@ -1150,9 +1151,11 @@ HookReply DerivationGoal::tryBuildHook()
throw;
}
+ WorkerProto::WriteConn conn { hook->sink };
+
/* Tell the hook all the inputs that have to be copied to the
remote system. */
- workerProtoWrite(worker.store, hook->sink, inputPaths);
+ WorkerProto::write(worker.store, conn, inputPaths);
/* Tell the hooks the missing outputs that have to be copied back
from the remote system. */
@@ -1163,7 +1166,7 @@ HookReply DerivationGoal::tryBuildHook()
if (buildMode != bmCheck && status.known && status.known->isValid()) continue;
missingOutputs.insert(outputName);
}
- workerProtoWrite(worker.store, hook->sink, missingOutputs);
+ WorkerProto::write(worker.store, conn, missingOutputs);
}
hook->sink = FdSink();
diff --git a/src/libstore/build/hook-instance.cc b/src/libstore/build/hook-instance.cc
index 075ad554f..337c60bd4 100644
--- a/src/libstore/build/hook-instance.cc
+++ b/src/libstore/build/hook-instance.cc
@@ -5,14 +5,14 @@ namespace nix {
HookInstance::HookInstance()
{
- debug("starting build hook '%s'", settings.buildHook);
+ debug("starting build hook '%s'", concatStringsSep(" ", settings.buildHook.get()));
- auto buildHookArgs = tokenizeString<std::list<std::string>>(settings.buildHook.get());
+ auto buildHookArgs = settings.buildHook.get();
if (buildHookArgs.empty())
throw Error("'build-hook' setting is empty");
- auto buildHook = buildHookArgs.front();
+ auto buildHook = canonPath(buildHookArgs.front());
buildHookArgs.pop_front();
Strings args;
diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc
index 05d6685da..ea7c52098 100644
--- a/src/libstore/build/local-derivation-goal.cc
+++ b/src/libstore/build/local-derivation-goal.cc
@@ -4,13 +4,12 @@
#include "worker.hh"
#include "builtins.hh"
#include "builtins/buildenv.hh"
-#include "references.hh"
+#include "path-references.hh"
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
#include "compression.hh"
#include "daemon.hh"
-#include "worker-protocol.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "json-utils.hh"
@@ -65,8 +64,9 @@ void handleDiffHook(
const Path & tryA, const Path & tryB,
const Path & drvPath, const Path & tmpDir)
{
- auto diffHook = settings.diffHook;
- if (diffHook != "" && settings.runDiffHook) {
+ auto & diffHookOpt = settings.diffHook.get();
+ if (diffHookOpt && settings.runDiffHook) {
+ auto & diffHook = *diffHookOpt;
try {
auto diffRes = runProgram(RunOptions {
.program = diffHook,
@@ -357,7 +357,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull()
for (auto & [_, status] : initialOutputs) {
if (!status.known) continue;
if (buildMode != bmCheck && status.known->isValid()) continue;
- auto p = worker.store.printStorePath(status.known->path);
+ auto p = worker.store.toRealPath(status.known->path);
if (pathExists(chrootRootDir + p))
renameFile((chrootRootDir + p), p);
}
@@ -1428,7 +1428,8 @@ void LocalDerivationGoal::startDaemon()
Store::Params params;
params["path-info-cache-size"] = "0";
params["store"] = worker.store.storeDir;
- params["root"] = getLocalStore().rootDir;
+ if (auto & optRoot = getLocalStore().rootDir.get())
+ params["root"] = *optRoot;
params["state"] = "/no-such-path";
params["log"] = "/no-such-path";
auto store = make_ref<RestrictedStore>(params,
@@ -1457,7 +1458,7 @@ void LocalDerivationGoal::startDaemon()
(struct sockaddr *) &remoteAddr, &remoteAddrLen);
if (!remote) {
if (errno == EINTR || errno == EAGAIN) continue;
- if (errno == EINVAL) break;
+ if (errno == EINVAL || errno == ECONNABORTED) break;
throw SysError("accepting connection");
}
@@ -1487,8 +1488,22 @@ void LocalDerivationGoal::startDaemon()
void LocalDerivationGoal::stopDaemon()
{
- if (daemonSocket && shutdown(daemonSocket.get(), SHUT_RDWR) == -1)
- throw SysError("shutting down daemon socket");
+ if (daemonSocket && shutdown(daemonSocket.get(), SHUT_RDWR) == -1) {
+ // According to the POSIX standard, the 'shutdown' function should
+ // return an ENOTCONN error when attempting to shut down a socket that
+ // hasn't been connected yet. This situation occurs when the 'accept'
+ // function is called on a socket without any accepted connections,
+ // leaving the socket unconnected. While Linux doesn't seem to produce
+ // an error for sockets that have only been accepted, more
+ // POSIX-compliant operating systems like OpenBSD, macOS, and others do
+ // return the ENOTCONN error. Therefore, we handle this error here to
+ // avoid raising an exception for compliant behaviour.
+ if (errno == ENOTCONN) {
+ daemonSocket.close();
+ } else {
+ throw SysError("shutting down daemon socket");
+ }
+ }
if (daemonThread.joinable())
daemonThread.join();
@@ -1499,7 +1514,8 @@ void LocalDerivationGoal::stopDaemon()
thread.join();
daemonWorkerThreads.clear();
- daemonSocket = -1;
+ // release the socket.
+ daemonSocket.close();
}
@@ -2379,18 +2395,21 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
continue;
auto references = *referencesOpt;
- auto rewriteOutput = [&]() {
+ auto rewriteOutput = [&](const StringMap & rewrites) {
/* Apply hash rewriting if necessary. */
- if (!outputRewrites.empty()) {
+ if (!rewrites.empty()) {
debug("rewriting hashes in '%1%'; cross fingers", actualPath);
- /* FIXME: this is in-memory. */
- StringSink sink;
- dumpPath(actualPath, sink);
+ /* FIXME: Is this actually streaming? */
+ auto source = sinkToSource([&](Sink & nextSink) {
+ RewritingSink rsink(rewrites, nextSink);
+ dumpPath(actualPath, rsink);
+ rsink.flush();
+ });
+ Path tmpPath = actualPath + ".tmp";
+ restorePath(tmpPath, *source);
deletePath(actualPath);
- sink.s = rewriteStrings(sink.s, outputRewrites);
- StringSource source(sink.s);
- restorePath(actualPath, source);
+ movePath(tmpPath, actualPath);
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
@@ -2439,7 +2458,7 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
"since recursive hashing is not enabled (one of outputHashMode={flat,text} is true)",
actualPath);
}
- rewriteOutput();
+ rewriteOutput(outputRewrites);
/* FIXME optimize and deduplicate with addToStore */
std::string oldHashPart { scratchPath->hashPart() };
HashModuloSink caSink { outputHash.hashType, oldHashPart };
@@ -2477,16 +2496,14 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
Hash::dummy,
};
if (*scratchPath != newInfo0.path) {
- // Also rewrite the output path
- auto source = sinkToSource([&](Sink & nextSink) {
- RewritingSink rsink2(oldHashPart, std::string(newInfo0.path.hashPart()), nextSink);
- dumpPath(actualPath, rsink2);
- rsink2.flush();
- });
- Path tmpPath = actualPath + ".tmp";
- restorePath(tmpPath, *source);
- deletePath(actualPath);
- movePath(tmpPath, actualPath);
+ // If the path has some self-references, we need to rewrite
+ // them.
+ // (note that this doesn't invalidate the ca hash we calculated
+ // above because it's computed *modulo the self-references*, so
+ // it already takes this rewrite into account).
+ rewriteOutput(
+ StringMap{{oldHashPart,
+ std::string(newInfo0.path.hashPart())}});
}
HashResult narHashAndSize = hashPath(htSHA256, actualPath);
@@ -2508,7 +2525,7 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs()
outputRewrites.insert_or_assign(
std::string { scratchPath->hashPart() },
std::string { requiredFinalPath.hashPart() });
- rewriteOutput();
+ rewriteOutput(outputRewrites);
auto narHashAndSize = hashPath(htSHA256, actualPath);
ValidPathInfo newInfo0 { requiredFinalPath, narHashAndSize.first };
newInfo0.narSize = narHashAndSize.second;
diff --git a/src/libstore/build/personality.cc b/src/libstore/build/personality.cc
index 4ad477869..1a6201758 100644
--- a/src/libstore/build/personality.cc
+++ b/src/libstore/build/personality.cc
@@ -21,7 +21,8 @@ void setPersonality(std::string_view system)
&& (std::string_view(SYSTEM) == "x86_64-linux"
|| (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
|| system == "armv7l-linux"
- || system == "armv6l-linux")
+ || system == "armv6l-linux"
+ || system == "armv5tel-linux")
{
if (personality(PER_LINUX32) == -1)
throw SysError("cannot set 32-bit personality");
diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc
index b6dd83684..75c3d2aca 100644
--- a/src/libstore/daemon.cc
+++ b/src/libstore/daemon.cc
@@ -1,6 +1,7 @@
#include "daemon.hh"
#include "monitor-fd.hh"
#include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
#include "build-result.hh"
#include "store-api.hh"
#include "store-cast.hh"
@@ -259,13 +260,13 @@ struct ClientSettings
}
};
-static std::vector<DerivedPath> readDerivedPaths(Store & store, unsigned int clientVersion, Source & from)
+static std::vector<DerivedPath> readDerivedPaths(Store & store, unsigned int clientVersion, WorkerProto::ReadConn conn)
{
std::vector<DerivedPath> reqs;
if (GET_PROTOCOL_MINOR(clientVersion) >= 30) {
- reqs = WorkerProto<std::vector<DerivedPath>>::read(store, from);
+ reqs = WorkerProto::Serialise<std::vector<DerivedPath>>::read(store, conn);
} else {
- for (auto & s : readStrings<Strings>(from))
+ for (auto & s : readStrings<Strings>(conn.from))
reqs.push_back(parsePathWithOutputs(store, s).toDerivedPath());
}
return reqs;
@@ -273,11 +274,14 @@ static std::vector<DerivedPath> readDerivedPaths(Store & store, unsigned int cli
static void performOp(TunnelLogger * logger, ref<Store> store,
TrustedFlag trusted, RecursiveFlag recursive, unsigned int clientVersion,
- Source & from, BufferedSink & to, unsigned int op)
+ Source & from, BufferedSink & to, WorkerProto::Op op)
{
+ WorkerProto::ReadConn rconn { .from = from };
+ WorkerProto::WriteConn wconn { .to = to };
+
switch (op) {
- case wopIsValidPath: {
+ case WorkerProto::Op::IsValidPath: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
bool result = store->isValidPath(path);
@@ -286,8 +290,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopQueryValidPaths: {
- auto paths = WorkerProto<StorePathSet>::read(*store, from);
+ case WorkerProto::Op::QueryValidPaths: {
+ auto paths = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
SubstituteFlag substitute = NoSubstitute;
if (GET_PROTOCOL_MINOR(clientVersion) >= 27) {
@@ -300,11 +304,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
auto res = store->queryValidPaths(paths, substitute);
logger->stopWork();
- workerProtoWrite(*store, to, res);
+ WorkerProto::write(*store, wconn, res);
break;
}
- case wopHasSubstitutes: {
+ case WorkerProto::Op::HasSubstitutes: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
StorePathSet paths; // FIXME
@@ -315,16 +319,16 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopQuerySubstitutablePaths: {
- auto paths = WorkerProto<StorePathSet>::read(*store, from);
+ case WorkerProto::Op::QuerySubstitutablePaths: {
+ auto paths = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
logger->startWork();
auto res = store->querySubstitutablePaths(paths);
logger->stopWork();
- workerProtoWrite(*store, to, res);
+ WorkerProto::write(*store, wconn, res);
break;
}
- case wopQueryPathHash: {
+ case WorkerProto::Op::QueryPathHash: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
auto hash = store->queryPathInfo(path)->narHash;
@@ -333,27 +337,27 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopQueryReferences:
- case wopQueryReferrers:
- case wopQueryValidDerivers:
- case wopQueryDerivationOutputs: {
+ case WorkerProto::Op::QueryReferences:
+ case WorkerProto::Op::QueryReferrers:
+ case WorkerProto::Op::QueryValidDerivers:
+ case WorkerProto::Op::QueryDerivationOutputs: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
StorePathSet paths;
- if (op == wopQueryReferences)
+ if (op == WorkerProto::Op::QueryReferences)
for (auto & i : store->queryPathInfo(path)->references)
paths.insert(i);
- else if (op == wopQueryReferrers)
+ else if (op == WorkerProto::Op::QueryReferrers)
store->queryReferrers(path, paths);
- else if (op == wopQueryValidDerivers)
+ else if (op == WorkerProto::Op::QueryValidDerivers)
paths = store->queryValidDerivers(path);
else paths = store->queryDerivationOutputs(path);
logger->stopWork();
- workerProtoWrite(*store, to, paths);
+ WorkerProto::write(*store, wconn, paths);
break;
}
- case wopQueryDerivationOutputNames: {
+ case WorkerProto::Op::QueryDerivationOutputNames: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
auto names = store->readDerivation(path).outputNames();
@@ -362,16 +366,16 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopQueryDerivationOutputMap: {
+ case WorkerProto::Op::QueryDerivationOutputMap: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
auto outputs = store->queryPartialDerivationOutputMap(path);
logger->stopWork();
- workerProtoWrite(*store, to, outputs);
+ WorkerProto::write(*store, wconn, outputs);
break;
}
- case wopQueryDeriver: {
+ case WorkerProto::Op::QueryDeriver: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
auto info = store->queryPathInfo(path);
@@ -380,7 +384,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopQueryPathFromHashPart: {
+ case WorkerProto::Op::QueryPathFromHashPart: {
auto hashPart = readString(from);
logger->startWork();
auto path = store->queryPathFromHashPart(hashPart);
@@ -389,11 +393,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopAddToStore: {
+ case WorkerProto::Op::AddToStore: {
if (GET_PROTOCOL_MINOR(clientVersion) >= 25) {
auto name = readString(from);
auto camStr = readString(from);
- auto refs = WorkerProto<StorePathSet>::read(*store, from);
+ auto refs = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
bool repairBool;
from >> repairBool;
auto repair = RepairFlag{repairBool};
@@ -475,7 +479,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopAddMultipleToStore: {
+ case WorkerProto::Op::AddMultipleToStore: {
bool repair, dontCheckSigs;
from >> repair >> dontCheckSigs;
if (!trusted && dontCheckSigs)
@@ -492,10 +496,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopAddTextToStore: {
+ case WorkerProto::Op::AddTextToStore: {
std::string suffix = readString(from);
std::string s = readString(from);
- auto refs = WorkerProto<StorePathSet>::read(*store, from);
+ auto refs = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
logger->startWork();
auto path = store->addTextToStore(suffix, s, refs, NoRepair);
logger->stopWork();
@@ -503,7 +507,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopExportPath: {
+ case WorkerProto::Op::ExportPath: {
auto path = store->parseStorePath(readString(from));
readInt(from); // obsolete
logger->startWork();
@@ -514,7 +518,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopImportPaths: {
+ case WorkerProto::Op::ImportPaths: {
logger->startWork();
TunnelSource source(from, to);
auto paths = store->importPaths(source,
@@ -526,8 +530,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopBuildPaths: {
- auto drvs = readDerivedPaths(*store, clientVersion, from);
+ case WorkerProto::Op::BuildPaths: {
+ auto drvs = readDerivedPaths(*store, clientVersion, rconn);
BuildMode mode = bmNormal;
if (GET_PROTOCOL_MINOR(clientVersion) >= 15) {
mode = (BuildMode) readInt(from);
@@ -551,8 +555,8 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopBuildPathsWithResults: {
- auto drvs = readDerivedPaths(*store, clientVersion, from);
+ case WorkerProto::Op::BuildPathsWithResults: {
+ auto drvs = readDerivedPaths(*store, clientVersion, rconn);
BuildMode mode = bmNormal;
mode = (BuildMode) readInt(from);
@@ -567,12 +571,12 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
auto results = store->buildPathsWithResults(drvs, mode);
logger->stopWork();
- workerProtoWrite(*store, to, results);
+ WorkerProto::write(*store, wconn, results);
break;
}
- case wopBuildDerivation: {
+ case WorkerProto::Op::BuildDerivation: {
auto drvPath = store->parseStorePath(readString(from));
BasicDerivation drv;
readDerivation(from, *store, drv, Derivation::nameFromPath(drvPath));
@@ -644,12 +648,12 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
DrvOutputs builtOutputs;
for (auto & [output, realisation] : res.builtOutputs)
builtOutputs.insert_or_assign(realisation.id, realisation);
- workerProtoWrite(*store, to, builtOutputs);
+ WorkerProto::write(*store, wconn, builtOutputs);
}
break;
}
- case wopEnsurePath: {
+ case WorkerProto::Op::EnsurePath: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
store->ensurePath(path);
@@ -658,7 +662,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopAddTempRoot: {
+ case WorkerProto::Op::AddTempRoot: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
store->addTempRoot(path);
@@ -667,7 +671,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopAddIndirectRoot: {
+ case WorkerProto::Op::AddIndirectRoot: {
Path path = absPath(readString(from));
logger->startWork();
@@ -680,14 +684,14 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
// Obsolete.
- case wopSyncWithGC: {
+ case WorkerProto::Op::SyncWithGC: {
logger->startWork();
logger->stopWork();
to << 1;
break;
}
- case wopFindRoots: {
+ case WorkerProto::Op::FindRoots: {
logger->startWork();
auto & gcStore = require<GcStore>(*store);
Roots roots = gcStore.findRoots(!trusted);
@@ -706,10 +710,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopCollectGarbage: {
+ case WorkerProto::Op::CollectGarbage: {
GCOptions options;
options.action = (GCOptions::GCAction) readInt(from);
- options.pathsToDelete = WorkerProto<StorePathSet>::read(*store, from);
+ options.pathsToDelete = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
from >> options.ignoreLiveness >> options.maxFreed;
// obsolete fields
readInt(from);
@@ -730,7 +734,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopSetOptions: {
+ case WorkerProto::Op::SetOptions: {
ClientSettings clientSettings;
@@ -767,7 +771,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopQuerySubstitutablePathInfo: {
+ case WorkerProto::Op::QuerySubstitutablePathInfo: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
SubstitutablePathInfos infos;
@@ -779,22 +783,22 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
else {
to << 1
<< (i->second.deriver ? store->printStorePath(*i->second.deriver) : "");
- workerProtoWrite(*store, to, i->second.references);
+ WorkerProto::write(*store, wconn, i->second.references);
to << i->second.downloadSize
<< i->second.narSize;
}
break;
}
- case wopQuerySubstitutablePathInfos: {
+ case WorkerProto::Op::QuerySubstitutablePathInfos: {
SubstitutablePathInfos infos;
StorePathCAMap pathsMap = {};
if (GET_PROTOCOL_MINOR(clientVersion) < 22) {
- auto paths = WorkerProto<StorePathSet>::read(*store, from);
+ auto paths = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
for (auto & path : paths)
pathsMap.emplace(path, std::nullopt);
} else
- pathsMap = WorkerProto<StorePathCAMap>::read(*store, from);
+ pathsMap = WorkerProto::Serialise<StorePathCAMap>::read(*store, rconn);
logger->startWork();
store->querySubstitutablePathInfos(pathsMap, infos);
logger->stopWork();
@@ -802,21 +806,21 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
for (auto & i : infos) {
to << store->printStorePath(i.first)
<< (i.second.deriver ? store->printStorePath(*i.second.deriver) : "");
- workerProtoWrite(*store, to, i.second.references);
+ WorkerProto::write(*store, wconn, i.second.references);
to << i.second.downloadSize << i.second.narSize;
}
break;
}
- case wopQueryAllValidPaths: {
+ case WorkerProto::Op::QueryAllValidPaths: {
logger->startWork();
auto paths = store->queryAllValidPaths();
logger->stopWork();
- workerProtoWrite(*store, to, paths);
+ WorkerProto::write(*store, wconn, paths);
break;
}
- case wopQueryPathInfo: {
+ case WorkerProto::Op::QueryPathInfo: {
auto path = store->parseStorePath(readString(from));
std::shared_ptr<const ValidPathInfo> info;
logger->startWork();
@@ -837,14 +841,14 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopOptimiseStore:
+ case WorkerProto::Op::OptimiseStore:
logger->startWork();
store->optimiseStore();
logger->stopWork();
to << 1;
break;
- case wopVerifyStore: {
+ case WorkerProto::Op::VerifyStore: {
bool checkContents, repair;
from >> checkContents >> repair;
logger->startWork();
@@ -856,7 +860,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopAddSignatures: {
+ case WorkerProto::Op::AddSignatures: {
auto path = store->parseStorePath(readString(from));
StringSet sigs = readStrings<StringSet>(from);
logger->startWork();
@@ -868,7 +872,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopNarFromPath: {
+ case WorkerProto::Op::NarFromPath: {
auto path = store->parseStorePath(readString(from));
logger->startWork();
logger->stopWork();
@@ -876,7 +880,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopAddToStoreNar: {
+ case WorkerProto::Op::AddToStoreNar: {
bool repair, dontCheckSigs;
auto path = store->parseStorePath(readString(from));
auto deriver = readString(from);
@@ -884,7 +888,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
ValidPathInfo info { path, narHash };
if (deriver != "")
info.deriver = store->parseStorePath(deriver);
- info.references = WorkerProto<StorePathSet>::read(*store, from);
+ info.references = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
from >> info.registrationTime >> info.narSize >> info.ultimate;
info.sigs = readStrings<StringSet>(from);
info.ca = ContentAddress::parseOpt(readString(from));
@@ -928,21 +932,21 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
- case wopQueryMissing: {
- auto targets = readDerivedPaths(*store, clientVersion, from);
+ case WorkerProto::Op::QueryMissing: {
+ auto targets = readDerivedPaths(*store, clientVersion, rconn);
logger->startWork();
StorePathSet willBuild, willSubstitute, unknown;
uint64_t downloadSize, narSize;
store->queryMissing(targets, willBuild, willSubstitute, unknown, downloadSize, narSize);
logger->stopWork();
- workerProtoWrite(*store, to, willBuild);
- workerProtoWrite(*store, to, willSubstitute);
- workerProtoWrite(*store, to, unknown);
+ WorkerProto::write(*store, wconn, willBuild);
+ WorkerProto::write(*store, wconn, willSubstitute);
+ WorkerProto::write(*store, wconn, unknown);
to << downloadSize << narSize;
break;
}
- case wopRegisterDrvOutput: {
+ case WorkerProto::Op::RegisterDrvOutput: {
logger->startWork();
if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
auto outputId = DrvOutput::parse(readString(from));
@@ -950,14 +954,14 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
store->registerDrvOutput(Realisation{
.id = outputId, .outPath = outputPath});
} else {
- auto realisation = WorkerProto<Realisation>::read(*store, from);
+ auto realisation = WorkerProto::Serialise<Realisation>::read(*store, rconn);
store->registerDrvOutput(realisation);
}
logger->stopWork();
break;
}
- case wopQueryRealisation: {
+ case WorkerProto::Op::QueryRealisation: {
logger->startWork();
auto outputId = DrvOutput::parse(readString(from));
auto info = store->queryRealisation(outputId);
@@ -965,16 +969,16 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
std::set<StorePath> outPaths;
if (info) outPaths.insert(info->outPath);
- workerProtoWrite(*store, to, outPaths);
+ WorkerProto::write(*store, wconn, outPaths);
} else {
std::set<Realisation> realisations;
if (info) realisations.insert(*info);
- workerProtoWrite(*store, to, realisations);
+ WorkerProto::write(*store, wconn, realisations);
}
break;
}
- case wopAddBuildLog: {
+ case WorkerProto::Op::AddBuildLog: {
StorePath path{readString(from)};
logger->startWork();
if (!trusted)
@@ -991,6 +995,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
+ case WorkerProto::Op::QueryFailedPaths:
+ case WorkerProto::Op::ClearFailedPaths:
+ throw Error("Removed operation %1%", op);
+
default:
throw Error("invalid operation %1%", op);
}
@@ -1045,7 +1053,8 @@ void processConnection(
auto temp = trusted
? store->isTrustedClient()
: std::optional { NotTrusted };
- workerProtoWrite(*store, to, temp);
+ WorkerProto::WriteConn wconn { .to = to };
+ WorkerProto::write(*store, wconn, temp);
}
/* Send startup error messages to the client. */
@@ -1058,9 +1067,9 @@ void processConnection(
/* Process client requests. */
while (true) {
- WorkerOp op;
+ WorkerProto::Op op;
try {
- op = (WorkerOp) readInt(from);
+ op = (enum WorkerProto::Op) readInt(from);
} catch (Interrupted & e) {
break;
} catch (EndOfFile & e) {
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index ccb165d68..6f63685d4 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -5,6 +5,7 @@
#include "util.hh"
#include "split.hh"
#include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
#include "fs-accessor.hh"
#include <boost/container/small_vector.hpp>
#include <nlohmann/json.hpp>
@@ -749,7 +750,8 @@ Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv,
drv.outputs.emplace(std::move(name), std::move(output));
}
- drv.inputSrcs = WorkerProto<StorePathSet>::read(store, in);
+ drv.inputSrcs = WorkerProto::Serialise<StorePathSet>::read(store,
+ WorkerProto::ReadConn { .from = in });
in >> drv.platform >> drv.builder;
drv.args = readStrings<Strings>(in);
@@ -797,7 +799,9 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr
},
}, i.second.raw());
}
- workerProtoWrite(store, out, drv.inputSrcs);
+ WorkerProto::write(store,
+ WorkerProto::WriteConn { .to = out },
+ drv.inputSrcs);
out << drv.platform << drv.builder << drv.args;
out << drv.env.size();
for (auto & i : drv.env)
diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc
index 5ea263a86..e866aeb42 100644
--- a/src/libstore/export-import.cc
+++ b/src/libstore/export-import.cc
@@ -2,6 +2,7 @@
#include "store-api.hh"
#include "archive.hh"
#include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
#include <algorithm>
@@ -45,7 +46,9 @@ void Store::exportPath(const StorePath & path, Sink & sink)
teeSink
<< exportMagic
<< printStorePath(path);
- workerProtoWrite(*this, teeSink, info->references);
+ WorkerProto::write(*this,
+ WorkerProto::WriteConn { .to = teeSink },
+ info->references);
teeSink
<< (info->deriver ? printStorePath(*info->deriver) : "")
<< 0;
@@ -73,7 +76,8 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
//Activity act(*logger, lvlInfo, "importing path '%s'", info.path);
- auto references = WorkerProto<StorePathSet>::read(*this, source);
+ auto references = WorkerProto::Serialise<StorePathSet>::read(*this,
+ WorkerProto::ReadConn { .from = source });
auto deriver = readString(source);
auto narHash = hashString(htSHA256, saved.s);
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index 2346accbe..38b691279 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -186,9 +186,9 @@ struct curlFileTransfer : public FileTransfer
size_t realSize = size * nmemb;
std::string line((char *) contents, realSize);
printMsg(lvlVomit, "got header for '%s': %s", request.uri, trim(line));
+
static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)", std::regex::extended | std::regex::icase);
- std::smatch match;
- if (std::regex_match(line, match, statusLine)) {
+ if (std::smatch match; std::regex_match(line, match, statusLine)) {
result.etag = "";
result.data.clear();
result.bodySize = 0;
@@ -196,9 +196,11 @@ struct curlFileTransfer : public FileTransfer
acceptRanges = false;
encoding = "";
} else {
+
auto i = line.find(':');
if (i != std::string::npos) {
std::string name = toLower(trim(line.substr(0, i)));
+
if (name == "etag") {
result.etag = trim(line.substr(i + 1));
/* Hack to work around a GitHub bug: it sends
@@ -212,10 +214,22 @@ struct curlFileTransfer : public FileTransfer
debug("shutting down on 200 HTTP response with expected ETag");
return 0;
}
- } else if (name == "content-encoding")
+ }
+
+ else if (name == "content-encoding")
encoding = trim(line.substr(i + 1));
+
else if (name == "accept-ranges" && toLower(trim(line.substr(i + 1))) == "bytes")
acceptRanges = true;
+
+ else if (name == "link" || name == "x-amz-meta-link") {
+ auto value = trim(line.substr(i + 1));
+ static std::regex linkRegex("<([^>]*)>; rel=\"immutable\"", std::regex::extended | std::regex::icase);
+ if (std::smatch match; std::regex_match(value, match, linkRegex))
+ result.immutableUrl = match.str(1);
+ else
+ debug("got invalid link header '%s'", value);
+ }
}
}
return realSize;
@@ -345,7 +359,7 @@ struct curlFileTransfer : public FileTransfer
{
auto httpStatus = getHTTPStatus();
- char * effectiveUriCStr;
+ char * effectiveUriCStr = nullptr;
curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr);
if (effectiveUriCStr)
result.effectiveUri = effectiveUriCStr;
diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh
index 378c6ff78..a3b0dde1f 100644
--- a/src/libstore/filetransfer.hh
+++ b/src/libstore/filetransfer.hh
@@ -80,6 +80,10 @@ struct FileTransferResult
std::string effectiveUri;
std::string data;
uint64_t bodySize = 0;
+ /* An "immutable" URL for this resource (i.e. one whose contents
+ will never change), as returned by the `Link: <url>;
+ rel="immutable"` header. */
+ std::optional<std::string> immutableUrl;
};
class Store;
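
[Editorial sketch] For callers, `immutableUrl` is just an optional extra field on `FileTransferResult`, populated from the `Link: <url>; rel="immutable"` header parsed above. A hedged sketch of how a caller might prefer it over the effective URL when deciding what to record for a locked resource (the helper name is illustrative, not part of this change):

    #include "filetransfer.hh"

    // Prefer the server-advertised immutable URL over the effective URL,
    // so that a recorded/locked URL never goes stale.
    static std::string urlToRecord(const nix::FileTransferResult & result)
    {
        return result.immutableUrl.value_or(result.effectiveUri);
    }
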
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index 0038ec802..20720fb99 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -110,6 +110,11 @@ void LocalStore::createTempRootsFile()
void LocalStore::addTempRoot(const StorePath & path)
{
+ if (readOnly) {
+ debug("Read-only store doesn't support creating lock files for temp roots, but nothing can be deleted anyways.");
+ return;
+ }
+
createTempRootsFile();
/* Open/create the global GC lock file. */
@@ -563,7 +568,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
/* On macOS, accepted sockets inherit the
non-blocking flag from the server socket, so
explicitly make it blocking. */
- if (fcntl(fdServer.get(), F_SETFL, fcntl(fdServer.get(), F_GETFL) & ~O_NONBLOCK) == -1)
+ if (fcntl(fdClient.get(), F_SETFL, fcntl(fdClient.get(), F_GETFL) & ~O_NONBLOCK) == -1)
abort();
while (true) {
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 4c66d08ee..5a4cb1824 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -77,7 +77,33 @@ Settings::Settings()
allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
#endif
- buildHook = getSelfExe().value_or("nix") + " __build-remote";
+ /* Set the build hook location
+
+ For builds we perform a self-invocation, so Nix has to be self-aware.
+ That is, it has to know where it is installed. We don't think it's sentient.
+
+ Normally, nix is installed according to `nixBinDir`, which is set at compile time,
+ but can be overridden. This makes for a great default that works even if this
+ code is linked as a library into some other program whose main is not aware
+ that it might need to be a build remote hook.
+
+ However, it may not have been installed at all. For example, if it's a static build,
+ there's a good chance that it has been moved out of its installation directory.
+ That makes `nixBinDir` useless. Instead, we'll query the OS for the path to the
+ current executable, using `getSelfExe()`.
+
+ As a last resort, we fall back to `PATH`. Hopefully we find a `nix` there that's compatible.
+ If you're porting Nix to a new platform, that might be good enough for a while, but
+ you'll want to improve `getSelfExe()` to work on your platform.
+ */
+ std::string nixExePath = nixBinDir + "/nix";
+ if (!pathExists(nixExePath)) {
+ nixExePath = getSelfExe().value_or("nix");
+ }
+ buildHook = {
+ nixExePath,
+ "__build-remote",
+ };
}
void loadConfFile()
@@ -183,7 +209,7 @@ bool Settings::isWSL1()
Path Settings::getDefaultSSLCertFile()
{
for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
- if (pathExists(fn)) return fn;
+ if (pathAccessible(fn)) return fn;
return "";
}
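
[Editorial sketch] The resolution order described in the comment above (installed `nixBinDir`, then `getSelfExe()`, then plain `nix` on `PATH`) can be read off the new constructor code; restated as a standalone function, using only helpers that appear in this diff (the function name is made up for illustration):

    // Resolve the executable used for the `__build-remote` self-invocation.
    static std::string resolveNixForBuildHook()
    {
        std::string candidate = nixBinDir + "/nix";   // compile-time install location
        if (pathExists(candidate))
            return candidate;
        return getSelfExe().value_or("nix");          // current executable, else $PATH
    }
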
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 31dfe5b4e..b8dcf1f76 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -236,7 +236,7 @@ public:
)",
{"build-timeout"}};
- PathSetting buildHook{this, true, "", "build-hook",
+ Setting<Strings> buildHook{this, {}, "build-hook",
R"(
The path to the helper program that executes remote builds.
@@ -556,8 +556,8 @@ public:
line.
)"};
- PathSetting diffHook{
- this, true, "", "diff-hook",
+ OptionalPathSetting diffHook{
+ this, std::nullopt, "diff-hook",
R"(
Absolute path to an executable capable of diffing build
results. The hook is executed if `run-diff-hook` is true, and the
@@ -691,20 +691,19 @@ public:
Strings{"https://cache.nixos.org/"},
"substituters",
R"(
- A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format)
- to be used as substituters, separated by whitespace.
- Substituters are tried based on their Priority value, which each substituter can set
- independently. Lower value means higher priority.
- The default is `https://cache.nixos.org`, with a Priority of 40.
+ A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format) to be used as substituters, separated by whitespace.
+ A substituter is an additional [store](@docroot@/glossary.md#gloss-store) from which Nix can obtain [store objects](@docroot@/glossary.md#gloss-store-object) instead of building them.
- At least one of the following conditions must be met for Nix to use
- a substituter:
+ Substituters are tried based on their priority value, which each substituter can set independently.
+ Lower value means higher priority.
+ The default is `https://cache.nixos.org`, which has a priority of 40.
+
+ At least one of the following conditions must be met for Nix to use a substituter:
- the substituter is in the [`trusted-substituters`](#conf-trusted-substituters) list
- the user calling Nix is in the [`trusted-users`](#conf-trusted-users) list
- In addition, each store path should be trusted as described
- in [`trusted-public-keys`](#conf-trusted-public-keys)
+ In addition, each store path should be trusted as described in [`trusted-public-keys`](#conf-trusted-public-keys)
)",
{"binary-caches"}};
@@ -896,12 +895,11 @@ public:
this, {}, "hashed-mirrors",
R"(
A list of web servers used by `builtins.fetchurl` to obtain files by
- hash. The default is `http://tarballs.nixos.org/`. Given a hash type
- *ht* and a base-16 hash *h*, Nix will try to download the file from
- *hashed-mirror*/*ht*/*h*. This allows files to be downloaded even if
- they have disappeared from their original URI. For example, given
- the default mirror `http://tarballs.nixos.org/`, when building the
- derivation
+ hash. Given a hash type *ht* and a base-16 hash *h*, Nix will try to
+ download the file from *hashed-mirror*/*ht*/*h*. This allows files to
+ be downloaded even if they have disappeared from their original URI.
+ For example, given an example mirror `http://tarballs.nixos.org/`,
+ when building the derivation
```nix
builtins.fetchurl {
@@ -995,6 +993,18 @@ public:
| `~/.nix-profile` | `$XDG_STATE_HOME/nix/profile` |
| `~/.nix-defexpr` | `$XDG_STATE_HOME/nix/defexpr` |
| `~/.nix-channels` | `$XDG_STATE_HOME/nix/channels` |
+
+ If you already have Nix installed and are using [profiles](@docroot@/package-management/profiles.md) or [channels](@docroot@/package-management/channels.md), you should migrate manually when you enable this option.
+ If `$XDG_STATE_HOME` is not set, use `$HOME/.local/state/nix` instead of `$XDG_STATE_HOME/nix`.
+ This can be achieved with the following shell commands:
+
+ ```sh
+ nix_state_home=${XDG_STATE_HOME-$HOME/.local/state}/nix
+ mkdir -p $nix_state_home
+ mv $HOME/.nix-profile $nix_state_home/profile
+ mv $HOME/.nix-defexpr $nix_state_home/defexpr
+ mv $HOME/.nix-channels $nix_state_home/channels
+ ```
)"
};
};
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index 2b7bebe9d..fa17d606d 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -7,6 +7,7 @@
#include "store-api.hh"
#include "path-with-outputs.hh"
#include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
#include "ssh.hh"
#include "derivations.hh"
#include "callback.hh"
@@ -47,6 +48,42 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
FdSource from;
int remoteVersion;
bool good = true;
+
+ /**
+ * Coercion to `WorkerProto::ReadConn`. This makes it easy to use the
+ * factored out worker protocol serializers with a
+ * `LegacySSHStore::Connection`.
+ *
+ * The worker protocol connection types are unidirectional, unlike
+ * this type.
+ *
+ * @todo Use server protocol serializers, not worker protocol
+ * serializers, once we have made that distinction.
+ */
+ operator WorkerProto::ReadConn ()
+ {
+ return WorkerProto::ReadConn {
+ .from = from,
+ };
+ }
+
+ /**
+ * Coercion to `WorkerProto::WriteConn`. This makes it easy to use the
+ * factored out worker protocol serializers with a
+ * `LegacySSHStore::Connection`.
+ *
+ * The worker protocol connection types are unidirectional, unlike
+ * this type.
+ *
+ * @todo Use server protocol serializers, not worker protocol
+ * serializers, once we have made that distinction.
+ */
+ operator WorkerProto::WriteConn ()
+ {
+ return WorkerProto::WriteConn {
+ .to = to,
+ };
+ }
};
std::string host;
@@ -133,7 +170,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
debug("querying remote host '%s' for info on '%s'", host, printStorePath(path));
- conn->to << cmdQueryPathInfos << PathSet{printStorePath(path)};
+ conn->to << ServeProto::Command::QueryPathInfos << PathSet{printStorePath(path)};
conn->to.flush();
auto p = readString(conn->from);
@@ -146,7 +183,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
auto deriver = readString(conn->from);
if (deriver != "")
info->deriver = parseStorePath(deriver);
- info->references = WorkerProto<StorePathSet>::read(*this, conn->from);
+ info->references = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
readLongLong(conn->from); // download size
info->narSize = readLongLong(conn->from);
@@ -176,11 +213,11 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) {
conn->to
- << cmdAddToStoreNar
+ << ServeProto::Command::AddToStoreNar
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(Base16, false);
- workerProtoWrite(*this, conn->to, info.references);
+ WorkerProto::write(*this, *conn, info.references);
conn->to
<< info.registrationTime
<< info.narSize
@@ -198,7 +235,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
} else {
conn->to
- << cmdImportPaths
+ << ServeProto::Command::ImportPaths
<< 1;
try {
copyNAR(source, conn->to);
@@ -209,7 +246,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
conn->to
<< exportMagic
<< printStorePath(info.path);
- workerProtoWrite(*this, conn->to, info.references);
+ WorkerProto::write(*this, *conn, info.references);
conn->to
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0
@@ -226,7 +263,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
{
auto conn(connections->get());
- conn->to << cmdDumpStorePath << printStorePath(path);
+ conn->to << ServeProto::Command::DumpStorePath << printStorePath(path);
conn->to.flush();
copyNAR(conn->from, sink);
}
@@ -279,7 +316,7 @@ public:
auto conn(connections->get());
conn->to
- << cmdBuildDerivation
+ << ServeProto::Command::BuildDerivation
<< printStorePath(drvPath);
writeDerivation(conn->to, *this, drv);
@@ -294,7 +331,7 @@ public:
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
conn->from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 6) {
- auto builtOutputs = WorkerProto<DrvOutputs>::read(*this, conn->from);
+ auto builtOutputs = WorkerProto::Serialise<DrvOutputs>::read(*this, *conn);
for (auto && [output, realisation] : builtOutputs)
status.builtOutputs.insert_or_assign(
std::move(output.outputName),
@@ -310,7 +347,7 @@ public:
auto conn(connections->get());
- conn->to << cmdBuildPaths;
+ conn->to << ServeProto::Command::BuildPaths;
Strings ss;
for (auto & p : drvPaths) {
auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
@@ -367,12 +404,12 @@ public:
auto conn(connections->get());
conn->to
- << cmdQueryClosure
+ << ServeProto::Command::QueryClosure
<< includeOutputs;
- workerProtoWrite(*this, conn->to, paths);
+ WorkerProto::write(*this, *conn, paths);
conn->to.flush();
- for (auto & i : WorkerProto<StorePathSet>::read(*this, conn->from))
+ for (auto & i : WorkerProto::Serialise<StorePathSet>::read(*this, *conn))
out.insert(i);
}
@@ -382,13 +419,13 @@ public:
auto conn(connections->get());
conn->to
- << cmdQueryValidPaths
+ << ServeProto::Command::QueryValidPaths
<< false // lock
<< maybeSubstitute;
- workerProtoWrite(*this, conn->to, paths);
+ WorkerProto::write(*this, *conn, paths);
conn->to.flush();
- return WorkerProto<StorePathSet>::read(*this, conn->from);
+ return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
void connect() override
diff --git a/src/libstore/local-fs-store.hh b/src/libstore/local-fs-store.hh
index a03bb88f5..2ee2ef0c8 100644
--- a/src/libstore/local-fs-store.hh
+++ b/src/libstore/local-fs-store.hh
@@ -15,22 +15,22 @@ struct LocalFSStoreConfig : virtual StoreConfig
// it to omit the call to the Setting constructor. Clang works fine
// either way.
- const PathSetting rootDir{(StoreConfig*) this, true, "",
+ const OptionalPathSetting rootDir{(StoreConfig*) this, std::nullopt,
"root",
"Directory prefixed to all other paths."};
- const PathSetting stateDir{(StoreConfig*) this, false,
- rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir,
+ const PathSetting stateDir{(StoreConfig*) this,
+ rootDir.get() ? *rootDir.get() + "/nix/var/nix" : settings.nixStateDir,
"state",
"Directory where Nix will store state."};
- const PathSetting logDir{(StoreConfig*) this, false,
- rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
+ const PathSetting logDir{(StoreConfig*) this,
+ rootDir.get() ? *rootDir.get() + "/nix/var/log/nix" : settings.nixLogDir,
"log",
"directory where Nix will store log files."};
- const PathSetting realStoreDir{(StoreConfig*) this, false,
- rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
+ const PathSetting realStoreDir{(StoreConfig*) this,
+ rootDir.get() ? *rootDir.get() + "/nix/store" : storeDir, "real",
"Physical path of the Nix store."};
};
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 7fb312c37..e69460e6c 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -190,7 +190,11 @@ LocalStore::LocalStore(const Params & params)
/* Create missing state directories if they don't already exist. */
createDirs(realStoreDir);
- makeStoreWritable();
+ if (readOnly) {
+ experimentalFeatureSettings.require(Xp::ReadOnlyLocalStore);
+ } else {
+ makeStoreWritable();
+ }
createDirs(linksDir);
Path profilesDir = stateDir + "/profiles";
createDirs(profilesDir);
@@ -204,8 +208,10 @@ LocalStore::LocalStore(const Params & params)
for (auto & perUserDir : {profilesDir + "/per-user", gcRootsDir + "/per-user"}) {
createDirs(perUserDir);
- if (chmod(perUserDir.c_str(), 0755) == -1)
- throw SysError("could not set permissions on '%s' to 755", perUserDir);
+ if (!readOnly) {
+ if (chmod(perUserDir.c_str(), 0755) == -1)
+ throw SysError("could not set permissions on '%s' to 755", perUserDir);
+ }
}
/* Optionally, create directories and set permissions for a
@@ -269,10 +275,12 @@ LocalStore::LocalStore(const Params & params)
/* Acquire the big fat lock in shared mode to make sure that no
schema upgrade is in progress. */
- Path globalLockPath = dbDir + "/big-lock";
- globalLock = openLockFile(globalLockPath.c_str(), true);
+ if (!readOnly) {
+ Path globalLockPath = dbDir + "/big-lock";
+ globalLock = openLockFile(globalLockPath.c_str(), true);
+ }
- if (!lockFile(globalLock.get(), ltRead, false)) {
+ if (!readOnly && !lockFile(globalLock.get(), ltRead, false)) {
printInfo("waiting for the big Nix store lock...");
lockFile(globalLock.get(), ltRead, true);
}
@@ -280,6 +288,14 @@ LocalStore::LocalStore(const Params & params)
/* Check the current database schema and if necessary do an
upgrade. */
int curSchema = getSchema();
+ if (readOnly && curSchema < nixSchemaVersion) {
+ debug("current schema version: %d", curSchema);
+ debug("supported schema version: %d", nixSchemaVersion);
+ throw Error(curSchema == 0 ?
+ "database does not exist, and cannot be created in read-only mode" :
+ "database schema needs migrating, but this cannot be done in read-only mode");
+ }
+
if (curSchema > nixSchemaVersion)
throw Error("current Nix store schema is version %1%, but I only support %2%",
curSchema, nixSchemaVersion);
@@ -344,7 +360,11 @@ LocalStore::LocalStore(const Params & params)
else openDB(*state, false);
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
- migrateCASchema(state->db, dbDir + "/ca-schema", globalLock);
+ if (!readOnly) {
+ migrateCASchema(state->db, dbDir + "/ca-schema", globalLock);
+ } else {
+ throw Error("need to migrate to content-addressed schema, but this cannot be done in read-only mode");
+ }
}
/* Prepare SQL statements. */
@@ -475,13 +495,20 @@ int LocalStore::getSchema()
void LocalStore::openDB(State & state, bool create)
{
- if (access(dbDir.c_str(), R_OK | W_OK))
+ if (create && readOnly) {
+ throw Error("cannot create database while in read-only mode");
+ }
+
+ if (access(dbDir.c_str(), R_OK | (readOnly ? 0 : W_OK)))
throw SysError("Nix database directory '%1%' is not writable", dbDir);
/* Open the Nix database. */
std::string dbPath = dbDir + "/db.sqlite";
auto & db(state.db);
- state.db = SQLite(dbPath, create);
+ auto openMode = readOnly ? SQLiteOpenMode::Immutable
+ : create ? SQLiteOpenMode::Normal
+ : SQLiteOpenMode::NoCreate;
+ state.db = SQLite(dbPath, openMode);
#ifdef __CYGWIN__
/* The cygwin version of sqlite3 has a patch which calls
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 70debad38..8a3b0b43f 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -46,6 +46,23 @@ struct LocalStoreConfig : virtual LocalFSStoreConfig
"require-sigs",
"Whether store paths copied into this store should have a trusted signature."};
+ Setting<bool> readOnly{(StoreConfig*) this,
+ false,
+ "read-only",
+ R"(
+ Allow this store to be opened when its [database](@docroot@/glossary.md#gloss-nix-database) is on a read-only filesystem.
+
+ Normally Nix will attempt to open the store database in read-write mode, even for querying (when write access is not needed), causing it to fail if the database is on a read-only filesystem.
+
+ Enable read-only mode to disable locking and open the SQLite database with the [`immutable` parameter](https://www.sqlite.org/c3ref/open.html) set.
+
+ > **Warning**
+ > Do not use this unless the filesystem is read-only.
+ >
+ > Using it when the filesystem is writable can cause incorrect query results or corruption errors if the database is changed by another process.
+ > While the filesystem the database resides on might appear to be read-only, consider whether another user or system might have write access to it.
+ )"};
+
const std::string name() override { return "Local Store"; }
std::string doc() override;
@@ -269,6 +286,10 @@ public:
private:
+ /**
+ * Retrieve the current version of the database schema.
+ * If the database does not exist yet, the version returned will be 0.
+ */
int getSchema();
void openDB(State & state, bool create);
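
[Editorial sketch] The new `read-only` setting is an ordinary store setting, so it can be supplied like any other store parameter; a hedged usage sketch (the query-style store URI is an assumption based on how other store settings are passed, and `somePath` stands in for a `StorePath` obtained elsewhere):

    #include "store-api.hh"

    // Open the local store without taking the big lock or requiring write
    // access to the SQLite database (needs the `read-only-local-store`
    // experimental feature, per the LocalStore constructor change above).
    auto store = nix::openStore("local?read-only=true");

    // Read-only queries behave normally; operations that would write to
    // the database fail instead.
    bool valid = store->isValidPath(somePath);
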
diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc
index 97b72faa3..981bbfb14 100644
--- a/src/libstore/path-info.cc
+++ b/src/libstore/path-info.cc
@@ -1,5 +1,6 @@
#include "path-info.hh"
#include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
#include "store-api.hh"
namespace nix {
@@ -132,7 +133,8 @@ ValidPathInfo ValidPathInfo::read(Source & source, const Store & store, unsigned
auto narHash = Hash::parseAny(readString(source), htSHA256);
ValidPathInfo info(path, narHash);
if (deriver != "") info.deriver = store.parseStorePath(deriver);
- info.references = WorkerProto<StorePathSet>::read(store, source);
+ info.references = WorkerProto::Serialise<StorePathSet>::read(store,
+ WorkerProto::ReadConn { .from = source });
source >> info.registrationTime >> info.narSize;
if (format >= 16) {
source >> info.ultimate;
@@ -153,7 +155,9 @@ void ValidPathInfo::write(
sink << store.printStorePath(path);
sink << (deriver ? store.printStorePath(*deriver) : "")
<< narHash.to_string(Base16, false);
- workerProtoWrite(store, sink, references);
+ WorkerProto::write(store,
+ WorkerProto::WriteConn { .to = sink },
+ references);
sink << registrationTime << narSize;
if (format >= 16) {
sink << ultimate
diff --git a/src/libstore/path-references.cc b/src/libstore/path-references.cc
new file mode 100644
index 000000000..33cf66ce3
--- /dev/null
+++ b/src/libstore/path-references.cc
@@ -0,0 +1,73 @@
+#include "path-references.hh"
+#include "hash.hh"
+#include "util.hh"
+#include "archive.hh"
+
+#include <map>
+#include <cstdlib>
+#include <mutex>
+#include <algorithm>
+
+
+namespace nix {
+
+
+PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
+ : RefScanSink(std::move(hashes))
+ , backMap(std::move(backMap))
+{ }
+
+PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
+{
+ StringSet hashes;
+ std::map<std::string, StorePath> backMap;
+
+ for (auto & i : refs) {
+ std::string hashPart(i.hashPart());
+ auto inserted = backMap.emplace(hashPart, i).second;
+ assert(inserted);
+ hashes.insert(hashPart);
+ }
+
+ return PathRefScanSink(std::move(hashes), std::move(backMap));
+}
+
+StorePathSet PathRefScanSink::getResultPaths()
+{
+ /* Map the hashes found back to their store paths. */
+ StorePathSet found;
+ for (auto & i : getResult()) {
+ auto j = backMap.find(i);
+ assert(j != backMap.end());
+ found.insert(j->second);
+ }
+
+ return found;
+}
+
+
+std::pair<StorePathSet, HashResult> scanForReferences(
+ const std::string & path,
+ const StorePathSet & refs)
+{
+ HashSink hashSink { htSHA256 };
+ auto found = scanForReferences(hashSink, path, refs);
+ auto hash = hashSink.finish();
+ return std::pair<StorePathSet, HashResult>(found, hash);
+}
+
+StorePathSet scanForReferences(
+ Sink & toTee,
+ const Path & path,
+ const StorePathSet & refs)
+{
+ PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
+ TeeSink sink { refsSink, toTee };
+
+ /* Look for the hashes in the NAR dump of the path. */
+ dumpPath(path, sink);
+
+ return refsSink.getResultPaths();
+}
+
+}
diff --git a/src/libstore/path-references.hh b/src/libstore/path-references.hh
new file mode 100644
index 000000000..7b44e3261
--- /dev/null
+++ b/src/libstore/path-references.hh
@@ -0,0 +1,25 @@
+#pragma once
+
+#include "references.hh"
+#include "path.hh"
+
+namespace nix {
+
+std::pair<StorePathSet, HashResult> scanForReferences(const Path & path, const StorePathSet & refs);
+
+StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathSet & refs);
+
+class PathRefScanSink : public RefScanSink
+{
+ std::map<std::string, StorePath> backMap;
+
+ PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
+
+public:
+
+ static PathRefScanSink fromPaths(const StorePathSet & refs);
+
+ StorePathSet getResultPaths();
+};
+
+}
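
[Editorial sketch] The new `path-references` module keeps the store-path-aware scanning that previously lived in libstore's `references.cc` (the generic `RefScanSink` moves to libutil). A short sketch of a caller, with illustrative variable names (`store`, `outputPath`, `candidates` are assumptions):

    #include "path-references.hh"

    // Scan the NAR dump of a freshly built output for the hash parts of
    // its candidate references, getting back both the references that were
    // actually found and the NAR hash computed along the way.
    StorePathSet candidates = /* e.g. closure of the build inputs */ {};
    auto [references, narHashResult] =
        scanForReferences(store.printStorePath(outputPath), candidates);
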
diff --git a/src/libstore/path.cc b/src/libstore/path.cc
index 46be54281..552e83114 100644
--- a/src/libstore/path.cc
+++ b/src/libstore/path.cc
@@ -9,8 +9,8 @@ static void checkName(std::string_view path, std::string_view name)
if (name.empty())
throw BadStorePath("store path '%s' has an empty name", path);
if (name.size() > StorePath::MaxPathLen)
- throw BadStorePath("store path '%s' has a name longer than '%d characters",
- StorePath::MaxPathLen, path);
+ throw BadStorePath("store path '%s' has a name longer than %d characters",
+ path, StorePath::MaxPathLen);
// See nameRegexStr for the definition
for (auto c : name)
if (!((c >= '0' && c <= '9')
diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc
index ba5c8583f..4e9955948 100644
--- a/src/libstore/profiles.cc
+++ b/src/libstore/profiles.cc
@@ -13,8 +13,10 @@
namespace nix {
-/* Parse a generation name of the format
- `<profilename>-<number>-link'. */
+/**
+ * Parse a generation name of the format
+ * `<profilename>-<number>-link'.
+ */
static std::optional<GenerationNumber> parseName(const std::string & profileName, const std::string & name)
{
if (name.substr(0, profileName.size() + 1) != profileName + "-") return {};
@@ -28,7 +30,6 @@ static std::optional<GenerationNumber> parseName(const std::string & profileName
}
-
std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path profile)
{
Generations gens;
@@ -61,15 +62,16 @@ std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path pro
}
-static void makeName(const Path & profile, GenerationNumber num,
- Path & outLink)
+/**
+ * Create a generation name that can be parsed by `parseName()`.
+ */
+static Path makeName(const Path & profile, GenerationNumber num)
{
- Path prefix = fmt("%1%-%2%", profile, num);
- outLink = prefix + "-link";
+ return fmt("%s-%s-link", profile, num);
}
-Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
+Path createGeneration(LocalFSStore & store, Path profile, StorePath outPath)
{
/* The new generation number should be higher than old the
previous ones. */
@@ -79,7 +81,7 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
if (gens.size() > 0) {
Generation last = gens.back();
- if (readLink(last.path) == store->printStorePath(outPath)) {
+ if (readLink(last.path) == store.printStorePath(outPath)) {
/* We only create a new generation symlink if it differs
from the last one.
@@ -89,7 +91,7 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
return last.path;
}
- num = gens.back().number;
+ num = last.number;
} else {
num = 0;
}
@@ -100,9 +102,8 @@ Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath)
to the permanent roots (of which the GC would have a stale
view). If we didn't do it this way, the GC might remove the
user environment etc. we've just built. */
- Path generation;
- makeName(profile, num + 1, generation);
- store->addPermRoot(outPath, generation);
+ Path generation = makeName(profile, num + 1);
+ store.addPermRoot(outPath, generation);
return generation;
}
@@ -117,12 +118,19 @@ static void removeFile(const Path & path)
void deleteGeneration(const Path & profile, GenerationNumber gen)
{
- Path generation;
- makeName(profile, gen, generation);
+ Path generation = makeName(profile, gen);
removeFile(generation);
}
-
+/**
+ * Delete a generation with dry-run mode.
+ *
+ * Like `deleteGeneration()` but:
+ *
+ * - We log what we are going to do.
+ *
+ * - We only actually delete if `dryRun` is false.
+ */
static void deleteGeneration2(const Path & profile, GenerationNumber gen, bool dryRun)
{
if (dryRun)
@@ -150,27 +158,36 @@ void deleteGenerations(const Path & profile, const std::set<GenerationNumber> &
}
}
+/**
+ * Advance the iterator until the given predicate `cond` returns `true`.
+ */
+static inline void iterDropUntil(Generations & gens, auto && i, auto && cond)
+{
+ for (; i != gens.rend() && !cond(*i); ++i);
+}
+
void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bool dryRun)
{
+ if (max == 0)
+ throw Error("Must keep at least one generation, otherwise the current one would be deleted");
+
PathLocks lock;
lockProfile(lock, profile);
- bool fromCurGen = false;
- auto [gens, curGen] = findGenerations(profile);
- for (auto i = gens.rbegin(); i != gens.rend(); ++i) {
- if (i->number == curGen) {
- fromCurGen = true;
- max--;
- continue;
- }
- if (fromCurGen) {
- if (max) {
- max--;
- continue;
- }
- deleteGeneration2(profile, i->number, dryRun);
- }
- }
+ auto [gens, _curGen] = findGenerations(profile);
+ auto curGen = _curGen;
+
+ auto i = gens.rbegin();
+
+ // Find the current generation
+ iterDropUntil(gens, i, [&](auto & g) { return g.number == curGen; });
+
+ // Skip over `max` generations, preserving them
+ for (auto keep = 0; i != gens.rend() && keep < max; ++i, ++keep);
+
+ // Delete the rest
+ for (; i != gens.rend(); ++i)
+ deleteGeneration2(profile, i->number, dryRun);
}
void deleteOldGenerations(const Path & profile, bool dryRun)
@@ -193,23 +210,33 @@ void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun)
auto [gens, curGen] = findGenerations(profile);
- bool canDelete = false;
- for (auto i = gens.rbegin(); i != gens.rend(); ++i)
- if (canDelete) {
- assert(i->creationTime < t);
- if (i->number != curGen)
- deleteGeneration2(profile, i->number, dryRun);
- } else if (i->creationTime < t) {
- /* We may now start deleting generations, but we don't
- delete this generation yet, because this generation was
- still the one that was active at the requested point in
- time. */
- canDelete = true;
- }
+ auto i = gens.rbegin();
+
+ // Predicate that the generation is older than the given time.
+ auto older = [&](auto & g) { return g.creationTime < t; };
+
+ // Find the first older generation, if one exists
+ iterDropUntil(gens, i, older);
+
+ /* Take the previous generation
+
+ We don't want to delete this one yet because it
+ existed at the requested point in time, and
+ we want to be able to roll back to it. */
+ if (i != gens.rend()) ++i;
+
+ // Delete all previous generations (unless current).
+ for (; i != gens.rend(); ++i) {
+ /* Creation dates and generation numbers should be monotonic, so
+ lower-numbered generations should also be older. */
+ assert(older(*i));
+ if (i->number != curGen)
+ deleteGeneration2(profile, i->number, dryRun);
+ }
}
-void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec, bool dryRun)
+time_t parseOlderThanTimeSpec(std::string_view timeSpec)
{
if (timeSpec.empty() || timeSpec[timeSpec.size() - 1] != 'd')
throw UsageError("invalid number of days specifier '%1%', expected something like '14d'", timeSpec);
@@ -221,9 +248,7 @@ void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec,
if (!days || *days < 1)
throw UsageError("invalid number of days specifier '%1%'", timeSpec);
- time_t oldTime = curTime - *days * 24 * 3600;
-
- deleteGenerationsOlderThan(profile, oldTime, dryRun);
+ return curTime - *days * 24 * 3600;
}
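
[Editorial sketch] With the old combined overload split up, time-based pruning is now a two-step call; a small sketch of the new flow (dry-run; `profile` is assumed to be a profile path):

    #include "profiles.hh"

    // "30d" -> absolute cutoff time, then delete everything older than the
    // cutoff except the newest generation that predates it, so a rollback
    // to the state at the cutoff remains possible.
    time_t cutoff = parseOlderThanTimeSpec("30d");
    deleteGenerationsOlderThan(profile, cutoff, /* dryRun */ true);
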
diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh
index 4e1f42e83..193c0bf21 100644
--- a/src/libstore/profiles.hh
+++ b/src/libstore/profiles.hh
@@ -1,7 +1,11 @@
#pragma once
-///@file
+/**
+ * @file Implementation of Profiles.
+ *
+ * See the manual for additional information.
+ */
- #include "types.hh"
+#include "types.hh"
#include "pathlocks.hh"
#include <time.h>
@@ -12,41 +16,166 @@ namespace nix {
class StorePath;
+/**
+ * A positive number identifying a generation for a given profile.
+ *
+ * Generation numbers are assigned sequentially. Each new generation is
+ * assigned 1 + the current highest generation number.
+ */
typedef uint64_t GenerationNumber;
+/**
+ * A generation is a revision of a profile.
+ *
+ * Each generation is a mapping (key-value pair) from an identifier
+ * (`number`) to a store object (specified by `path`).
+ */
struct Generation
{
+ /**
+ * The number of a generation is its unique identifier within the
+ * profile.
+ */
GenerationNumber number;
+ /**
+ * The store path identifies the store object that is the contents
+ * of the generation.
+ *
+ * These store paths / objects are not unique to the generation
+ * within a profile. Nix tries to ensure successive generations have
+ * distinct contents to avoid bloat, but nothing stops two
+ * non-adjacent generations from having the same contents.
+ *
+ * @todo Use `StorePath` instead of `Path`?
+ */
Path path;
+
+ /**
+ * When the generation was created. This is extra metadata about the
+ * generation used to make garbage collecting old generations more
+ * convenient.
+ */
time_t creationTime;
};
+/**
+ * All the generations of a profile
+ */
typedef std::list<Generation> Generations;
/**
- * Returns the list of currently present generations for the specified
- * profile, sorted by generation number. Also returns the number of
- * the current generation.
+ * Find all generations for the given profile.
+ *
+ * @param profile A profile specified by its name and location combined
+ * into a path. E.g. if "foo" is the name of the profile, and "/bar/baz"
+ * is the directory it is in, then the path "/bar/baz/foo" would be the
+ * argument for this parameter.
+ *
+ * @return The pair of:
+ *
+ * - The list of currently present generations for the specified profile,
+ * sorted by ascending generation number.
+ *
+ * - The number of the current/active generation.
+ *
+ * Note that the current/active generation need not be the latest one.
*/
std::pair<Generations, std::optional<GenerationNumber>> findGenerations(Path profile);
class LocalFSStore;
-Path createGeneration(ref<LocalFSStore> store, Path profile, StorePath outPath);
+/**
+ * Create a new generation of the given profile
+ *
+ * If the previous generation (not the currently active one!) has a
+ * distinct store object, a fresh generation number is mapped to the
+ * given store object, referenced by path. Otherwise, the previous
+ * generation is reused.
+ *
+ * The behavior of reusing existing generations like this makes this
+ * procedure idempotent. It also avoids clutter.
+ */
+Path createGeneration(LocalFSStore & store, Path profile, StorePath outPath);
+/**
+ * Unconditionally delete a generation
+ *
+ * @param profile A profile specified by its name and location combined into a path.
+ *
+ * @param gen The generation number specifying exactly which generation
+ * to delete.
+ *
+ * Because there is no check of whether the generation to delete is
+ * active, this is somewhat unsafe.
+ *
+ * @todo Should we expose this at all?
+ */
void deleteGeneration(const Path & profile, GenerationNumber gen);
+/**
+ * Delete the given set of generations.
+ *
+ * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
+ *
+ * @param gensToDelete The generations to delete, specified by a set of
+ * numbers.
+ *
+ * @param dryRun Log what would be deleted instead of actually doing
+ * so.
+ *
+ * Trying to delete the currently active generation will fail, and cause
+ * no generations to be deleted.
+ */
void deleteGenerations(const Path & profile, const std::set<GenerationNumber> & gensToDelete, bool dryRun);
+/**
+ * Delete all generations older than the `max` most recent ones, counting back from the current generation.
+ *
+ * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
+ *
+ * @param max How many generations to keep up to the current one. Must
+ * be at least 1 so we don't delete the current one.
+ *
+ * @param dryRun Log what would be deleted instead of actually doing
+ * so.
+ */
void deleteGenerationsGreaterThan(const Path & profile, GenerationNumber max, bool dryRun);
+/**
+ * Delete all generations other than the current one
+ *
+ * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
+ *
+ * @param dryRun Log what would be deleted instead of actually doing
+ * so.
+ */
void deleteOldGenerations(const Path & profile, bool dryRun);
+/**
+ * Delete generations older than `t`, except for the most recent one
+ * older than `t`.
+ *
+ * @param profile The profile, specified by its name and location combined into a path, whose generations we want to delete.
+ *
+ * @param dryRun Log what would be deleted instead of actually doing
+ * so.
+ */
void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun);
-void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec, bool dryRun);
+/**
+ * Parse a time spec intended for `deleteGenerationsOlderThan()`.
+ *
+ * Throws an exception if `timeSpec` fails to parse.
+ */
+time_t parseOlderThanTimeSpec(std::string_view timeSpec);
+/**
+ * Small wrapper around `replaceSymlink` for replacing the current
+ * generation of a profile. Does not enforce proper structure.
+ *
+ * @todo Always use `switchGeneration()` instead, and delete this.
+ */
void switchLink(Path link, Path target);
/**
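
[Editorial sketch] Taken together, the documented API supports the usual create-then-activate flow; a hedged sketch assuming a `LocalFSStore & store`, a profile path `profile`, and a built `StorePath outPath`, and that `switchLink` takes the profile link first (per the declaration above):

    #include "profiles.hh"

    // Create a new generation for `profile` pointing at `outPath`
    // (or get back the existing one if the contents are unchanged),
    // then repoint the profile symlink at it.
    Path generation = createGeneration(store, profile, outPath);
    switchLink(profile, generation);
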
diff --git a/src/libstore/remote-store-connection.hh b/src/libstore/remote-store-connection.hh
new file mode 100644
index 000000000..d32d91a60
--- /dev/null
+++ b/src/libstore/remote-store-connection.hh
@@ -0,0 +1,97 @@
+#include "remote-store.hh"
+#include "worker-protocol.hh"
+
+namespace nix {
+
+/**
+ * Bidirectional connection (send and receive) used by the Remote Store
+ * implementation.
+ *
+ * Contains `Source` and `Sink` for actual communication, along with
+ * other information learned when negotiating the connection.
+ */
+struct RemoteStore::Connection
+{
+ /**
+ * Send with this.
+ */
+ FdSink to;
+
+ /**
+ * Receive with this.
+ */
+ FdSource from;
+
+ /**
+ * Worker protocol version used for the connection.
+ *
+ * Despite its name, I think it is actually the maximum version both
+ * sides support. (If the maximum doesn't exist, we would fail to
+ * establish a connection and produce a value of this type.)
+ */
+ unsigned int daemonVersion;
+
+ /**
+ * Whether the remote side trusts us or not.
+ *
+ * 3 values: "yes", "no", or `std::nullopt` for "unknown".
+ *
+ * Note that the "remote side" might not be just the end daemon, but
+ * also an intermediary forwarder that can make its own trusting
+ * decisions. This would be the intersection of all their trust
+ * decisions, since it takes only one link in the chain to start
+ * denying operations.
+ */
+ std::optional<TrustedFlag> remoteTrustsUs;
+
+ /**
+ * The version of the Nix daemon that is processing our requests.
+ *
+ * Do note, it may or may not be communicating with another daemon,
+ * rather than being an "end" `LocalStore` or similar.
+ */
+ std::optional<std::string> daemonNixVersion;
+
+ /**
+ * Time this connection was established.
+ */
+ std::chrono::time_point<std::chrono::steady_clock> startTime;
+
+ /**
+ * Coercion to `WorkerProto::ReadConn`. This makes it easy to use the
+ * factored out worker protocol serializers with a
+ * `RemoteStore::Connection`.
+ *
+ * The worker protocol connection types are unidirectional, unlike
+ * this type.
+ */
+ operator WorkerProto::ReadConn ()
+ {
+ return WorkerProto::ReadConn {
+ .from = from,
+ };
+ }
+
+ /**
+ * Coercion to `WorkerProto::WriteConn`. This makes it easy to use the
+ * factored out worker protocol serializers with a
+ * `RemoteStore::Connection`.
+ *
+ * The worker protocol connection types are unidirectional, unlike
+ * this type.
+ */
+ operator WorkerProto::WriteConn ()
+ {
+ return WorkerProto::WriteConn {
+ .to = to,
+ };
+ }
+
+ virtual ~Connection();
+
+ virtual void closeWrite() = 0;
+
+ std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0, bool flush = true);
+};
+
+}
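
[Editorial sketch] Because `RemoteStore::Connection` converts implicitly to both unidirectional connection types, call sites can hand the whole connection to the serialisers; a minimal sketch of the pattern used throughout `remote-store.cc` below (`store`, `conn`, and `paths` are assumed to be in scope):

    // `conn` is a RemoteStore::Connection&; the implicit conversions pick
    // the right direction for each call.
    WorkerProto::write(store, conn, paths);                                  // uses WriteConn
    auto result = WorkerProto::Serialise<StorePathSet>::read(store, conn);   // uses ReadConn
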
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index c3dfb5979..1e2104e1f 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -5,7 +5,9 @@
#include "remote-fs-accessor.hh"
#include "build-result.hh"
#include "remote-store.hh"
+#include "remote-store-connection.hh"
#include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
#include "archive.hh"
#include "globals.hh"
#include "derivations.hh"
@@ -100,7 +102,7 @@ void RemoteStore::initConnection(Connection & conn)
}
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 35) {
- conn.remoteTrustsUs = WorkerProto<std::optional<TrustedFlag>>::read(*this, conn.from);
+ conn.remoteTrustsUs = WorkerProto::Serialise<std::optional<TrustedFlag>>::read(*this, conn);
} else {
+ // We don't know the answer; protocol too old.
conn.remoteTrustsUs = std::nullopt;
@@ -119,7 +121,7 @@ void RemoteStore::initConnection(Connection & conn)
void RemoteStore::setOptions(Connection & conn)
{
- conn.to << wopSetOptions
+ conn.to << WorkerProto::Op::SetOptions
<< settings.keepFailed
<< settings.keepGoing
<< settings.tryFallback
@@ -184,6 +186,7 @@ struct ConnectionHandle
}
RemoteStore::Connection * operator -> () { return &*handle; }
+ RemoteStore::Connection & operator * () { return *handle; }
void processStderr(Sink * sink = 0, Source * source = 0, bool flush = true)
{
@@ -211,7 +214,7 @@ void RemoteStore::setOptions()
bool RemoteStore::isValidPathUncached(const StorePath & path)
{
auto conn(getConnection());
- conn->to << wopIsValidPath << printStorePath(path);
+ conn->to << WorkerProto::Op::IsValidPath << printStorePath(path);
conn.processStderr();
return readInt(conn->from);
}
@@ -226,13 +229,13 @@ StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, Substitute
if (isValidPath(i)) res.insert(i);
return res;
} else {
- conn->to << wopQueryValidPaths;
- workerProtoWrite(*this, conn->to, paths);
+ conn->to << WorkerProto::Op::QueryValidPaths;
+ WorkerProto::write(*this, *conn, paths);
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 27) {
conn->to << (settings.buildersUseSubstitutes ? 1 : 0);
}
conn.processStderr();
- return WorkerProto<StorePathSet>::read(*this, conn->from);
+ return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
}
@@ -240,9 +243,9 @@ StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, Substitute
StorePathSet RemoteStore::queryAllValidPaths()
{
auto conn(getConnection());
- conn->to << wopQueryAllValidPaths;
+ conn->to << WorkerProto::Op::QueryAllValidPaths;
conn.processStderr();
- return WorkerProto<StorePathSet>::read(*this, conn->from);
+ return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
@@ -252,16 +255,16 @@ StorePathSet RemoteStore::querySubstitutablePaths(const StorePathSet & paths)
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
StorePathSet res;
for (auto & i : paths) {
- conn->to << wopHasSubstitutes << printStorePath(i);
+ conn->to << WorkerProto::Op::HasSubstitutes << printStorePath(i);
conn.processStderr();
if (readInt(conn->from)) res.insert(i);
}
return res;
} else {
- conn->to << wopQuerySubstitutablePaths;
- workerProtoWrite(*this, conn->to, paths);
+ conn->to << WorkerProto::Op::QuerySubstitutablePaths;
+ WorkerProto::write(*this, *conn, paths);
conn.processStderr();
- return WorkerProto<StorePathSet>::read(*this, conn->from);
+ return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
}
@@ -276,14 +279,14 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
for (auto & i : pathsMap) {
SubstitutablePathInfo info;
- conn->to << wopQuerySubstitutablePathInfo << printStorePath(i.first);
+ conn->to << WorkerProto::Op::QuerySubstitutablePathInfo << printStorePath(i.first);
conn.processStderr();
unsigned int reply = readInt(conn->from);
if (reply == 0) continue;
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
- info.references = WorkerProto<StorePathSet>::read(*this, conn->from);
+ info.references = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
infos.insert_or_assign(i.first, std::move(info));
@@ -291,14 +294,14 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
} else {
- conn->to << wopQuerySubstitutablePathInfos;
+ conn->to << WorkerProto::Op::QuerySubstitutablePathInfos;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 22) {
StorePathSet paths;
for (auto & path : pathsMap)
paths.insert(path.first);
- workerProtoWrite(*this, conn->to, paths);
+ WorkerProto::write(*this, *conn, paths);
} else
- workerProtoWrite(*this, conn->to, pathsMap);
+ WorkerProto::write(*this, *conn, pathsMap);
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
for (size_t n = 0; n < count; n++) {
@@ -306,7 +309,7 @@ void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, S
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
- info.references = WorkerProto<StorePathSet>::read(*this, conn->from);
+ info.references = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
}
@@ -322,7 +325,7 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path,
std::shared_ptr<const ValidPathInfo> info;
{
auto conn(getConnection());
- conn->to << wopQueryPathInfo << printStorePath(path);
+ conn->to << WorkerProto::Op::QueryPathInfo << printStorePath(path);
try {
conn.processStderr();
} catch (Error & e) {
@@ -347,9 +350,9 @@ void RemoteStore::queryReferrers(const StorePath & path,
StorePathSet & referrers)
{
auto conn(getConnection());
- conn->to << wopQueryReferrers << printStorePath(path);
+ conn->to << WorkerProto::Op::QueryReferrers << printStorePath(path);
conn.processStderr();
- for (auto & i : WorkerProto<StorePathSet>::read(*this, conn->from))
+ for (auto & i : WorkerProto::Serialise<StorePathSet>::read(*this, *conn))
referrers.insert(i);
}
@@ -357,9 +360,9 @@ void RemoteStore::queryReferrers(const StorePath & path,
StorePathSet RemoteStore::queryValidDerivers(const StorePath & path)
{
auto conn(getConnection());
- conn->to << wopQueryValidDerivers << printStorePath(path);
+ conn->to << WorkerProto::Op::QueryValidDerivers << printStorePath(path);
conn.processStderr();
- return WorkerProto<StorePathSet>::read(*this, conn->from);
+ return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
@@ -369,9 +372,9 @@ StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path)
return Store::queryDerivationOutputs(path);
}
auto conn(getConnection());
- conn->to << wopQueryDerivationOutputs << printStorePath(path);
+ conn->to << WorkerProto::Op::QueryDerivationOutputs << printStorePath(path);
conn.processStderr();
- return WorkerProto<StorePathSet>::read(*this, conn->from);
+ return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
@@ -379,9 +382,9 @@ std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivat
{
if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) {
auto conn(getConnection());
- conn->to << wopQueryDerivationOutputMap << printStorePath(path);
+ conn->to << WorkerProto::Op::QueryDerivationOutputMap << printStorePath(path);
conn.processStderr();
- return WorkerProto<std::map<std::string, std::optional<StorePath>>>::read(*this, conn->from);
+ return WorkerProto::Serialise<std::map<std::string, std::optional<StorePath>>>::read(*this, *conn);
} else {
// Fallback for old daemon versions.
// For floating-CA derivations (and their co-dependencies) this is an
@@ -402,7 +405,7 @@ std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivat
std::optional<StorePath> RemoteStore::queryPathFromHashPart(const std::string & hashPart)
{
auto conn(getConnection());
- conn->to << wopQueryPathFromHashPart << hashPart;
+ conn->to << WorkerProto::Op::QueryPathFromHashPart << hashPart;
conn.processStderr();
Path path = readString(conn->from);
if (path.empty()) return {};
@@ -424,10 +427,10 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 25) {
conn->to
- << wopAddToStore
+ << WorkerProto::Op::AddToStore
<< name
<< caMethod.render(hashType);
- workerProtoWrite(*this, conn->to, references);
+ WorkerProto::write(*this, *conn, references);
conn->to << repair;
// The dump source may invoke the store, so we need to make some room.
@@ -451,13 +454,13 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
throw UnimplementedError("When adding text-hashed data called '%s', only SHA-256 is supported but '%s' was given",
name, printHashType(hashType));
std::string s = dump.drain();
- conn->to << wopAddTextToStore << name << s;
- workerProtoWrite(*this, conn->to, references);
+ conn->to << WorkerProto::Op::AddTextToStore << name << s;
+ WorkerProto::write(*this, *conn, references);
conn.processStderr();
},
[&](const FileIngestionMethod & fim) -> void {
conn->to
- << wopAddToStore
+ << WorkerProto::Op::AddToStore
<< name
<< ((hashType == htSHA256 && fim == FileIngestionMethod::Recursive) ? 0 : 1) /* backwards compatibility hack */
<< (fim == FileIngestionMethod::Recursive ? 1 : 0)
@@ -509,7 +512,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
- conn->to << wopImportPaths;
+ conn->to << WorkerProto::Op::ImportPaths;
auto source2 = sinkToSource([&](Sink & sink) {
sink << 1 // == path follows
@@ -518,7 +521,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
sink
<< exportMagic
<< printStorePath(info.path);
- workerProtoWrite(*this, sink, info.references);
+ WorkerProto::write(*this, *conn, info.references);
sink
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0 // == no legacy signature
@@ -528,16 +531,16 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
conn.processStderr(0, source2.get());
- auto importedPaths = WorkerProto<StorePathSet>::read(*this, conn->from);
+ auto importedPaths = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
assert(importedPaths.size() <= 1);
}
else {
- conn->to << wopAddToStoreNar
+ conn->to << WorkerProto::Op::AddToStoreNar
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(Base16, false);
- workerProtoWrite(*this, conn->to, info.references);
+ WorkerProto::write(*this, *conn, info.references);
conn->to << info.registrationTime << info.narSize
<< info.ultimate << info.sigs << renderContentAddress(info.ca)
<< repair << !checkSigs;
@@ -581,7 +584,7 @@ void RemoteStore::addMultipleToStore(
if (GET_PROTOCOL_MINOR(getConnection()->daemonVersion) >= 32) {
auto conn(getConnection());
conn->to
- << wopAddMultipleToStore
+ << WorkerProto::Op::AddMultipleToStore
<< repair
<< !checkSigs;
conn.withFramedSink([&](Sink & sink) {
@@ -605,12 +608,12 @@ StorePath RemoteStore::addTextToStore(
void RemoteStore::registerDrvOutput(const Realisation & info)
{
auto conn(getConnection());
- conn->to << wopRegisterDrvOutput;
+ conn->to << WorkerProto::Op::RegisterDrvOutput;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
conn->to << info.id.to_string();
conn->to << std::string(info.outPath.to_string());
} else {
- workerProtoWrite(*this, conn->to, info);
+ WorkerProto::write(*this, *conn, info);
}
conn.processStderr();
}
@@ -626,20 +629,20 @@ void RemoteStore::queryRealisationUncached(const DrvOutput & id,
return callback(nullptr);
}
- conn->to << wopQueryRealisation;
+ conn->to << WorkerProto::Op::QueryRealisation;
conn->to << id.to_string();
conn.processStderr();
auto real = [&]() -> std::shared_ptr<const Realisation> {
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
- auto outPaths = WorkerProto<std::set<StorePath>>::read(
- *this, conn->from);
+ auto outPaths = WorkerProto::Serialise<std::set<StorePath>>::read(
+ *this, *conn);
if (outPaths.empty())
return nullptr;
return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
} else {
- auto realisations = WorkerProto<std::set<Realisation>>::read(
- *this, conn->from);
+ auto realisations = WorkerProto::Serialise<std::set<Realisation>>::read(
+ *this, *conn);
if (realisations.empty())
return nullptr;
return std::make_shared<const Realisation>(*realisations.begin());
@@ -650,10 +653,10 @@ void RemoteStore::queryRealisationUncached(const DrvOutput & id,
} catch (...) { return callback.rethrow(); }
}
-static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, const std::vector<DerivedPath> & reqs)
+static void writeDerivedPaths(RemoteStore & store, RemoteStore::Connection & conn, const std::vector<DerivedPath> & reqs)
{
- if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 30) {
- workerProtoWrite(store, conn->to, reqs);
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 30) {
+ WorkerProto::write(store, conn, reqs);
} else {
Strings ss;
for (auto & p : reqs) {
@@ -665,12 +668,12 @@ static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, cons
[&](const StorePath & drvPath) {
throw Error("trying to request '%s', but daemon protocol %d.%d is too old (< 1.29) to request a derivation file",
store.printStorePath(drvPath),
- GET_PROTOCOL_MAJOR(conn->daemonVersion),
- GET_PROTOCOL_MINOR(conn->daemonVersion));
+ GET_PROTOCOL_MAJOR(conn.daemonVersion),
+ GET_PROTOCOL_MINOR(conn.daemonVersion));
},
}, sOrDrvPath);
}
- conn->to << ss;
+ conn.to << ss;
}
}
@@ -694,9 +697,9 @@ void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMod
copyDrvsFromEvalStore(drvPaths, evalStore);
auto conn(getConnection());
- conn->to << wopBuildPaths;
+ conn->to << WorkerProto::Op::BuildPaths;
assert(GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13);
- writeDerivedPaths(*this, conn, drvPaths);
+ writeDerivedPaths(*this, *conn, drvPaths);
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
conn->to << buildMode;
else
@@ -719,11 +722,11 @@ std::vector<KeyedBuildResult> RemoteStore::buildPathsWithResults(
auto & conn = *conn_;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 34) {
- conn->to << wopBuildPathsWithResults;
- writeDerivedPaths(*this, conn, paths);
+ conn->to << WorkerProto::Op::BuildPathsWithResults;
+ writeDerivedPaths(*this, *conn, paths);
conn->to << buildMode;
conn.processStderr();
- return WorkerProto<std::vector<KeyedBuildResult>>::read(*this, conn->from);
+ return WorkerProto::Serialise<std::vector<KeyedBuildResult>>::read(*this, *conn);
} else {
// Avoid deadlock.
conn_.reset();
@@ -795,7 +798,7 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
BuildMode buildMode)
{
auto conn(getConnection());
- conn->to << wopBuildDerivation << printStorePath(drvPath);
+ conn->to << WorkerProto::Op::BuildDerivation << printStorePath(drvPath);
writeDerivation(conn->to, *this, drv);
conn->to << buildMode;
conn.processStderr();
@@ -806,7 +809,7 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
conn->from >> res.timesBuilt >> res.isNonDeterministic >> res.startTime >> res.stopTime;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 28) {
- auto builtOutputs = WorkerProto<DrvOutputs>::read(*this, conn->from);
+ auto builtOutputs = WorkerProto::Serialise<DrvOutputs>::read(*this, *conn);
for (auto && [output, realisation] : builtOutputs)
res.builtOutputs.insert_or_assign(
std::move(output.outputName),
@@ -819,7 +822,7 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
void RemoteStore::ensurePath(const StorePath & path)
{
auto conn(getConnection());
- conn->to << wopEnsurePath << printStorePath(path);
+ conn->to << WorkerProto::Op::EnsurePath << printStorePath(path);
conn.processStderr();
readInt(conn->from);
}
@@ -828,7 +831,7 @@ void RemoteStore::ensurePath(const StorePath & path)
void RemoteStore::addTempRoot(const StorePath & path)
{
auto conn(getConnection());
- conn->to << wopAddTempRoot << printStorePath(path);
+ conn->to << WorkerProto::Op::AddTempRoot << printStorePath(path);
conn.processStderr();
readInt(conn->from);
}
@@ -837,7 +840,7 @@ void RemoteStore::addTempRoot(const StorePath & path)
void RemoteStore::addIndirectRoot(const Path & path)
{
auto conn(getConnection());
- conn->to << wopAddIndirectRoot << path;
+ conn->to << WorkerProto::Op::AddIndirectRoot << path;
conn.processStderr();
readInt(conn->from);
}
@@ -846,7 +849,7 @@ void RemoteStore::addIndirectRoot(const Path & path)
Roots RemoteStore::findRoots(bool censor)
{
auto conn(getConnection());
- conn->to << wopFindRoots;
+ conn->to << WorkerProto::Op::FindRoots;
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
Roots result;
@@ -864,8 +867,8 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
auto conn(getConnection());
conn->to
- << wopCollectGarbage << options.action;
- workerProtoWrite(*this, conn->to, options.pathsToDelete);
+ << WorkerProto::Op::CollectGarbage << options.action;
+ WorkerProto::write(*this, *conn, options.pathsToDelete);
conn->to << options.ignoreLiveness
<< options.maxFreed
/* removed options */
@@ -887,7 +890,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
void RemoteStore::optimiseStore()
{
auto conn(getConnection());
- conn->to << wopOptimiseStore;
+ conn->to << WorkerProto::Op::OptimiseStore;
conn.processStderr();
readInt(conn->from);
}
@@ -896,7 +899,7 @@ void RemoteStore::optimiseStore()
bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair)
{
auto conn(getConnection());
- conn->to << wopVerifyStore << checkContents << repair;
+ conn->to << WorkerProto::Op::VerifyStore << checkContents << repair;
conn.processStderr();
return readInt(conn->from);
}
@@ -905,7 +908,7 @@ bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair)
void RemoteStore::addSignatures(const StorePath & storePath, const StringSet & sigs)
{
auto conn(getConnection());
- conn->to << wopAddSignatures << printStorePath(storePath) << sigs;
+ conn->to << WorkerProto::Op::AddSignatures << printStorePath(storePath) << sigs;
conn.processStderr();
readInt(conn->from);
}
@@ -921,12 +924,12 @@ void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
// Don't hold the connection handle in the fallback case
// to prevent a deadlock.
goto fallback;
- conn->to << wopQueryMissing;
- writeDerivedPaths(*this, conn, targets);
+ conn->to << WorkerProto::Op::QueryMissing;
+ writeDerivedPaths(*this, *conn, targets);
conn.processStderr();
- willBuild = WorkerProto<StorePathSet>::read(*this, conn->from);
- willSubstitute = WorkerProto<StorePathSet>::read(*this, conn->from);
- unknown = WorkerProto<StorePathSet>::read(*this, conn->from);
+ willBuild = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
+ willSubstitute = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
+ unknown = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
conn->from >> downloadSize >> narSize;
return;
}
@@ -940,7 +943,7 @@ void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
void RemoteStore::addBuildLog(const StorePath & drvPath, std::string_view log)
{
auto conn(getConnection());
- conn->to << wopAddBuildLog << drvPath.to_string();
+ conn->to << WorkerProto::Op::AddBuildLog << drvPath.to_string();
StringSource source(log);
conn.withFramedSink([&](Sink & sink) {
source.drainInto(sink);
@@ -992,7 +995,7 @@ RemoteStore::Connection::~Connection()
void RemoteStore::narFromPath(const StorePath & path, Sink & sink)
{
auto conn(connections->get());
- conn->to << wopNarFromPath << printStorePath(path);
+ conn->to << WorkerProto::Op::NarFromPath << printStorePath(path);
conn->processStderr();
copyNAR(conn->from, sink);
}
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 4f3971bfd..cb7a71acf 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -166,21 +166,7 @@ public:
void flushBadConnections();
- struct Connection
- {
- FdSink to;
- FdSource from;
- unsigned int daemonVersion;
- std::optional<TrustedFlag> remoteTrustsUs;
- std::optional<std::string> daemonNixVersion;
- std::chrono::time_point<std::chrono::steady_clock> startTime;
-
- virtual ~Connection();
-
- virtual void closeWrite() = 0;
-
- std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0, bool flush = true);
- };
+ struct Connection;
ref<Connection> openConnectionWrapper();
diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh
index 553fd3a09..7e43b3969 100644
--- a/src/libstore/serve-protocol.hh
+++ b/src/libstore/serve-protocol.hh
@@ -10,16 +10,52 @@ namespace nix {
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
-typedef enum {
- cmdQueryValidPaths = 1,
- cmdQueryPathInfos = 2,
- cmdDumpStorePath = 3,
- cmdImportPaths = 4,
- cmdExportPaths = 5,
- cmdBuildPaths = 6,
- cmdQueryClosure = 7,
- cmdBuildDerivation = 8,
- cmdAddToStoreNar = 9,
-} ServeCommand;
+/**
+ * The "serve protocol", used by ssh:// stores.
+ *
+ * This `struct` is basically just a `namespace`; we use a type rather
+ * than a namespace just so we can use it as a template argument.
+ */
+struct ServeProto
+{
+ /**
+ * Enumeration of all the request types for the protocol.
+ */
+ enum struct Command : uint64_t;
+};
+
+enum struct ServeProto::Command : uint64_t
+{
+ QueryValidPaths = 1,
+ QueryPathInfos = 2,
+ DumpStorePath = 3,
+ ImportPaths = 4,
+ ExportPaths = 5,
+ BuildPaths = 6,
+ QueryClosure = 7,
+ BuildDerivation = 8,
+ AddToStoreNar = 9,
+};
+
+/**
+ * Convenience for sending operation codes.
+ *
+ * @todo Switch to using `ServeProto::Serialise` instead. This was not
+ * done yet, to keep the churn of this change smaller.
+ */
+inline Sink & operator << (Sink & sink, ServeProto::Command op)
+{
+ return sink << (uint64_t) op;
+}
+
+/**
+ * Convenience for debugging.
+ *
+ * @todo Perhaps render known opcodes more nicely.
+ */
+inline std::ostream & operator << (std::ostream & s, ServeProto::Command op)
+{
+ return s << (uint64_t) op;
+}
}
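
A command is thus sent by writing its integer value to the connection's sink. A minimal standalone sketch of that shape, using invented stand-ins (Sink::writeU64, PrintingSink) rather than the real libutil interface:

    #include <cstdint>
    #include <iostream>

    // Stand-in for the real Sink interface from libutil.
    struct Sink
    {
        virtual void writeU64(uint64_t n) = 0;
        virtual ~Sink() = default;
    };

    // Mirrors ServeProto::Command: a scoped enum used purely as a wire opcode.
    enum struct Command : uint64_t { QueryValidPaths = 1, BuildPaths = 6 };

    // The convenience overload: sending an opcode is just sending its integer value.
    inline Sink & operator << (Sink & sink, Command op)
    {
        sink.writeU64(static_cast<uint64_t>(op));
        return sink;
    }

    struct PrintingSink : Sink
    {
        void writeU64(uint64_t n) override { std::cout << "sent opcode " << n << "\n"; }
    };

    int main()
    {
        PrintingSink to;
        to << Command::BuildPaths; // prints "sent opcode 6"
    }
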
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index df334c23c..7c8decb74 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -1,6 +1,7 @@
#include "sqlite.hh"
#include "globals.hh"
#include "util.hh"
+#include "url.hh"
#include <sqlite3.h>
@@ -50,15 +51,17 @@ static void traceSQL(void * x, const char * sql)
notice("SQL<[%1%]>", sql);
};
-SQLite::SQLite(const Path & path, bool create)
+SQLite::SQLite(const Path & path, SQLiteOpenMode mode)
{
// useSQLiteWAL also indicates what virtual file system we need. Using
// `unix-dotfile` is needed on NFS file systems and on Windows' Subsystem
// for Linux (WSL) where useSQLiteWAL should be false by default.
const char *vfs = settings.useSQLiteWAL ? 0 : "unix-dotfile";
- int flags = SQLITE_OPEN_READWRITE;
- if (create) flags |= SQLITE_OPEN_CREATE;
- int ret = sqlite3_open_v2(path.c_str(), &db, flags, vfs);
+ bool immutable = mode == SQLiteOpenMode::Immutable;
+ int flags = immutable ? SQLITE_OPEN_READONLY : SQLITE_OPEN_READWRITE;
+ if (mode == SQLiteOpenMode::Normal) flags |= SQLITE_OPEN_CREATE;
+ auto uri = "file:" + percentEncode(path) + "?immutable=" + (immutable ? "1" : "0");
+ int ret = sqlite3_open_v2(uri.c_str(), &db, SQLITE_OPEN_URI | flags, vfs);
if (ret != SQLITE_OK) {
const char * err = sqlite3_errstr(ret);
throw Error("cannot open SQLite database '%s': %s", path, err);
diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh
index 6e14852cb..0c08267f7 100644
--- a/src/libstore/sqlite.hh
+++ b/src/libstore/sqlite.hh
@@ -11,6 +11,27 @@ struct sqlite3_stmt;
namespace nix {
+enum class SQLiteOpenMode {
+ /**
+ * Open the database in read-write mode.
+ * If the database does not exist, it will be created.
+ */
+ Normal,
+ /**
+ * Open the database in read-write mode.
+ * Fails with an error if the database does not exist.
+ */
+ NoCreate,
+ /**
+ * Open the database in immutable mode.
+ * In addition to the database being read-only,
+ * no WAL or journal files will be created by SQLite.
+ * Use this mode if the database is on a read-only filesystem.
+ * Fails with an error if the database does not exist.
+ */
+ Immutable,
+};
+
/**
* RAII wrapper to close a SQLite database automatically.
*/
@@ -18,7 +39,7 @@ struct SQLite
{
sqlite3 * db = 0;
SQLite() { }
- SQLite(const Path & path, bool create = true);
+ SQLite(const Path & path, SQLiteOpenMode mode = SQLiteOpenMode::Normal);
SQLite(const SQLite & from) = delete;
SQLite& operator = (const SQLite & from) = delete;
SQLite& operator = (SQLite && from) { db = from.db; from.db = 0; return *this; }
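
SQLiteOpenMode::Immutable relies on SQLite's URI filenames: the constructor above builds a "file:" URI with immutable=1 and passes SQLITE_OPEN_URI. A standalone sketch of the same trick against the raw sqlite3 API, with a hypothetical database path and without the percent-encoding the real constructor applies:

    #include <sqlite3.h>
    #include <cstdio>
    #include <string>

    int main()
    {
        // Hypothetical database on a (possibly read-only) filesystem.
        std::string path = "/tmp/example.sqlite";
        // immutable=1: read-only, and SQLite creates no journal or WAL files.
        std::string uri = "file:" + path + "?immutable=1";

        sqlite3 * db = nullptr;
        int ret = sqlite3_open_v2(uri.c_str(), &db, SQLITE_OPEN_URI | SQLITE_OPEN_READONLY, nullptr);
        if (ret != SQLITE_OK) {
            std::fprintf(stderr, "cannot open '%s': %s\n", path.c_str(), sqlite3_errstr(ret));
            return 1;
        }
        sqlite3_close(db);
        return 0;
    }
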
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
index 962221ad2..0200076c0 100644
--- a/src/libstore/ssh-store.cc
+++ b/src/libstore/ssh-store.cc
@@ -1,6 +1,7 @@
#include "ssh-store-config.hh"
#include "store-api.hh"
#include "remote-store.hh"
+#include "remote-store-connection.hh"
#include "remote-fs-accessor.hh"
#include "archive.hh"
#include "worker-protocol.hh"
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 2ecbe2708..14a862eef 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -114,7 +114,7 @@ struct StoreConfig : public Config
return "";
}
- const PathSetting storeDir_{this, false, settings.nixStore,
+ const PathSetting storeDir_{this, settings.nixStore,
"store",
R"(
Logical location of the Nix store, usually
diff --git a/src/libstore/uds-remote-store.cc b/src/libstore/uds-remote-store.cc
index 0fb7c38e9..69dae2da5 100644
--- a/src/libstore/uds-remote-store.cc
+++ b/src/libstore/uds-remote-store.cc
@@ -13,6 +13,14 @@
namespace nix {
+std::string UDSRemoteStoreConfig::doc()
+{
+ return
+ #include "uds-remote-store.md"
+ ;
+}
+
+
UDSRemoteStore::UDSRemoteStore(const Params & params)
: StoreConfig(params)
, LocalFSStoreConfig(params)
diff --git a/src/libstore/uds-remote-store.hh b/src/libstore/uds-remote-store.hh
index bd1dcb67c..2bd6517fa 100644
--- a/src/libstore/uds-remote-store.hh
+++ b/src/libstore/uds-remote-store.hh
@@ -2,6 +2,7 @@
///@file
#include "remote-store.hh"
+#include "remote-store-connection.hh"
#include "local-fs-store.hh"
namespace nix {
@@ -17,12 +18,7 @@ struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual RemoteStoreCon
const std::string name() override { return "Local Daemon Store"; }
- std::string doc() override
- {
- return
- #include "uds-remote-store.md"
- ;
- }
+ std::string doc() override;
};
class UDSRemoteStore : public virtual UDSRemoteStoreConfig, public virtual LocalFSStore, public virtual RemoteStore
diff --git a/src/libstore/worker-protocol-impl.hh b/src/libstore/worker-protocol-impl.hh
new file mode 100644
index 000000000..d3d2792ff
--- /dev/null
+++ b/src/libstore/worker-protocol-impl.hh
@@ -0,0 +1,78 @@
+#pragma once
+/**
+ * @file
+ *
+ * Template implementations (as opposed to mere declarations).
+ *
+ * This file is an example of the "impl.hh" pattern. See the
+ * contributing guide.
+ */
+
+#include "worker-protocol.hh"
+
+namespace nix {
+
+template<typename T>
+std::vector<T> WorkerProto::Serialise<std::vector<T>>::read(const Store & store, WorkerProto::ReadConn conn)
+{
+ std::vector<T> resSet;
+ auto size = readNum<size_t>(conn.from);
+ while (size--) {
+ resSet.push_back(WorkerProto::Serialise<T>::read(store, conn));
+ }
+ return resSet;
+}
+
+template<typename T>
+void WorkerProto::Serialise<std::vector<T>>::write(const Store & store, WorkerProto::WriteConn conn, const std::vector<T> & resSet)
+{
+ conn.to << resSet.size();
+ for (auto & key : resSet) {
+ WorkerProto::Serialise<T>::write(store, conn, key);
+ }
+}
+
+template<typename T>
+std::set<T> WorkerProto::Serialise<std::set<T>>::read(const Store & store, WorkerProto::ReadConn conn)
+{
+ std::set<T> resSet;
+ auto size = readNum<size_t>(conn.from);
+ while (size--) {
+ resSet.insert(WorkerProto::Serialise<T>::read(store, conn));
+ }
+ return resSet;
+}
+
+template<typename T>
+void WorkerProto::Serialise<std::set<T>>::write(const Store & store, WorkerProto::WriteConn conn, const std::set<T> & resSet)
+{
+ conn.to << resSet.size();
+ for (auto & key : resSet) {
+ WorkerProto::Serialise<T>::write(store, conn, key);
+ }
+}
+
+template<typename K, typename V>
+std::map<K, V> WorkerProto::Serialise<std::map<K, V>>::read(const Store & store, WorkerProto::ReadConn conn)
+{
+ std::map<K, V> resMap;
+ auto size = readNum<size_t>(conn.from);
+ while (size--) {
+ auto k = WorkerProto::Serialise<K>::read(store, conn);
+ auto v = WorkerProto::Serialise<V>::read(store, conn);
+ resMap.insert_or_assign(std::move(k), std::move(v));
+ }
+ return resMap;
+}
+
+template<typename K, typename V>
+void WorkerProto::Serialise<std::map<K, V>>::write(const Store & store, WorkerProto::WriteConn conn, const std::map<K, V> & resMap)
+{
+ conn.to << resMap.size();
+ for (auto & i : resMap) {
+ WorkerProto::Serialise<K>::write(store, conn, i.first);
+ WorkerProto::Serialise<V>::write(store, conn, i.second);
+ }
+}
+
+}
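
These container instances mean that once an element type has a Serialise specialisation, vectors, sets and maps of it come for free. A self-contained sketch of that composition, with toy Store/ReadConn/WriteConn stand-ins backed by string streams instead of the real Source/Sink:

    #include <iostream>
    #include <set>
    #include <sstream>
    #include <string>

    struct Store {};                                  // toy stand-in
    struct ReadConn  { std::istringstream & from; };  // toy stand-in
    struct WriteConn { std::ostringstream & to; };    // toy stand-in

    struct Proto
    {
        template<typename T> struct Serialise;        // declared, not defined (see worker-protocol.hh)
    };

    // One element serialiser (space-separated tokens, for illustration only)...
    template<> struct Proto::Serialise<std::string>
    {
        static std::string read(const Store &, ReadConn conn)
        { std::string s; conn.from >> s; return s; }
        static void write(const Store &, WriteConn conn, const std::string & s)
        { conn.to << s << ' '; }
    };

    // ...and a generic container instance, shaped like the ones above, builds on it.
    template<typename T> struct Proto::Serialise<std::set<T>>
    {
        static std::set<T> read(const Store & store, ReadConn conn)
        {
            size_t size; conn.from >> size;
            std::set<T> res;
            while (size--) res.insert(Proto::Serialise<T>::read(store, conn));
            return res;
        }
        static void write(const Store & store, WriteConn conn, const std::set<T> & res)
        {
            conn.to << res.size() << ' ';
            for (auto & x : res) Proto::Serialise<T>::write(store, conn, x);
        }
    };

    int main()
    {
        Store store;
        std::ostringstream out;
        Proto::Serialise<std::set<std::string>>::write(store, WriteConn{out}, {"foo", "bar"});

        std::istringstream in(out.str());
        auto back = Proto::Serialise<std::set<std::string>>::read(store, ReadConn{in});
        std::cout << back.size() << "\n"; // 2
    }
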
diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc
index 51bb12026..a23130743 100644
--- a/src/libstore/worker-protocol.cc
+++ b/src/libstore/worker-protocol.cc
@@ -4,6 +4,7 @@
#include "store-api.hh"
#include "build-result.hh"
#include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
#include "archive.hh"
#include "derivations.hh"
@@ -11,31 +12,31 @@
namespace nix {
-std::string WorkerProto<std::string>::read(const Store & store, Source & from)
+std::string WorkerProto::Serialise<std::string>::read(const Store & store, WorkerProto::ReadConn conn)
{
- return readString(from);
+ return readString(conn.from);
}
-void WorkerProto<std::string>::write(const Store & store, Sink & out, const std::string & str)
+void WorkerProto::Serialise<std::string>::write(const Store & store, WorkerProto::WriteConn conn, const std::string & str)
{
- out << str;
+ conn.to << str;
}
-StorePath WorkerProto<StorePath>::read(const Store & store, Source & from)
+StorePath WorkerProto::Serialise<StorePath>::read(const Store & store, WorkerProto::ReadConn conn)
{
- return store.parseStorePath(readString(from));
+ return store.parseStorePath(readString(conn.from));
}
-void WorkerProto<StorePath>::write(const Store & store, Sink & out, const StorePath & storePath)
+void WorkerProto::Serialise<StorePath>::write(const Store & store, WorkerProto::WriteConn conn, const StorePath & storePath)
{
- out << store.printStorePath(storePath);
+ conn.to << store.printStorePath(storePath);
}
-std::optional<TrustedFlag> WorkerProto<std::optional<TrustedFlag>>::read(const Store & store, Source & from)
+std::optional<TrustedFlag> WorkerProto::Serialise<std::optional<TrustedFlag>>::read(const Store & store, WorkerProto::ReadConn conn)
{
- auto temp = readNum<uint8_t>(from);
+ auto temp = readNum<uint8_t>(conn.from);
switch (temp) {
case 0:
return std::nullopt;
@@ -48,17 +49,17 @@ std::optional<TrustedFlag> WorkerProto<std::optional<TrustedFlag>>::read(const S
}
}
-void WorkerProto<std::optional<TrustedFlag>>::write(const Store & store, Sink & out, const std::optional<TrustedFlag> & optTrusted)
+void WorkerProto::Serialise<std::optional<TrustedFlag>>::write(const Store & store, WorkerProto::WriteConn conn, const std::optional<TrustedFlag> & optTrusted)
{
if (!optTrusted)
- out << (uint8_t)0;
+ conn.to << (uint8_t)0;
else {
switch (*optTrusted) {
case Trusted:
- out << (uint8_t)1;
+ conn.to << (uint8_t)1;
break;
case NotTrusted:
- out << (uint8_t)2;
+ conn.to << (uint8_t)2;
break;
default:
assert(false);
@@ -67,83 +68,83 @@ void WorkerProto<std::optional<TrustedFlag>>::write(const Store & store, Sink &
}
-ContentAddress WorkerProto<ContentAddress>::read(const Store & store, Source & from)
+ContentAddress WorkerProto::Serialise<ContentAddress>::read(const Store & store, WorkerProto::ReadConn conn)
{
- return ContentAddress::parse(readString(from));
+ return ContentAddress::parse(readString(conn.from));
}
-void WorkerProto<ContentAddress>::write(const Store & store, Sink & out, const ContentAddress & ca)
+void WorkerProto::Serialise<ContentAddress>::write(const Store & store, WorkerProto::WriteConn conn, const ContentAddress & ca)
{
- out << renderContentAddress(ca);
+ conn.to << renderContentAddress(ca);
}
-DerivedPath WorkerProto<DerivedPath>::read(const Store & store, Source & from)
+DerivedPath WorkerProto::Serialise<DerivedPath>::read(const Store & store, WorkerProto::ReadConn conn)
{
- auto s = readString(from);
+ auto s = readString(conn.from);
return DerivedPath::parseLegacy(store, s);
}
-void WorkerProto<DerivedPath>::write(const Store & store, Sink & out, const DerivedPath & req)
+void WorkerProto::Serialise<DerivedPath>::write(const Store & store, WorkerProto::WriteConn conn, const DerivedPath & req)
{
- out << req.to_string_legacy(store);
+ conn.to << req.to_string_legacy(store);
}
-Realisation WorkerProto<Realisation>::read(const Store & store, Source & from)
+Realisation WorkerProto::Serialise<Realisation>::read(const Store & store, WorkerProto::ReadConn conn)
{
- std::string rawInput = readString(from);
+ std::string rawInput = readString(conn.from);
return Realisation::fromJSON(
nlohmann::json::parse(rawInput),
"remote-protocol"
);
}
-void WorkerProto<Realisation>::write(const Store & store, Sink & out, const Realisation & realisation)
+void WorkerProto::Serialise<Realisation>::write(const Store & store, WorkerProto::WriteConn conn, const Realisation & realisation)
{
- out << realisation.toJSON().dump();
+ conn.to << realisation.toJSON().dump();
}
-DrvOutput WorkerProto<DrvOutput>::read(const Store & store, Source & from)
+DrvOutput WorkerProto::Serialise<DrvOutput>::read(const Store & store, WorkerProto::ReadConn conn)
{
- return DrvOutput::parse(readString(from));
+ return DrvOutput::parse(readString(conn.from));
}
-void WorkerProto<DrvOutput>::write(const Store & store, Sink & out, const DrvOutput & drvOutput)
+void WorkerProto::Serialise<DrvOutput>::write(const Store & store, WorkerProto::WriteConn conn, const DrvOutput & drvOutput)
{
- out << drvOutput.to_string();
+ conn.to << drvOutput.to_string();
}
-KeyedBuildResult WorkerProto<KeyedBuildResult>::read(const Store & store, Source & from)
+KeyedBuildResult WorkerProto::Serialise<KeyedBuildResult>::read(const Store & store, WorkerProto::ReadConn conn)
{
- auto path = WorkerProto<DerivedPath>::read(store, from);
- auto br = WorkerProto<BuildResult>::read(store, from);
+ auto path = WorkerProto::Serialise<DerivedPath>::read(store, conn);
+ auto br = WorkerProto::Serialise<BuildResult>::read(store, conn);
return KeyedBuildResult {
std::move(br),
/* .path = */ std::move(path),
};
}
-void WorkerProto<KeyedBuildResult>::write(const Store & store, Sink & to, const KeyedBuildResult & res)
+void WorkerProto::Serialise<KeyedBuildResult>::write(const Store & store, WorkerProto::WriteConn conn, const KeyedBuildResult & res)
{
- workerProtoWrite(store, to, res.path);
- workerProtoWrite(store, to, static_cast<const BuildResult &>(res));
+ WorkerProto::write(store, conn, res.path);
+ WorkerProto::write(store, conn, static_cast<const BuildResult &>(res));
}
-BuildResult WorkerProto<BuildResult>::read(const Store & store, Source & from)
+BuildResult WorkerProto::Serialise<BuildResult>::read(const Store & store, WorkerProto::ReadConn conn)
{
BuildResult res;
- res.status = (BuildResult::Status) readInt(from);
- from
+ res.status = (BuildResult::Status) readInt(conn.from);
+ conn.from
>> res.errorMsg
>> res.timesBuilt
>> res.isNonDeterministic
>> res.startTime
>> res.stopTime;
- auto builtOutputs = WorkerProto<DrvOutputs>::read(store, from);
+ auto builtOutputs = WorkerProto::Serialise<DrvOutputs>::read(store, conn);
for (auto && [output, realisation] : builtOutputs)
res.builtOutputs.insert_or_assign(
std::move(output.outputName),
@@ -151,9 +152,9 @@ BuildResult WorkerProto<BuildResult>::read(const Store & store, Source & from)
return res;
}
-void WorkerProto<BuildResult>::write(const Store & store, Sink & to, const BuildResult & res)
+void WorkerProto::Serialise<BuildResult>::write(const Store & store, WorkerProto::WriteConn conn, const BuildResult & res)
{
- to
+ conn.to
<< res.status
<< res.errorMsg
<< res.timesBuilt
@@ -163,30 +164,30 @@ void WorkerProto<BuildResult>::write(const Store & store, Sink & to, const Build
DrvOutputs builtOutputs;
for (auto & [output, realisation] : res.builtOutputs)
builtOutputs.insert_or_assign(realisation.id, realisation);
- workerProtoWrite(store, to, builtOutputs);
+ WorkerProto::write(store, conn, builtOutputs);
}
-std::optional<StorePath> WorkerProto<std::optional<StorePath>>::read(const Store & store, Source & from)
+std::optional<StorePath> WorkerProto::Serialise<std::optional<StorePath>>::read(const Store & store, WorkerProto::ReadConn conn)
{
- auto s = readString(from);
+ auto s = readString(conn.from);
return s == "" ? std::optional<StorePath> {} : store.parseStorePath(s);
}
-void WorkerProto<std::optional<StorePath>>::write(const Store & store, Sink & out, const std::optional<StorePath> & storePathOpt)
+void WorkerProto::Serialise<std::optional<StorePath>>::write(const Store & store, WorkerProto::WriteConn conn, const std::optional<StorePath> & storePathOpt)
{
- out << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
+ conn.to << (storePathOpt ? store.printStorePath(*storePathOpt) : "");
}
-std::optional<ContentAddress> WorkerProto<std::optional<ContentAddress>>::read(const Store & store, Source & from)
+std::optional<ContentAddress> WorkerProto::Serialise<std::optional<ContentAddress>>::read(const Store & store, WorkerProto::ReadConn conn)
{
- return ContentAddress::parseOpt(readString(from));
+ return ContentAddress::parseOpt(readString(conn.from));
}
-void WorkerProto<std::optional<ContentAddress>>::write(const Store & store, Sink & out, const std::optional<ContentAddress> & caOpt)
+void WorkerProto::Serialise<std::optional<ContentAddress>>::write(const Store & store, WorkerProto::WriteConn conn, const std::optional<ContentAddress> & caOpt)
{
- out << (caOpt ? renderContentAddress(*caOpt) : "");
+ conn.to << (caOpt ? renderContentAddress(*caOpt) : "");
}
}
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index f06332d17..ff762c924 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -14,57 +14,6 @@ namespace nix {
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
-/**
- * Enumeration of all the request types for the "worker protocol", used
- * by unix:// and ssh-ng:// stores.
- */
-typedef enum {
- wopIsValidPath = 1,
- wopHasSubstitutes = 3,
- wopQueryPathHash = 4, // obsolete
- wopQueryReferences = 5, // obsolete
- wopQueryReferrers = 6,
- wopAddToStore = 7,
- wopAddTextToStore = 8, // obsolete since 1.25, Nix 3.0. Use wopAddToStore
- wopBuildPaths = 9,
- wopEnsurePath = 10,
- wopAddTempRoot = 11,
- wopAddIndirectRoot = 12,
- wopSyncWithGC = 13,
- wopFindRoots = 14,
- wopExportPath = 16, // obsolete
- wopQueryDeriver = 18, // obsolete
- wopSetOptions = 19,
- wopCollectGarbage = 20,
- wopQuerySubstitutablePathInfo = 21,
- wopQueryDerivationOutputs = 22, // obsolete
- wopQueryAllValidPaths = 23,
- wopQueryFailedPaths = 24,
- wopClearFailedPaths = 25,
- wopQueryPathInfo = 26,
- wopImportPaths = 27, // obsolete
- wopQueryDerivationOutputNames = 28, // obsolete
- wopQueryPathFromHashPart = 29,
- wopQuerySubstitutablePathInfos = 30,
- wopQueryValidPaths = 31,
- wopQuerySubstitutablePaths = 32,
- wopQueryValidDerivers = 33,
- wopOptimiseStore = 34,
- wopVerifyStore = 35,
- wopBuildDerivation = 36,
- wopAddSignatures = 37,
- wopNarFromPath = 38,
- wopAddToStoreNar = 39,
- wopQueryMissing = 40,
- wopQueryDerivationOutputMap = 41,
- wopRegisterDrvOutput = 42,
- wopQueryRealisation = 43,
- wopAddMultipleToStore = 44,
- wopAddBuildLog = 45,
- wopBuildPathsWithResults = 46,
-} WorkerOp;
-
-
#define STDERR_NEXT 0x6f6c6d67
#define STDERR_READ 0x64617461 // data needed from source
#define STDERR_WRITE 0x64617416 // data for sink
@@ -78,7 +27,7 @@ typedef enum {
class Store;
struct Source;
-// items being serialized
+// items being serialised
struct DerivedPath;
struct DrvOutput;
struct Realisation;
@@ -88,31 +37,154 @@ enum TrustedFlag : bool;
/**
- * Data type for canonical pairs of serializers for the worker protocol.
+ * The "worker protocol", used by unix:// and ssh-ng:// stores.
*
- * See https://en.cppreference.com/w/cpp/language/adl for the broader
- * concept of what is going on here.
+ * This `struct` is basically just a `namespace`; we use a type rather
+ * than a namespace just so we can use it as a template argument.
*/
-template<typename T>
-struct WorkerProto {
- static T read(const Store & store, Source & from);
- static void write(const Store & store, Sink & out, const T & t);
+struct WorkerProto
+{
+ /**
+ * Enumeration of all the request types for the protocol.
+ */
+ enum struct Op : uint64_t;
+
+ /**
+ * A unidirectional read connection, to be used by the read half of the
+ * canonical serialisers below.
+ *
+ * This currently is just a `Source &`, but more fields will be added
+ * later.
+ */
+ struct ReadConn {
+ Source & from;
+ };
+
+ /**
+ * A unidirectional write connection, to be used by the write half of the
+ * canonical serialisers below.
+ *
+ * This currently is just a `Sink &`, but more fields will be added
+ * later.
+ */
+ struct WriteConn {
+ Sink & to;
+ };
+
+ /**
+ * Data type for canonical pairs of serialisers for the worker protocol.
+ *
+ * See https://en.cppreference.com/w/cpp/language/adl for the broader
+ * concept of what is going on here.
+ */
+ template<typename T>
+ struct Serialise;
+ // This is the definition of `Serialise` we *want* to put here, but
+ // do not do so.
+ //
+ // The problem is that if we do so, C++ will think we have
+ // serialisers for *all* types. We don't, of course, but that won't
+ // cause an error until link time. That makes for long debug cycles
+ // when there is a missing serialiser.
+ //
+ // By not defining it globally, and instead letting individual
+ // serialisers specialise the type, we get back the compile-time
+ // errors we would like. When no serialiser exists, C++ sees an
+ // abstract "incomplete" type with no definition, and any attempt to
+ // use the `read` or `write` static methods is a compile-time error because
+ // they don't exist on an incomplete type.
+ //
+ // This makes for a quicker debug cycle, as desired.
+#if 0
+ {
+ static T read(const Store & store, ReadConn conn);
+ static void write(const Store & store, WriteConn conn, const T & t);
+ };
+#endif
+
+ /**
+ * Wrapper function around `WorkerProto::Serialise<T>::write` that allows us to
+ * infer the type instead of having to write it down explicitly.
+ */
+ template<typename T>
+ static void write(const Store & store, WriteConn conn, const T & t)
+ {
+ WorkerProto::Serialise<T>::write(store, conn, t);
+ }
+};
+
+enum struct WorkerProto::Op : uint64_t
+{
+ IsValidPath = 1,
+ HasSubstitutes = 3,
+ QueryPathHash = 4, // obsolete
+ QueryReferences = 5, // obsolete
+ QueryReferrers = 6,
+ AddToStore = 7,
+ AddTextToStore = 8, // obsolete since 1.25, Nix 3.0. Use WorkerProto::Op::AddToStore
+ BuildPaths = 9,
+ EnsurePath = 10,
+ AddTempRoot = 11,
+ AddIndirectRoot = 12,
+ SyncWithGC = 13,
+ FindRoots = 14,
+ ExportPath = 16, // obsolete
+ QueryDeriver = 18, // obsolete
+ SetOptions = 19,
+ CollectGarbage = 20,
+ QuerySubstitutablePathInfo = 21,
+ QueryDerivationOutputs = 22, // obsolete
+ QueryAllValidPaths = 23,
+ QueryFailedPaths = 24,
+ ClearFailedPaths = 25,
+ QueryPathInfo = 26,
+ ImportPaths = 27, // obsolete
+ QueryDerivationOutputNames = 28, // obsolete
+ QueryPathFromHashPart = 29,
+ QuerySubstitutablePathInfos = 30,
+ QueryValidPaths = 31,
+ QuerySubstitutablePaths = 32,
+ QueryValidDerivers = 33,
+ OptimiseStore = 34,
+ VerifyStore = 35,
+ BuildDerivation = 36,
+ AddSignatures = 37,
+ NarFromPath = 38,
+ AddToStoreNar = 39,
+ QueryMissing = 40,
+ QueryDerivationOutputMap = 41,
+ RegisterDrvOutput = 42,
+ QueryRealisation = 43,
+ AddMultipleToStore = 44,
+ AddBuildLog = 45,
+ BuildPathsWithResults = 46,
};
/**
- * Wrapper function around `WorkerProto<T>::write` that allows us to
- * infer the type instead of having to write it down explicitly.
+ * Convenience for sending operation codes.
+ *
+ * @todo Switch to using `WorkerProto::Serialise` instead. This was not
+ * done yet, to keep the churn of this change smaller.
*/
-template<typename T>
-void workerProtoWrite(const Store & store, Sink & out, const T & t)
+inline Sink & operator << (Sink & sink, WorkerProto::Op op)
{
- WorkerProto<T>::write(store, out, t);
+ return sink << (uint64_t) op;
}
/**
- * Declare a canonical serializer pair for the worker protocol.
+ * Convenience for debugging.
*
- * We specialize the struct merely to indicate that we are implementing
+ * @todo Perhaps render known opcodes more nicely.
+ */
+inline std::ostream & operator << (std::ostream & s, WorkerProto::Op op)
+{
+ return s << (uint64_t) op;
+}
+
+/**
+ * Declare a canonical serialiser pair for the worker protocol.
+ *
+ * We specialise the struct merely to indicate that we are implementing
* the function for the given type.
*
* Some sort of `template<...>` must be used with the caller for this to
@@ -120,9 +192,9 @@ void workerProtoWrite(const Store & store, Sink & out, const T & t)
* practice.
*/
#define MAKE_WORKER_PROTO(T) \
- struct WorkerProto< T > { \
- static T read(const Store & store, Source & from); \
- static void write(const Store & store, Sink & out, const T & t); \
+ struct WorkerProto::Serialise< T > { \
+ static T read(const Store & store, WorkerProto::ReadConn conn); \
+ static void write(const Store & store, WorkerProto::WriteConn conn, const T & t); \
};
template<>
@@ -156,7 +228,7 @@ MAKE_WORKER_PROTO(X_);
/**
* These use the empty string for the null case, relying on the fact
- * that the underlying types never serialize to the empty string.
+ * that the underlying types never serialise to the empty string.
*
* We do this instead of a generic std::optional<T> instance because
 * ordinal tags (0 or 1, here) are a bit of a compatibility hazard. For
@@ -173,67 +245,4 @@ MAKE_WORKER_PROTO(std::optional<StorePath>);
template<>
MAKE_WORKER_PROTO(std::optional<ContentAddress>);
-template<typename T>
-std::vector<T> WorkerProto<std::vector<T>>::read(const Store & store, Source & from)
-{
- std::vector<T> resSet;
- auto size = readNum<size_t>(from);
- while (size--) {
- resSet.push_back(WorkerProto<T>::read(store, from));
- }
- return resSet;
-}
-
-template<typename T>
-void WorkerProto<std::vector<T>>::write(const Store & store, Sink & out, const std::vector<T> & resSet)
-{
- out << resSet.size();
- for (auto & key : resSet) {
- WorkerProto<T>::write(store, out, key);
- }
-}
-
-template<typename T>
-std::set<T> WorkerProto<std::set<T>>::read(const Store & store, Source & from)
-{
- std::set<T> resSet;
- auto size = readNum<size_t>(from);
- while (size--) {
- resSet.insert(WorkerProto<T>::read(store, from));
- }
- return resSet;
-}
-
-template<typename T>
-void WorkerProto<std::set<T>>::write(const Store & store, Sink & out, const std::set<T> & resSet)
-{
- out << resSet.size();
- for (auto & key : resSet) {
- WorkerProto<T>::write(store, out, key);
- }
-}
-
-template<typename K, typename V>
-std::map<K, V> WorkerProto<std::map<K, V>>::read(const Store & store, Source & from)
-{
- std::map<K, V> resMap;
- auto size = readNum<size_t>(from);
- while (size--) {
- auto k = WorkerProto<K>::read(store, from);
- auto v = WorkerProto<V>::read(store, from);
- resMap.insert_or_assign(std::move(k), std::move(v));
- }
- return resMap;
-}
-
-template<typename K, typename V>
-void WorkerProto<std::map<K, V>>::write(const Store & store, Sink & out, const std::map<K, V> & resMap)
-{
- out << resMap.size();
- for (auto & i : resMap) {
- WorkerProto<K>::write(store, out, i.first);
- WorkerProto<V>::write(store, out, i.second);
- }
-}
-
}
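
The reason Serialise is declared but never given a primary definition is illustrated by this toy: a type with a specialisation works, while an unspecialised type is rejected at compile time instead of surfacing as a missing symbol at link time.

    #include <string>

    struct Proto
    {
        template<typename T> struct Serialise;       // no primary definition, on purpose
    };

    template<> struct Proto::Serialise<std::string>
    {
        static std::string read() { return "ok"; }
    };

    int main()
    {
        auto s = Proto::Serialise<std::string>::read();  // fine: a specialisation exists
        // auto n = Proto::Serialise<int>::read();       // compile error: incomplete type
        (void) s;
        return 0;
    }
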
diff --git a/src/libutil/abstract-setting-to-json.hh b/src/libutil/abstract-setting-to-json.hh
index 7b6c3fcb5..d506dfb74 100644
--- a/src/libutil/abstract-setting-to-json.hh
+++ b/src/libutil/abstract-setting-to-json.hh
@@ -3,6 +3,7 @@
#include <nlohmann/json.hpp>
#include "config.hh"
+#include "json-utils.hh"
namespace nix {
template<typename T>
diff --git a/src/libutil/args.cc b/src/libutil/args.cc
index 081dbeb28..3cf3ed9ca 100644
--- a/src/libutil/args.cc
+++ b/src/libutil/args.cc
@@ -1,10 +1,9 @@
#include "args.hh"
#include "hash.hh"
+#include "json-utils.hh"
#include <glob.h>
-#include <nlohmann/json.hpp>
-
namespace nix {
void Args::addFlag(Flag && flag_)
@@ -247,11 +246,7 @@ nlohmann::json Args::toJSON()
j["arity"] = flag->handler.arity;
if (!flag->labels.empty())
j["labels"] = flag->labels;
- // TODO With C++23 use `std::optional::tranform`
- if (auto & xp = flag->experimentalFeature)
- j["experimental-feature"] = showExperimentalFeature(*xp);
- else
- j["experimental-feature"] = nullptr;
+ j["experimental-feature"] = flag->experimentalFeature;
flags[name] = std::move(j);
}
@@ -416,11 +411,7 @@ nlohmann::json MultiCommand::toJSON()
cat["id"] = command->category();
cat["description"] = trim(categories[command->category()]);
j["category"] = std::move(cat);
- // TODO With C++23 use `std::optional::tranform`
- if (auto xp = command->experimentalFeature())
- cat["experimental-feature"] = showExperimentalFeature(*xp);
- else
- cat["experimental-feature"] = nullptr;
+ cat["experimental-feature"] = command->experimentalFeature();
cmds[name] = std::move(j);
}
diff --git a/src/libutil/config-impl.hh b/src/libutil/config-impl.hh
index b6cae5ec3..b9639e761 100644
--- a/src/libutil/config-impl.hh
+++ b/src/libutil/config-impl.hh
@@ -4,6 +4,9 @@
*
* Template implementations (as opposed to mere declarations).
*
+ * This file is an example of the "impl.hh" pattern. See the
+ * contributing guide.
+ *
* One only needs to include this when one is declaring a
* `BaseClass<CustomType>` setting, or as derived class of such an
* instantiation.
@@ -50,8 +53,11 @@ template<> void BaseSetting<std::set<ExperimentalFeature>>::appendOrSet(std::set
template<typename T>
void BaseSetting<T>::appendOrSet(T && newValue, bool append)
{
- static_assert(!trait::appendable, "using default `appendOrSet` implementation with an appendable type");
+ static_assert(
+ !trait::appendable,
+ "using default `appendOrSet` implementation with an appendable type");
assert(!append);
+
value = std::move(newValue);
}
@@ -68,4 +74,60 @@ void BaseSetting<T>::set(const std::string & str, bool append)
}
}
+template<> void BaseSetting<bool>::convertToArg(Args & args, const std::string & category);
+
+template<typename T>
+void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
+{
+ args.addFlag({
+ .longName = name,
+ .description = fmt("Set the `%s` setting.", name),
+ .category = category,
+ .labels = {"value"},
+ .handler = {[this](std::string s) { overridden = true; set(s); }},
+ .experimentalFeature = experimentalFeature,
+ });
+
+ if (isAppendable())
+ args.addFlag({
+ .longName = "extra-" + name,
+ .description = fmt("Append to the `%s` setting.", name),
+ .category = category,
+ .labels = {"value"},
+ .handler = {[this](std::string s) { overridden = true; set(s, true); }},
+ .experimentalFeature = experimentalFeature,
+ });
+}
+
+#define DECLARE_CONFIG_SERIALISER(TY) \
+ template<> TY BaseSetting< TY >::parse(const std::string & str) const; \
+ template<> std::string BaseSetting< TY >::to_string() const;
+
+DECLARE_CONFIG_SERIALISER(std::string)
+DECLARE_CONFIG_SERIALISER(std::optional<std::string>)
+DECLARE_CONFIG_SERIALISER(bool)
+DECLARE_CONFIG_SERIALISER(Strings)
+DECLARE_CONFIG_SERIALISER(StringSet)
+DECLARE_CONFIG_SERIALISER(StringMap)
+DECLARE_CONFIG_SERIALISER(std::set<ExperimentalFeature>)
+
+template<typename T>
+T BaseSetting<T>::parse(const std::string & str) const
+{
+ static_assert(std::is_integral<T>::value, "Integer required.");
+
+ if (auto n = string2Int<T>(str))
+ return *n;
+ else
+ throw UsageError("setting '%s' has invalid value '%s'", name, str);
+}
+
+template<typename T>
+std::string BaseSetting<T>::to_string() const
+{
+ static_assert(std::is_integral<T>::value, "Integer required.");
+
+ return std::to_string(value);
+}
+
}
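
A compressed sketch of that "impl.hh" split, with the file boundaries shown as comments; the Setting name and its parsing logic are illustrative, not the real config API:

    #include <iostream>
    #include <sstream>
    #include <string>

    // --- setting.hh: declaration only; cheap to include everywhere ----------
    template<typename T>
    struct Setting
    {
        T value{};
        void set(const std::string & str);   // declared, not defined here
    };

    // --- setting-impl.hh: the template definition; included only by
    // --- translation units that instantiate Setting<T> for a new T ----------
    template<typename T>
    void Setting<T>::set(const std::string & str)
    {
        std::istringstream in(str);
        in >> value;                         // assumes T is stream-extractable
    }

    // --- some .cc file that needs Setting<int> -------------------------------
    int main()
    {
        Setting<int> maxJobs;
        maxJobs.set("8");
        std::cout << maxJobs.value << "\n";  // 8
    }
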
diff --git a/src/libutil/config.cc b/src/libutil/config.cc
index 085a884dc..38d406e8a 100644
--- a/src/libutil/config.cc
+++ b/src/libutil/config.cc
@@ -219,29 +219,6 @@ void AbstractSetting::convertToArg(Args & args, const std::string & category)
{
}
-template<typename T>
-void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
-{
- args.addFlag({
- .longName = name,
- .description = fmt("Set the `%s` setting.", name),
- .category = category,
- .labels = {"value"},
- .handler = {[this](std::string s) { overridden = true; set(s); }},
- .experimentalFeature = experimentalFeature,
- });
-
- if (isAppendable())
- args.addFlag({
- .longName = "extra-" + name,
- .description = fmt("Append to the `%s` setting.", name),
- .category = category,
- .labels = {"value"},
- .handler = {[this](std::string s) { overridden = true; set(s, true); }},
- .experimentalFeature = experimentalFeature,
- });
-}
-
template<> std::string BaseSetting<std::string>::parse(const std::string & str) const
{
return str;
@@ -252,21 +229,17 @@ template<> std::string BaseSetting<std::string>::to_string() const
return value;
}
-template<typename T>
-T BaseSetting<T>::parse(const std::string & str) const
+template<> std::optional<std::string> BaseSetting<std::optional<std::string>>::parse(const std::string & str) const
{
- static_assert(std::is_integral<T>::value, "Integer required.");
- if (auto n = string2Int<T>(str))
- return *n;
+ if (str == "")
+ return std::nullopt;
else
- throw UsageError("setting '%s' has invalid value '%s'", name, str);
+ return { str };
}
-template<typename T>
-std::string BaseSetting<T>::to_string() const
+template<> std::string BaseSetting<std::optional<std::string>>::to_string() const
{
- static_assert(std::is_integral<T>::value, "Integer required.");
- return std::to_string(value);
+ return value ? *value : "";
}
template<> bool BaseSetting<bool>::parse(const std::string & str) const
@@ -403,17 +376,27 @@ template class BaseSetting<StringSet>;
template class BaseSetting<StringMap>;
template class BaseSetting<std::set<ExperimentalFeature>>;
-Path PathSetting::parse(const std::string & str) const
+static Path parsePath(const AbstractSetting & s, const std::string & str)
{
- if (str == "") {
- if (allowEmpty)
- return "";
- else
- throw UsageError("setting '%s' cannot be empty", name);
- } else
+ if (str == "")
+ throw UsageError("setting '%s' is a path and paths cannot be empty", s.name);
+ else
return canonPath(str);
}
+Path PathSetting::parse(const std::string & str) const
+{
+ return parsePath(*this, str);
+}
+
+std::optional<Path> OptionalPathSetting::parse(const std::string & str) const
+{
+ if (str == "")
+ return std::nullopt;
+ else
+ return parsePath(*this, str);
+}
+
bool GlobalConfig::set(const std::string & name, const std::string & value)
{
for (auto & config : *configRegistrations)
diff --git a/src/libutil/config.hh b/src/libutil/config.hh
index 2675baed7..cc8532587 100644
--- a/src/libutil/config.hh
+++ b/src/libutil/config.hh
@@ -353,21 +353,20 @@ public:
/**
* A special setting for Paths. These are automatically canonicalised
* (e.g. "/foo//bar/" becomes "/foo/bar").
+ *
+ * It is mandatory to specify a path; i.e. the empty string is not
+ * permitted.
*/
class PathSetting : public BaseSetting<Path>
{
- bool allowEmpty;
-
public:
PathSetting(Config * options,
- bool allowEmpty,
const Path & def,
const std::string & name,
const std::string & description,
const std::set<std::string> & aliases = {})
: BaseSetting<Path>(def, true, name, description, aliases)
- , allowEmpty(allowEmpty)
{
options->addSetting(this);
}
@@ -379,6 +378,30 @@ public:
void operator =(const Path & v) { this->assign(v); }
};
+/**
+ * Like `PathSetting`, but the absence of a path is also allowed.
+ *
+ * `std::optional` is used instead of the empty string for clarity.
+ */
+class OptionalPathSetting : public BaseSetting<std::optional<Path>>
+{
+public:
+
+ OptionalPathSetting(Config * options,
+ const std::optional<Path> & def,
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases = {})
+ : BaseSetting<std::optional<Path>>(def, true, name, description, aliases)
+ {
+ options->addSetting(this);
+ }
+
+ std::optional<Path> parse(const std::string & str) const override;
+
+ void operator =(const std::optional<Path> & v) { this->assign(v); }
+};
+
struct GlobalConfig : public AbstractConfig
{
typedef std::vector<Config*> ConfigRegistrations;
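
The behavioural difference between the two classes is confined to parsing: an empty string is an error for PathSetting but std::nullopt for OptionalPathSetting. A standalone sketch of those semantics, with canonPath replaced by a trivial stand-in:

    #include <iostream>
    #include <optional>
    #include <stdexcept>
    #include <string>

    static std::string canon(const std::string & p) { return p; } // stand-in for canonPath

    std::string parsePath(const std::string & str)
    {
        if (str.empty())
            throw std::runtime_error("setting is a path and paths cannot be empty");
        return canon(str);
    }

    std::optional<std::string> parseOptionalPath(const std::string & str)
    {
        if (str.empty())
            return std::nullopt;
        return canon(str);
    }

    int main()
    {
        std::cout << parsePath("/nix/store") << "\n";
        std::cout << (parseOptionalPath("") ? "set" : "unset") << "\n"; // unset
    }
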
diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc
index 5aae0347b..7c4112d32 100644
--- a/src/libutil/experimental-features.cc
+++ b/src/libutil/experimental-features.cc
@@ -12,7 +12,7 @@ struct ExperimentalFeatureDetails
std::string_view description;
};
-constexpr std::array<ExperimentalFeatureDetails, 13> xpFeatureDetails = {{
+constexpr std::array<ExperimentalFeatureDetails, 15> xpFeatureDetails = {{
{
.tag = Xp::CaDerivations,
.name = "ca-derivations",
@@ -50,6 +50,8 @@ constexpr std::array<ExperimentalFeatureDetails, 13> xpFeatureDetails = {{
or other impure derivations can rely on impure derivations. Finally,
an impure derivation cannot also be
[content-addressed](#xp-feature-ca-derivations).
+
+ This is a more explicit alternative to using [`builtins.currentTime`](@docroot@/language/builtin-constants.md#builtins-currentTime).
)",
},
{
@@ -212,6 +214,20 @@ constexpr std::array<ExperimentalFeatureDetails, 13> xpFeatureDetails = {{
derivations that are themselves derivations outputs.
)",
},
+ {
+ .tag = Xp::ParseTomlTimestamps,
+ .name = "parse-toml-timestamps",
+ .description = R"(
+ Allow parsing of timestamps in builtins.fromTOML.
+ )",
+ },
+ {
+ .tag = Xp::ReadOnlyLocalStore,
+ .name = "read-only-local-store",
+ .description = R"(
+ Allow the use of the `read-only` parameter in [local store](@docroot@/command-ref/new-cli/nix3-help-stores.md#local-store) URIs.
+ )",
+ },
}};
static_assert(
@@ -246,7 +262,7 @@ std::string_view showExperimentalFeature(const ExperimentalFeature tag)
return xpFeatureDetails[(size_t)tag].name;
}
-nlohmann::json documentExperimentalFeatures()
+nlohmann::json documentExperimentalFeatures()
{
StringMap res;
for (auto & xpFeature : xpFeatureDetails)
diff --git a/src/libutil/experimental-features.hh b/src/libutil/experimental-features.hh
index 409100592..faf2e9398 100644
--- a/src/libutil/experimental-features.hh
+++ b/src/libutil/experimental-features.hh
@@ -3,7 +3,7 @@
#include "comparator.hh"
#include "error.hh"
-#include "nlohmann/json_fwd.hpp"
+#include "json-utils.hh"
#include "types.hh"
namespace nix {
@@ -30,6 +30,8 @@ enum struct ExperimentalFeature
DiscardReferences,
DaemonTrustOverride,
DynamicDerivations,
+ ParseTomlTimestamps,
+ ReadOnlyLocalStore,
};
/**
@@ -92,4 +94,10 @@ public:
void to_json(nlohmann::json &, const ExperimentalFeature &);
void from_json(const nlohmann::json &, ExperimentalFeature &);
+/**
+ * It is always rendered as a string
+ */
+template<>
+struct json_avoids_null<ExperimentalFeature> : std::true_type {};
+
}
diff --git a/src/libutil/filesystem.cc b/src/libutil/filesystem.cc
index 56be76ecc..11cc0c0e7 100644
--- a/src/libutil/filesystem.cc
+++ b/src/libutil/filesystem.cc
@@ -63,30 +63,19 @@ std::pair<AutoCloseFD, Path> createTempFile(const Path & prefix)
return {std::move(fd), tmpl};
}
-void createSymlink(const Path & target, const Path & link,
- std::optional<time_t> mtime)
+void createSymlink(const Path & target, const Path & link)
{
if (symlink(target.c_str(), link.c_str()))
throw SysError("creating symlink from '%1%' to '%2%'", link, target);
- if (mtime) {
- struct timeval times[2];
- times[0].tv_sec = *mtime;
- times[0].tv_usec = 0;
- times[1].tv_sec = *mtime;
- times[1].tv_usec = 0;
- if (lutimes(link.c_str(), times))
- throw SysError("setting time of symlink '%s'", link);
- }
}
-void replaceSymlink(const Path & target, const Path & link,
- std::optional<time_t> mtime)
+void replaceSymlink(const Path & target, const Path & link)
{
for (unsigned int n = 0; true; n++) {
Path tmp = canonPath(fmt("%s/.%d_%s", dirOf(link), n, baseNameOf(link)));
try {
- createSymlink(target, tmp, mtime);
+ createSymlink(target, tmp);
} catch (SysError & e) {
if (e.errNo == EEXIST) continue;
throw;
diff --git a/src/libutil/json-utils.cc b/src/libutil/json-utils.cc
new file mode 100644
index 000000000..d7220e71d
--- /dev/null
+++ b/src/libutil/json-utils.cc
@@ -0,0 +1,19 @@
+#include "json-utils.hh"
+
+namespace nix {
+
+const nlohmann::json * get(const nlohmann::json & map, const std::string & key)
+{
+ auto i = map.find(key);
+ if (i == map.end()) return nullptr;
+ return &*i;
+}
+
+nlohmann::json * get(nlohmann::json & map, const std::string & key)
+{
+ auto i = map.find(key);
+ if (i == map.end()) return nullptr;
+ return &*i;
+}
+
+}
diff --git a/src/libutil/json-utils.hh b/src/libutil/json-utils.hh
index eb00e954f..5e63c1af4 100644
--- a/src/libutil/json-utils.hh
+++ b/src/libutil/json-utils.hh
@@ -2,21 +2,77 @@
///@file
#include <nlohmann/json.hpp>
+#include <list>
namespace nix {
-const nlohmann::json * get(const nlohmann::json & map, const std::string & key)
-{
- auto i = map.find(key);
- if (i == map.end()) return nullptr;
- return &*i;
-}
+const nlohmann::json * get(const nlohmann::json & map, const std::string & key);
+
+nlohmann::json * get(nlohmann::json & map, const std::string & key);
+
+/**
+ * For `adl_serializer<std::optional<T>>` below, we need to track what
+ * types are not already using `null`. Only for them can we use `null`
+ * to represent `std::nullopt`.
+ */
+template<typename T>
+struct json_avoids_null;
+
+/**
+ * Handle numbers in default impl
+ */
+template<typename T>
+struct json_avoids_null : std::bool_constant<std::is_integral<T>::value> {};
+
+template<>
+struct json_avoids_null<std::nullptr_t> : std::false_type {};
+
+template<>
+struct json_avoids_null<bool> : std::true_type {};
+
+template<>
+struct json_avoids_null<std::string> : std::true_type {};
+
+template<typename T>
+struct json_avoids_null<std::vector<T>> : std::true_type {};
+
+template<typename T>
+struct json_avoids_null<std::list<T>> : std::true_type {};
+
+template<typename K, typename V>
+struct json_avoids_null<std::map<K, V>> : std::true_type {};
-nlohmann::json * get(nlohmann::json & map, const std::string & key)
-{
- auto i = map.find(key);
- if (i == map.end()) return nullptr;
- return &*i;
}
+namespace nlohmann {
+
+/**
+ * This "instance" is widely requested, see
+ * https://github.com/nlohmann/json/issues/1749, but momentum has stalled
+ * out. Writing it here in Nix as a stop-gap.
+ *
+ * We need to make sure the underlying type does not use `null` for this to
+ * round trip. We do that with a static assert.
+ */
+template<typename T>
+struct adl_serializer<std::optional<T>> {
+ static std::optional<T> from_json(const json & json) {
+ static_assert(
+ nix::json_avoids_null<T>::value,
+ "null is already in use for underlying type's JSON");
+ return json.is_null()
+ ? std::nullopt
+ : std::optional { adl_serializer<T>::from_json(json) };
+ }
+ static void to_json(json & json, std::optional<T> t) {
+ static_assert(
+ nix::json_avoids_null<T>::value,
+ "null is already in use for underlying type's JSON");
+ if (t)
+ adl_serializer<T>::to_json(json, *t);
+ else
+ json = nullptr;
+ }
+};
+
}
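
With this adl_serializer in place, std::optional<T> round-trips through nlohmann::json, using null only for types whose own encoding never produces null. A usage sketch, assuming the json-utils.hh added above is on the include path:

    #include "json-utils.hh"

    #include <iostream>
    #include <optional>
    #include <string>

    int main()
    {
        std::optional<std::string> name = "hydra";
        std::optional<std::string> missing;

        nlohmann::json j = name;     // serialises to "hydra"
        nlohmann::json k = missing;  // serialises to null

        auto back = k.get<std::optional<std::string>>();
        std::cout << j.dump() << " " << (back ? *back : "(nullopt)") << "\n";
    }
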
diff --git a/src/libstore/references.cc b/src/libutil/references.cc
index 345f4528b..7f59b4c09 100644
--- a/src/libstore/references.cc
+++ b/src/libutil/references.cc
@@ -6,6 +6,7 @@
#include <map>
#include <cstdlib>
#include <mutex>
+#include <algorithm>
namespace nix {
@@ -66,69 +67,20 @@ void RefScanSink::operator () (std::string_view data)
}
-PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
- : RefScanSink(std::move(hashes))
- , backMap(std::move(backMap))
-{ }
-
-PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
+RewritingSink::RewritingSink(const std::string & from, const std::string & to, Sink & nextSink)
+ : RewritingSink({{from, to}}, nextSink)
{
- StringSet hashes;
- std::map<std::string, StorePath> backMap;
-
- for (auto & i : refs) {
- std::string hashPart(i.hashPart());
- auto inserted = backMap.emplace(hashPart, i).second;
- assert(inserted);
- hashes.insert(hashPart);
- }
-
- return PathRefScanSink(std::move(hashes), std::move(backMap));
}
-StorePathSet PathRefScanSink::getResultPaths()
+RewritingSink::RewritingSink(const StringMap & rewrites, Sink & nextSink)
+ : rewrites(rewrites), nextSink(nextSink)
{
- /* Map the hashes found back to their store paths. */
- StorePathSet found;
- for (auto & i : getResult()) {
- auto j = backMap.find(i);
- assert(j != backMap.end());
- found.insert(j->second);
+ std::string::size_type maxRewriteSize = 0;
+ for (auto & [from, to] : rewrites) {
+ assert(from.size() == to.size());
+ maxRewriteSize = std::max(maxRewriteSize, from.size());
}
-
- return found;
-}
-
-
-std::pair<StorePathSet, HashResult> scanForReferences(
- const std::string & path,
- const StorePathSet & refs)
-{
- HashSink hashSink { htSHA256 };
- auto found = scanForReferences(hashSink, path, refs);
- auto hash = hashSink.finish();
- return std::pair<StorePathSet, HashResult>(found, hash);
-}
-
-StorePathSet scanForReferences(
- Sink & toTee,
- const Path & path,
- const StorePathSet & refs)
-{
- PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
- TeeSink sink { refsSink, toTee };
-
- /* Look for the hashes in the NAR dump of the path. */
- dumpPath(path, sink);
-
- return refsSink.getResultPaths();
-}
-
-
-RewritingSink::RewritingSink(const std::string & from, const std::string & to, Sink & nextSink)
- : from(from), to(to), nextSink(nextSink)
-{
- assert(from.size() == to.size());
+ this->maxRewriteSize = maxRewriteSize;
}
void RewritingSink::operator () (std::string_view data)
@@ -136,13 +88,13 @@ void RewritingSink::operator () (std::string_view data)
std::string s(prev);
s.append(data);
- size_t j = 0;
- while ((j = s.find(from, j)) != std::string::npos) {
- matches.push_back(pos + j);
- s.replace(j, from.size(), to);
- }
+ s = rewriteStrings(s, rewrites);
- prev = s.size() < from.size() ? s : std::string(s, s.size() - from.size() + 1, from.size() - 1);
+ prev = s.size() < maxRewriteSize
+ ? s
+ : maxRewriteSize == 0
+ ? ""
+ : std::string(s, s.size() - maxRewriteSize + 1, maxRewriteSize - 1);
auto consumed = s.size() - prev.size();
diff --git a/src/libstore/references.hh b/src/libutil/references.hh
index 52d71b333..f0baeffe1 100644
--- a/src/libstore/references.hh
+++ b/src/libutil/references.hh
@@ -2,14 +2,9 @@
///@file
#include "hash.hh"
-#include "path.hh"
namespace nix {
-std::pair<StorePathSet, HashResult> scanForReferences(const Path & path, const StorePathSet & refs);
-
-StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathSet & refs);
-
class RefScanSink : public Sink
{
StringSet hashes;
@@ -28,28 +23,18 @@ public:
void operator () (std::string_view data) override;
};
-class PathRefScanSink : public RefScanSink
-{
- std::map<std::string, StorePath> backMap;
-
- PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
-
-public:
-
- static PathRefScanSink fromPaths(const StorePathSet & refs);
-
- StorePathSet getResultPaths();
-};
-
struct RewritingSink : Sink
{
- std::string from, to, prev;
+ const StringMap rewrites;
+ std::string::size_type maxRewriteSize;
+ std::string prev;
Sink & nextSink;
uint64_t pos = 0;
std::vector<uint64_t> matches;
RewritingSink(const std::string & from, const std::string & to, Sink & nextSink);
+ RewritingSink(const StringMap & rewrites, Sink & nextSink);
void operator () (std::string_view data) override;
diff --git a/src/libutil/tests/references.cc b/src/libutil/tests/references.cc
new file mode 100644
index 000000000..a517d9aa1
--- /dev/null
+++ b/src/libutil/tests/references.cc
@@ -0,0 +1,46 @@
+#include "references.hh"
+#include <gtest/gtest.h>
+
+namespace nix {
+
+using std::string;
+
+struct RewriteParams {
+ string originalString, finalString;
+ StringMap rewrites;
+
+ friend std::ostream& operator<<(std::ostream& os, const RewriteParams& bar) {
+ StringSet strRewrites;
+ for (auto & [from, to] : bar.rewrites)
+ strRewrites.insert(from + "->" + to);
+ return os <<
+ "OriginalString: " << bar.originalString << std::endl <<
+ "Rewrites: " << concatStringsSep(",", strRewrites) << std::endl <<
+ "Expected result: " << bar.finalString;
+ }
+};
+
+class RewriteTest : public ::testing::TestWithParam<RewriteParams> {
+};
+
+TEST_P(RewriteTest, IdentityRewriteIsIdentity) {
+ RewriteParams param = GetParam();
+ StringSink rewritten;
+ auto rewriter = RewritingSink(param.rewrites, rewritten);
+ rewriter(param.originalString);
+ rewriter.flush();
+ ASSERT_EQ(rewritten.s, param.finalString);
+}
+
+INSTANTIATE_TEST_CASE_P(
+ references,
+ RewriteTest,
+ ::testing::Values(
+ RewriteParams{ "foooo", "baroo", {{"foo", "bar"}, {"bar", "baz"}}},
+ RewriteParams{ "foooo", "bazoo", {{"fou", "bar"}, {"foo", "baz"}}},
+ RewriteParams{ "foooo", "foooo", {}}
+ )
+);
+
+}
+
diff --git a/src/libutil/tests/tests.cc b/src/libutil/tests/tests.cc
index 250e83a38..f3c1e8248 100644
--- a/src/libutil/tests/tests.cc
+++ b/src/libutil/tests/tests.cc
@@ -202,7 +202,7 @@ namespace nix {
}
TEST(pathExists, bogusPathDoesNotExist) {
- ASSERT_FALSE(pathExists("/home/schnitzel/darmstadt/pommes"));
+ ASSERT_FALSE(pathExists("/schnitzel/darmstadt/pommes"));
}
/* ----------------------------------------------------------------------------
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 3a8309149..26f9dc8a8 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -266,6 +266,17 @@ bool pathExists(const Path & path)
return false;
}
+bool pathAccessible(const Path & path)
+{
+ try {
+ return pathExists(path);
+ } catch (SysError & e) {
+ // swallow EPERM
+ if (e.errNo == EPERM) return false;
+ throw;
+ }
+}
+
Path readLink(const Path & path)
{
@@ -1842,6 +1853,7 @@ void setStackSize(size_t stackSize)
#if __linux__
static AutoCloseFD fdSavedMountNamespace;
+static AutoCloseFD fdSavedRoot;
#endif
void saveMountNamespace()
@@ -1849,10 +1861,11 @@ void saveMountNamespace()
#if __linux__
static std::once_flag done;
std::call_once(done, []() {
- AutoCloseFD fd = open("/proc/self/ns/mnt", O_RDONLY);
- if (!fd)
+ fdSavedMountNamespace = open("/proc/self/ns/mnt", O_RDONLY);
+ if (!fdSavedMountNamespace)
throw SysError("saving parent mount namespace");
- fdSavedMountNamespace = std::move(fd);
+
+ fdSavedRoot = open("/proc/self/root", O_RDONLY);
});
#endif
}
@@ -1865,9 +1878,16 @@ void restoreMountNamespace()
if (fdSavedMountNamespace && setns(fdSavedMountNamespace.get(), CLONE_NEWNS) == -1)
throw SysError("restoring parent mount namespace");
- if (chdir(savedCwd.c_str()) == -1) {
- throw SysError("restoring cwd");
+
+ if (fdSavedRoot) {
+ if (fchdir(fdSavedRoot.get()))
+ throw SysError("chdir into saved root");
+ if (chroot("."))
+ throw SysError("chroot into saved root");
}
+
+ if (chdir(savedCwd.c_str()) == -1)
+ throw SysError("restoring cwd");
} catch (Error & e) {
debug(e.msg());
}
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index a7907cd14..b302d6f45 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -121,6 +121,14 @@ struct stat lstat(const Path & path);
bool pathExists(const Path & path);
/**
+ * A version of pathExists that returns false on a permission error.
+ * Useful for inferring default paths across directories that might not
+ * be readable.
+ * @return true iff the given path can be accessed and exists
+ */
+bool pathAccessible(const Path & path);
+
+/**
* Read the contents (target) of a symbolic link. The result is not
* in any way canonicalised.
*/
@@ -248,14 +256,12 @@ inline Paths createDirs(PathView path)
/**
* Create a symlink.
*/
-void createSymlink(const Path & target, const Path & link,
- std::optional<time_t> mtime = {});
+void createSymlink(const Path & target, const Path & link);
/**
* Atomically create or replace a symlink.
*/
-void replaceSymlink(const Path & target, const Path & link,
- std::optional<time_t> mtime = {});
+void replaceSymlink(const Path & target, const Path & link);
void renameFile(const Path & src, const Path & dst);
diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc
index 740737ffe..c1c8edd1d 100755
--- a/src/nix-channel/nix-channel.cc
+++ b/src/nix-channel/nix-channel.cc
@@ -177,6 +177,7 @@ static int main_nix_channel(int argc, char ** argv)
cRemove,
cList,
cUpdate,
+ cListGenerations,
cRollback
} cmd = cNone;
std::vector<std::string> args;
@@ -193,6 +194,8 @@ static int main_nix_channel(int argc, char ** argv)
cmd = cList;
} else if (*arg == "--update") {
cmd = cUpdate;
+ } else if (*arg == "--list-generations") {
+ cmd = cListGenerations;
} else if (*arg == "--rollback") {
cmd = cRollback;
} else {
@@ -237,6 +240,11 @@ static int main_nix_channel(int argc, char ** argv)
case cUpdate:
update(StringSet(args.begin(), args.end()));
break;
+ case cListGenerations:
+ if (!args.empty())
+ throw UsageError("'--list-generations' expects no arguments");
+ std::cout << runProgram(settings.nixBinDir + "/nix-env", false, {"--profile", profile, "--list-generations"}) << std::flush;
+ break;
case cRollback:
if (args.size() > 1)
throw UsageError("'--rollback' has at most one argument");
diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc
index cb1f42e35..70af53b28 100644
--- a/src/nix-collect-garbage/nix-collect-garbage.cc
+++ b/src/nix-collect-garbage/nix-collect-garbage.cc
@@ -41,9 +41,10 @@ void removeOldGenerations(std::string dir)
}
if (link.find("link") != std::string::npos) {
printInfo("removing old generations of profile %s", path);
- if (deleteOlderThan != "")
- deleteGenerationsOlderThan(path, deleteOlderThan, dryRun);
- else
+ if (deleteOlderThan != "") {
+ auto t = parseOlderThanTimeSpec(deleteOlderThan);
+ deleteGenerationsOlderThan(path, t, dryRun);
+ } else
deleteOldGenerations(path, dryRun);
}
} else if (type == DT_DIR) {
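Note: parsing the "<N>d" spec is now separated from deleting. parseOlderThanTimeSpec() turns the spec into a cutoff once, so a malformed value fails before any generation is touched, and deleteGenerationsOlderThan() receives the parsed value. A hedged sketch of a call site; the exact return type is declared in profiles.hh (a time_t-like cutoff is assumed here):

    #include "profiles.hh"

    using namespace nix;

    void wipeOldGenerations(const Path & profile, const std::string & spec, bool dryRun)
    {
        // e.g. spec = "30d"; throws on a malformed spec before anything is deleted.
        auto cutoff = parseOlderThanTimeSpec(spec);
        deleteGenerationsOlderThan(profile, cutoff, dryRun);
    }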
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index 5e94f2d14..91b073b49 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -772,7 +772,7 @@ static void opSet(Globals & globals, Strings opFlags, Strings opArgs)
debug("switching to new user environment");
Path generation = createGeneration(
- ref<LocalFSStore>(store2),
+ *store2,
globals.profile,
drv.queryOutPath());
switchLink(globals.profile, generation);
@@ -1356,13 +1356,14 @@ static void opDeleteGenerations(Globals & globals, Strings opFlags, Strings opAr
if (opArgs.size() == 1 && opArgs.front() == "old") {
deleteOldGenerations(globals.profile, globals.dryRun);
} else if (opArgs.size() == 1 && opArgs.front().find('d') != std::string::npos) {
- deleteGenerationsOlderThan(globals.profile, opArgs.front(), globals.dryRun);
+ auto t = parseOlderThanTimeSpec(opArgs.front());
+ deleteGenerationsOlderThan(globals.profile, t, globals.dryRun);
} else if (opArgs.size() == 1 && opArgs.front().find('+') != std::string::npos) {
if (opArgs.front().size() < 2)
throw Error("invalid number of generations '%1%'", opArgs.front());
auto str_max = opArgs.front().substr(1);
auto max = string2Int<GenerationNumber>(str_max);
- if (!max || *max == 0)
+ if (!max)
throw Error("invalid number of generations to keep '%1%'", opArgs.front());
deleteGenerationsGreaterThan(globals.profile, *max, globals.dryRun);
} else {
diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc
index 9e916abc4..d12d70f33 100644
--- a/src/nix-env/user-env.cc
+++ b/src/nix-env/user-env.cc
@@ -158,7 +158,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
}
debug("switching to new user environment");
- Path generation = createGeneration(ref<LocalFSStore>(store2), profile, topLevelOut);
+ Path generation = createGeneration(*store2, profile, topLevelOut);
switchLink(profile, generation);
}
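Note: both call sites show the same API change: createGeneration() now takes the store by reference instead of as a ref<LocalFSStore>, so callers simply dereference the ref they already hold. A minimal sketch, assuming the parameter types implied by these calls:

    #include "local-fs-store.hh"
    #include "profiles.hh"

    using namespace nix;

    // Hypothetical helper mirroring the nix-env and user-env call sites above.
    Path makeGeneration(LocalFSStore & store, const Path & profile, const StorePath & outPath)
    {
        return createGeneration(store, profile, outPath);
    }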
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index 61c189efb..caa0248f1 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -12,6 +12,7 @@
#include "shared.hh"
#include "util.hh"
#include "worker-protocol.hh"
+#include "worker-protocol-impl.hh"
#include "graphml.hh"
#include "legacy.hh"
#include "path-with-outputs.hh"
@@ -806,6 +807,9 @@ static void opServe(Strings opFlags, Strings opArgs)
out.flush();
unsigned int clientVersion = readInt(in);
+ WorkerProto::ReadConn rconn { .from = in };
+ WorkerProto::WriteConn wconn { .to = out };
+
auto getBuildSettings = [&]() {
// FIXME: changing options here doesn't work if we're
// building through the daemon.
@@ -837,19 +841,19 @@ static void opServe(Strings opFlags, Strings opArgs)
};
while (true) {
- ServeCommand cmd;
+ ServeProto::Command cmd;
try {
- cmd = (ServeCommand) readInt(in);
+ cmd = (ServeProto::Command) readInt(in);
} catch (EndOfFile & e) {
break;
}
switch (cmd) {
- case cmdQueryValidPaths: {
+ case ServeProto::Command::QueryValidPaths: {
bool lock = readInt(in);
bool substitute = readInt(in);
- auto paths = WorkerProto<StorePathSet>::read(*store, in);
+ auto paths = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
if (lock && writeAllowed)
for (auto & path : paths)
store->addTempRoot(path);
@@ -858,19 +862,19 @@ static void opServe(Strings opFlags, Strings opArgs)
store->substitutePaths(paths);
}
- workerProtoWrite(*store, out, store->queryValidPaths(paths));
+ WorkerProto::write(*store, wconn, store->queryValidPaths(paths));
break;
}
- case cmdQueryPathInfos: {
- auto paths = WorkerProto<StorePathSet>::read(*store, in);
+ case ServeProto::Command::QueryPathInfos: {
+ auto paths = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
// !!! Maybe we want a queryPathInfos?
for (auto & i : paths) {
try {
auto info = store->queryPathInfo(i);
out << store->printStorePath(info->path)
<< (info->deriver ? store->printStorePath(*info->deriver) : "");
- workerProtoWrite(*store, out, info->references);
+ WorkerProto::write(*store, wconn, info->references);
// !!! Maybe we want compression?
out << info->narSize // downloadSize
<< info->narSize;
@@ -885,24 +889,24 @@ static void opServe(Strings opFlags, Strings opArgs)
break;
}
- case cmdDumpStorePath:
+ case ServeProto::Command::DumpStorePath:
store->narFromPath(store->parseStorePath(readString(in)), out);
break;
- case cmdImportPaths: {
+ case ServeProto::Command::ImportPaths: {
if (!writeAllowed) throw Error("importing paths is not allowed");
store->importPaths(in, NoCheckSigs); // FIXME: should we skip sig checking?
out << 1; // indicate success
break;
}
- case cmdExportPaths: {
+ case ServeProto::Command::ExportPaths: {
readInt(in); // obsolete
- store->exportPaths(WorkerProto<StorePathSet>::read(*store, in), out);
+ store->exportPaths(WorkerProto::Serialise<StorePathSet>::read(*store, rconn), out);
break;
}
- case cmdBuildPaths: {
+ case ServeProto::Command::BuildPaths: {
if (!writeAllowed) throw Error("building paths is not allowed");
@@ -923,7 +927,7 @@ static void opServe(Strings opFlags, Strings opArgs)
break;
}
- case cmdBuildDerivation: { /* Used by hydra-queue-runner. */
+ case ServeProto::Command::BuildDerivation: { /* Used by hydra-queue-runner. */
if (!writeAllowed) throw Error("building paths is not allowed");
@@ -944,22 +948,22 @@ static void opServe(Strings opFlags, Strings opArgs)
DrvOutputs builtOutputs;
for (auto & [output, realisation] : status.builtOutputs)
builtOutputs.insert_or_assign(realisation.id, realisation);
- workerProtoWrite(*store, out, builtOutputs);
+ WorkerProto::write(*store, wconn, builtOutputs);
}
break;
}
- case cmdQueryClosure: {
+ case ServeProto::Command::QueryClosure: {
bool includeOutputs = readInt(in);
StorePathSet closure;
- store->computeFSClosure(WorkerProto<StorePathSet>::read(*store, in),
+ store->computeFSClosure(WorkerProto::Serialise<StorePathSet>::read(*store, rconn),
closure, false, includeOutputs);
- workerProtoWrite(*store, out, closure);
+ WorkerProto::write(*store, wconn, closure);
break;
}
- case cmdAddToStoreNar: {
+ case ServeProto::Command::AddToStoreNar: {
if (!writeAllowed) throw Error("importing paths is not allowed");
auto path = readString(in);
@@ -970,7 +974,7 @@ static void opServe(Strings opFlags, Strings opArgs)
};
if (deriver != "")
info.deriver = store->parseStorePath(deriver);
- info.references = WorkerProto<StorePathSet>::read(*store, in);
+ info.references = WorkerProto::Serialise<StorePathSet>::read(*store, rconn);
in >> info.registrationTime >> info.narSize >> info.ultimate;
info.sigs = readStrings<StringSet>(in);
info.ca = ContentAddress::parseOpt(readString(in));
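Note: the opServe changes show the new worker-protocol surface in one place: each stream is wrapped in a ReadConn or WriteConn, typed values are read with WorkerProto::Serialise<T>::read and written with WorkerProto::write, and the command enum now lives under ServeProto::Command. A condensed sketch of the pattern, using only calls that appear in the hunks above:

    #include "store-api.hh"
    #include "worker-protocol.hh"
    #include "worker-protocol-impl.hh"

    using namespace nix;

    void replyValidPaths(Store & store, Source & in, Sink & out)
    {
        WorkerProto::ReadConn rconn { .from = in };
        WorkerProto::WriteConn wconn { .to = out };

        // Typed read: the serialiser knows how to decode a StorePathSet.
        auto paths = WorkerProto::Serialise<StorePathSet>::read(store, rconn);

        // Typed write of the reply through the same connection object.
        WorkerProto::write(store, wconn, store.queryValidPaths(paths));
    }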
diff --git a/src/nix/daemon.cc b/src/nix/daemon.cc
index c1a91c63d..8e2bcf7e1 100644
--- a/src/nix/daemon.cc
+++ b/src/nix/daemon.cc
@@ -4,6 +4,7 @@
#include "shared.hh"
#include "local-store.hh"
#include "remote-store.hh"
+#include "remote-store-connection.hh"
#include "util.hh"
#include "serialise.hh"
#include "archive.hh"
@@ -24,6 +25,7 @@
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/un.h>
+#include <sys/select.h>
#include <errno.h>
#include <pwd.h>
#include <grp.h>
diff --git a/src/nix/flake.cc b/src/nix/flake.cc
index 3db655aeb..1eea52e15 100644
--- a/src/nix/flake.cc
+++ b/src/nix/flake.cc
@@ -259,6 +259,7 @@ struct CmdFlakeInfo : CmdFlakeMetadata
struct CmdFlakeCheck : FlakeCommand
{
bool build = true;
+ bool checkAllSystems = false;
CmdFlakeCheck()
{
@@ -267,6 +268,11 @@ struct CmdFlakeCheck : FlakeCommand
.description = "Do not build checks.",
.handler = {&build, false}
});
+ addFlag({
+ .longName = "all-systems",
+ .description = "Check the outputs for all systems.",
+ .handler = {&checkAllSystems, true}
+ });
}
std::string description() override
@@ -292,6 +298,7 @@ struct CmdFlakeCheck : FlakeCommand
lockFlags.applyNixConfig = true;
auto flake = lockFlake();
+ auto localSystem = std::string(settings.thisSystem.get());
bool hasErrors = false;
auto reportError = [&](const Error & e) {
@@ -307,6 +314,8 @@ struct CmdFlakeCheck : FlakeCommand
}
};
+ std::set<std::string> omittedSystems;
+
// FIXME: rewrite to use EvalCache.
auto resolve = [&] (PosIdx p) {
@@ -327,6 +336,15 @@ struct CmdFlakeCheck : FlakeCommand
reportError(Error("'%s' is not a valid system type, at %s", system, resolve(pos)));
};
+ auto checkSystemType = [&](const std::string & system, const PosIdx pos) {
+ if (!checkAllSystems && system != localSystem) {
+ omittedSystems.insert(system);
+ return false;
+ } else {
+ return true;
+ }
+ };
+
auto checkDerivation = [&](const std::string & attrPath, Value & v, const PosIdx pos) -> std::optional<StorePath> {
try {
auto drvInfo = getDerivation(*state, v, false);
@@ -509,16 +527,18 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- state->forceAttrs(*attr.value, attr.pos, "");
- for (auto & attr2 : *attr.value->attrs) {
- auto drvPath = checkDerivation(
- fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
- *attr2.value, attr2.pos);
- if (drvPath && attr_name == settings.thisSystem.get()) {
- drvPaths.push_back(DerivedPath::Built {
- .drvPath = *drvPath,
- .outputs = OutputsSpec::All { },
- });
+ if (checkSystemType(attr_name, attr.pos)) {
+ state->forceAttrs(*attr.value, attr.pos, "");
+ for (auto & attr2 : *attr.value->attrs) {
+ auto drvPath = checkDerivation(
+ fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
+ *attr2.value, attr2.pos);
+ if (drvPath && attr_name == settings.thisSystem.get()) {
+ drvPaths.push_back(DerivedPath::Built {
+ .drvPath = *drvPath,
+ .outputs = OutputsSpec::All { },
+ });
+ }
}
}
}
@@ -529,9 +549,11 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- checkApp(
- fmt("%s.%s", name, attr_name),
- *attr.value, attr.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ checkApp(
+ fmt("%s.%s", name, attr_name),
+ *attr.value, attr.pos);
+ };
}
}
@@ -540,11 +562,13 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- state->forceAttrs(*attr.value, attr.pos, "");
- for (auto & attr2 : *attr.value->attrs)
- checkDerivation(
- fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
- *attr2.value, attr2.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ state->forceAttrs(*attr.value, attr.pos, "");
+ for (auto & attr2 : *attr.value->attrs)
+ checkDerivation(
+ fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
+ *attr2.value, attr2.pos);
+ };
}
}
@@ -553,11 +577,13 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- state->forceAttrs(*attr.value, attr.pos, "");
- for (auto & attr2 : *attr.value->attrs)
- checkApp(
- fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
- *attr2.value, attr2.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ state->forceAttrs(*attr.value, attr.pos, "");
+ for (auto & attr2 : *attr.value->attrs)
+ checkApp(
+ fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
+ *attr2.value, attr2.pos);
+ };
}
}
@@ -566,9 +592,11 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- checkDerivation(
- fmt("%s.%s", name, attr_name),
- *attr.value, attr.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ checkDerivation(
+ fmt("%s.%s", name, attr_name),
+ *attr.value, attr.pos);
+ };
}
}
@@ -577,9 +605,11 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- checkApp(
- fmt("%s.%s", name, attr_name),
- *attr.value, attr.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ checkApp(
+ fmt("%s.%s", name, attr_name),
+ *attr.value, attr.pos);
+ };
}
}
@@ -587,6 +617,7 @@ struct CmdFlakeCheck : FlakeCommand
state->forceAttrs(vOutput, pos, "");
for (auto & attr : *vOutput.attrs) {
checkSystemName(state->symbols[attr.name], attr.pos);
+ checkSystemType(state->symbols[attr.name], attr.pos);
// FIXME: do getDerivations?
}
}
@@ -636,9 +667,11 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- checkBundler(
- fmt("%s.%s", name, attr_name),
- *attr.value, attr.pos);
+ if (checkSystemType(attr_name, attr.pos)) {
+ checkBundler(
+ fmt("%s.%s", name, attr_name),
+ *attr.value, attr.pos);
+ };
}
}
@@ -647,12 +680,14 @@ struct CmdFlakeCheck : FlakeCommand
for (auto & attr : *vOutput.attrs) {
const auto & attr_name = state->symbols[attr.name];
checkSystemName(attr_name, attr.pos);
- state->forceAttrs(*attr.value, attr.pos, "");
- for (auto & attr2 : *attr.value->attrs) {
- checkBundler(
- fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
- *attr2.value, attr2.pos);
- }
+ if (checkSystemType(attr_name, attr.pos)) {
+ state->forceAttrs(*attr.value, attr.pos, "");
+ for (auto & attr2 : *attr.value->attrs) {
+ checkBundler(
+ fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
+ *attr2.value, attr2.pos);
+ }
+ };
}
}
@@ -685,7 +720,15 @@ struct CmdFlakeCheck : FlakeCommand
}
if (hasErrors)
throw Error("some errors were encountered during the evaluation");
- }
+
+ if (!omittedSystems.empty()) {
+ warn(
+ "The check omitted these incompatible systems: %s\n"
+ "Use '--all-systems' to check all.",
+ concatStringsSep(", ", omittedSystems)
+ );
+ };
+ };
};
static Strings defaultTemplateAttrPathsPrefixes{"templates."};
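Note: taken together, the flake.cc hunks implement one rule: a per-system output is only checked when it targets the local system or --all-systems is given, and skipped systems are collected for the closing warning. A minimal, self-contained model of that gating (not the actual CmdFlakeCheck code):

    #include <set>
    #include <string>

    struct SystemFilter
    {
        bool checkAllSystems = false;
        std::string localSystem;
        std::set<std::string> omittedSystems;

        // Mirrors checkSystemType: descend only into selected systems,
        // remembering the ones that were skipped.
        bool shouldCheck(const std::string & system)
        {
            if (!checkAllSystems && system != localSystem) {
                omittedSystems.insert(system);
                return false;
            }
            return true;
        }
    };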
diff --git a/src/nix/nix.md b/src/nix/nix.md
index 8a850ae83..6d9e40dbc 100644
--- a/src/nix/nix.md
+++ b/src/nix/nix.md
@@ -102,6 +102,7 @@ way:
available in the flake. If this is undesirable, specify `path:<directory>` explicitly;
For example, if `/foo/bar` is a git repository with the following structure:
+
```
.
└── baz
diff --git a/src/nix/profile.cc b/src/nix/profile.cc
index 7cea616d2..f3b73f10d 100644
--- a/src/nix/profile.cc
+++ b/src/nix/profile.cc
@@ -806,9 +806,10 @@ struct CmdProfileWipeHistory : virtual StoreCommand, MixDefaultProfile, MixDryRu
void run(ref<Store> store) override
{
- if (minAge)
- deleteGenerationsOlderThan(*profile, *minAge, dryRun);
- else
+ if (minAge) {
+ auto t = parseOlderThanTimeSpec(*minAge);
+ deleteGenerationsOlderThan(*profile, t, dryRun);
+ } else
deleteOldGenerations(*profile, dryRun);
}
};