Diffstat (limited to 'src/libstore')
-rw-r--r--  src/libstore/binary-cache-store.cc       |  23
-rw-r--r--  src/libstore/build.cc                    | 457
-rw-r--r--  src/libstore/builtins/fetchurl.cc        |  22
-rw-r--r--  src/libstore/derivations.cc              |  16
-rw-r--r--  src/libstore/derivations.hh              |   4
-rw-r--r--  src/libstore/download.cc                 | 124
-rw-r--r--  src/libstore/download.hh                 |   4
-rw-r--r--  src/libstore/gc.cc                       |  29
-rw-r--r--  src/libstore/globals.cc                  |  21
-rw-r--r--  src/libstore/globals.hh                  |  20
-rw-r--r--  src/libstore/http-binary-cache-store.cc  |  41
-rw-r--r--  src/libstore/legacy-ssh-store.cc         |  68
-rw-r--r--  src/libstore/local-store.cc              |  16
-rw-r--r--  src/libstore/local-store.hh              |   2
-rw-r--r--  src/libstore/optimise-store.cc           |   3
-rw-r--r--  src/libstore/parsed-derivations.cc       | 111
-rw-r--r--  src/libstore/parsed-derivations.hh       |  35
-rw-r--r--  src/libstore/remote-store.cc             | 176
-rw-r--r--  src/libstore/remote-store.hh             |  15
-rw-r--r--  src/libstore/s3-binary-cache-store.cc    | 166
-rw-r--r--  src/libstore/s3.hh                       |   4
-rw-r--r--  src/libstore/serve-protocol.hh           |   3
-rw-r--r--  src/libstore/ssh.cc                      |  25
-rw-r--r--  src/libstore/ssh.hh                      |   1
-rw-r--r--  src/libstore/store-api.cc                |  48
-rw-r--r--  src/libstore/store-api.hh                |   7
26 files changed, 984 insertions(+), 457 deletions(-)
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 76c0a1a89..4527ee6ba 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -217,17 +217,6 @@ void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
{
auto info = queryPathInfo(storePath).cast<const NarInfo>();
- auto source = sinkToSource([this, url{info->url}](Sink & sink) {
- try {
- getFile(url, sink);
- } catch (NoSuchBinaryCacheFile & e) {
- throw SubstituteGone(e.what());
- }
- });
-
- stats.narRead++;
- //stats.narReadCompressedBytes += nar->size(); // FIXME
-
uint64_t narSize = 0;
LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
@@ -235,8 +224,18 @@ void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
narSize += len;
});
- decompress(info->compression, *source, wrapperSink);
+ auto decompressor = makeDecompressionSink(info->compression, wrapperSink);
+ try {
+ getFile(info->url, *decompressor);
+ } catch (NoSuchBinaryCacheFile & e) {
+ throw SubstituteGone(e.what());
+ }
+
+ decompressor->finish();
+
+ stats.narRead++;
+ //stats.narReadCompressedBytes += nar->size(); // FIXME
stats.narReadBytes += narSize;
}
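
The hunk above inverts the data flow: instead of wrapping getFile() in a pull-based source and decompressing from it, the compressed stream is pushed through a chain of sinks. Below is a minimal, self-contained sketch of that chaining, with stand-in Sink/LambdaSink types modeled on the ones used here; the identity "codec" and byte counts are illustrative, not the real makeDecompressionSink.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <functional>
    #include <iostream>

    // Stand-ins for the Sink/LambdaSink shapes used in the hunk above.
    struct Sink
    {
        virtual ~Sink() { }
        virtual void operator () (const unsigned char * data, size_t len) = 0;
    };

    struct LambdaSink : Sink
    {
        std::function<void(const unsigned char *, size_t)> fun;
        LambdaSink(std::function<void(const unsigned char *, size_t)> fun) : fun(fun) { }
        void operator () (const unsigned char * data, size_t len) override { fun(data, len); }
    };

    /* An identity "decompression" sink: it forwards bytes to the next sink
       and flushes in finish(), the contract the result of
       makeDecompressionSink follows. */
    struct DecompressionSink : Sink
    {
        Sink & nextSink;
        DecompressionSink(Sink & nextSink) : nextSink(nextSink) { }
        void operator () (const unsigned char * data, size_t len) override { nextSink(data, len); }
        void finish() { /* a real codec would emit any buffered output here */ }
    };

    int main()
    {
        uint64_t narSize = 0;
        LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
            narSize += len; // count decompressed bytes as they stream past
        });

        DecompressionSink decompressor(wrapperSink);
        const char * chunk = "example NAR bytes";
        decompressor((const unsigned char *) chunk, std::strlen(chunk));
        decompressor.finish();

        std::cout << "narSize = " << narSize << "\n"; // prints narSize = 17
    }
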
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index 8eb192059..9c408e29c 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -11,6 +11,7 @@
#include "compression.hh"
#include "json.hh"
#include "nar-info.hh"
+#include "parsed-derivations.hh"
#include <algorithm>
#include <iostream>
@@ -20,6 +21,7 @@
#include <future>
#include <chrono>
#include <regex>
+#include <queue>
#include <limits.h>
#include <sys/time.h>
@@ -29,7 +31,9 @@
#include <sys/utsname.h>
#include <sys/select.h>
#include <sys/resource.h>
+#include <sys/socket.h>
#include <fcntl.h>
+#include <netdb.h>
#include <unistd.h>
#include <errno.h>
#include <cstring>
@@ -738,6 +742,8 @@ private:
/* The derivation stored at drvPath. */
std::unique_ptr<BasicDerivation> drv;
+ std::unique_ptr<ParsedDerivation> parsedDrv;
+
/* The remainder is state held during the build. */
/* Locks on the output paths. */
@@ -852,7 +858,7 @@ private:
building multiple times. Since this contains the hash, it
allows us to compare whether two rounds produced the same
result. */
- ValidPathInfos prevInfos;
+ std::map<Path, ValidPathInfo> prevInfos;
const uid_t sandboxUid = 1000;
const gid_t sandboxGid = 100;
@@ -933,6 +939,11 @@ private:
as valid. */
void registerOutputs();
+ /* Check that an output meets the requirements specified by the
+ 'outputChecks' attribute (or the legacy
+ '{allowed,disallowed}{References,Requisites}' attributes). */
+ void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
+
/* Open a log file and a pipe to it. */
Path openLogFile();
@@ -1137,6 +1148,8 @@ void DerivationGoal::haveDerivation()
return;
}
+ parsedDrv = std::make_unique<ParsedDerivation>(drvPath, *drv);
+
/* We are first going to try to create the invalid output paths
through substitutes. If that doesn't work, we'll build
them. */
@@ -1393,7 +1406,7 @@ void DerivationGoal::tryToBuild()
/* Don't do a remote build if the derivation has the attribute
`preferLocalBuild' set. Also, check and repair modes are only
supported for local builds. */
- bool buildLocally = buildMode != bmNormal || drv->willBuildLocally();
+ bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally();
auto started = [&]() {
auto msg = fmt(
@@ -1639,19 +1652,13 @@ HookReply DerivationGoal::tryBuildHook()
try {
- /* Tell the hook about system features (beyond the system type)
- required from the build machine. (The hook could parse the
- drv file itself, but this is easier.) */
- Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures"));
- for (auto & i : features) checkStoreName(i); /* !!! abuse */
-
/* Send the request to the hook. */
worker.hook->sink
<< "try"
<< (worker.getNrLocalBuilds() < settings.maxBuildJobs ? 1 : 0)
<< drv->platform
<< drvPath
- << features;
+ << parsedDrv->getRequiredSystemFeatures();
worker.hook->sink.flush();
/* Read the first line of input, which should be a word indicating
@@ -1777,35 +1784,40 @@ static std::once_flag dns_resolve_flag;
static void preloadNSS() {
/* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
- been loaded in the parent. So we force a download of an invalid URL to force the NSS machinery to
+ been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
load its lookup libraries in the parent before any child gets a chance to. */
std::call_once(dns_resolve_flag, []() {
- DownloadRequest request("http://this.pre-initializes.the.dns.resolvers.invalid");
- request.tries = 1; // We only need to do it once, and this also suppresses an annoying warning
- try { getDownloader()->download(request); } catch (...) {}
+ struct addrinfo *res = NULL;
+
+ if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", NULL, &res) != 0) {
+ if (res) freeaddrinfo(res);
+ }
});
}
void DerivationGoal::startBuilder()
{
/* Right platform? */
- if (!drv->canBuildLocally()) {
- throw Error(
- format("a '%1%' is required to build '%3%', but I am a '%2%'")
- % drv->platform % settings.thisSystem % drvPath);
- }
+ if (!parsedDrv->canBuildLocally())
+ throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
+ drv->platform,
+ concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()),
+ drvPath,
+ settings.thisSystem,
+ concatStringsSep(", ", settings.systemFeatures));
if (drv->isBuiltin())
preloadNSS();
#if __APPLE__
- additionalSandboxProfile = get(drv->env, "__sandboxProfile");
+ additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
#endif
/* Are we doing a chroot build? */
{
+ auto noChroot = parsedDrv->getBoolAttr("__noChroot");
if (settings.sandboxMode == smEnabled) {
- if (get(drv->env, "__noChroot") == "1")
+ if (noChroot)
throw Error(format("derivation '%1%' has '__noChroot' set, "
"but that's not allowed when 'sandbox' is 'true'") % drvPath);
#if __APPLE__
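
Replacing the dummy HTTP download in preloadNSS with a direct getaddrinfo() call drops the Downloader dependency while keeping the side effect. A condensed sketch of the same trick (names copied from the hunk above; the trailing dot makes the name fully qualified so no search domains are tried, and the lookup is expected to fail — only the plugin loading matters):

    #include <netdb.h>
    #include <mutex>

    /* glibc dlopen()s its NSS plugins in this (parent) process, so
       sandboxed children forked later don't have to. */
    static std::once_flag dnsResolveFlag;

    void primeNSS()
    {
        std::call_once(dnsResolveFlag, []() {
            struct addrinfo * res = nullptr;
            if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.",
                    "http", nullptr, &res) != 0) {
                if (res) freeaddrinfo(res);
            }
        });
    }

    int main()
    {
        primeNSS(); // safe to call from multiple threads; runs at most once
    }
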
@@ -1818,7 +1830,7 @@ void DerivationGoal::startBuilder()
else if (settings.sandboxMode == smDisabled)
useChroot = false;
else if (settings.sandboxMode == smRelaxed)
- useChroot = !fixedOutput && get(drv->env, "__noChroot") != "1";
+ useChroot = !fixedOutput && !noChroot;
}
if (worker.store.storeDir != worker.store.realStoreDir) {
@@ -1869,7 +1881,7 @@ void DerivationGoal::startBuilder()
writeStructuredAttrs();
/* Handle exportReferencesGraph(), if set. */
- if (!drv->env.count("__json")) {
+ if (!parsedDrv->getStructuredAttrs()) {
/* The `exportReferencesGraph' feature allows the references graph
to be passed to a builder. This attribute should be a list of
pairs [name1 path1 name2 path2 ...]. The references graph of
@@ -1934,7 +1946,7 @@ void DerivationGoal::startBuilder()
PathSet allowedPaths = settings.allowedImpureHostPrefixes;
/* This works like the above, except on a per-derivation level */
- Strings impurePaths = tokenizeString<Strings>(get(drv->env, "__impureHostDeps"));
+ auto impurePaths = parsedDrv->getStringsAttr("__impureHostDeps").value_or(Strings());
for (auto & i : impurePaths) {
bool found = false;
@@ -2003,7 +2015,7 @@ void DerivationGoal::startBuilder()
/* Create /etc/hosts with localhost entry. */
if (!fixedOutput)
- writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n");
+ writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n::1 localhost\n");
/* Make the closure of the inputs available in the chroot,
rather than the whole Nix store. This prevents any access
@@ -2181,6 +2193,7 @@ void DerivationGoal::startBuilder()
userNamespaceSync.create();
options.allowVfork = false;
+ options.restoreMountNamespace = false;
Pid helper = startProcess([&]() {
@@ -2247,6 +2260,7 @@ void DerivationGoal::startBuilder()
#endif
{
options.allowVfork = !buildUser && !drv->isBuiltin();
+ options.restoreMountNamespace = false;
pid = startProcess([&]() {
runChild();
}, options);
@@ -2302,7 +2316,7 @@ void DerivationGoal::initEnv()
     passAsFile is ignored in structured-attrs mode because it's not
needed (attributes are not passed through the environment, so
there is no size constraint). */
- if (!drv->env.count("__json")) {
+ if (!parsedDrv->getStructuredAttrs()) {
StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile"));
int fileNr = 0;
@@ -2349,8 +2363,8 @@ void DerivationGoal::initEnv()
fixed-output derivations is by definition pure (since we
already know the cryptographic hash of the output). */
if (fixedOutput) {
- Strings varNames = tokenizeString<Strings>(get(drv->env, "impureEnvVars"));
- for (auto & i : varNames) env[i] = getEnv(i);
+ for (auto & i : parsedDrv->getStringsAttr("impureEnvVars").value_or(Strings()))
+ env[i] = getEnv(i);
}
/* Currently structured log messages piggyback on stderr, but we
@@ -2365,111 +2379,103 @@ static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
void DerivationGoal::writeStructuredAttrs()
{
- auto jsonAttr = drv->env.find("__json");
- if (jsonAttr == drv->env.end()) return;
-
- try {
-
- auto jsonStr = rewriteStrings(jsonAttr->second, inputRewrites);
+ auto & structuredAttrs = parsedDrv->getStructuredAttrs();
+ if (!structuredAttrs) return;
- auto json = nlohmann::json::parse(jsonStr);
+ auto json = *structuredAttrs;
- /* Add an "outputs" object containing the output paths. */
- nlohmann::json outputs;
- for (auto & i : drv->outputs)
- outputs[i.first] = rewriteStrings(i.second.path, inputRewrites);
- json["outputs"] = outputs;
-
- /* Handle exportReferencesGraph. */
- auto e = json.find("exportReferencesGraph");
- if (e != json.end() && e->is_object()) {
- for (auto i = e->begin(); i != e->end(); ++i) {
- std::ostringstream str;
- {
- JSONPlaceholder jsonRoot(str, true);
- PathSet storePaths;
- for (auto & p : *i)
- storePaths.insert(p.get<std::string>());
- worker.store.pathInfoToJSON(jsonRoot,
- exportReferences(storePaths), false, true);
- }
- json[i.key()] = nlohmann::json::parse(str.str()); // urgh
+ /* Add an "outputs" object containing the output paths. */
+ nlohmann::json outputs;
+ for (auto & i : drv->outputs)
+ outputs[i.first] = rewriteStrings(i.second.path, inputRewrites);
+ json["outputs"] = outputs;
+
+ /* Handle exportReferencesGraph. */
+ auto e = json.find("exportReferencesGraph");
+ if (e != json.end() && e->is_object()) {
+ for (auto i = e->begin(); i != e->end(); ++i) {
+ std::ostringstream str;
+ {
+ JSONPlaceholder jsonRoot(str, true);
+ PathSet storePaths;
+ for (auto & p : *i)
+ storePaths.insert(p.get<std::string>());
+ worker.store.pathInfoToJSON(jsonRoot,
+ exportReferences(storePaths), false, true);
}
+ json[i.key()] = nlohmann::json::parse(str.str()); // urgh
}
+ }
- writeFile(tmpDir + "/.attrs.json", json.dump());
+ writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites));
- /* As a convenience to bash scripts, write a shell file that
- maps all attributes that are representable in bash -
- namely, strings, integers, nulls, Booleans, and arrays and
- objects consisting entirely of those values. (So nested
- arrays or objects are not supported.) */
+ /* As a convenience to bash scripts, write a shell file that
+ maps all attributes that are representable in bash -
+ namely, strings, integers, nulls, Booleans, and arrays and
+ objects consisting entirely of those values. (So nested
+ arrays or objects are not supported.) */
- auto handleSimpleType = [](const nlohmann::json & value) -> std::experimental::optional<std::string> {
- if (value.is_string())
- return shellEscape(value);
+ auto handleSimpleType = [](const nlohmann::json & value) -> std::experimental::optional<std::string> {
+ if (value.is_string())
+ return shellEscape(value);
- if (value.is_number()) {
- auto f = value.get<float>();
- if (std::ceil(f) == f)
- return std::to_string(value.get<int>());
- }
-
- if (value.is_null())
- return std::string("''");
+ if (value.is_number()) {
+ auto f = value.get<float>();
+ if (std::ceil(f) == f)
+ return std::to_string(value.get<int>());
+ }
- if (value.is_boolean())
- return value.get<bool>() ? std::string("1") : std::string("");
+ if (value.is_null())
+ return std::string("''");
- return {};
- };
+ if (value.is_boolean())
+ return value.get<bool>() ? std::string("1") : std::string("");
- std::string jsonSh;
+ return {};
+ };
- for (auto i = json.begin(); i != json.end(); ++i) {
+ std::string jsonSh;
- if (!std::regex_match(i.key(), shVarName)) continue;
+ for (auto i = json.begin(); i != json.end(); ++i) {
- auto & value = i.value();
+ if (!std::regex_match(i.key(), shVarName)) continue;
- auto s = handleSimpleType(value);
- if (s)
- jsonSh += fmt("declare %s=%s\n", i.key(), *s);
+ auto & value = i.value();
- else if (value.is_array()) {
- std::string s2;
- bool good = true;
+ auto s = handleSimpleType(value);
+ if (s)
+ jsonSh += fmt("declare %s=%s\n", i.key(), *s);
- for (auto i = value.begin(); i != value.end(); ++i) {
- auto s3 = handleSimpleType(i.value());
- if (!s3) { good = false; break; }
- s2 += *s3; s2 += ' ';
- }
+ else if (value.is_array()) {
+ std::string s2;
+ bool good = true;
- if (good)
- jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2);
+ for (auto i = value.begin(); i != value.end(); ++i) {
+ auto s3 = handleSimpleType(i.value());
+ if (!s3) { good = false; break; }
+ s2 += *s3; s2 += ' ';
}
- else if (value.is_object()) {
- std::string s2;
- bool good = true;
+ if (good)
+ jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2);
+ }
- for (auto i = value.begin(); i != value.end(); ++i) {
- auto s3 = handleSimpleType(i.value());
- if (!s3) { good = false; break; }
- s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3);
- }
+ else if (value.is_object()) {
+ std::string s2;
+ bool good = true;
- if (good)
- jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2);
+ for (auto i = value.begin(); i != value.end(); ++i) {
+ auto s3 = handleSimpleType(i.value());
+ if (!s3) { good = false; break; }
+ s2 += fmt("[%s]=%s ", shellEscape(i.key()), *s3);
}
- }
- writeFile(tmpDir + "/.attrs.sh", jsonSh);
-
- } catch (std::exception & e) {
- throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what());
+ if (good)
+ jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2);
+ }
}
+
+ writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites));
}
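
The serialization rules above are easiest to see end to end. Here is a standalone sketch using nlohmann::json (the same library the code parses __json with); escaping is reduced to naive single quotes where the real code uses shellEscape, and unlike the real code, which rejects a whole array if any element is unsupported, this sketch simply skips unsupported elements.

    #include <nlohmann/json.hpp>

    #include <experimental/optional>
    #include <iostream>
    #include <string>

    using nlohmann::json;

    // Map a JSON leaf to its bash representation, or nothing if unsupported.
    std::experimental::optional<std::string> handleSimpleType(const json & value)
    {
        if (value.is_string()) return "'" + value.get<std::string>() + "'";
        if (value.is_number_integer()) return std::to_string(value.get<long>());
        if (value.is_null()) return std::string("''");
        if (value.is_boolean()) return value.get<bool>() ? std::string("1") : std::string("");
        return {};
    }

    int main()
    {
        json attrs = {{"name", "hello"}, {"doCheck", true}, {"flags", {"-O2", "-g"}}};

        std::string jsonSh;
        for (auto i = attrs.begin(); i != attrs.end(); ++i) {
            auto s = handleSimpleType(i.value());
            if (s)
                jsonSh += "declare " + i.key() + "=" + *s + "\n";
            else if (i.value().is_array()) {
                std::string s2;
                for (auto & elem : i.value())
                    if (auto s3 = handleSimpleType(elem)) { s2 += *s3; s2 += ' '; }
                jsonSh += "declare -a " + i.key() + "=(" + s2 + ")\n";
            }
        }

        std::cout << jsonSh;
        // declare doCheck=1
        // declare -a flags=('-O2' '-g' )
        // declare name='hello'
    }
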
@@ -2624,7 +2630,7 @@ void DerivationGoal::runChild()
createDirs(chrootRootDir + "/dev/shm");
createDirs(chrootRootDir + "/dev/pts");
ss.push_back("/dev/full");
- if (pathExists("/dev/kvm"))
+ if (settings.systemFeatures.get().count("kvm") && pathExists("/dev/kvm"))
ss.push_back("/dev/kvm");
ss.push_back("/dev/null");
ss.push_back("/dev/random");
@@ -2913,7 +2919,7 @@ void DerivationGoal::runChild()
writeFile(sandboxFile, sandboxProfile);
- bool allowLocalNetworking = get(drv->env, "__darwinAllowLocalNetworking") == "1";
+ bool allowLocalNetworking = parsedDrv->getBoolAttr("__darwinAllowLocalNetworking");
/* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms
to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */
@@ -2985,10 +2991,9 @@ void DerivationGoal::runChild()
/* Parse a list of reference specifiers. Each element must either be
a store path, or the symbolic name of the output of the derivation
(such as `out'). */
-PathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv, string attr)
+PathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv, const Strings & paths)
{
PathSet result;
- Paths paths = tokenizeString<Paths>(attr);
for (auto & i : paths) {
if (store.isStorePath(i))
result.insert(i);
@@ -3013,7 +3018,7 @@ void DerivationGoal::registerOutputs()
if (allValid) return;
}
- ValidPathInfos infos;
+ std::map<std::string, ValidPathInfo> infos;
/* Set of inodes seen during calls to canonicalisePathMetaData()
for this build's outputs. This needs to be shared between
@@ -3117,7 +3122,7 @@ void DerivationGoal::registerOutputs()
the derivation to its content-addressed location. */
Hash h2 = recursive ? hashPath(h.type, actualPath).first : hashFile(h.type, actualPath);
- Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]);
+ Path dest = worker.store.makeFixedOutputPath(recursive, h2, storePathToName(path));
if (h != h2) {
@@ -3198,48 +3203,6 @@ void DerivationGoal::registerOutputs()
debug(format("referenced input: '%1%'") % i);
}
- /* Enforce `allowedReferences' and friends. */
- auto checkRefs = [&](const string & attrName, bool allowed, bool recursive) {
- if (drv->env.find(attrName) == drv->env.end()) return;
-
- PathSet spec = parseReferenceSpecifiers(worker.store, *drv, get(drv->env, attrName));
-
- PathSet used;
- if (recursive) {
- /* Our requisites are the union of the closures of our references. */
- for (auto & i : references)
- /* Don't call computeFSClosure on ourselves. */
- if (path != i)
- worker.store.computeFSClosure(i, used);
- } else
- used = references;
-
- PathSet badPaths;
-
- for (auto & i : used)
- if (allowed) {
- if (spec.find(i) == spec.end())
- badPaths.insert(i);
- } else {
- if (spec.find(i) != spec.end())
- badPaths.insert(i);
- }
-
- if (!badPaths.empty()) {
- string badPathsStr;
- for (auto & i : badPaths) {
- badPathsStr += "\n\t";
- badPathsStr += i;
- }
- throw BuildError(format("output '%1%' is not allowed to refer to the following paths:%2%") % actualPath % badPathsStr);
- }
- };
-
- checkRefs("allowedReferences", true, false);
- checkRefs("allowedRequisites", true, true);
- checkRefs("disallowedReferences", false, false);
- checkRefs("disallowedRequisites", false, true);
-
if (curRound == nrRounds) {
worker.store.optimisePath(actualPath); // FIXME: combine with scanForReferences()
worker.markContentsGood(path);
@@ -3255,11 +3218,14 @@ void DerivationGoal::registerOutputs()
if (!info.references.empty()) info.ca.clear();
- infos.push_back(info);
+ infos[i.first] = info;
}
if (buildMode == bmCheck) return;
+ /* Apply output checks. */
+ checkOutputs(infos);
+
/* Compare the result with the previous round, and report which
path is different, if any.*/
if (curRound > 1 && prevInfos != infos) {
@@ -3267,16 +3233,16 @@ void DerivationGoal::registerOutputs()
for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
if (!(*i == *j)) {
result.isNonDeterministic = true;
- Path prev = i->path + checkSuffix;
+ Path prev = i->second.path + checkSuffix;
bool prevExists = keepPreviousRound && pathExists(prev);
auto msg = prevExists
- ? fmt("output '%1%' of '%2%' differs from '%3%' from previous round", i->path, drvPath, prev)
- : fmt("output '%1%' of '%2%' differs from previous round", i->path, drvPath);
+ ? fmt("output '%1%' of '%2%' differs from '%3%' from previous round", i->second.path, drvPath, prev)
+ : fmt("output '%1%' of '%2%' differs from previous round", i->second.path, drvPath);
auto diffHook = settings.diffHook;
if (prevExists && diffHook != "" && runDiffHook) {
try {
- auto diff = runProgram(diffHook, true, {prev, i->path});
+ auto diff = runProgram(diffHook, true, {prev, i->second.path});
if (diff != "")
printError(chomp(diff));
} catch (Error & error) {
@@ -3321,7 +3287,11 @@ void DerivationGoal::registerOutputs()
/* Register each output path as valid, and register the sets of
paths referenced by each of them. If there are cycles in the
outputs, this will fail. */
- worker.store.registerValidPaths(infos);
+ {
+ ValidPathInfos infos2;
+ for (auto & i : infos) infos2.push_back(i.second);
+ worker.store.registerValidPaths(infos2);
+ }
/* In case of a fixed-output derivation hash mismatch, throw an
exception now that we have registered the output as valid. */
@@ -3330,6 +3300,158 @@ void DerivationGoal::registerOutputs()
}
+void DerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & outputs)
+{
+ std::map<Path, const ValidPathInfo &> outputsByPath;
+ for (auto & output : outputs)
+ outputsByPath.emplace(output.second.path, output.second);
+
+ for (auto & output : outputs) {
+ auto & outputName = output.first;
+ auto & info = output.second;
+
+ struct Checks
+ {
+ bool ignoreSelfRefs = false;
+ std::experimental::optional<uint64_t> maxSize, maxClosureSize;
+ std::experimental::optional<Strings> allowedReferences, allowedRequisites, disallowedReferences, disallowedRequisites;
+ };
+
+ /* Compute the closure and closure size of some output. This
+ is slightly tricky because some of its references (namely
+ other outputs) may not be valid yet. */
+ auto getClosure = [&](const Path & path)
+ {
+ uint64_t closureSize = 0;
+ PathSet pathsDone;
+ std::queue<Path> pathsLeft;
+ pathsLeft.push(path);
+
+ while (!pathsLeft.empty()) {
+ auto path = pathsLeft.front();
+ pathsLeft.pop();
+ if (!pathsDone.insert(path).second) continue;
+
+ auto i = outputsByPath.find(path);
+ if (i != outputsByPath.end()) {
+ closureSize += i->second.narSize;
+ for (auto & ref : i->second.references)
+ pathsLeft.push(ref);
+ } else {
+ auto info = worker.store.queryPathInfo(path);
+ closureSize += info->narSize;
+ for (auto & ref : info->references)
+ pathsLeft.push(ref);
+ }
+ }
+
+ return std::make_pair(pathsDone, closureSize);
+ };
+
+ auto applyChecks = [&](const Checks & checks)
+ {
+ if (checks.maxSize && info.narSize > *checks.maxSize)
+ throw BuildError("path '%s' is too large at %d bytes; limit is %d bytes",
+ info.path, info.narSize, *checks.maxSize);
+
+ if (checks.maxClosureSize) {
+ uint64_t closureSize = getClosure(info.path).second;
+ if (closureSize > *checks.maxClosureSize)
+ throw BuildError("closure of path '%s' is too large at %d bytes; limit is %d bytes",
+ info.path, closureSize, *checks.maxClosureSize);
+ }
+
+ auto checkRefs = [&](const std::experimental::optional<Strings> & value, bool allowed, bool recursive)
+ {
+ if (!value) return;
+
+ PathSet spec = parseReferenceSpecifiers(worker.store, *drv, *value);
+
+ PathSet used = recursive ? getClosure(info.path).first : info.references;
+
+ if (recursive && checks.ignoreSelfRefs)
+ used.erase(info.path);
+
+ PathSet badPaths;
+
+ for (auto & i : used)
+ if (allowed) {
+ if (!spec.count(i))
+ badPaths.insert(i);
+ } else {
+ if (spec.count(i))
+ badPaths.insert(i);
+ }
+
+ if (!badPaths.empty()) {
+ string badPathsStr;
+ for (auto & i : badPaths) {
+ badPathsStr += "\n ";
+ badPathsStr += i;
+ }
+ throw BuildError("output '%s' is not allowed to refer to the following paths:%s", info.path, badPathsStr);
+ }
+ };
+
+ checkRefs(checks.allowedReferences, true, false);
+ checkRefs(checks.allowedRequisites, true, true);
+ checkRefs(checks.disallowedReferences, false, false);
+ checkRefs(checks.disallowedRequisites, false, true);
+ };
+
+ if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) {
+ auto outputChecks = structuredAttrs->find("outputChecks");
+ if (outputChecks != structuredAttrs->end()) {
+ auto output = outputChecks->find(outputName);
+
+ if (output != outputChecks->end()) {
+ Checks checks;
+
+ auto maxSize = output->find("maxSize");
+ if (maxSize != output->end())
+ checks.maxSize = maxSize->get<uint64_t>();
+
+ auto maxClosureSize = output->find("maxClosureSize");
+ if (maxClosureSize != output->end())
+ checks.maxClosureSize = maxClosureSize->get<uint64_t>();
+
+ auto get = [&](const std::string & name) -> std::experimental::optional<Strings> {
+ auto i = output->find(name);
+ if (i != output->end()) {
+ Strings res;
+ for (auto j = i->begin(); j != i->end(); ++j) {
+ if (!j->is_string())
+ throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
+ res.push_back(j->get<std::string>());
+ }
+ return res;
+ }
+ return {};
+ };
+
+ checks.allowedReferences = get("allowedReferences");
+ checks.allowedRequisites = get("allowedRequisites");
+ checks.disallowedReferences = get("disallowedReferences");
+ checks.disallowedRequisites = get("disallowedRequisites");
+
+ applyChecks(checks);
+ }
+ }
+ } else {
+ // legacy non-structured-attributes case
+ Checks checks;
+ checks.ignoreSelfRefs = true;
+ checks.allowedReferences = parsedDrv->getStringsAttr("allowedReferences");
+ checks.allowedRequisites = parsedDrv->getStringsAttr("allowedRequisites");
+ checks.disallowedReferences = parsedDrv->getStringsAttr("disallowedReferences");
+ checks.disallowedRequisites = parsedDrv->getStringsAttr("disallowedRequisites");
+ applyChecks(checks);
+ }
+ }
+}
+
+
Path DerivationGoal::openLogFile()
{
logSize = 0;
@@ -3678,6 +3800,19 @@ void SubstitutionGoal::tryNext()
} catch (InvalidPath &) {
tryNext();
return;
+ } catch (SubstituterDisabled &) {
+ if (settings.tryFallback) {
+ tryNext();
+ return;
+ }
+ throw;
+ } catch (Error & e) {
+ if (settings.tryFallback) {
+ printError(e.what());
+ tryNext();
+ return;
+ }
+ throw;
}
/* Update the total expected download size. */
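
checkOutputs above needs closure sizes before all outputs are registered as valid, so its getClosure lambda walks the reference graph by hand, consulting the in-memory outputs before the store. A self-contained sketch of that walk over a made-up graph:

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <queue>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    struct Info { uint64_t narSize; std::vector<std::string> references; };

    // Breadth-first walk over the reference graph, summing narSize. The
    // pathsDone set makes shared references count only once.
    std::pair<std::set<std::string>, uint64_t>
    getClosure(const std::map<std::string, Info> & graph, const std::string & start)
    {
        uint64_t closureSize = 0;
        std::set<std::string> pathsDone;
        std::queue<std::string> pathsLeft;
        pathsLeft.push(start);

        while (!pathsLeft.empty()) {
            auto path = pathsLeft.front();
            pathsLeft.pop();
            if (!pathsDone.insert(path).second) continue; // already visited

            auto & info = graph.at(path);
            closureSize += info.narSize;
            for (auto & ref : info.references)
                pathsLeft.push(ref);
        }

        return {pathsDone, closureSize};
    }

    int main()
    {
        std::map<std::string, Info> graph{
            {"out",  {100, {"dep1", "dep2"}}},
            {"dep1", {50,  {"dep2"}}},        // dep2 is shared...
            {"dep2", {25,  {}}},              // ...but counted once
        };

        auto closure = getClosure(graph, "out");
        std::cout << closure.first.size() << " paths, "
                  << closure.second << " bytes\n"; // 3 paths, 175 bytes
    }
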
diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc
index 1f4abd374..92aec63a0 100644
--- a/src/libstore/builtins/fetchurl.cc
+++ b/src/libstore/builtins/fetchurl.cc
@@ -24,6 +24,7 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
Path storePath = getAttr("out");
auto mainUrl = getAttr("url");
+ bool unpack = get(drv.env, "unpack", "") == "1";
/* Note: have to use a fresh downloader here because we're in
a forked process. */
@@ -39,21 +40,16 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
request.verifyTLS = false;
request.decompress = false;
- downloader->download(std::move(request), sink);
+ auto decompressor = makeDecompressionSink(
+ unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
+ downloader->download(std::move(request), *decompressor);
+ decompressor->finish();
});
- if (get(drv.env, "unpack", "") == "1") {
-
- if (hasSuffix(mainUrl, ".xz")) {
- auto source2 = sinkToSource([&](Sink & sink) {
- decompress("xz", *source, sink);
- });
- restorePath(storePath, *source2);
- } else
- restorePath(storePath, *source);
-
- } else
- writeFile(storePath, *source);
+ if (unpack)
+ restorePath(storePath, *source);
+ else
+ writeFile(storePath, *source);
auto executable = drv.env.find("executable");
if (executable != drv.env.end() && executable->second == "1") {
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index 74b861281..3961126ff 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -36,12 +36,6 @@ Path BasicDerivation::findOutput(const string & id) const
}
-bool BasicDerivation::willBuildLocally() const
-{
- return get(env, "preferLocalBuild") == "1" && canBuildLocally();
-}
-
-
bool BasicDerivation::substitutesAllowed() const
{
return get(env, "allowSubstitutes", "1") == "1";
@@ -54,14 +48,6 @@ bool BasicDerivation::isBuiltin() const
}
-bool BasicDerivation::canBuildLocally() const
-{
- return platform == settings.thisSystem
- || settings.extraPlatforms.get().count(platform) > 0
- || isBuiltin();
-}
-
-
Path writeDerivation(ref<Store> store,
const Derivation & drv, const string & name, RepairFlag repair)
{
@@ -342,7 +328,7 @@ Hash hashDerivationModulo(Store & store, Derivation drv)
Hash h = drvHashes[i.first];
if (!h) {
assert(store.isValidPath(i.first));
- Derivation drv2 = readDerivation(i.first);
+ Derivation drv2 = readDerivation(store.toRealPath(i.first));
h = hashDerivationModulo(store, drv2);
drvHashes[i.first] = h;
}
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index 7b97730d3..9753e796d 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -56,14 +56,10 @@ struct BasicDerivation
the given derivation. */
Path findOutput(const string & id) const;
- bool willBuildLocally() const;
-
bool substitutesAllowed() const;
bool isBuiltin() const;
- bool canBuildLocally() const;
-
/* Return true iff this is a fixed-output derivation. */
bool isFixedOutput() const;
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
index 29bb56ea1..7773d9032 100644
--- a/src/libstore/download.cc
+++ b/src/libstore/download.cc
@@ -58,16 +58,6 @@ std::string resolveUri(const std::string & uri)
return uri;
}
-ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data)
-{
- if (encoding == "")
- return data;
- else if (encoding == "br")
- return decompress(encoding, *data);
- else
- throw Error("unsupported Content-Encoding '%s'", encoding);
-}
-
struct CurlDownloader : public Downloader
{
CURLM * curlm = 0;
@@ -106,6 +96,12 @@ struct CurlDownloader : public Downloader
fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
{request.uri}, request.parentAct)
, callback(callback)
+ , finalSink([this](const unsigned char * data, size_t len) {
+ if (this->request.dataCallback)
+ this->request.dataCallback((char *) data, len);
+ else
+ this->result.data->append((char *) data, len);
+ })
{
if (!request.expectedETag.empty())
requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
@@ -129,22 +125,40 @@ struct CurlDownloader : public Downloader
}
}
- template<class T>
- void fail(const T & e)
+ void failEx(std::exception_ptr ex)
{
assert(!done);
done = true;
- callback.rethrow(std::make_exception_ptr(e));
+ callback.rethrow(ex);
+ }
+
+ template<class T>
+ void fail(const T & e)
+ {
+ failEx(std::make_exception_ptr(e));
}
+ LambdaSink finalSink;
+ std::shared_ptr<CompressionSink> decompressionSink;
+
+ std::exception_ptr writeException;
+
size_t writeCallback(void * contents, size_t size, size_t nmemb)
{
- size_t realSize = size * nmemb;
- if (request.dataCallback)
- request.dataCallback((char *) contents, realSize);
- else
- result.data->append((char *) contents, realSize);
- return realSize;
+ try {
+ size_t realSize = size * nmemb;
+ result.bodySize += realSize;
+
+ if (!decompressionSink)
+ decompressionSink = makeDecompressionSink(encoding, finalSink);
+
+ (*decompressionSink)((unsigned char *) contents, realSize);
+
+ return realSize;
+ } catch (...) {
+ writeException = std::current_exception();
+ return 0;
+ }
}
static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
@@ -162,6 +176,7 @@ struct CurlDownloader : public Downloader
auto ss = tokenizeString<vector<string>>(line, " ");
status = ss.size() >= 2 ? ss[1] : "";
result.data = std::make_shared<std::string>();
+ result.bodySize = 0;
encoding = "";
} else {
auto i = line.find(':');
@@ -244,6 +259,7 @@ struct CurlDownloader : public Downloader
curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str());
curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L);
+ curl_easy_setopt(req, CURLOPT_MAXREDIRS, 10);
curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
curl_easy_setopt(req, CURLOPT_USERAGENT,
("curl/" LIBCURL_VERSION " Nix/" + nixVersion +
@@ -295,6 +311,7 @@ struct CurlDownloader : public Downloader
curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL);
result.data = std::make_shared<std::string>();
+ result.bodySize = 0;
}
void finish(CURLcode code)
@@ -308,29 +325,35 @@ struct CurlDownloader : public Downloader
result.effectiveUrl = effectiveUrlCStr;
debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes",
- request.verb(), request.uri, code, httpStatus, result.data ? result.data->size() : 0);
+ request.verb(), request.uri, code, httpStatus, result.bodySize);
+
+ if (decompressionSink)
+ decompressionSink->finish();
if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) {
code = CURLE_OK;
httpStatus = 304;
}
- if (code == CURLE_OK &&
+ if (writeException)
+ failEx(writeException);
+
+ else if (code == CURLE_OK &&
(httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
{
result.cached = httpStatus == 304;
done = true;
try {
- if (request.decompress)
- result.data = decodeContent(encoding, ref<std::string>(result.data));
- act.progress(result.data->size(), result.data->size());
+ act.progress(result.bodySize, result.bodySize);
callback(std::move(result));
} catch (...) {
done = true;
callback.rethrow();
}
- } else {
+ }
+
+ else {
// We treat most errors as transient, but won't retry when hopeless
Error err = Transient;
@@ -364,6 +387,8 @@ struct CurlDownloader : public Downloader
case CURLE_INTERFACE_FAILED:
case CURLE_UNKNOWN_OPTION:
case CURLE_SSL_CACERT_BADFILE:
+ case CURLE_TOO_MANY_REDIRECTS:
+ case CURLE_WRITE_ERROR:
err = Misc;
break;
default: // Shut up warnings
@@ -597,7 +622,7 @@ struct CurlDownloader : public Downloader
// FIXME: do this on a worker thread
try {
#ifdef ENABLE_S3
- S3Helper s3Helper("", Aws::Region::US_EAST_1); // FIXME: make configurable
+ S3Helper s3Helper("", Aws::Region::US_EAST_1, ""); // FIXME: make configurable
auto slash = request.uri.find('/', 5);
if (slash == std::string::npos)
throw nix::Error("bad S3 URI '%s'", request.uri);
@@ -686,11 +711,12 @@ void Downloader::download(DownloadRequest && request, Sink & sink)
/* If the buffer is full, then go to sleep until the calling
thread wakes us up (i.e. when it has removed data from the
- buffer). Note: this does stall the download thread. */
- while (state->data.size() > 1024 * 1024) {
- if (state->quit) return;
+ buffer). We don't wait forever to prevent stalling the
+ download thread. (Hopefully sleeping will throttle the
+ sender.) */
+ if (state->data.size() > 1024 * 1024) {
debug("download buffer is full; going to sleep");
- state.wait(state->request);
+ state.wait_for(state->request, std::chrono::seconds(10));
}
/* Append data to the buffer and wake up the calling
@@ -712,28 +738,36 @@ void Downloader::download(DownloadRequest && request, Sink & sink)
state->request.notify_one();
}});
- auto state(_state->lock());
-
while (true) {
checkInterrupt();
- if (state->quit) {
- if (state->exc) std::rethrow_exception(state->exc);
- break;
- }
+ std::string chunk;
- /* If no data is available, then wait for the download thread
- to wake us up. */
- if (state->data.empty())
- state.wait(state->avail);
+ /* Grab data if available, otherwise wait for the download
+ thread to wake us up. */
+ {
+ auto state(_state->lock());
+
+ while (state->data.empty()) {
+
+ if (state->quit) {
+ if (state->exc) std::rethrow_exception(state->exc);
+ return;
+ }
+
+ state.wait(state->avail);
+ }
+
+ chunk = std::move(state->data);
- /* If data is available, then flush it to the sink and wake up
- the download thread if it's blocked on a full buffer. */
- if (!state->data.empty()) {
- sink((unsigned char *) state->data.data(), state->data.size());
- state->data.clear();
state->request.notify_one();
}
+
+ /* Flush the data to the sink and wake up the download thread
+ if it's blocked on a full buffer. We don't hold the state
+ lock while doing this to prevent blocking the download
+ thread if sink() takes a long time. */
+ sink((unsigned char *) chunk.data(), chunk.size());
}
}
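
The rewritten consumer loop above fixes two hazards: the sink used to run while the state lock was held, and the producer could sleep forever on a full buffer. A compact sketch of the resulting handoff protocol, with an illustrative buffer threshold and made-up chunk contents:

    #include <chrono>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <string>
    #include <thread>

    struct State {
        std::mutex m;
        std::condition_variable avail, request;
        std::string data;
        bool quit = false;
    };

    int main()
    {
        State st;

        std::thread producer([&] {
            for (int i = 0; i < 5; i++) {
                std::unique_lock<std::mutex> lk(st.m);
                if (st.data.size() > 1024 * 1024)  // buffer full: back off,
                    st.request.wait_for(lk, std::chrono::seconds(10)); // but not forever
                st.data += "chunk" + std::to_string(i) + " ";
                st.avail.notify_one();
            }
            std::unique_lock<std::mutex> lk(st.m);
            st.quit = true;
            st.avail.notify_one();
        });

        while (true) {
            std::string chunk;
            {
                std::unique_lock<std::mutex> lk(st.m);
                while (st.data.empty()) {
                    if (st.quit) { producer.join(); return 0; }
                    st.avail.wait(lk);
                }
                chunk = std::move(st.data);
                st.data.clear();        // make the moved-from state explicit
                st.request.notify_one();
            }
            std::cout << chunk;         // the "sink" runs without holding the lock
        }
    }
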
diff --git a/src/libstore/download.hh b/src/libstore/download.hh
index da55df7a6..f0228f7d0 100644
--- a/src/libstore/download.hh
+++ b/src/libstore/download.hh
@@ -38,6 +38,7 @@ struct DownloadResult
std::string etag;
std::string effectiveUrl;
std::shared_ptr<std::string> data;
+ uint64_t bodySize = 0;
};
class Store;
@@ -87,7 +88,4 @@ public:
bool isUri(const string & s);
-/* Decode data according to the Content-Encoding header. */
-ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data);
-
}
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index ba49749d8..b415d5421 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -7,6 +7,7 @@
#include <queue>
#include <algorithm>
#include <regex>
+#include <random>
#include <sys/types.h>
#include <sys/stat.h>
@@ -365,7 +366,7 @@ try_again:
char buf[bufsiz];
auto res = readlink(file.c_str(), buf, bufsiz);
if (res == -1) {
- if (errno == ENOENT || errno == EACCES)
+ if (errno == ENOENT || errno == EACCES || errno == ESRCH)
return;
throw SysError("reading symlink");
}
@@ -425,25 +426,28 @@ PathSet LocalStore::findRuntimeRoots()
readProcLink((format("%1%/%2%") % fdStr % fd_ent->d_name).str(), paths);
}
}
- if (errno)
+ if (errno) {
+ if (errno == ESRCH)
+ continue;
throw SysError(format("iterating /proc/%1%/fd") % ent->d_name);
- fdDir.reset();
-
- auto mapLines =
- tokenizeString<std::vector<string>>(readFile((format("/proc/%1%/maps") % ent->d_name).str(), true), "\n");
- for (const auto& line : mapLines) {
- auto match = std::smatch{};
- if (std::regex_match(line, match, mapRegex))
- paths.emplace(match[1]);
}
+ fdDir.reset();
try {
+ auto mapLines =
+ tokenizeString<std::vector<string>>(readFile((format("/proc/%1%/maps") % ent->d_name).str(), true), "\n");
+ for (const auto& line : mapLines) {
+ auto match = std::smatch{};
+ if (std::regex_match(line, match, mapRegex))
+ paths.emplace(match[1]);
+ }
+
auto envString = readFile((format("/proc/%1%/environ") % ent->d_name).str(), true);
auto env_end = std::sregex_iterator{};
for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
paths.emplace(i->str());
} catch (SysError & e) {
- if (errno == ENOENT || errno == EACCES)
+ if (errno == ENOENT || errno == EACCES || errno == ESRCH)
continue;
throw;
}
@@ -829,7 +833,8 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
alphabetically first (e.g. /nix/store/000...). This
matters when using --max-freed etc. */
vector<Path> entries_(entries.begin(), entries.end());
- random_shuffle(entries_.begin(), entries_.end());
+ std::mt19937 gen(1);
+ std::shuffle(entries_.begin(), entries_.end(), gen);
for (auto & i : entries_)
tryToDelete(state, i);
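
std::random_shuffle was deprecated in C++14 and removed in C++17; the replacement above pairs std::shuffle with an explicit engine. With the fixed seed, the deletion order is arbitrary (defeating the alphabetical bias the comment mentions) yet reproducible across runs:

    #include <algorithm>
    #include <iostream>
    #include <random>
    #include <string>
    #include <vector>

    int main()
    {
        std::vector<std::string> entries{
            "/nix/store/aaa", "/nix/store/bbb", "/nix/store/ccc"};

        std::mt19937 gen(1);  // fixed seed: same permutation every run
        std::shuffle(entries.begin(), entries.end(), gen);

        for (auto & e : entries) std::cout << e << "\n";
    }
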
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index d95db5672..1c2c08715 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -78,7 +78,11 @@ void loadConfFile()
~/.nix/nix.conf or the command line. */
globalConfig.resetOverriden();
- globalConfig.applyConfigFile(getConfigDir() + "/nix/nix.conf");
+ auto dirs = getConfigDirs();
+ // Iterate over them in reverse so that the ones appearing first in the path take priority
+ for (auto dir = dirs.rbegin(); dir != dirs.rend(); dir++) {
+ globalConfig.applyConfigFile(*dir + "/nix/nix.conf");
+ }
}
unsigned int Settings::getDefaultCores()
@@ -86,6 +90,21 @@ unsigned int Settings::getDefaultCores()
return std::max(1U, std::thread::hardware_concurrency());
}
+StringSet Settings::getDefaultSystemFeatures()
+{
+ /* For backwards compatibility, accept some "features" that are
+ used in Nixpkgs to route builds to certain machines but don't
+ actually require anything special on the machines. */
+ StringSet features{"nixos-test", "benchmark", "big-parallel"};
+
+ #if __linux__
+ if (access("/dev/kvm", R_OK | W_OK) == 0)
+ features.insert("kvm");
+ #endif
+
+ return features;
+}
+
const string nixVersion = PACKAGE_VERSION;
template<> void BaseSetting<SandboxMode>::set(const std::string & str)
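
Why loadConfFile() iterates in reverse: applyConfigFile() lets later files overwrite earlier settings, so applying the lowest-priority directory first makes the first directory in the search path win. A toy model of that precedence (directories, keys, and file contents are made up):

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main()
    {
        // Search path as getConfigDirs() might return it: first entry wins.
        std::vector<std::string> dirs{"/home/alice/.config", "/etc/xdg"};

        // Pretend contents of <dir>/nix/nix.conf.
        std::map<std::string, std::map<std::string, std::string>> files{
            {"/etc/xdg",            {{"cores", "4"}, {"sandbox", "true"}}},
            {"/home/alice/.config", {{"cores", "8"}}},
        };

        std::map<std::string, std::string> settings;

        // Apply in reverse: later assignments overwrite earlier ones, so the
        // first directory in the search path takes priority.
        for (auto dir = dirs.rbegin(); dir != dirs.rend(); ++dir)
            for (auto & kv : files[*dir])
                settings[kv.first] = kv.second;

        std::cout << "cores = " << settings["cores"]        // 8 (user wins)
                  << ", sandbox = " << settings["sandbox"]  // true (system only)
                  << "\n";
    }
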
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index f589078db..53efc6a90 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -32,6 +32,8 @@ class Settings : public Config {
unsigned int getDefaultCores();
+ StringSet getDefaultSystemFeatures();
+
public:
Settings();
@@ -80,9 +82,9 @@ public:
/* Whether to show build log output in real time. */
bool verboseBuild = true;
- /* If verboseBuild is false, the number of lines of the tail of
- the log to show if a build fails. */
- size_t logLines = 10;
+ Setting<size_t> logLines{this, 10, "log-lines",
+ "If verbose-build is false, the number of lines of the tail of "
+ "the log to show if a build fails."};
MaxBuildJobsSetting maxBuildJobs{this, 1, "max-jobs",
"Maximum number of parallel build jobs. \"auto\" means use number of cores.",
@@ -193,7 +195,13 @@ public:
Setting<bool> showTrace{this, false, "show-trace",
"Whether to show a stack trace on evaluation errors."};
- Setting<SandboxMode> sandboxMode{this, smDisabled, "sandbox",
+ Setting<SandboxMode> sandboxMode{this,
+ #if __linux__
+ smEnabled
+ #else
+ smDisabled
+ #endif
+ , "sandbox",
"Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".",
{"build-use-chroot", "build-use-sandbox"}};
@@ -261,6 +269,10 @@ public:
         "These may be supported natively (e.g. armv7 on some aarch64 CPUs "
         "or using hacks like qemu-user)."};
+ Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(),
+ "system-features",
+ "Optional features that this system implements (like \"kvm\")."};
+
Setting<Strings> substituters{this,
nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(),
"substituters",
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index ab524d523..8da0e2f9d 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -13,6 +13,14 @@ private:
Path cacheUri;
+ struct State
+ {
+ bool enabled = true;
+ std::chrono::steady_clock::time_point disabledUntil;
+ };
+
+ Sync<State> _state;
+
public:
HttpBinaryCacheStore(
@@ -46,8 +54,33 @@ public:
protected:
+ void maybeDisable()
+ {
+ auto state(_state.lock());
+ if (state->enabled && settings.tryFallback) {
+ int t = 60;
+ printError("disabling binary cache '%s' for %s seconds", getUri(), t);
+ state->enabled = false;
+ state->disabledUntil = std::chrono::steady_clock::now() + std::chrono::seconds(t);
+ }
+ }
+
+ void checkEnabled()
+ {
+ auto state(_state.lock());
+ if (state->enabled) return;
+ if (std::chrono::steady_clock::now() > state->disabledUntil) {
+ state->enabled = true;
+ debug("re-enabling binary cache '%s'", getUri());
+ return;
+ }
+ throw SubstituterDisabled("substituter '%s' is disabled", getUri());
+ }
+
bool fileExists(const std::string & path) override
{
+ checkEnabled();
+
try {
DownloadRequest request(cacheUri + "/" + path);
request.head = true;
@@ -59,6 +92,7 @@ protected:
bucket is unlistable, so treat 403 as 404. */
if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
return false;
+ maybeDisable();
throw;
}
}
@@ -86,12 +120,14 @@ protected:
void getFile(const std::string & path, Sink & sink) override
{
+ checkEnabled();
auto request(makeRequest(path));
try {
getDownloader()->download(std::move(request), sink);
} catch (DownloadError & e) {
if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
+ maybeDisable();
throw;
}
}
@@ -99,15 +135,18 @@ protected:
void getFile(const std::string & path,
Callback<std::shared_ptr<std::string>> callback) override
{
+ checkEnabled();
+
auto request(makeRequest(path));
getDownloader()->enqueueDownload(request,
- {[callback](std::future<DownloadResult> result) {
+ {[callback, this](std::future<DownloadResult> result) {
try {
callback(result.get().data);
} catch (DownloadError & e) {
if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
return callback(std::shared_ptr<std::string>());
+ maybeDisable();
callback.rethrow();
} catch (...) {
callback.rethrow();
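
maybeDisable()/checkEnabled() above form a small circuit breaker: after a network failure (and only when fallback is allowed), the cache is skipped for a fixed window and then retried transparently. A single-threaded condensation of that state machine (the real store guards State with Sync<State>):

    #include <chrono>
    #include <iostream>
    #include <stdexcept>

    struct Breaker {
        bool enabled = true;
        std::chrono::steady_clock::time_point disabledUntil;

        void maybeDisable() {
            if (enabled) {
                enabled = false;
                disabledUntil = std::chrono::steady_clock::now()
                    + std::chrono::seconds(60);
            }
        }

        void checkEnabled() {
            if (enabled) return;
            if (std::chrono::steady_clock::now() > disabledUntil) {
                enabled = true;  // window elapsed: re-enable and fall through
                return;
            }
            throw std::runtime_error("substituter is disabled");
        }
    };

    int main()
    {
        Breaker b;
        b.maybeDisable();
        try { b.checkEnabled(); }
        catch (std::exception & e) { std::cout << e.what() << "\n"; }
    }
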
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index 02d91ded0..26e185198 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -17,6 +17,7 @@ struct LegacySSHStore : public Store
const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"};
const Setting<bool> compress{this, false, "compress", "whether to compress the connection"};
const Setting<Path> remoteProgram{this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"};
+ const Setting<std::string> remoteStore{this, "", "remote-store", "URI of the store on the remote system"};
// Hack for getting remote build log output.
const Setting<int> logFD{this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"};
@@ -27,6 +28,7 @@ struct LegacySSHStore : public Store
FdSink to;
FdSource from;
int remoteVersion;
+ bool good = true;
};
std::string host;
@@ -41,7 +43,7 @@ struct LegacySSHStore : public Store
, connections(make_ref<Pool<Connection>>(
std::max(1, (int) maxConnections),
[this]() { return openConnection(); },
- [](const ref<Connection> & r) { return true; }
+ [](const ref<Connection> & r) { return r->good; }
))
, master(
host,
@@ -56,7 +58,9 @@ struct LegacySSHStore : public Store
ref<Connection> openConnection()
{
auto conn = make_ref<Connection>();
- conn->sshConn = master.startCommand(fmt("%s --serve --write", remoteProgram));
+ conn->sshConn = master.startCommand(
+ fmt("%s --serve --write", remoteProgram)
+ + (remoteStore.get() == "" ? "" : " --store " + shellEscape(remoteStore.get())));
conn->to = FdSink(conn->sshConn->in.get());
conn->from = FdSource(conn->sshConn->out.get());
@@ -127,18 +131,48 @@ struct LegacySSHStore : public Store
auto conn(connections->get());
- conn->to
- << cmdImportPaths
- << 1;
- copyNAR(source, conn->to);
- conn->to
- << exportMagic
- << info.path
- << info.references
- << info.deriver
- << 0
- << 0;
- conn->to.flush();
+ if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) {
+
+ conn->to
+ << cmdAddToStoreNar
+ << info.path
+ << info.deriver
+ << info.narHash.to_string(Base16, false)
+ << info.references
+ << info.registrationTime
+ << info.narSize
+ << info.ultimate
+ << info.sigs
+ << info.ca;
+ try {
+ copyNAR(source, conn->to);
+ } catch (...) {
+ conn->good = false;
+ throw;
+ }
+ conn->to.flush();
+
+ } else {
+
+ conn->to
+ << cmdImportPaths
+ << 1;
+ try {
+ copyNAR(source, conn->to);
+ } catch (...) {
+ conn->good = false;
+ throw;
+ }
+ conn->to
+ << exportMagic
+ << info.path
+ << info.references
+ << info.deriver
+ << 0
+ << 0;
+ conn->to.flush();
+
+ }
if (readInt(conn->from) != 1)
            throw Error("failed to add path '%s' to remote host '%s'", info.path, host);
@@ -269,6 +303,12 @@ struct LegacySSHStore : public Store
{
auto conn(connections->get());
}
+
+ unsigned int getProtocol() override
+ {
+ auto conn(connections->get());
+ return conn->remoteVersion;
+ }
};
static RegisterStoreImplementation regStore([](
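
The GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5 branch above picks between the structured cmdAddToStoreNar upload and the legacy cmdImportPaths framing. A sketch of that check, assuming (as Nix's protocol headers do) that the version packs the major in the high byte and the minor in the low byte; the concrete version value is illustrative:

    #include <cstdio>

    #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
    #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

    int main()
    {
        unsigned int remoteVersion = 0x205; // serve protocol 2.5 (illustrative)

        if (GET_PROTOCOL_MINOR(remoteVersion) >= 5)
            std::printf("use cmdAddToStoreNar (structured metadata upload)\n");
        else
            std::printf("fall back to cmdImportPaths (legacy export framing)\n");
    }
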
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 3b2ba65f3..e1cb423d1 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -366,6 +366,8 @@ void LocalStore::makeStoreWritable()
throw SysError("getting info about the Nix store mount point");
if (stat.f_flag & ST_RDONLY) {
+ saveMountNamespace();
+
if (unshare(CLONE_NEWNS) == -1)
throw SysError("setting up a private mount namespace");
@@ -450,7 +452,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
ssize_t eaSize = llistxattr(path.c_str(), nullptr, 0);
if (eaSize < 0) {
- if (errno != ENOTSUP)
+ if (errno != ENOTSUP && errno != ENODATA)
throw SysError("querying extended attributes of '%s'", path);
} else if (eaSize > 0) {
std::vector<char> eaBuf(eaSize);
@@ -880,6 +882,12 @@ void LocalStore::querySubstitutablePathInfos(const PathSet & paths,
narInfo ? narInfo->fileSize : 0,
info->narSize};
} catch (InvalidPath) {
+ } catch (SubstituterDisabled) {
+ } catch (Error & e) {
+ if (settings.tryFallback)
+ printError(e.what());
+ else
+ throw;
}
}
}
@@ -1332,6 +1340,12 @@ void LocalStore::verifyPath(const Path & path, const PathSet & store,
}
+unsigned int LocalStore::getProtocol()
+{
+ return PROTOCOL_VERSION;
+}
+
+
#if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL)
static void makeMutable(const Path & path)
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 746bdbeed..fce963433 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -209,6 +209,8 @@ public:
void registerValidPaths(const ValidPathInfos & infos);
+ unsigned int getProtocol() override;
+
void vacuumDB();
/* Repair the contents of the given path by redownloading it using
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index 7840167d7..991512f21 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -104,8 +104,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
*.app/Contents/Resources/\*.lproj seem to be the only paths affected. See
https://github.com/NixOS/nix/issues/1443 for more discussion. */
- if (std::regex_search(path, std::regex("\\.app/Contents/PkgInfo$")) ||
- std::regex_search(path, std::regex("\\.app/Contents/Resources/.+\\.lproj$")))
+ if (std::regex_search(path, std::regex("\\.app/Contents/.+$")))
{
debug(format("'%1%' is not allowed to be linked in macOS") % path);
return;
diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc
new file mode 100644
index 000000000..dc3286482
--- /dev/null
+++ b/src/libstore/parsed-derivations.cc
@@ -0,0 +1,111 @@
+#include "parsed-derivations.hh"
+
+namespace nix {
+
+ParsedDerivation::ParsedDerivation(const Path & drvPath, BasicDerivation & drv)
+ : drvPath(drvPath), drv(drv)
+{
+ /* Parse the __json attribute, if any. */
+ auto jsonAttr = drv.env.find("__json");
+ if (jsonAttr != drv.env.end()) {
+ try {
+ structuredAttrs = nlohmann::json::parse(jsonAttr->second);
+ } catch (std::exception & e) {
+ throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what());
+ }
+ }
+}
+
+std::experimental::optional<std::string> ParsedDerivation::getStringAttr(const std::string & name) const
+{
+ if (structuredAttrs) {
+ auto i = structuredAttrs->find(name);
+ if (i == structuredAttrs->end())
+ return {};
+ else {
+ if (!i->is_string())
+ throw Error("attribute '%s' of derivation '%s' must be a string", name, drvPath);
+ return i->get<std::string>();
+ }
+ } else {
+ auto i = drv.env.find(name);
+ if (i == drv.env.end())
+ return {};
+ else
+ return i->second;
+ }
+}
+
+bool ParsedDerivation::getBoolAttr(const std::string & name, bool def) const
+{
+ if (structuredAttrs) {
+ auto i = structuredAttrs->find(name);
+ if (i == structuredAttrs->end())
+ return def;
+ else {
+ if (!i->is_boolean())
+ throw Error("attribute '%s' of derivation '%s' must be a Boolean", name, drvPath);
+ return i->get<bool>();
+ }
+ } else {
+ auto i = drv.env.find(name);
+ if (i == drv.env.end())
+ return def;
+ else
+ return i->second == "1";
+ }
+}
+
+std::experimental::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name) const
+{
+ if (structuredAttrs) {
+ auto i = structuredAttrs->find(name);
+ if (i == structuredAttrs->end())
+ return {};
+ else {
+ if (!i->is_array())
+ throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
+ Strings res;
+ for (auto j = i->begin(); j != i->end(); ++j) {
+ if (!j->is_string())
+ throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
+ res.push_back(j->get<std::string>());
+ }
+ return res;
+ }
+ } else {
+ auto i = drv.env.find(name);
+ if (i == drv.env.end())
+ return {};
+ else
+ return tokenizeString<Strings>(i->second);
+ }
+}
+
+StringSet ParsedDerivation::getRequiredSystemFeatures() const
+{
+ StringSet res;
+ for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
+ res.insert(i);
+ return res;
+}
+
+bool ParsedDerivation::canBuildLocally() const
+{
+ if (drv.platform != settings.thisSystem.get()
+ && !settings.extraPlatforms.get().count(drv.platform)
+ && !drv.isBuiltin())
+ return false;
+
+ for (auto & feature : getRequiredSystemFeatures())
+ if (!settings.systemFeatures.get().count(feature)) return false;
+
+ return true;
+}
+
+bool ParsedDerivation::willBuildLocally() const
+{
+ return getBoolAttr("preferLocalBuild") && canBuildLocally();
+}
+
+}
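
The accessors above share one pattern worth seeing in isolation: look the name up in the parsed __json object when structured attrs are present, otherwise fall back to the stringly-typed drv.env. A standalone sketch of getStringAttr, with the ParsedDerivation types replaced by plain containers:

    #include <nlohmann/json.hpp>

    #include <experimental/optional>
    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>

    using nlohmann::json;

    // Prefer the typed value from structured attrs (__json); otherwise
    // fall back to the environment, as ParsedDerivation does above.
    std::experimental::optional<std::string> getStringAttr(
        const std::experimental::optional<json> & structuredAttrs,
        const std::map<std::string, std::string> & env,
        const std::string & name)
    {
        if (structuredAttrs) {
            auto i = structuredAttrs->find(name);
            if (i == structuredAttrs->end()) return {};
            if (!i->is_string())
                throw std::runtime_error("attribute '" + name + "' must be a string");
            return i->get<std::string>();
        }
        auto i = env.find(name);
        if (i == env.end()) return {};
        return i->second;
    }

    int main()
    {
        std::map<std::string, std::string> env{{"__sandboxProfile", "(allow default)"}};

        // No __json attribute: the environment supplies the value.
        auto v1 = getStringAttr({}, env, "__sandboxProfile");

        // With structured attrs, the environment is ignored entirely.
        auto v2 = getStringAttr(json{{"__sandboxProfile", "(deny default)"}},
            env, "__sandboxProfile");

        std::cout << v1.value_or("-") << " / " << v2.value_or("-") << "\n";
    }
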
diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh
new file mode 100644
index 000000000..0a82c1461
--- /dev/null
+++ b/src/libstore/parsed-derivations.hh
@@ -0,0 +1,35 @@
+#include "derivations.hh"
+
+#include <nlohmann/json.hpp>
+
+namespace nix {
+
+class ParsedDerivation
+{
+ Path drvPath;
+ BasicDerivation & drv;
+ std::experimental::optional<nlohmann::json> structuredAttrs;
+
+public:
+
+ ParsedDerivation(const Path & drvPath, BasicDerivation & drv);
+
+ const std::experimental::optional<nlohmann::json> & getStructuredAttrs() const
+ {
+ return structuredAttrs;
+ }
+
+ std::experimental::optional<std::string> getStringAttr(const std::string & name) const;
+
+ bool getBoolAttr(const std::string & name, bool def = false) const;
+
+ std::experimental::optional<Strings> getStringsAttr(const std::string & name) const;
+
+ StringSet getRequiredSystemFeatures() const;
+
+ bool canBuildLocally() const;
+
+ bool willBuildLocally() const;
+};
+
+}
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index ea86ef052..def140cfb 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -161,7 +161,8 @@ void RemoteStore::initConnection(Connection & conn)
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11)
conn.to << false;
- conn.processStderr();
+ auto ex = conn.processStderr();
+ if (ex) std::rethrow_exception(ex);
}
catch (Error & e) {
throw Error("cannot open connection to remote store '%s': %s", getUri(), e.what());
@@ -195,22 +196,68 @@ void RemoteStore::setOptions(Connection & conn)
conn.to << i.first << i.second.value;
}
- conn.processStderr();
+ auto ex = conn.processStderr();
+ if (ex) std::rethrow_exception(ex);
+}
+
+
+/* A wrapper around Pool<RemoteStore::Connection>::Handle that marks
+ the connection as bad (causing it to be closed) if a non-daemon
+ exception is thrown before the handle is closed. Such an exception
+ causes a deviation from the expected protocol and therefore a
+ desynchronization between the client and daemon. */
+struct ConnectionHandle
+{
+ Pool<RemoteStore::Connection>::Handle handle;
+ bool daemonException = false;
+
+ ConnectionHandle(Pool<RemoteStore::Connection>::Handle && handle)
+ : handle(std::move(handle))
+ { }
+
+ ConnectionHandle(ConnectionHandle && h)
+ : handle(std::move(h.handle))
+ { }
+
+ ~ConnectionHandle()
+ {
+ if (!daemonException && std::uncaught_exception()) {
+ handle.markBad();
+ debug("closing daemon connection because of an exception");
+ }
+ }
+
+ RemoteStore::Connection * operator -> () { return &*handle; }
+
+ void processStderr(Sink * sink = 0, Source * source = 0)
+ {
+ auto ex = handle->processStderr(sink, source);
+ if (ex) {
+ daemonException = true;
+ std::rethrow_exception(ex);
+ }
+ }
+};
+
+
+ConnectionHandle RemoteStore::getConnection()
+{
+ return ConnectionHandle(connections->get());
}
bool RemoteStore::isValidPathUncached(const Path & path)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopIsValidPath << path;
- conn->processStderr();
+ conn.processStderr();
return readInt(conn->from);
}
PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubstitute)
{
- auto conn(connections->get());
+ auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
PathSet res;
for (auto & i : paths)
@@ -218,7 +265,7 @@ PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybe
return res;
} else {
conn->to << wopQueryValidPaths << paths;
- conn->processStderr();
+ conn.processStderr();
return readStorePaths<PathSet>(*this, conn->from);
}
}
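
The comment above states the invariant; the mechanism is just a destructor consulting std::uncaught_exception(). A minimal sketch of that RAII pattern (markBad() and the connection pool are elided):

    #include <exception>
    #include <iostream>
    #include <stdexcept>

    /* If the handle dies while an exception is unwinding and the daemon
       did not raise it, the connection's state on the wire is unknown, so
       it must be discarded instead of returned to the pool. */
    struct Handle
    {
        bool daemonException = false;

        ~Handle()
        {
            if (!daemonException && std::uncaught_exception())
                std::cerr << "closing daemon connection because of an exception\n";
            // a real pool handle would call markBad() here
        }
    };

    int main()
    {
        try {
            Handle h;
            throw std::runtime_error("desynchronized mid-message");
        } catch (std::exception & e) {
            std::cout << "caught: " << e.what() << "\n";
        }
    }
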
@@ -226,27 +273,27 @@ PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybe
PathSet RemoteStore::queryAllValidPaths()
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopQueryAllValidPaths;
- conn->processStderr();
+ conn.processStderr();
return readStorePaths<PathSet>(*this, conn->from);
}
PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths)
{
- auto conn(connections->get());
+ auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
PathSet res;
for (auto & i : paths) {
conn->to << wopHasSubstitutes << i;
- conn->processStderr();
+ conn.processStderr();
if (readInt(conn->from)) res.insert(i);
}
return res;
} else {
conn->to << wopQuerySubstitutablePaths << paths;
- conn->processStderr();
+ conn.processStderr();
return readStorePaths<PathSet>(*this, conn->from);
}
}
@@ -257,14 +304,14 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
{
if (paths.empty()) return;
- auto conn(connections->get());
+ auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
for (auto & i : paths) {
SubstitutablePathInfo info;
conn->to << wopQuerySubstitutablePathInfo << i;
- conn->processStderr();
+ conn.processStderr();
unsigned int reply = readInt(conn->from);
if (reply == 0) continue;
info.deriver = readString(conn->from);
@@ -278,7 +325,7 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
} else {
conn->to << wopQuerySubstitutablePathInfos << paths;
- conn->processStderr();
+ conn.processStderr();
size_t count = readNum<size_t>(conn->from);
for (size_t n = 0; n < count; n++) {
Path path = readStorePath(*this, conn->from);
@@ -300,10 +347,10 @@ void RemoteStore::queryPathInfoUncached(const Path & path,
try {
std::shared_ptr<ValidPathInfo> info;
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopQueryPathInfo << path;
try {
- conn->processStderr();
+ conn.processStderr();
} catch (Error & e) {
// Ugly backwards compatibility hack.
if (e.msg().find("is not valid") != std::string::npos)
@@ -335,9 +382,9 @@ void RemoteStore::queryPathInfoUncached(const Path & path,
void RemoteStore::queryReferrers(const Path & path,
PathSet & referrers)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopQueryReferrers << path;
- conn->processStderr();
+ conn.processStderr();
PathSet referrers2 = readStorePaths<PathSet>(*this, conn->from);
referrers.insert(referrers2.begin(), referrers2.end());
}
@@ -345,36 +392,36 @@ void RemoteStore::queryReferrers(const Path & path,
PathSet RemoteStore::queryValidDerivers(const Path & path)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopQueryValidDerivers << path;
- conn->processStderr();
+ conn.processStderr();
return readStorePaths<PathSet>(*this, conn->from);
}
PathSet RemoteStore::queryDerivationOutputs(const Path & path)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopQueryDerivationOutputs << path;
- conn->processStderr();
+ conn.processStderr();
return readStorePaths<PathSet>(*this, conn->from);
}
PathSet RemoteStore::queryDerivationOutputNames(const Path & path)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopQueryDerivationOutputNames << path;
- conn->processStderr();
+ conn.processStderr();
return readStrings<PathSet>(conn->from);
}
Path RemoteStore::queryPathFromHashPart(const string & hashPart)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopQueryPathFromHashPart << hashPart;
- conn->processStderr();
+ conn.processStderr();
Path path = readString(conn->from);
if (!path.empty()) assertStorePath(path);
return path;
@@ -384,7 +431,7 @@ Path RemoteStore::queryPathFromHashPart(const string & hashPart)
void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
{
- auto conn(connections->get());
+ auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
conn->to << wopImportPaths;
@@ -403,7 +450,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
;
});
- conn->processStderr(0, source2.get());
+ conn.processStderr(0, source2.get());
auto importedPaths = readStorePaths<PathSet>(*this, conn->from);
assert(importedPaths.size() <= 1);
@@ -417,7 +464,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
<< repair << !checkSigs;
bool tunnel = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21;
if (!tunnel) copyNAR(source, conn->to);
- conn->processStderr(0, tunnel ? &source : nullptr);
+ conn.processStderr(0, tunnel ? &source : nullptr);
}
}
@@ -427,7 +474,7 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath,
{
if (repair) throw Error("repairing is not supported when building through the Nix daemon");
- auto conn(connections->get());
+ auto conn(getConnection());
Path srcPath(absPath(_srcPath));
@@ -445,13 +492,13 @@ Path RemoteStore::addToStore(const string & name, const Path & _srcPath,
dumpPath(srcPath, conn->to, filter);
}
conn->to.warn = false;
- conn->processStderr();
+ conn.processStderr();
} catch (SysError & e) {
/* Daemon closed while we were sending the path. Probably OOM
or I/O error. */
if (e.errNo == EPIPE)
try {
- conn->processStderr();
+ conn.processStderr();
} catch (EndOfFile & e) { }
throw;
}
@@ -465,17 +512,17 @@ Path RemoteStore::addTextToStore(const string & name, const string & s,
{
if (repair) throw Error("repairing is not supported when building through the Nix daemon");
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopAddTextToStore << name << s << references;
- conn->processStderr();
+ conn.processStderr();
return readStorePath(*this, conn->from);
}
void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopBuildPaths;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) {
conn->to << drvPaths;
@@ -494,7 +541,7 @@ void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
drvPaths2.insert(string(i, 0, i.find('!')));
conn->to << drvPaths2;
}
- conn->processStderr();
+ conn.processStderr();
readInt(conn->from);
}
@@ -502,9 +549,9 @@ void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv,
BuildMode buildMode)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopBuildDerivation << drvPath << drv << buildMode;
- conn->processStderr();
+ conn.processStderr();
BuildResult res;
unsigned int status;
conn->from >> status >> res.errorMsg;
@@ -515,45 +562,45 @@ BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDeriva
void RemoteStore::ensurePath(const Path & path)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopEnsurePath << path;
- conn->processStderr();
+ conn.processStderr();
readInt(conn->from);
}
void RemoteStore::addTempRoot(const Path & path)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopAddTempRoot << path;
- conn->processStderr();
+ conn.processStderr();
readInt(conn->from);
}
void RemoteStore::addIndirectRoot(const Path & path)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopAddIndirectRoot << path;
- conn->processStderr();
+ conn.processStderr();
readInt(conn->from);
}
void RemoteStore::syncWithGC()
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopSyncWithGC;
- conn->processStderr();
+ conn.processStderr();
readInt(conn->from);
}
Roots RemoteStore::findRoots()
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopFindRoots;
- conn->processStderr();
+ conn.processStderr();
size_t count = readNum<size_t>(conn->from);
Roots result;
while (count--) {
@@ -567,7 +614,7 @@ Roots RemoteStore::findRoots()
void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to
<< wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness
@@ -575,7 +622,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
/* removed options */
<< 0 << 0 << 0;
- conn->processStderr();
+ conn.processStderr();
results.paths = readStrings<PathSet>(conn->from);
results.bytesFreed = readLongLong(conn->from);
@@ -590,27 +637,27 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
void RemoteStore::optimiseStore()
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopOptimiseStore;
- conn->processStderr();
+ conn.processStderr();
readInt(conn->from);
}
bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopVerifyStore << checkContents << repair;
- conn->processStderr();
+ conn.processStderr();
return readInt(conn->from);
}
void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs)
{
- auto conn(connections->get());
+ auto conn(getConnection());
conn->to << wopAddSignatures << storePath << sigs;
- conn->processStderr();
+ conn.processStderr();
readInt(conn->from);
}
@@ -620,13 +667,13 @@ void RemoteStore::queryMissing(const PathSet & targets,
unsigned long long & downloadSize, unsigned long long & narSize)
{
{
- auto conn(connections->get());
+ auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19)
// Don't hold the connection handle in the fallback case
// to prevent a deadlock.
goto fallback;
conn->to << wopQueryMissing << targets;
- conn->processStderr();
+ conn.processStderr();
willBuild = readStorePaths<PathSet>(*this, conn->from);
willSubstitute = readStorePaths<PathSet>(*this, conn->from);
unknown = readStorePaths<PathSet>(*this, conn->from);
@@ -642,7 +689,14 @@ void RemoteStore::queryMissing(const PathSet & targets,
void RemoteStore::connect()
{
+ auto conn(getConnection());
+}
+
+
+unsigned int RemoteStore::getProtocol()
+{
auto conn(connections->get());
+ return conn->daemonVersion;
}
@@ -679,7 +733,7 @@ static Logger::Fields readFields(Source & from)
}
-void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
+std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source)
{
to.flush();
@@ -704,7 +758,7 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
else if (msg == STDERR_ERROR) {
string error = readString(from);
unsigned int status = readInt(from);
- throw Error(status, error);
+ return std::make_exception_ptr(Error(status, error));
}
else if (msg == STDERR_NEXT)
@@ -738,6 +792,8 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
else
throw Error("got unknown message type %x from Nix daemon", msg);
}
+
+ return nullptr;
}
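
Returning std::exception_ptr rather than throwing from inside the stderr loop is what makes the distinction above possible: the caller can observe that the error came from the daemon (and set daemonException) before rethrowing. A self-contained sketch of that capture-and-rethrow round trip (function name hypothetical):

    #include <exception>
    #include <iostream>
    #include <stdexcept>

    // Stand-in for Connection::processStderr(): daemon errors are
    // reported by value; genuine protocol failures would still throw.
    std::exception_ptr readReply(bool daemonSentError)
    {
        if (daemonSentError)
            return std::make_exception_ptr(std::runtime_error("build failed"));
        return nullptr; // STDERR_LAST: protocol completed normally
    }

    int main()
    {
        if (auto ex = readReply(true)) {
            // ConnectionHandle::processStderr() sets daemonException here.
            try { std::rethrow_exception(ex); }
            catch (std::exception & e) { std::cout << e.what() << "\n"; }
        }
    }
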
static std::string uriScheme = "unix://";
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index b488e34ce..4f554b598 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -14,6 +14,7 @@ class Pid;
struct FdSink;
struct FdSource;
template<typename T> class Pool;
+struct ConnectionHandle;
/* FIXME: RemoteStore is a misnomer - should be something like
@@ -97,12 +98,15 @@ public:
void connect() override;
+ unsigned int getProtocol() override;
+
void flushBadConnections();
protected:
struct Connection
{
+ AutoCloseFD fd;
FdSink to;
FdSource from;
unsigned int daemonVersion;
@@ -110,7 +114,7 @@ protected:
virtual ~Connection();
- void processStderr(Sink * sink = 0, Source * source = 0);
+ std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0);
};
ref<Connection> openConnectionWrapper();
@@ -123,6 +127,10 @@ protected:
virtual void setOptions(Connection & conn);
+ ConnectionHandle getConnection();
+
+ friend struct ConnectionHandle;
+
private:
std::atomic_bool failed{false};
@@ -140,11 +148,6 @@ public:
private:
- struct Connection : RemoteStore::Connection
- {
- AutoCloseFD fd;
- };
-
ref<RemoteStore::Connection> openConnection() override;
std::experimental::optional<std::string> path;
};
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 26144ccb4..4f1e23198 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -19,8 +19,6 @@
#include <aws/core/utils/logging/LogMacros.h>
#include <aws/core/utils/threading/Executor.h>
#include <aws/s3/S3Client.h>
-#include <aws/s3/model/CreateBucketRequest.h>
-#include <aws/s3/model/GetBucketLocationRequest.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/HeadObjectRequest.h>
#include <aws/s3/model/ListObjectsRequest.h>
@@ -84,8 +82,8 @@ static void initAWS()
});
}
-S3Helper::S3Helper(const std::string & profile, const std::string & region)
- : config(makeConfig(region))
+S3Helper::S3Helper(const std::string & profile, const std::string & region, const std::string & endpoint)
+ : config(makeConfig(region, endpoint))
, client(make_ref<Aws::S3::S3Client>(
profile == ""
? std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>(
@@ -99,7 +97,7 @@ S3Helper::S3Helper(const std::string & profile, const std::string & region)
#else
Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
#endif
- false))
+ endpoint.empty()))
{
}
@@ -116,11 +114,14 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy
}
};
-ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region)
+ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region, const string & endpoint)
{
initAWS();
auto res = make_ref<Aws::Client::ClientConfiguration>();
res->region = region;
+ if (!endpoint.empty()) {
+ res->endpointOverride = endpoint;
+ }
res->requestTimeoutMs = 600 * 1000;
res->retryStrategy = std::make_shared<RetryStrategy>();
res->caFile = settings.caFile;
@@ -150,10 +151,8 @@ S3Helper::DownloadResult S3Helper::getObject(
auto result = checkAws(fmt("AWS error fetching '%s'", key),
client->GetObject(request));
- res.data = decodeContent(
- result.GetContentEncoding(),
- make_ref<std::string>(
- dynamic_cast<std::stringstream &>(result.GetBody()).str()));
+ res.data = decompress(result.GetContentEncoding(),
+ dynamic_cast<std::stringstream &>(result.GetBody()).str());
} catch (S3Error & e) {
if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
@@ -170,9 +169,12 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
{
const Setting<std::string> profile{this, "", "profile", "The name of the AWS configuration profile to use."};
const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
+ const Setting<std::string> endpoint{this, "", "endpoint", "An optional override of the endpoint to use when talking to S3."};
const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
+ const Setting<bool> multipartUpload{
+ this, false, "multipart-upload", "whether to use multi-part uploads"};
const Setting<uint64_t> bufferSize{
this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"};
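
The new endpoint setting (together with the endpoint.empty() signing tweak above) makes the store usable against S3-compatible servers rather than AWS only, and multipart-upload and buffer-size become per-store knobs. Settings like these arrive as store-URI query parameters; a sketch, where the bucket name, host, and values are placeholders, not defaults:

    // Sketch only: all parameter values here are examples.
    #include "store-api.hh"

    using namespace nix;

    void example()
    {
        auto store = openStore(
            "s3://example-bucket"
            "?endpoint=minio.example.com"
            "&multipart-upload=true"
            "&buffer-size=10485760"); // 10 MiB parts
    }
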
@@ -186,7 +188,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
const Params & params, const std::string & bucketName)
: S3BinaryCacheStore(params)
, bucketName(bucketName)
- , s3Helper(profile, region)
+ , s3Helper(profile, region, endpoint)
{
diskCache = getNarInfoDiskCache();
}
@@ -200,32 +202,6 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
{
if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) {
- /* Create the bucket if it doesn't already exists. */
- // FIXME: HeadBucket would be more appropriate, but doesn't return
- // an easily parsed 404 message.
- auto res = s3Helper.client->GetBucketLocation(
- Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName));
-
- if (!res.IsSuccess()) {
- if (res.GetError().GetErrorType() != Aws::S3::S3Errors::NO_SUCH_BUCKET)
- throw Error(format("AWS error checking bucket '%s': %s") % bucketName % res.GetError().GetMessage());
-
- printInfo("creating S3 bucket '%s'...", bucketName);
-
- // Stupid S3 bucket locations.
- auto bucketConfig = Aws::S3::Model::CreateBucketConfiguration();
- if (s3Helper.config->region != "us-east-1")
- bucketConfig.SetLocationConstraint(
- Aws::S3::Model::BucketLocationConstraintMapper::GetBucketLocationConstraintForName(
- s3Helper.config->region));
-
- checkAws(format("AWS error creating bucket '%s'") % bucketName,
- s3Helper.client->CreateBucket(
- Aws::S3::Model::CreateBucketRequest()
- .WithBucket(bucketName)
- .WithCreateBucketConfiguration(bucketConfig)));
- }
-
BinaryCacheStore::init();
diskCache->createCache(getUri(), storeDir, wantMassQuery_, priority);
@@ -273,6 +249,9 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
return true;
}
+ std::shared_ptr<TransferManager> transferManager;
+ std::once_flag transferManagerCreated;
+
void uploadFile(const std::string & path, const std::string & data,
const std::string & mimeType,
const std::string & contentEncoding)
@@ -284,60 +263,73 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor>
executor = std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(maxThreads);
- TransferManagerConfiguration transferConfig(executor.get());
-
- transferConfig.s3Client = s3Helper.client;
- transferConfig.bufferSize = bufferSize;
-
- if (contentEncoding != "")
- transferConfig.createMultipartUploadTemplate.SetContentEncoding(
- contentEncoding);
-
- transferConfig.uploadProgressCallback =
- [&](const TransferManager *transferManager,
- const std::shared_ptr<const TransferHandle>
- &transferHandle) {
- //FIXME: find a way to properly abort the multipart upload.
- checkInterrupt();
- debug("upload progress ('%s'): '%d' of '%d' bytes",
- path,
- transferHandle->GetBytesTransferred(),
- transferHandle->GetBytesTotalSize());
- };
+ std::call_once(transferManagerCreated, [&]()
+ {
+ if (multipartUpload) {
+ TransferManagerConfiguration transferConfig(executor.get());
+
+ transferConfig.s3Client = s3Helper.client;
+ transferConfig.bufferSize = bufferSize;
+
+ transferConfig.uploadProgressCallback =
+ [](const TransferManager *transferManager,
+ const std::shared_ptr<const TransferHandle>
+ &transferHandle)
+ {
+ //FIXME: find a way to properly abort the multipart upload.
+ //checkInterrupt();
+ debug("upload progress ('%s'): '%d' of '%d' bytes",
+ transferHandle->GetKey(),
+ transferHandle->GetBytesTransferred(),
+ transferHandle->GetBytesTotalSize());
+ };
+
+ transferManager = TransferManager::Create(transferConfig);
+ }
+ });
- transferConfig.transferStatusUpdatedCallback =
- [&](const TransferManager *,
- const std::shared_ptr<const TransferHandle>
- &transferHandle) {
- switch (transferHandle->GetStatus()) {
- case TransferStatus::COMPLETED:
- printTalkative("upload of '%s' completed", path);
- stats.put++;
- stats.putBytes += data.size();
- break;
- case TransferStatus::IN_PROGRESS:
- break;
- case TransferStatus::FAILED:
- throw Error("AWS error: failed to upload 's3://%s/%s'",
- bucketName, path);
- break;
- default:
- throw Error("AWS error: transfer status of 's3://%s/%s' "
- "in unexpected state",
- bucketName, path);
- };
- };
+ auto now1 = std::chrono::steady_clock::now();
- std::shared_ptr<TransferManager> transferManager =
- TransferManager::Create(transferConfig);
+ if (transferManager) {
- auto now1 = std::chrono::steady_clock::now();
+ if (contentEncoding != "")
+ throw Error("setting a content encoding is not supported with S3 multi-part uploads");
+
+ std::shared_ptr<TransferHandle> transferHandle =
+ transferManager->UploadFile(
+ stream, bucketName, path, mimeType,
+ Aws::Map<Aws::String, Aws::String>(),
+ nullptr /*, contentEncoding */);
- std::shared_ptr<TransferHandle> transferHandle =
- transferManager->UploadFile(stream, bucketName, path, mimeType,
- Aws::Map<Aws::String, Aws::String>());
+ transferHandle->WaitUntilFinished();
- transferHandle->WaitUntilFinished();
+ if (transferHandle->GetStatus() == TransferStatus::FAILED)
+ throw Error("AWS error: failed to upload 's3://%s/%s': %s",
+ bucketName, path, transferHandle->GetLastError().GetMessage());
+
+ if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
+ throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state",
+ bucketName, path);
+
+ } else {
+
+ auto request =
+ Aws::S3::Model::PutObjectRequest()
+ .WithBucket(bucketName)
+ .WithKey(path);
+
+ request.SetContentType(mimeType);
+
+ if (contentEncoding != "")
+ request.SetContentEncoding(contentEncoding);
+
+ auto stream = std::make_shared<istringstream_nocopy>(data);
+
+ request.SetBody(stream);
+
+ auto result = checkAws(fmt("AWS error uploading '%s'", path),
+ s3Helper.client->PutObject(request));
+ }
auto now2 = std::chrono::steady_clock::now();
@@ -349,6 +341,8 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
bucketName % path % data.size() % duration);
stats.putTimeMs += duration;
+ stats.putBytes += data.size();
+ stats.put++;
}
void upsertFile(const std::string & path, const std::string & data,
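
Two upload paths now coexist in uploadFile: an opt-in multipart path through TransferManager and a default single PutObject, with the manager built at most once under std::call_once so concurrent uploads share it. A generic sketch of that lazy one-time initialization (plain C++, not the AWS API):

    #include <memory>
    #include <mutex>

    struct Uploader { };           // stands in for TransferManager

    std::shared_ptr<Uploader> uploader;
    std::once_flag uploaderCreated;

    std::shared_ptr<Uploader> getUploader(bool multipartEnabled)
    {
        // The first caller makes (or skips) the shared instance; later
        // callers see the same result without taking the lock again.
        std::call_once(uploaderCreated, [&]() {
            if (multipartEnabled)
                uploader = std::make_shared<Uploader>();
        });
        return uploader;           // null means: fall back to PutObject
    }
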
diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh
index 4f9964003..95d612b66 100644
--- a/src/libstore/s3.hh
+++ b/src/libstore/s3.hh
@@ -14,9 +14,9 @@ struct S3Helper
ref<Aws::Client::ClientConfiguration> config;
ref<Aws::S3::S3Client> client;
- S3Helper(const std::string & profile, const std::string & region);
+ S3Helper(const std::string & profile, const std::string & region, const std::string & endpoint);
- ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region);
+ ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region, const std::string & endpoint);
struct DownloadResult
{
diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh
index f67d1e258..9fae6d534 100644
--- a/src/libstore/serve-protocol.hh
+++ b/src/libstore/serve-protocol.hh
@@ -5,7 +5,7 @@ namespace nix {
#define SERVE_MAGIC_1 0x390c9deb
#define SERVE_MAGIC_2 0x5452eecb
-#define SERVE_PROTOCOL_VERSION 0x204
+#define SERVE_PROTOCOL_VERSION 0x205
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
@@ -18,6 +18,7 @@ typedef enum {
cmdBuildPaths = 6,
cmdQueryClosure = 7,
cmdBuildDerivation = 8,
+ cmdAddToStoreNar = 9,
} ServeCommand;
}
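
The protocol word packs the major version in the high byte and the minor in the low byte, so the bump from 0x204 to 0x205 keeps the major at 0x200 and raises the minor to 5, advertising the new cmdAddToStoreNar command to newer peers. A quick check using the macros above:

    #include <cassert>

    #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
    #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

    int main()
    {
        assert(GET_PROTOCOL_MAJOR(0x205) == 0x200); // major unchanged
        assert(GET_PROTOCOL_MINOR(0x205) == 5);     // was 4 before this patch
    }
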
diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc
index 033c58093..cf133b57c 100644
--- a/src/libstore/ssh.cc
+++ b/src/libstore/ssh.cc
@@ -1,11 +1,13 @@
#include "ssh.hh"
+#include "affinity.hh"
namespace nix {
SSHMaster::SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress, int logFD)
: host(host)
+ , fakeSSH(host == "localhost")
, keyFile(keyFile)
- , useMaster(useMaster)
+ , useMaster(useMaster && !fakeSSH)
, compress(compress)
, logFD(logFD)
{
@@ -33,7 +35,9 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
auto conn = std::make_unique<Connection>();
conn->sshPid = startProcess([&]() {
+ restoreAffinity();
restoreSignals();
+ restoreMountNamespace();
close(in.writeSide.get());
close(out.readSide.get());
@@ -45,12 +49,19 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
if (logFD != -1 && dup2(logFD, STDERR_FILENO) == -1)
throw SysError("duping over stderr");
- Strings args = { "ssh", host.c_str(), "-x", "-a" };
- addCommonSSHOpts(args);
- if (socketPath != "")
- args.insert(args.end(), {"-S", socketPath});
- if (verbosity >= lvlChatty)
- args.push_back("-v");
+ Strings args;
+
+ if (fakeSSH) {
+ args = { "bash", "-c" };
+ } else {
+ args = { "ssh", host.c_str(), "-x", "-a" };
+ addCommonSSHOpts(args);
+ if (socketPath != "")
+ args.insert(args.end(), {"-S", socketPath});
+ if (verbosity >= lvlChatty)
+ args.push_back("-v");
+ }
+
args.push_back(command);
execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
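
For host == "localhost" the master connection is disabled and the child simply execs bash, so an ssh://localhost store behaves like a local pipe with no SSH handshake. A simplified sketch of the argv selection (SSH options elided; helper name hypothetical):

    #include <string>
    #include <vector>

    // Sketch of the branch above; the real code also appends -S/-v options.
    std::vector<std::string> buildArgs(bool fakeSSH,
        const std::string & host, const std::string & command)
    {
        std::vector<std::string> args = fakeSSH
            ? std::vector<std::string>{"bash", "-c"}
            : std::vector<std::string>{"ssh", host, "-x", "-a"};
        args.push_back(command);
        return args;
    }
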
diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh
index 1268e6d00..4f0f0bd29 100644
--- a/src/libstore/ssh.hh
+++ b/src/libstore/ssh.hh
@@ -10,6 +10,7 @@ class SSHMaster
private:
const std::string host;
+ bool fakeSSH;
const std::string keyFile;
const bool useMaster;
const bool compress;
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 9b0b7d632..dc54c735f 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -320,6 +320,8 @@ ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
void Store::queryPathInfo(const Path & storePath,
Callback<ref<ValidPathInfo>> callback)
{
+ assertStorePath(storePath);
+
auto hashPart = storePathToHash(storePath);
try {
@@ -586,15 +588,19 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
uint64_t total = 0;
- // FIXME
-#if 0
if (!info->narHash) {
+ StringSink sink;
+ srcStore->narFromPath({storePath}, sink);
auto info2 = make_ref<ValidPathInfo>(*info);
info2->narHash = hashString(htSHA256, *sink.s);
if (!info->narSize) info2->narSize = sink.s->size();
+ if (info->ultimate) info2->ultimate = false;
info = info2;
+
+ StringSource source(*sink.s);
+ dstStore->addToStore(*info, source, repair, checkSigs);
+ return;
}
-#endif
if (info->ultimate) {
auto info2 = make_ref<ValidPathInfo>(*info);
@@ -609,6 +615,8 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
act.progress(total, info->narSize);
});
srcStore->narFromPath({storePath}, wrapperSink);
+ }, [&]() {
+ throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri());
});
dstStore->addToStore(*info, *source, repair, checkSigs);
@@ -629,11 +637,12 @@ void copyPaths(ref<Store> srcStore, ref<Store> dstStore, const PathSet & storePa
Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));
std::atomic<size_t> nrDone{0};
+ std::atomic<size_t> nrFailed{0};
std::atomic<uint64_t> bytesExpected{0};
std::atomic<uint64_t> nrRunning{0};
auto showProgress = [&]() {
- act.progress(nrDone, missing.size(), nrRunning);
+ act.progress(nrDone, missing.size(), nrRunning, nrFailed);
};
ThreadPool pool;
@@ -662,7 +671,16 @@ void copyPaths(ref<Store> srcStore, ref<Store> dstStore, const PathSet & storePa
if (!dstStore->isValidPath(storePath)) {
MaintainCount<decltype(nrRunning)> mc(nrRunning);
showProgress();
- copyStorePath(srcStore, dstStore, storePath, repair, checkSigs);
+ try {
+ copyStorePath(srcStore, dstStore, storePath, repair, checkSigs);
+ } catch (Error &e) {
+ nrFailed++;
+ if (!settings.keepGoing)
+ throw e;
+ logger->log(lvlError, format("could not copy %s: %s") % storePath % e.what());
+ showProgress();
+ return;
+ }
}
nrDone++;
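
With keep-going set, a failed copy now increments nrFailed (reported via the extra progress field) and is logged, letting the remaining paths proceed; without it, the first error still aborts the whole transfer. A generic, self-contained sketch of the pattern:

    #include <atomic>
    #include <iostream>
    #include <stdexcept>
    #include <vector>

    int main()
    {
        std::atomic<size_t> nrDone{0}, nrFailed{0};
        bool keepGoing = true;      // stand-in for settings.keepGoing
        std::vector<int> paths{1, 2, 3};

        for (int p : paths) {
            try {
                if (p == 2) throw std::runtime_error("copy failed");
            } catch (std::exception & e) {
                nrFailed++;
                if (!keepGoing) throw;  // old behaviour: abort everything
                std::cerr << "could not copy " << p << ": " << e.what() << "\n";
                continue;               // new behaviour: press on
            }
            nrDone++;
        }
        std::cout << nrDone << " done, " << nrFailed << " failed\n";
    }
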
@@ -834,8 +852,24 @@ ref<Store> openStore(const std::string & uri_,
if (q != std::string::npos) {
for (auto s : tokenizeString<Strings>(uri.substr(q + 1), "&")) {
auto e = s.find('=');
- if (e != std::string::npos)
- params[s.substr(0, e)] = s.substr(e + 1);
+ if (e != std::string::npos) {
+ auto value = s.substr(e + 1);
+ std::string decoded;
+ for (size_t i = 0; i < value.size(); ) {
+ if (value[i] == '%') {
+ if (i + 2 >= value.size())
+ throw Error("invalid URI parameter '%s'", value);
+ try {
+ decoded += std::stoul(std::string(value, i + 1, 2), 0, 16);
+ i += 3;
+ } catch (...) {
+ throw Error("invalid URI parameter '%s'", value);
+ }
+ } else
+ decoded += value[i++];
+ }
+ params[s.substr(0, e)] = decoded;
+ }
}
uri = uri_.substr(0, q);
}
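
Query values in store URIs are now percent-decoded, so parameters can carry characters such as '/', '&' and '=' (an ssh-key path, say). A self-contained decoder mirroring the loop above:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Minimal %XX decoder with the same contract as the loop above.
    std::string percentDecode(const std::string & value)
    {
        std::string decoded;
        for (size_t i = 0; i < value.size(); ) {
            if (value[i] == '%') {
                if (i + 2 >= value.size())
                    throw std::invalid_argument("invalid URI parameter");
                decoded += (char) std::stoul(value.substr(i + 1, 2), 0, 16);
                i += 3;
            } else
                decoded += value[i++];
        }
        return decoded;
    }

    int main()
    {
        // e.g. the value of ssh-key=%2Froot%2F.ssh%2Fid_rsa
        std::cout << percentDecode("%2Froot%2F.ssh%2Fid_rsa") << "\n";
    }
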
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 7c5b495a4..106b2be5e 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -23,6 +23,7 @@ MakeError(BuildError, Error) /* denotes a permanent build failure */
MakeError(InvalidPath, Error)
MakeError(Unsupported, Error)
MakeError(SubstituteGone, Error)
+MakeError(SubstituterDisabled, Error)
struct BasicDerivation;
@@ -598,6 +599,12 @@ public:
a notion of connection. Otherwise this is a no-op. */
virtual void connect() { };
+    /* Get the protocol version of this store or its connection. */
+ virtual unsigned int getProtocol()
+ {
+ return 0;
+ };
+
/* Get the priority of the store, used to order substituters. In
particular, binary caches can specify a priority field in their
"nix-cache-info" file. Lower value means higher priority. */