Diffstat (limited to 'src/libstore')
-rw-r--r--  src/libstore/binary-cache-store.cc | 107
-rw-r--r--  src/libstore/binary-cache-store.hh | 27
-rw-r--r--  src/libstore/build/derivation-goal.cc | 68
-rw-r--r--  src/libstore/build/derivation-goal.hh | 4
-rw-r--r--  src/libstore/build/drv-output-substitution-goal.cc | 44
-rw-r--r--  src/libstore/build/drv-output-substitution-goal.hh | 14
-rw-r--r--  src/libstore/build/entry-points.cc | 9
-rw-r--r--  src/libstore/build/local-derivation-goal.cc | 154
-rw-r--r--  src/libstore/build/local-derivation-goal.hh | 3
-rw-r--r--  src/libstore/build/substitution-goal.cc | 4
-rw-r--r--  src/libstore/build/worker.cc | 6
-rw-r--r--  src/libstore/ca-specific-schema.sql | 5
-rw-r--r--  src/libstore/content-address.cc | 22
-rw-r--r--  src/libstore/daemon.cc | 107
-rw-r--r--  src/libstore/derivations.cc | 46
-rw-r--r--  src/libstore/derivations.hh | 4
-rw-r--r--  src/libstore/derived-path.cc | 10
-rw-r--r--  src/libstore/dummy-store.cc | 5
-rw-r--r--  src/libstore/export-import.cc | 6
-rw-r--r--  src/libstore/filetransfer.cc | 35
-rw-r--r--  src/libstore/filetransfer.hh | 8
-rw-r--r--  src/libstore/gc.cc | 784
-rw-r--r--  src/libstore/globals.cc | 20
-rw-r--r--  src/libstore/globals.hh | 52
-rw-r--r--  src/libstore/http-binary-cache-store.cc | 12
-rw-r--r--  src/libstore/legacy-ssh-store.cc | 30
-rw-r--r--  src/libstore/local-binary-cache-store.cc | 1
-rw-r--r--  src/libstore/local-fs-store.cc | 16
-rw-r--r--  src/libstore/local-fs-store.hh | 3
-rw-r--r--  src/libstore/local-store.cc | 134
-rw-r--r--  src/libstore/local-store.hh | 45
-rw-r--r--  src/libstore/local.mk | 4
-rw-r--r--  src/libstore/machines.cc | 96
-rw-r--r--  src/libstore/misc.cc | 13
-rw-r--r--  src/libstore/names.cc | 2
-rw-r--r--  src/libstore/names.hh | 2
-rw-r--r--  src/libstore/nar-accessor.cc | 8
-rw-r--r--  src/libstore/nar-accessor.hh | 2
-rw-r--r--  src/libstore/optimise-store.cc | 31
-rw-r--r--  src/libstore/path-with-outputs.cc | 4
-rw-r--r--  src/libstore/pathlocks.cc | 13
-rw-r--r--  src/libstore/pathlocks.hh | 14
-rw-r--r--  src/libstore/profiles.cc | 37
-rw-r--r--  src/libstore/profiles.hh | 9
-rw-r--r--  src/libstore/realisation.cc | 2
-rw-r--r--  src/libstore/references.cc | 93
-rw-r--r--  src/libstore/references.hh | 24
-rw-r--r--  src/libstore/remote-fs-accessor.cc | 30
-rw-r--r--  src/libstore/remote-fs-accessor.hh | 3
-rw-r--r--  src/libstore/remote-store.cc | 102
-rw-r--r--  src/libstore/remote-store.hh | 14
-rw-r--r--  src/libstore/s3-binary-cache-store.cc | 8
-rw-r--r--  src/libstore/s3.hh | 4
-rw-r--r--  src/libstore/sandbox-defaults.sb | 5
-rw-r--r--  src/libstore/serve-protocol.hh | 2
-rw-r--r--  src/libstore/sqlite.cc | 7
-rw-r--r--  src/libstore/ssh-store.cc | 5
-rw-r--r--  src/libstore/store-api.cc | 119
-rw-r--r--  src/libstore/store-api.hh | 83
-rw-r--r--  src/libstore/tests/local.mk | 15
-rw-r--r--  src/libstore/tests/machines.cc | 169
-rw-r--r--  src/libstore/tests/references.cc | 45
-rw-r--r--  src/libstore/tests/test-data/machines.bad_format | 1
-rw-r--r--  src/libstore/tests/test-data/machines.valid | 3
-rw-r--r--  src/libstore/uds-remote-store.cc | 28
-rw-r--r--  src/libstore/uds-remote-store.hh | 6
-rw-r--r--  src/libstore/worker-protocol.hh | 1
67 files changed, 1740 insertions, 1049 deletions
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 74eb0a9ab..6e4458f7a 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -31,7 +31,7 @@ BinaryCacheStore::BinaryCacheStore(const Params & params)
StringSink sink;
sink << narVersionMagic1;
- narMagic = *sink.s;
+ narMagic = sink.s;
}
void BinaryCacheStore::init()
@@ -52,9 +52,9 @@ void BinaryCacheStore::init()
throw Error("binary cache '%s' is for Nix stores with prefix '%s', not '%s'",
getUri(), value, storeDir);
} else if (name == "WantMassQuery") {
- wantMassQuery.setDefault(value == "1" ? "true" : "false");
+ wantMassQuery.setDefault(value == "1");
} else if (name == "Priority") {
- priority.setDefault(fmt("%d", std::stoi(value)));
+ priority.setDefault(std::stoi(value));
}
}
}
@@ -68,7 +68,7 @@ void BinaryCacheStore::upsertFile(const std::string & path,
}
void BinaryCacheStore::getFile(const std::string & path,
- Callback<std::shared_ptr<std::string>> callback) noexcept
+ Callback<std::optional<std::string>> callback) noexcept
{
try {
callback(getFile(path));
@@ -77,9 +77,9 @@ void BinaryCacheStore::getFile(const std::string & path,
void BinaryCacheStore::getFile(const std::string & path, Sink & sink)
{
- std::promise<std::shared_ptr<std::string>> promise;
+ std::promise<std::optional<std::string>> promise;
getFile(path,
- {[&](std::future<std::shared_ptr<std::string>> result) {
+ {[&](std::future<std::optional<std::string>> result) {
try {
promise.set_value(result.get());
} catch (...) {
@@ -89,15 +89,15 @@ void BinaryCacheStore::getFile(const std::string & path, Sink & sink)
sink(*promise.get_future().get());
}
-std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
+std::optional<std::string> BinaryCacheStore::getFile(const std::string & path)
{
StringSink sink;
try {
getFile(path, sink);
} catch (NoSuchBinaryCacheFile &) {
- return nullptr;
+ return std::nullopt;
}
- return sink.s;
+ return std::move(sink.s);
}
std::string BinaryCacheStore::narInfoFileFor(const StorePath & storePath)
@@ -111,15 +111,15 @@ void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
upsertFile(narInfoFile, narInfo->to_string(*this), "text/x-nix-narinfo");
- std::string hashPart(narInfo->path.hashPart());
-
{
auto state_(state.lock());
- state_->pathInfoCache.upsert(hashPart, PathInfoCacheValue { .value = std::shared_ptr<NarInfo>(narInfo) });
+ state_->pathInfoCache.upsert(
+ std::string(narInfo->path.to_string()),
+ PathInfoCacheValue { .value = std::shared_ptr<NarInfo>(narInfo) });
}
if (diskCache)
- diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo));
+ diskCache->upsertNarInfo(getUri(), std::string(narInfo->path.hashPart()), std::shared_ptr<NarInfo>(narInfo));
}
AutoCloseFD openFile(const Path & path)
@@ -149,7 +149,7 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
{
FdSink fileSink(fdTemp.get());
TeeSink teeSinkCompressed { fileSink, fileHashSink };
- auto compressionSink = makeCompressionSink(compression, teeSinkCompressed);
+ auto compressionSink = makeCompressionSink(compression, teeSinkCompressed, parallelCompression, compressionLevel);
TeeSink teeSinkUncompressed { *compressionSink, narHashSink };
TeeSource teeSource { narSource, teeSinkUncompressed };
narAccessor = makeNarAccessor(teeSource);
@@ -308,16 +308,17 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
}
StorePath BinaryCacheStore::addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method, HashType hashAlgo, RepairFlag repair)
+ FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references)
{
if (method != FileIngestionMethod::Recursive || hashAlgo != htSHA256)
unsupported("addToStoreFromDump");
return addToStoreCommon(dump, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
- makeFixedOutputPath(method, nar.first, name),
+ makeFixedOutputPath(method, nar.first, name, references),
nar.first,
};
info.narSize = nar.second;
+ info.references = references;
return info;
})->path;
}
@@ -366,11 +367,11 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
getFile(narInfoFile,
- {[=](std::future<std::shared_ptr<std::string>> fut) {
+ {[=](std::future<std::optional<std::string>> fut) {
try {
auto data = fut.get();
- if (!data) return (*callbackPtr)(nullptr);
+ if (!data) return (*callbackPtr)({});
stats.narInfoRead++;
@@ -385,7 +386,7 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
}
StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
- FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
+ FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair, const StorePathSet & references)
{
/* FIXME: Make BinaryCacheStore::addToStoreCommon support
non-recursive+sha256 so we can just use the default
@@ -404,10 +405,11 @@ StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath
});
return addToStoreCommon(*source, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info {
- makeFixedOutputPath(method, h, name),
+ makeFixedOutputPath(method, h, name, references),
nar.first,
};
info.narSize = nar.second;
+ info.references = references;
info.ca = FixedOutputHash {
.method = method,
.hash = h,
@@ -427,7 +429,7 @@ StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s
StringSink sink;
dumpString(s, sink);
- auto source = StringSource { *sink.s };
+ StringSource source(sink.s);
return addToStoreCommon(source, repair, CheckSigs, [&](HashResult nar) {
ValidPathInfo info { path, nar.first };
info.narSize = nar.second;
@@ -437,40 +439,29 @@ StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s
})->path;
}
-std::optional<const Realisation> BinaryCacheStore::queryRealisation(const DrvOutput & id)
+void BinaryCacheStore::queryRealisationUncached(const DrvOutput & id,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
- if (diskCache) {
- auto [cacheOutcome, maybeCachedRealisation] =
- diskCache->lookupRealisation(getUri(), id);
- switch (cacheOutcome) {
- case NarInfoDiskCache::oValid:
- debug("Returning a cached realisation for %s", id.to_string());
- return *maybeCachedRealisation;
- case NarInfoDiskCache::oInvalid:
- debug("Returning a cached missing realisation for %s", id.to_string());
- return {};
- case NarInfoDiskCache::oUnknown:
- break;
- }
- }
-
auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi";
- auto rawOutputInfo = getFile(outputInfoFilePath);
- if (rawOutputInfo) {
- auto realisation = Realisation::fromJSON(
- nlohmann::json::parse(*rawOutputInfo), outputInfoFilePath);
+ auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
- if (diskCache)
- diskCache->upsertRealisation(
- getUri(), realisation);
+ Callback<std::optional<std::string>> newCallback = {
+ [=](std::future<std::optional<std::string>> fut) {
+ try {
+ auto data = fut.get();
+ if (!data) return (*callbackPtr)({});
- return {realisation};
- } else {
- if (diskCache)
- diskCache->upsertAbsentRealisation(getUri(), id);
- return std::nullopt;
- }
+ auto realisation = Realisation::fromJSON(
+ nlohmann::json::parse(*data), outputInfoFilePath);
+ return (*callbackPtr)(std::make_shared<const Realisation>(realisation));
+ } catch (...) {
+ callbackPtr->rethrow();
+ }
+ }
+ };
+
+ getFile(outputInfoFilePath, std::move(newCallback));
}
void BinaryCacheStore::registerDrvOutput(const Realisation& info) {
@@ -499,7 +490,7 @@ void BinaryCacheStore::addSignatures(const StorePath & storePath, const StringSe
writeNarInfo(narInfo);
}
-std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const StorePath & path)
+std::optional<std::string> BinaryCacheStore::getBuildLog(const StorePath & path)
{
auto drvPath = path;
@@ -507,10 +498,10 @@ std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const StorePath & pat
try {
auto info = queryPathInfo(path);
// FIXME: add a "Log" field to .narinfo
- if (!info->deriver) return nullptr;
+ if (!info->deriver) return std::nullopt;
drvPath = *info->deriver;
} catch (InvalidPath &) {
- return nullptr;
+ return std::nullopt;
}
}
@@ -521,4 +512,14 @@ std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const StorePath & pat
return getFile(logPath);
}
+void BinaryCacheStore::addBuildLog(const StorePath & drvPath, std::string_view log)
+{
+ assert(drvPath.isDerivation());
+
+ upsertFile(
+ "log/" + std::string(drvPath.to_string()),
+ (std::string) log, // FIXME: don't copy
+ "text/plain; charset=utf-8");
+}
+
}
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index 657be2fcf..7599230d9 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -15,13 +15,17 @@ struct BinaryCacheStoreConfig : virtual StoreConfig
{
using StoreConfig::StoreConfig;
- const Setting<std::string> compression{(StoreConfig*) this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
+ const Setting<std::string> compression{(StoreConfig*) this, "xz", "compression", "NAR compression method ('xz', 'bzip2', 'gzip', 'zstd', or 'none')"};
const Setting<bool> writeNARListing{(StoreConfig*) this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
const Setting<bool> writeDebugInfo{(StoreConfig*) this, false, "index-debug-info", "whether to index DWARF debug info files by build ID"};
const Setting<Path> secretKeyFile{(StoreConfig*) this, "", "secret-key", "path to secret key used to sign the binary cache"};
const Setting<Path> localNarCache{(StoreConfig*) this, "", "local-nar-cache", "path to a local cache of NARs"};
const Setting<bool> parallelCompression{(StoreConfig*) this, false, "parallel-compression",
- "enable multi-threading compression, available for xz only currently"};
+ "enable multi-threading compression for NARs, available for xz and zstd only currently"};
+ const Setting<int> compressionLevel{(StoreConfig*) this, -1, "compression-level",
+ "specify 'preset level' of compression to be used with NARs: "
+ "meaning and accepted range of values depends on compression method selected, "
+ "other than -1 which we reserve to indicate Nix defaults should be used"};
};
class BinaryCacheStore : public virtual BinaryCacheStoreConfig, public virtual Store
@@ -47,6 +51,7 @@ public:
const std::string & mimeType) = 0;
void upsertFile(const std::string & path,
+ // FIXME: use std::string_view
std::string && data,
const std::string & mimeType);
@@ -58,10 +63,11 @@ public:
/* Fetch the specified file and call the specified callback with
the result. A subclass may implement this asynchronously. */
- virtual void getFile(const std::string & path,
- Callback<std::shared_ptr<std::string>> callback) noexcept;
+ virtual void getFile(
+ const std::string & path,
+ Callback<std::optional<std::string>> callback) noexcept;
- std::shared_ptr<std::string> getFile(const std::string & path);
+ std::optional<std::string> getFile(const std::string & path);
public:
@@ -93,18 +99,19 @@ public:
RepairFlag repair, CheckSigsFlag checkSigs) override;
StorePath addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method, HashType hashAlgo, RepairFlag repair) override;
+ FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references ) override;
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method, HashType hashAlgo,
- PathFilter & filter, RepairFlag repair) override;
+ PathFilter & filter, RepairFlag repair, const StorePathSet & references) override;
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair) override;
void registerDrvOutput(const Realisation & info) override;
- std::optional<const Realisation> queryRealisation(const DrvOutput &) override;
+ void queryRealisationUncached(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
void narFromPath(const StorePath & path, Sink & sink) override;
@@ -112,7 +119,9 @@ public:
void addSignatures(const StorePath & storePath, const StringSet & sigs) override;
- std::shared_ptr<std::string> getBuildLog(const StorePath & path) override;
+ std::optional<std::string> getBuildLog(const StorePath & path) override;
+
+ void addBuildLog(const StorePath & drvPath, std::string_view log) override;
};
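
The binary-cache changes above replace std::shared_ptr<std::string> with std::optional<std::string> for getFile() and getBuildLog(), so a missing file is reported as std::nullopt instead of a null pointer and the data is owned by value. A minimal sketch of that return-type pattern, using a hypothetical in-memory cache rather than Nix's BinaryCacheStore:

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    // Hypothetical in-memory "binary cache"; stands in for upsertFile/getFile storage.
    static std::map<std::string, std::string> cache;

    // Return the file contents, or std::nullopt when the path is absent,
    // mirroring the new std::optional<std::string> getFile(path) signature.
    std::optional<std::string> getFile(const std::string & path)
    {
        if (auto it = cache.find(path); it != cache.end())
            return it->second;
        return std::nullopt;   // previously: return nullptr
    }

    int main()
    {
        cache["nar/abc.nar.xz"] = "...compressed NAR...";
        if (auto data = getFile("nar/abc.nar.xz"))
            std::cout << "got " << data->size() << " bytes\n";
        if (!getFile("log/missing.drv"))
            std::cout << "no such file\n";
    }
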
diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc
index 876b8def0..151217b8b 100644
--- a/src/libstore/build/derivation-goal.cc
+++ b/src/libstore/build/derivation-goal.cc
@@ -17,6 +17,7 @@
#include <regex>
#include <queue>
+#include <fstream>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
@@ -193,7 +194,7 @@ void DerivationGoal::loadDerivation()
assert(worker.evalStore.isValidPath(drvPath));
/* Get the derivation. */
- drv = std::make_unique<Derivation>(worker.evalStore.derivationFromPath(drvPath));
+ drv = std::make_unique<Derivation>(worker.evalStore.readDerivation(drvPath));
haveDerivation();
}
@@ -204,7 +205,7 @@ void DerivationGoal::haveDerivation()
trace("have derivation");
if (drv->type() == DerivationType::CAFloating)
- settings.requireExperimentalFeature("ca-derivations");
+ settings.requireExperimentalFeature(Xp::CaDerivations);
retrySubstitution = false;
@@ -277,7 +278,7 @@ void DerivationGoal::outputsSubstitutionTried()
if (nrFailed > 0 && nrFailed > nrNoSubstituters + nrIncompleteClosure && !settings.tryFallback) {
done(BuildResult::TransientFailure,
- fmt("some substitutes for the outputs of derivation '%s' failed (usually happens due to networking issues); try '--fallback' to build derivation from source ",
+ Error("some substitutes for the outputs of derivation '%s' failed (usually happens due to networking issues); try '--fallback' to build derivation from source ",
worker.store.printStorePath(drvPath)));
return;
}
@@ -453,7 +454,7 @@ void DerivationGoal::inputsRealised()
if (useDerivation) {
auto & fullDrv = *dynamic_cast<Derivation *>(drv.get());
- if (settings.isExperimentalFeatureEnabled("ca-derivations") &&
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) &&
((!fullDrv.inputDrvs.empty() && derivationIsCA(fullDrv.type()))
|| fullDrv.type() == DerivationType::DeferredInputAddressed)) {
/* We are be able to resolve this derivation based on the
@@ -464,7 +465,6 @@ void DerivationGoal::inputsRealised()
Derivation drvResolved { *std::move(attempt) };
auto pathResolved = writeDerivation(worker.store, drvResolved);
- resolvedDrv = drvResolved;
auto msg = fmt("Resolved derivation: '%s' -> '%s'",
worker.store.printStorePath(drvPath),
@@ -475,9 +475,9 @@ void DerivationGoal::inputsRealised()
worker.store.printStorePath(pathResolved),
});
- auto resolvedGoal = worker.makeDerivationGoal(
+ resolvedDrvGoal = worker.makeDerivationGoal(
pathResolved, wantedOutputs, buildMode);
- addWaitee(resolvedGoal);
+ addWaitee(resolvedDrvGoal);
state = &DerivationGoal::resolvedFinished;
return;
@@ -566,7 +566,7 @@ void DerivationGoal::tryToBuild()
lockFiles.insert(worker.store.Store::toRealPath(*i.second.second));
else
lockFiles.insert(
- worker.store.Store::toRealPath(drvPath) + "!" + i.first
+ worker.store.Store::toRealPath(drvPath) + "." + i.first
);
}
}
@@ -616,7 +616,9 @@ void DerivationGoal::tryToBuild()
/* Don't do a remote build if the derivation has the attribute
`preferLocalBuild' set. Also, check and repair modes are only
supported for local builds. */
- bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store);
+ bool buildLocally =
+ (buildMode != bmNormal || parsedDrv->willBuildLocally(worker.store))
+ && settings.maxBuildJobs.get() != 0;
if (!buildLocally) {
switch (tryBuildHook()) {
@@ -653,7 +655,7 @@ void DerivationGoal::tryLocalBuild() {
throw Error(
"unable to build with a primary store that isn't a local store; "
"either pass a different '--store' or enable remote builds."
- "\nhttps://nixos.org/nix/manual/#chap-distributed-builds");
+ "\nhttps://nixos.org/manual/nix/stable/advanced-topics/distributed-builds.html");
}
@@ -774,9 +776,6 @@ void runPostBuildHook(
hookEnvironment.emplace("OUT_PATHS", chomp(concatStringsSep(" ", store.printStorePathSet(outputPaths))));
hookEnvironment.emplace("NIX_CONFIG", globalConfig.toKeyValue());
- RunOptions opts(settings.postBuildHook, {});
- opts.environment = hookEnvironment;
-
struct LogSink : Sink {
Activity & act;
std::string currentLine;
@@ -807,9 +806,12 @@ void runPostBuildHook(
};
LogSink sink(act);
- opts.standardOut = &sink;
- opts.mergeStderrToStdout = true;
- runProgram2(opts);
+ runProgram2({
+ .program = settings.postBuildHook,
+ .environment = hookEnvironment,
+ .standardOut = &sink,
+ .mergeStderrToStdout = true,
+ });
}
void DerivationGoal::buildDone()
@@ -936,16 +938,17 @@ void DerivationGoal::buildDone()
}
void DerivationGoal::resolvedFinished() {
- assert(resolvedDrv);
+ assert(resolvedDrvGoal);
+ auto resolvedDrv = *resolvedDrvGoal->drv;
- auto resolvedHashes = staticOutputHashes(worker.store, *resolvedDrv);
+ auto resolvedHashes = staticOutputHashes(worker.store, resolvedDrv);
StorePathSet outputPaths;
// `wantedOutputs` might be empty, which means “all the outputs”
auto realWantedOutputs = wantedOutputs;
if (realWantedOutputs.empty())
- realWantedOutputs = resolvedDrv->outputNames();
+ realWantedOutputs = resolvedDrv.outputNames();
for (auto & wantedOutput : realWantedOutputs) {
assert(initialOutputs.count(wantedOutput) != 0);
@@ -977,9 +980,17 @@ void DerivationGoal::resolvedFinished() {
outputPaths
);
- // This is potentially a bit fishy in terms of error reporting. Not sure
- // how to do it in a cleaner way
- amDone(nrFailed == 0 ? ecSuccess : ecFailed, ex);
+ auto status = [&]() {
+ auto resolvedResult = resolvedDrvGoal->getResult();
+ switch (resolvedResult.status) {
+ case BuildResult::AlreadyValid:
+ return BuildResult::ResolvesToAlreadyValid;
+ default:
+ return resolvedResult.status;
+ }
+ }();
+
+ done(status);
}
HookReply DerivationGoal::tryBuildHook()
@@ -1009,7 +1020,7 @@ HookReply DerivationGoal::tryBuildHook()
return readLine(worker.hook->fromHook.readSide.get());
} catch (Error & e) {
e.addTrace({}, "while reading the response from the build hook");
- throw e;
+ throw;
}
}();
if (handleJSONLogMessage(s, worker.act, worker.hook->activities, true))
@@ -1055,7 +1066,7 @@ HookReply DerivationGoal::tryBuildHook()
machineName = readLine(hook->fromHook.readSide.get());
} catch (Error & e) {
e.addTrace({}, "while reading the machine name from the build hook");
- throw e;
+ throw;
}
/* Tell the hook all the inputs that have to be copied to the
@@ -1273,7 +1284,7 @@ void DerivationGoal::checkPathValidity()
: PathStatus::Corrupt,
};
}
- if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
auto drvOutput = DrvOutput{initialOutputs.at(i.first).outputHash, i.first};
if (auto real = worker.store.queryRealisation(drvOutput)) {
info.known = {
@@ -1327,6 +1338,13 @@ void DerivationGoal::done(BuildResult::Status status, std::optional<Error> ex)
}
worker.updateProgress();
+
+ auto traceBuiltOutputsFile = getEnv("_NIX_TRACE_BUILT_OUTPUTS").value_or("");
+ if (traceBuiltOutputsFile != "") {
+ std::fstream fs;
+ fs.open(traceBuiltOutputsFile, std::fstream::out);
+ fs << worker.store.printStorePath(drvPath) << "\t" << result.toString() << std::endl;
+ }
}
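
Two hunks above change `throw e;` to a bare `throw;` after calling e.addTrace(...). The difference is that `throw e;` throws a copy of the caught object with its static type (slicing away any derived exception class), while `throw;` re-raises the original exception object unchanged. A small self-contained illustration with hypothetical Error/SysError types, not Nix's error hierarchy:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    struct Error : std::runtime_error {
        explicit Error(const std::string & msg) : std::runtime_error(msg) {}
    };
    struct SysError : Error {
        explicit SysError(const std::string & msg) : Error(msg) {}
    };

    int main()
    {
        try {
            try {
                throw SysError("reading from the build hook failed");
            } catch (Error & e) {
                // add context here, then rethrow; `throw e;` would lose the SysError type
                throw;
            }
        } catch (SysError & e) {
            std::cout << "caught SysError: " << e.what() << "\n";
        } catch (Error & e) {
            std::cout << "caught sliced Error: " << e.what() << "\n";
        }
    }
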
diff --git a/src/libstore/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh
index 704b77caf..e112542c7 100644
--- a/src/libstore/build/derivation-goal.hh
+++ b/src/libstore/build/derivation-goal.hh
@@ -50,8 +50,8 @@ struct DerivationGoal : public Goal
/* The path of the derivation. */
StorePath drvPath;
- /* The path of the corresponding resolved derivation */
- std::optional<BasicDerivation> resolvedDrv;
+ /* The goal for the corresponding resolved derivation */
+ std::shared_ptr<DerivationGoal> resolvedDrvGoal;
/* The specific outputs that we need to build. Empty means all of
them. */
diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc
index be270d079..b9602e696 100644
--- a/src/libstore/build/drv-output-substitution-goal.cc
+++ b/src/libstore/build/drv-output-substitution-goal.cc
@@ -1,6 +1,8 @@
#include "drv-output-substitution-goal.hh"
+#include "finally.hh"
#include "worker.hh"
#include "substitution-goal.hh"
+#include "callback.hh"
namespace nix {
@@ -50,14 +52,42 @@ void DrvOutputSubstitutionGoal::tryNext()
return;
}
- auto sub = subs.front();
+ sub = subs.front();
subs.pop_front();
// FIXME: Make async
- outputInfo = sub->queryRealisation(id);
+ // outputInfo = sub->queryRealisation(id);
+ outPipe.create();
+ promise = decltype(promise)();
+
+ sub->queryRealisation(
+ id, { [&](std::future<std::shared_ptr<const Realisation>> res) {
+ try {
+ Finally updateStats([this]() { outPipe.writeSide.close(); });
+ promise.set_value(res.get());
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ } });
+
+ worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
+
+ state = &DrvOutputSubstitutionGoal::realisationFetched;
+}
+
+void DrvOutputSubstitutionGoal::realisationFetched()
+{
+ worker.childTerminated(this);
+
+ try {
+ outputInfo = promise.get_future().get();
+ } catch (std::exception & e) {
+ printError(e.what());
+ substituterFailed = true;
+ }
+
if (!outputInfo) {
- tryNext();
- return;
+ return tryNext();
}
for (const auto & [depId, depPath] : outputInfo->dependentRealisations) {
@@ -119,4 +149,10 @@ void DrvOutputSubstitutionGoal::work()
(this->*state)();
}
+void DrvOutputSubstitutionGoal::handleEOF(int fd)
+{
+ if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
+}
+
+
}
diff --git a/src/libstore/build/drv-output-substitution-goal.hh b/src/libstore/build/drv-output-substitution-goal.hh
index 63ab53d89..67ae2624a 100644
--- a/src/libstore/build/drv-output-substitution-goal.hh
+++ b/src/libstore/build/drv-output-substitution-goal.hh
@@ -3,6 +3,8 @@
#include "store-api.hh"
#include "goal.hh"
#include "realisation.hh"
+#include <thread>
+#include <future>
namespace nix {
@@ -20,11 +22,18 @@ private:
// The realisation corresponding to the given output id.
// Will be filled once we can get it.
- std::optional<Realisation> outputInfo;
+ std::shared_ptr<const Realisation> outputInfo;
/* The remaining substituters. */
std::list<ref<Store>> subs;
+ /* The current substituter. */
+ std::shared_ptr<Store> sub;
+
+ Pipe outPipe;
+ std::thread thr;
+ std::promise<std::shared_ptr<const Realisation>> promise;
+
/* Whether a substituter failed. */
bool substituterFailed = false;
@@ -36,6 +45,7 @@ public:
void init();
void tryNext();
+ void realisationFetched();
void outPathValid();
void finished();
@@ -44,7 +54,7 @@ public:
string key() override;
void work() override;
-
+ void handleEOF(int fd) override;
};
}
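
The substitution goal above bridges the now callback-based queryRealisation() to the worker's fd-driven event loop: the callback stores its result in a std::promise and closes the write side of a pipe, handleEOF() wakes the goal, and realisationFetched() reads the future. A stripped-down sketch of that bridge using a plain POSIX pipe and poll(), without any of Nix's Worker/Goal machinery:

    #include <future>
    #include <iostream>
    #include <string>
    #include <thread>
    #include <poll.h>
    #include <unistd.h>

    int main()
    {
        int fds[2];
        if (pipe(fds) != 0) return 1;

        std::promise<std::string> promise;
        auto future = promise.get_future();

        // Stand-in for the substituter's asynchronous queryRealisation() callback.
        std::thread async([&]() {
            promise.set_value("a fetched realisation");
            close(fds[1]);                  // like outPipe.writeSide.close()
        });

        // Stand-in for the worker's waitForInput(): block until the write side closes.
        struct pollfd pfd{};
        pfd.fd = fds[0];
        pfd.events = POLLIN;
        poll(&pfd, 1, -1);

        // Stand-in for realisationFetched(): the future is guaranteed to be ready.
        std::cout << future.get() << std::endl;

        async.join();
        close(fds[0]);
    }
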
diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc
index 96deb81d1..9b4cfd835 100644
--- a/src/libstore/build/entry-points.cc
+++ b/src/libstore/build/entry-points.cc
@@ -1,4 +1,3 @@
-#include "machines.hh"
#include "worker.hh"
#include "substitution-goal.hh"
#include "derivation-goal.hh"
@@ -11,12 +10,12 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
Worker worker(*this, evalStore ? *evalStore : *this);
Goals goals;
- for (auto & br : reqs) {
+ for (const auto & br : reqs) {
std::visit(overloaded {
- [&](DerivedPath::Built bfd) {
+ [&](const DerivedPath::Built & bfd) {
goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode));
},
- [&](DerivedPath::Opaque bo) {
+ [&](const DerivedPath::Opaque & bo) {
goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair));
},
}, br.raw());
@@ -74,7 +73,7 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat
outputId,
Realisation{ outputId, *staticOutput.second}
);
- if (settings.isExperimentalFeatureEnabled("ca-derivations") && !derivationHasKnownOutputPaths(drv.type())) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) && !derivationHasKnownOutputPaths(drv.type())) {
auto realisation = this->queryRealisation(outputId);
if (realisation)
result.builtOutputs.insert_or_assign(
diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc
index 990ff60b7..0d0afea2d 100644
--- a/src/libstore/build/local-derivation-goal.cc
+++ b/src/libstore/build/local-derivation-goal.cc
@@ -17,16 +17,14 @@
#include <regex>
#include <queue>
-#include <sys/types.h>
-#include <sys/socket.h>
#include <sys/un.h>
-#include <netdb.h>
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/utsname.h>
#include <sys/resource.h>
+#include <sys/socket.h>
#if HAVE_STATVFS
#include <sys/statvfs.h>
@@ -34,7 +32,6 @@
/* Includes required for chroot support. */
#if __linux__
-#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/ip.h>
@@ -70,12 +67,14 @@ void handleDiffHook(
auto diffHook = settings.diffHook;
if (diffHook != "" && settings.runDiffHook) {
try {
- RunOptions diffHookOptions(diffHook,{tryA, tryB, drvPath, tmpDir});
- diffHookOptions.searchPath = true;
- diffHookOptions.uid = uid;
- diffHookOptions.gid = gid;
- diffHookOptions.chdir = "/";
- auto diffRes = runProgram(diffHookOptions);
+ auto diffRes = runProgram(RunOptions {
+ .program = diffHook,
+ .searchPath = true,
+ .args = {tryA, tryB, drvPath, tmpDir},
+ .uid = uid,
+ .gid = gid,
+ .chdir = "/"
+ });
if (!statusOk(diffRes.first))
throw ExecError(diffRes.first,
"diff-hook program '%1%' %2%",
@@ -261,6 +260,7 @@ void LocalDerivationGoal::cleanupHookFinally()
void LocalDerivationGoal::cleanupPreChildKill()
{
sandboxMountNamespace = -1;
+ sandboxUserNamespace = -1;
}
@@ -343,24 +343,7 @@ int childEntry(void * arg)
return 1;
}
-
-static std::once_flag dns_resolve_flag;
-
-static void preloadNSS() {
- /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of
- one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already
- been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to
- load its lookup libraries in the parent before any child gets a chance to. */
- std::call_once(dns_resolve_flag, []() {
- struct addrinfo *res = NULL;
-
- if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http", NULL, &res) != 0) {
- if (res) freeaddrinfo(res);
- }
- });
-}
-
-
+#if __linux__
static void linkOrCopy(const Path & from, const Path & to)
{
if (link(from.c_str(), to.c_str()) == -1) {
@@ -376,6 +359,7 @@ static void linkOrCopy(const Path & from, const Path & to)
copyPath(from, to);
}
}
+#endif
void LocalDerivationGoal::startBuilder()
@@ -389,9 +373,6 @@ void LocalDerivationGoal::startBuilder()
settings.thisSystem,
concatStringsSep<StringSet>(", ", worker.store.systemFeatures));
- if (drv->isBuiltin())
- preloadNSS();
-
#if __APPLE__
additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
#endif
@@ -732,6 +713,7 @@ void LocalDerivationGoal::startBuilder()
if (!builderOut.readSide)
throw SysError("opening pseudoterminal master");
+ // FIXME: not thread-safe, use ptsname_r
std::string slaveName(ptsname(builderOut.readSide.get()));
if (buildUser) {
@@ -775,7 +757,6 @@ void LocalDerivationGoal::startBuilder()
result.startTime = time(0);
/* Fork a child to build the package. */
- ProcessOptions options;
#if __linux__
if (useChroot) {
@@ -818,8 +799,6 @@ void LocalDerivationGoal::startBuilder()
userNamespaceSync.create();
- options.allowVfork = false;
-
Path maxUserNamespaces = "/proc/sys/user/max_user_namespaces";
static bool userNamespacesEnabled =
pathExists(maxUserNamespaces)
@@ -877,7 +856,7 @@ void LocalDerivationGoal::startBuilder()
writeFull(builderOut.writeSide.get(),
fmt("%d %d\n", usingUserNamespace, child));
_exit(0);
- }, options);
+ });
int res = helper.wait();
if (res != 0 && settings.sandboxFallback) {
@@ -928,11 +907,14 @@ void LocalDerivationGoal::startBuilder()
"nobody:x:65534:65534:Nobody:/:/noshell\n",
sandboxUid(), sandboxGid(), settings.sandboxBuildDir));
- /* Save the mount namespace of the child. We have to do this
+ /* Save the mount- and user namespace of the child. We have to do this
*before* the child does a chroot. */
sandboxMountNamespace = open(fmt("/proc/%d/ns/mnt", (pid_t) pid).c_str(), O_RDONLY);
if (sandboxMountNamespace.get() == -1)
throw SysError("getting sandbox mount namespace");
+ sandboxUserNamespace = open(fmt("/proc/%d/ns/user", (pid_t) pid).c_str(), O_RDONLY);
+ if (sandboxUserNamespace.get() == -1)
+ throw SysError("getting sandbox user namespace");
/* Signal the builder that we've updated its user namespace. */
writeFull(userNamespaceSync.writeSide.get(), "1");
@@ -940,11 +922,12 @@ void LocalDerivationGoal::startBuilder()
} else
#endif
{
+#if __linux__
fallback:
- options.allowVfork = !buildUser && !drv->isBuiltin();
+#endif
pid = startProcess([&]() {
runChild();
- }, options);
+ });
}
/* parent */
@@ -959,9 +942,12 @@ void LocalDerivationGoal::startBuilder()
try {
return readLine(builderOut.readSide.get());
} catch (Error & e) {
- e.addTrace({}, "while waiting for the build environment to initialize (previous messages: %s)",
+ auto status = pid.wait();
+ e.addTrace({}, "while waiting for the build environment for '%s' to initialize (%s, previous messages: %s)",
+ worker.store.printStorePath(drvPath),
+ statusToString(status),
concatStringsSep("|", msgs));
- throw e;
+ throw;
}
}();
if (string(msg, 0, 1) == "\2") break;
@@ -1112,10 +1098,10 @@ void LocalDerivationGoal::writeStructuredAttrs()
static StorePath pathPartOfReq(const DerivedPath & req)
{
return std::visit(overloaded {
- [&](DerivedPath::Opaque bo) {
+ [&](const DerivedPath::Opaque & bo) {
return bo.path;
},
- [&](DerivedPath::Built bfd) {
+ [&](const DerivedPath::Built & bfd) {
return bfd.drvPath;
},
}, req.raw());
@@ -1200,7 +1186,8 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
- PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override
+ PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair,
+ const StorePathSet & references = StorePathSet()) override
{ throw Error("addToStore"); }
void addToStore(const ValidPathInfo & info, Source & narSource,
@@ -1219,9 +1206,10 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
}
StorePath addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override
+ FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair,
+ const StorePathSet & references = StorePathSet()) override
{
- auto path = next->addToStoreFromDump(dump, name, method, hashAlgo, repair);
+ auto path = next->addToStoreFromDump(dump, name, method, hashAlgo, repair, references);
goal.addDependency(path);
return path;
}
@@ -1245,13 +1233,14 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
// corresponds to an allowed derivation
{ throw Error("registerDrvOutput"); }
- std::optional<const Realisation> queryRealisation(const DrvOutput & id) override
+ void queryRealisationUncached(const DrvOutput & id,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override
// XXX: This should probably be allowed if the realisation corresponds to
// an allowed derivation
{
if (!goal.isAllowed(id))
- throw InvalidPath("cannot query an unknown output id '%s' in recursive Nix", id.to_string());
- return next->queryRealisation(id);
+ callback(nullptr);
+ next->queryRealisation(id, std::move(callback));
}
void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override
@@ -1280,7 +1269,7 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
for (auto & [outputName, outputPath] : outputs)
if (wantOutput(outputName, bfd.outputs)) {
newPaths.insert(outputPath);
- if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
auto thisRealisation = next->queryRealisation(
DrvOutput{drvHashes.at(outputName), outputName}
);
@@ -1341,7 +1330,7 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
void LocalDerivationGoal::startDaemon()
{
- settings.requireExperimentalFeature("recursive-nix");
+ settings.requireExperimentalFeature(Xp::RecursiveNix);
Store::Params params;
params["path-info-cache-size"] = "0";
@@ -1374,7 +1363,7 @@ void LocalDerivationGoal::startDaemon()
AutoCloseFD remote = accept(daemonSocket.get(),
(struct sockaddr *) &remoteAddr, &remoteAddrLen);
if (!remote) {
- if (errno == EINTR) continue;
+ if (errno == EINTR || errno == EAGAIN) continue;
if (errno == EINVAL) break;
throw SysError("accepting connection");
}
@@ -1453,6 +1442,9 @@ void LocalDerivationGoal::addDependency(const StorePath & path)
child process.*/
Pid child(startProcess([&]() {
+ if (usingUserNamespace && (setns(sandboxUserNamespace.get(), 0) == -1))
+ throw SysError("entering sandbox user namespace");
+
if (setns(sandboxMountNamespace.get(), 0) == -1)
throw SysError("entering sandbox mount namespace");
@@ -1794,11 +1786,14 @@ void LocalDerivationGoal::runChild()
i686-linux build on an x86_64-linux machine. */
struct utsname utsbuf;
uname(&utsbuf);
- if (drv->platform == "i686-linux" &&
- (settings.thisSystem == "x86_64-linux" ||
- (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64")))) {
+ if ((drv->platform == "i686-linux"
+ && (settings.thisSystem == "x86_64-linux"
+ || (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64"))))
+ || drv->platform == "armv7l-linux"
+ || drv->platform == "armv6l-linux")
+ {
if (personality(PER_LINUX32) == -1)
- throw SysError("cannot set i686-linux personality");
+ throw SysError("cannot set 32-bit personality");
}
/* Impersonate a Linux 2.6 machine to get some determinism in
@@ -1853,7 +1848,7 @@ void LocalDerivationGoal::runChild()
/* Fill in the arguments. */
Strings args;
- const char *builder = "invalid";
+ std::string builder = "invalid";
if (drv->isBuiltin()) {
;
@@ -1979,13 +1974,13 @@ void LocalDerivationGoal::runChild()
}
args.push_back(drv->builder);
} else {
- builder = drv->builder.c_str();
+ builder = drv->builder;
args.push_back(std::string(baseNameOf(drv->builder)));
}
}
#else
else {
- builder = drv->builder.c_str();
+ builder = drv->builder;
args.push_back(std::string(baseNameOf(drv->builder)));
}
#endif
@@ -2012,7 +2007,7 @@ void LocalDerivationGoal::runChild()
else if (drv->builder == "builtin:unpack-channel")
builtinUnpackChannel(drv2);
else
- throw Error("unsupported builtin function '%1%'", string(drv->builder, 8));
+ throw Error("unsupported builtin builder '%1%'", string(drv->builder, 8));
_exit(0);
} catch (std::exception & e) {
writeFull(STDERR_FILENO, e.what() + std::string("\n"));
@@ -2041,9 +2036,9 @@ void LocalDerivationGoal::runChild()
posix_spawnattr_setbinpref_np(&attrp, 1, &cpu, NULL);
}
- posix_spawn(NULL, builder, NULL, &attrp, stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
+ posix_spawn(NULL, builder.c_str(), NULL, &attrp, stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
#else
- execve(builder, stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
+ execve(builder.c_str(), stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
#endif
throw SysError("executing '%1%'", drv->builder);
@@ -2158,8 +2153,7 @@ void LocalDerivationGoal::registerOutputs()
/* Pass blank Sink as we are not ready to hash data at this stage. */
NullSink blank;
- auto references = worker.store.parseStorePathSet(
- scanForReferences(blank, actualPath, worker.store.printStorePathSet(referenceablePaths)));
+ auto references = scanForReferences(blank, actualPath, referenceablePaths);
outputReferencesIfUnregistered.insert_or_assign(
outputName,
@@ -2173,8 +2167,8 @@ void LocalDerivationGoal::registerOutputs()
/* Since we'll use the already installed versions of these, we
can treat them as leaves and ignore any references they
have. */
- [&](AlreadyRegistered _) { return StringSet {}; },
- [&](PerhapsNeedToRegister refs) {
+ [&](const AlreadyRegistered &) { return StringSet {}; },
+ [&](const PerhapsNeedToRegister & refs) {
StringSet referencedOutputs;
/* FIXME build inverted map up front so no quadratic waste here */
for (auto & r : refs.refs)
@@ -2210,11 +2204,11 @@ void LocalDerivationGoal::registerOutputs()
};
std::optional<StorePathSet> referencesOpt = std::visit(overloaded {
- [&](AlreadyRegistered skippedFinalPath) -> std::optional<StorePathSet> {
+ [&](const AlreadyRegistered & skippedFinalPath) -> std::optional<StorePathSet> {
finish(skippedFinalPath.path);
return std::nullopt;
},
- [&](PerhapsNeedToRegister r) -> std::optional<StorePathSet> {
+ [&](const PerhapsNeedToRegister & r) -> std::optional<StorePathSet> {
return r.refs;
},
}, outputReferencesIfUnregistered.at(outputName));
@@ -2226,14 +2220,14 @@ void LocalDerivationGoal::registerOutputs()
auto rewriteOutput = [&]() {
/* Apply hash rewriting if necessary. */
if (!outputRewrites.empty()) {
- warn("rewriting hashes in '%1%'; cross fingers", actualPath);
+ debug("rewriting hashes in '%1%'; cross fingers", actualPath);
/* FIXME: this is in-memory. */
StringSink sink;
dumpPath(actualPath, sink);
deletePath(actualPath);
- sink.s = make_ref<std::string>(rewriteStrings(*sink.s, outputRewrites));
- StringSource source(*sink.s);
+ sink.s = rewriteStrings(sink.s, outputRewrites);
+ StringSource source(sink.s);
restorePath(actualPath, source);
}
};
@@ -2301,7 +2295,7 @@ void LocalDerivationGoal::registerOutputs()
StringSink sink;
dumpPath(actualPath, sink);
RewritingSink rsink2(oldHashPart, std::string(finalPath.hashPart()), nextSink);
- rsink2(*sink.s);
+ rsink2(sink.s);
rsink2.flush();
});
Path tmpPath = actualPath + ".tmp";
@@ -2330,7 +2324,7 @@ void LocalDerivationGoal::registerOutputs()
};
ValidPathInfo newInfo = std::visit(overloaded {
- [&](DerivationOutputInputAddressed output) {
+ [&](const DerivationOutputInputAddressed & output) {
/* input-addressed case */
auto requiredFinalPath = output.path;
/* Preemptively add rewrite rule for final hash, as that is
@@ -2349,14 +2343,14 @@ void LocalDerivationGoal::registerOutputs()
newInfo0.references.insert(newInfo0.path);
return newInfo0;
},
- [&](DerivationOutputCAFixed dof) {
+ [&](const DerivationOutputCAFixed & dof) {
auto newInfo0 = newInfoFromCA(DerivationOutputCAFloating {
.method = dof.hash.method,
.hashType = dof.hash.hash.type,
});
/* Check wanted hash */
- Hash & wanted = dof.hash.hash;
+ const Hash & wanted = dof.hash.hash;
assert(newInfo0.ca);
auto got = getContentAddressHash(*newInfo0.ca);
if (wanted != got) {
@@ -2473,7 +2467,7 @@ void LocalDerivationGoal::registerOutputs()
}
if (curRound == nrRounds) {
- localStore.optimisePath(actualPath); // FIXME: combine with scanForReferences()
+ localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
worker.markContentsGood(newInfo.path);
}
@@ -2492,7 +2486,13 @@ void LocalDerivationGoal::registerOutputs()
infos.emplace(outputName, std::move(newInfo));
}
- if (buildMode == bmCheck) return;
+ if (buildMode == bmCheck) {
+ // In case of FOD mismatches on `--check` an error must be thrown as this is also
+ // a source for non-determinism.
+ if (delayedException)
+ std::rethrow_exception(delayedException);
+ return;
+ }
/* Apply output checks. */
checkOutputs(infos);
@@ -2577,7 +2577,7 @@ void LocalDerivationGoal::registerOutputs()
that for floating CA derivations, which otherwise couldn't be cached,
but it's fine to do in all cases. */
- if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
for (auto& [outputName, newInfo] : infos) {
auto thisRealisation = Realisation{
.id = DrvOutput{initialOutputs.at(outputName).outputHash,
diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh
index 088a57209..bfdf91d89 100644
--- a/src/libstore/build/local-derivation-goal.hh
+++ b/src/libstore/build/local-derivation-goal.hh
@@ -27,9 +27,10 @@ struct LocalDerivationGoal : public DerivationGoal
/* Pipe for synchronising updates to the builder namespaces. */
Pipe userNamespaceSync;
- /* The mount namespace of the builder, used to add additional
+ /* The mount namespace and user namespace of the builder, used to add additional
paths to the sandbox as a result of recursive Nix calls. */
AutoCloseFD sandboxMountNamespace;
+ AutoCloseFD sandboxUserNamespace;
/* On Linux, whether we're doing the build in its own user
namespace. */
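
The sandboxUserNamespace changes above reflect what addDependency() now does for recursive Nix: to make a newly added store path visible inside a running sandbox, the helper process joins the builder's user namespace before its mount namespace, because the user namespace carries the privileges needed for mounting. A Linux-only sketch of joining both namespaces of an existing process via /proc/<pid>/ns (not Nix's addDependency, and it needs appropriate privileges to succeed):

    #include <sched.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>
    #include <string>

    // Open one of /proc/<pid>/ns/{user,mnt}, as the diff does for the builder pid.
    static int openNamespace(pid_t pid, const std::string & ns)
    {
        auto path = "/proc/" + std::to_string(pid) + "/ns/" + ns;
        return open(path.c_str(), O_RDONLY);
    }

    int main(int argc, char * * argv)
    {
        if (argc != 2) {
            std::fprintf(stderr, "usage: %s <builder-pid>\n", argv[0]);
            return 1;
        }
        pid_t pid = std::stoi(argv[1]);

        int userNs = openNamespace(pid, "user");
        int mntNs = openNamespace(pid, "mnt");
        if (userNs == -1 || mntNs == -1) { std::perror("open"); return 1; }

        /* Join the user namespace first: that is what grants the capabilities
           needed to join (and later bind-mount inside) the mount namespace. */
        if (setns(userNs, CLONE_NEWUSER) == -1) { std::perror("setns(user)"); return 1; }
        if (setns(mntNs, CLONE_NEWNS) == -1) { std::perror("setns(mnt)"); return 1; }

        // At this point a bind mount would make the new store path visible inside.
        execlp("sh", "sh", "-c", "findmnt | head", (char *) nullptr);
        std::perror("execlp");
        return 1;
    }
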
diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc
index 29a8cfb87..5ecf1da7e 100644
--- a/src/libstore/build/substitution-goal.cc
+++ b/src/libstore/build/substitution-goal.cc
@@ -138,8 +138,8 @@ void PathSubstitutionGoal::tryNext()
only after we've downloaded the path. */
if (!sub->isTrusted && worker.store.pathInfoIsUntrusted(*info))
{
- warn("substituter '%s' does not have a valid signature for path '%s'",
- sub->getUri(), worker.store.printStorePath(storePath));
+ warn("the substitute for '%s' from '%s' is not signed by any of the keys in 'trusted-public-keys'",
+ worker.store.printStorePath(storePath), sub->getUri());
tryNext();
return;
}
diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc
index a7a6b92a6..f11c5ce68 100644
--- a/src/libstore/build/worker.cc
+++ b/src/libstore/build/worker.cc
@@ -239,7 +239,7 @@ void Worker::run(const Goals & _topGoals)
}
}
- /* Call queryMissing() efficiently query substitutes. */
+ /* Call queryMissing() to efficiently query substitutes. */
StorePathSet willBuild, willSubstitute, unknown;
uint64_t downloadSize, narSize;
store.queryMissing(topPaths, willBuild, willSubstitute, unknown, downloadSize, narSize);
@@ -281,11 +281,11 @@ void Worker::run(const Goals & _topGoals)
if (getMachines().empty())
throw Error("unable to start any build; either increase '--max-jobs' "
"or enable remote builds."
- "\nhttps://nixos.org/nix/manual/#chap-distributed-builds");
+ "\nhttps://nixos.org/manual/nix/stable/advanced-topics/distributed-builds.html");
else
throw Error("unable to start any build; remote machines may not have "
"all required system features."
- "\nhttps://nixos.org/nix/manual/#chap-distributed-builds");
+ "\nhttps://nixos.org/manual/nix/stable/advanced-topics/distributed-builds.html");
}
assert(!awake.empty());
diff --git a/src/libstore/ca-specific-schema.sql b/src/libstore/ca-specific-schema.sql
index 08af0cc1f..64cc97fde 100644
--- a/src/libstore/ca-specific-schema.sql
+++ b/src/libstore/ca-specific-schema.sql
@@ -19,3 +19,8 @@ create table if not exists RealisationsRefs (
foreign key (referrer) references Realisations(id) on delete cascade,
foreign key (realisationReference) references Realisations(id) on delete restrict
);
+
+-- used by QueryRealisationReferences
+create index if not exists IndexRealisationsRefs on RealisationsRefs(referrer);
+-- used by cascade deletion when ValidPaths is deleted
+create index if not exists IndexRealisationsRefsOnOutputPath on Realisations(outputPath);
diff --git a/src/libstore/content-address.cc b/src/libstore/content-address.cc
index 90a3ad1f5..cf32ccdc4 100644
--- a/src/libstore/content-address.cc
+++ b/src/libstore/content-address.cc
@@ -31,10 +31,10 @@ std::string makeFixedOutputCA(FileIngestionMethod method, const Hash & hash)
std::string renderContentAddress(ContentAddress ca)
{
return std::visit(overloaded {
- [](TextHash th) {
+ [](TextHash & th) {
return "text:" + th.hash.to_string(Base32, true);
},
- [](FixedOutputHash fsh) {
+ [](FixedOutputHash & fsh) {
return makeFixedOutputCA(fsh.method, fsh.hash);
}
}, ca);
@@ -43,10 +43,10 @@ std::string renderContentAddress(ContentAddress ca)
std::string renderContentAddressMethod(ContentAddressMethod cam)
{
return std::visit(overloaded {
- [](TextHashMethod &th) {
+ [](TextHashMethod & th) {
return std::string{"text:"} + printHashType(htSHA256);
},
- [](FixedOutputHashMethod &fshm) {
+ [](FixedOutputHashMethod & fshm) {
return "fixed:" + makeFileIngestionPrefix(fshm.fileIngestionMethod) + printHashType(fshm.hashType);
}
}, cam);
@@ -104,12 +104,12 @@ ContentAddress parseContentAddress(std::string_view rawCa) {
return std::visit(
overloaded {
- [&](TextHashMethod thm) {
+ [&](TextHashMethod & thm) {
return ContentAddress(TextHash {
.hash = Hash::parseNonSRIUnprefixed(rest, htSHA256)
});
},
- [&](FixedOutputHashMethod fohMethod) {
+ [&](FixedOutputHashMethod & fohMethod) {
return ContentAddress(FixedOutputHash {
.method = fohMethod.fileIngestionMethod,
.hash = Hash::parseNonSRIUnprefixed(rest, std::move(fohMethod.hashType)),
@@ -120,8 +120,10 @@ ContentAddress parseContentAddress(std::string_view rawCa) {
ContentAddressMethod parseContentAddressMethod(std::string_view caMethod)
{
- std::string_view asPrefix {std::string{caMethod} + ":"};
- return parseContentAddressMethodPrefix(asPrefix);
+ std::string asPrefix = std::string{caMethod} + ":";
+ // parseContentAddressMethodPrefix takes its argument by reference
+ std::string_view asPrefixView = asPrefix;
+ return parseContentAddressMethodPrefix(asPrefixView);
}
std::optional<ContentAddress> parseContentAddressOpt(std::string_view rawCaOpt)
@@ -137,10 +139,10 @@ std::string renderContentAddress(std::optional<ContentAddress> ca)
Hash getContentAddressHash(const ContentAddress & ca)
{
return std::visit(overloaded {
- [](TextHash th) {
+ [](const TextHash & th) {
return th.hash;
},
- [](FixedOutputHash fsh) {
+ [](const FixedOutputHash & fsh) {
return fsh.hash;
}
}, ca);
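
These hunks (like the ones in derivations.cc below) change the std::visit lambdas to take the variant alternatives by const reference instead of by value, avoiding a copy of the alternative on every visit. A compact sketch of the overloaded-visitor idiom with stand-in types, not Nix's ContentAddress:

    #include <iostream>
    #include <string>
    #include <variant>

    template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
    template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

    struct TextHash { std::string hash; };
    struct FixedOutputHash { std::string hash; };

    using ContentAddress = std::variant<TextHash, FixedOutputHash>;

    std::string render(const ContentAddress & ca)
    {
        return std::visit(overloaded {
            // const references: no copy of the alternative is made per visit
            [](const TextHash & th) { return "text:" + th.hash; },
            [](const FixedOutputHash & fsh) { return "fixed:" + fsh.hash; },
        }, ca);
    }

    int main()
    {
        std::cout << render(TextHash{"abc"}) << "\n";
    }
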
diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc
index d68ff64d7..101aa13a5 100644
--- a/src/libstore/daemon.cc
+++ b/src/libstore/daemon.cc
@@ -4,7 +4,6 @@
#include "store-api.hh"
#include "path-with-outputs.hh"
#include "finally.hh"
-#include "affinity.hh"
#include "archive.hh"
#include "derivations.hh"
#include "args.hh"
@@ -70,7 +69,7 @@ struct TunnelLogger : public Logger
StringSink buf;
buf << STDERR_NEXT << (fs.s + "\n");
- enqueueMsg(*buf.s);
+ enqueueMsg(buf.s);
}
void logEI(const ErrorInfo & ei) override
@@ -82,7 +81,7 @@ struct TunnelLogger : public Logger
StringSink buf;
buf << STDERR_NEXT << oss.str();
- enqueueMsg(*buf.s);
+ enqueueMsg(buf.s);
}
/* startWork() means that we're starting an operation for which we
@@ -130,7 +129,7 @@ struct TunnelLogger : public Logger
StringSink buf;
buf << STDERR_START_ACTIVITY << act << lvl << type << s << fields << parent;
- enqueueMsg(*buf.s);
+ enqueueMsg(buf.s);
}
void stopActivity(ActivityId act) override
@@ -138,7 +137,7 @@ struct TunnelLogger : public Logger
if (GET_PROTOCOL_MINOR(clientVersion) < 20) return;
StringSink buf;
buf << STDERR_STOP_ACTIVITY << act;
- enqueueMsg(*buf.s);
+ enqueueMsg(buf.s);
}
void result(ActivityId act, ResultType type, const Fields & fields) override
@@ -146,7 +145,7 @@ struct TunnelLogger : public Logger
if (GET_PROTOCOL_MINOR(clientVersion) < 20) return;
StringSink buf;
buf << STDERR_RESULT << act << type << fields;
- enqueueMsg(*buf.s);
+ enqueueMsg(buf.s);
}
};
@@ -227,8 +226,15 @@ struct ClientSettings
try {
if (name == "ssh-auth-sock") // obsolete
;
+ else if (name == settings.experimentalFeatures.name) {
+ // We don’t want to forward the experimental features to
+ // the daemon, as that could cause some pretty weird stuff
+ if (parseFeatures(tokenizeString<StringSet>(value)) != settings.experimentalFeatures.get())
+ debug("Ignoring the client-specified experimental features");
+ }
else if (trusted
|| name == settings.buildTimeout.name
+ || name == settings.buildRepeat.name
|| name == "connect-timeout"
|| (name == "builders" && value == ""))
settings.set(name, value);
@@ -389,16 +395,14 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
FramedSource source(from);
// TODO this is essentially RemoteStore::addCAToStore. Move it up to Store.
return std::visit(overloaded {
- [&](TextHashMethod &_) {
+ [&](TextHashMethod &) {
// We could stream this by changing Store
std::string contents = source.drain();
auto path = store->addTextToStore(name, contents, refs, repair);
return store->queryPathInfo(path);
},
- [&](FixedOutputHashMethod &fohm) {
- if (!refs.empty())
- throw UnimplementedError("cannot yet have refs with flat or nar-hashed data");
- auto path = store->addToStoreFromDump(source, name, fohm.fileIngestionMethod, fohm.hashType, repair);
+ [&](FixedOutputHashMethod & fohm) {
+ auto path = store->addToStoreFromDump(source, name, fohm.fileIngestionMethod, fohm.hashType, repair, refs);
return store->queryPathInfo(path);
},
}, contentAddressMethod);
@@ -426,25 +430,30 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
hashAlgo = parseHashType(hashAlgoRaw);
}
- StringSink saved;
- TeeSource savedNARSource(from, saved);
- RetrieveRegularNARSink savedRegular { saved };
-
- if (method == FileIngestionMethod::Recursive) {
- /* Get the entire NAR dump from the client and save it to
- a string so that we can pass it to
- addToStoreFromDump(). */
- ParseSink sink; /* null sink; just parse the NAR */
- parseDump(sink, savedNARSource);
- } else
- parseDump(savedRegular, from);
-
+ auto dumpSource = sinkToSource([&](Sink & saved) {
+ if (method == FileIngestionMethod::Recursive) {
+ /* We parse the NAR dump through into `saved` unmodified,
+ so why all this extra work? We still parse the NAR so
+ that we aren't sending arbitrary data to `saved`
+ unwittingly`, and we know when the NAR ends so we don't
+ consume the rest of `from` and can't parse another
+ command. (We don't trust `addToStoreFromDump` to not
+ eagerly consume the entire stream it's given, past the
+ length of the Nar. */
+ TeeSource savedNARSource(from, saved);
+ ParseSink sink; /* null sink; just parse the NAR */
+ parseDump(sink, savedNARSource);
+ } else {
+ /* Incrementally parse the NAR file, stripping the
+ metadata, and streaming the sole file we expect into
+ `saved`. */
+ RetrieveRegularNARSink savedRegular { saved };
+ parseDump(savedRegular, from);
+ if (!savedRegular.regular) throw Error("regular file expected");
+ }
+ });
logger->startWork();
- if (!savedRegular.regular) throw Error("regular file expected");
-
- // FIXME: try to stream directly from `from`.
- StringSource dumpSource { *saved.s };
- auto path = store->addToStoreFromDump(dumpSource, baseName, method, hashAlgo);
+ auto path = store->addToStoreFromDump(*dumpSource, baseName, method, hashAlgo);
logger->stopWork();
to << store->printStorePath(path);
@@ -459,10 +468,12 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
dontCheckSigs = false;
logger->startWork();
- FramedSource source(from);
- store->addMultipleToStore(source,
- RepairFlag{repair},
- dontCheckSigs ? NoCheckSigs : CheckSigs);
+ {
+ FramedSource source(from);
+ store->addMultipleToStore(source,
+ RepairFlag{repair},
+ dontCheckSigs ? NoCheckSigs : CheckSigs);
+ }
logger->stopWork();
break;
}
@@ -618,9 +629,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
+ // Obsolete.
case wopSyncWithGC: {
logger->startWork();
- store->syncWithGC();
logger->stopWork();
to << 1;
break;
@@ -843,14 +854,14 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
else {
std::unique_ptr<Source> source;
+ StringSink saved;
if (GET_PROTOCOL_MINOR(clientVersion) >= 21)
source = std::make_unique<TunnelSource>(from, to);
else {
- StringSink saved;
TeeSource tee { from, saved };
ParseSink ether;
parseDump(ether, tee);
- source = std::make_unique<StringSource>(std::move(*saved.s));
+ source = std::make_unique<StringSource>(saved.s);
}
logger->startWork();
@@ -911,6 +922,22 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
break;
}
+ case wopAddBuildLog: {
+ StorePath path{readString(from)};
+ logger->startWork();
+ if (!trusted)
+ throw Error("you are not privileged to add logs");
+ {
+ FramedSource source(from);
+ StringSink sink;
+ source.drainInto(sink);
+ store->addBuildLog(path, sink.s);
+ }
+ logger->stopWork();
+ to << 1;
+ break;
+ }
+
default:
throw Error("invalid operation %1%", op);
}
@@ -946,12 +973,12 @@ void processConnection(
Finally finally([&]() {
_isInterrupted = false;
- prevLogger->log(lvlDebug, fmt("%d operations", opCount));
+ printMsgUsing(prevLogger, lvlDebug, "%d operations", opCount);
});
if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from)) {
- auto affinity = readInt(from);
- setAffinityTo(affinity);
+ // Obsolete CPU affinity.
+ readInt(from);
}
readInt(from); // obsolete reserveSpace
@@ -979,6 +1006,8 @@ void processConnection(
break;
}
+ printMsgUsing(prevLogger, lvlDebug, "received daemon op %d", op);
+
opCount++;
try {
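
The two worker-op hunks above wrap FramedSource in its own block so that its destructor, which drains any frames still pending on the connection, runs before logger->stopWork() replies to the client; otherwise the protocol could desynchronise. A tiny sketch of that destructor-ordering point (generic RAII, not the Nix daemon protocol):

    #include <iostream>

    // Stand-in for FramedSource: pretend its destructor must drain whatever
    // is left of the current framed message off the connection.
    struct FramedReader {
        ~FramedReader() { std::cout << "[remaining frames drained]\n"; }
    };

    void stopWork() { std::cout << "stopWork sent to the client\n"; }

    int main()
    {
        {
            FramedReader source;   // scoped like `{ FramedSource source(from); ... }`
            std::cout << "store->addMultipleToStore(source, ...)\n";
        }                          // destructor runs here, before the reply below
        stopWork();
    }
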
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index 899475860..b926bb711 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -10,18 +10,18 @@ namespace nix {
std::optional<StorePath> DerivationOutput::path(const Store & store, std::string_view drvName, std::string_view outputName) const
{
return std::visit(overloaded {
- [](DerivationOutputInputAddressed doi) -> std::optional<StorePath> {
+ [](const DerivationOutputInputAddressed & doi) -> std::optional<StorePath> {
return { doi.path };
},
- [&](DerivationOutputCAFixed dof) -> std::optional<StorePath> {
+ [&](const DerivationOutputCAFixed & dof) -> std::optional<StorePath> {
return {
dof.path(store, drvName, outputName)
};
},
- [](DerivationOutputCAFloating dof) -> std::optional<StorePath> {
+ [](const DerivationOutputCAFloating & dof) -> std::optional<StorePath> {
return std::nullopt;
},
- [](DerivationOutputDeferred) -> std::optional<StorePath> {
+ [](const DerivationOutputDeferred &) -> std::optional<StorePath> {
return std::nullopt;
},
}, output);
@@ -187,7 +187,7 @@ static DerivationOutput parseDerivationOutput(const Store & store,
},
};
} else {
- settings.requireExperimentalFeature("ca-derivations");
+ settings.requireExperimentalFeature(Xp::CaDerivations);
assert(pathS == "");
return DerivationOutput {
.output = DerivationOutputCAFloating {
@@ -332,22 +332,22 @@ string Derivation::unparse(const Store & store, bool maskOutputs,
if (first) first = false; else s += ',';
s += '('; printUnquotedString(s, i.first);
std::visit(overloaded {
- [&](DerivationOutputInputAddressed doi) {
+ [&](const DerivationOutputInputAddressed & doi) {
s += ','; printUnquotedString(s, maskOutputs ? "" : store.printStorePath(doi.path));
s += ','; printUnquotedString(s, "");
s += ','; printUnquotedString(s, "");
},
- [&](DerivationOutputCAFixed dof) {
+ [&](const DerivationOutputCAFixed & dof) {
s += ','; printUnquotedString(s, maskOutputs ? "" : store.printStorePath(dof.path(store, name, i.first)));
s += ','; printUnquotedString(s, dof.hash.printMethodAlgo());
s += ','; printUnquotedString(s, dof.hash.hash.to_string(Base16, false));
},
- [&](DerivationOutputCAFloating dof) {
+ [&](const DerivationOutputCAFloating & dof) {
s += ','; printUnquotedString(s, "");
s += ','; printUnquotedString(s, makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
s += ','; printUnquotedString(s, "");
},
- [&](DerivationOutputDeferred) {
+ [&](const DerivationOutputDeferred &) {
s += ','; printUnquotedString(s, "");
s += ','; printUnquotedString(s, "");
s += ','; printUnquotedString(s, "");
@@ -420,13 +420,13 @@ DerivationType BasicDerivation::type() const
std::optional<HashType> floatingHashType;
for (auto & i : outputs) {
std::visit(overloaded {
- [&](DerivationOutputInputAddressed _) {
+ [&](const DerivationOutputInputAddressed &) {
inputAddressedOutputs.insert(i.first);
},
- [&](DerivationOutputCAFixed _) {
+ [&](const DerivationOutputCAFixed &) {
fixedCAOutputs.insert(i.first);
},
- [&](DerivationOutputCAFloating dof) {
+ [&](const DerivationOutputCAFloating & dof) {
floatingCAOutputs.insert(i.first);
if (!floatingHashType) {
floatingHashType = dof.hashType;
@@ -435,7 +435,7 @@ DerivationType BasicDerivation::type() const
throw Error("All floating outputs must use the same hash type");
}
},
- [&](DerivationOutputDeferred _) {
+ [&](const DerivationOutputDeferred &) {
deferredIAOutputs.insert(i.first);
},
}, i.second.output);
@@ -538,15 +538,15 @@ DrvHashModulo hashDerivationModulo(Store & store, const Derivation & drv, bool m
const auto & res = pathDerivationModulo(store, i.first);
std::visit(overloaded {
// Regular non-CA derivation, replace derivation
- [&](Hash drvHash) {
+ [&](const Hash & drvHash) {
inputs2.insert_or_assign(drvHash.to_string(Base16, false), i.second);
},
- [&](DeferredHash deferredHash) {
+ [&](const DeferredHash & deferredHash) {
isDeferred = true;
inputs2.insert_or_assign(deferredHash.hash.to_string(Base16, false), i.second);
},
// CA derivation's output hashes
- [&](CaOutputHashes outputHashes) {
+ [&](const CaOutputHashes & outputHashes) {
std::set<std::string> justOut = { "out" };
for (auto & output : i.second) {
/* Put each one in with a single "out" output.. */
@@ -572,17 +572,17 @@ std::map<std::string, Hash> staticOutputHashes(Store & store, const Derivation &
{
std::map<std::string, Hash> res;
std::visit(overloaded {
- [&](Hash drvHash) {
+ [&](const Hash & drvHash) {
for (auto & outputName : drv.outputNames()) {
res.insert({outputName, drvHash});
}
},
- [&](DeferredHash deferredHash) {
+ [&](const DeferredHash & deferredHash) {
for (auto & outputName : drv.outputNames()) {
res.insert({outputName, deferredHash.hash});
}
},
- [&](CaOutputHashes outputHashes) {
+ [&](const CaOutputHashes & outputHashes) {
res = outputHashes;
},
}, hashDerivationModulo(store, drv, true));
@@ -666,22 +666,22 @@ void writeDerivation(Sink & out, const Store & store, const BasicDerivation & dr
for (auto & i : drv.outputs) {
out << i.first;
std::visit(overloaded {
- [&](DerivationOutputInputAddressed doi) {
+ [&](const DerivationOutputInputAddressed & doi) {
out << store.printStorePath(doi.path)
<< ""
<< "";
},
- [&](DerivationOutputCAFixed dof) {
+ [&](const DerivationOutputCAFixed & dof) {
out << store.printStorePath(dof.path(store, drv.name, i.first))
<< dof.hash.printMethodAlgo()
<< dof.hash.hash.to_string(Base16, false);
},
- [&](DerivationOutputCAFloating dof) {
+ [&](const DerivationOutputCAFloating & dof) {
out << ""
<< (makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType))
<< "";
},
- [&](DerivationOutputDeferred) {
+ [&](const DerivationOutputDeferred &) {
out << ""
<< ""
<< "";
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index 2df440536..b1cb68194 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -138,8 +138,8 @@ struct Derivation : BasicDerivation
/* Return the underlying basic derivation but with these changes:
- 1. Input drvs are emptied, but the outputs of them that were used are
- added directly to input sources.
+ 1. Input drvs are emptied, but the outputs of them that were used are
+ added directly to input sources.
2. Input placeholders are replaced with realized input store paths. */
std::optional<BasicDerivation> tryResolve(Store & store);
diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc
index 8da81d0ac..3d188e981 100644
--- a/src/libstore/derived-path.cc
+++ b/src/libstore/derived-path.cc
@@ -24,8 +24,8 @@ StorePathSet BuiltPath::outPaths() const
{
return std::visit(
overloaded{
- [](BuiltPath::Opaque p) { return StorePathSet{p.path}; },
- [](BuiltPath::Built b) {
+ [](const BuiltPath::Opaque & p) { return StorePathSet{p.path}; },
+ [](const BuiltPath::Built & b) {
StorePathSet res;
for (auto & [_, path] : b.outputs)
res.insert(path);
@@ -94,13 +94,13 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
RealisedPath::Set res;
std::visit(
overloaded{
- [&](BuiltPath::Opaque p) { res.insert(p.path); },
- [&](BuiltPath::Built p) {
+ [&](const BuiltPath::Opaque & p) { res.insert(p.path); },
+ [&](const BuiltPath::Built & p) {
auto drvHashes =
staticOutputHashes(store, store.readDerivation(p.drvPath));
for (auto& [outputName, outputPath] : p.outputs) {
if (settings.isExperimentalFeatureEnabled(
- "ca-derivations")) {
+ Xp::CaDerivations)) {
auto thisRealisation = store.queryRealisation(
DrvOutput{drvHashes.at(outputName), outputName});
                assert(thisRealisation); // We’ve built it, so we must have the realisation
diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc
index 36c6e725c..62dc21c59 100644
--- a/src/libstore/dummy-store.cc
+++ b/src/libstore/dummy-store.cc
@@ -50,8 +50,9 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store
void narFromPath(const StorePath & path, Sink & sink) override
{ unsupported("narFromPath"); }
- std::optional<const Realisation> queryRealisation(const DrvOutput&) override
- { unsupported("queryRealisation"); }
+ void queryRealisationUncached(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override
+ { callback(nullptr); }
};
static RegisterStoreImplementation<DummyStore, DummyStoreConfig> regDummyStore;
diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc
index 02c839520..9875da909 100644
--- a/src/libstore/export-import.cc
+++ b/src/libstore/export-import.cc
@@ -75,20 +75,20 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs)
auto references = worker_proto::read(*this, source, Phantom<StorePathSet> {});
auto deriver = readString(source);
- auto narHash = hashString(htSHA256, *saved.s);
+ auto narHash = hashString(htSHA256, saved.s);
ValidPathInfo info { path, narHash };
if (deriver != "")
info.deriver = parseStorePath(deriver);
info.references = references;
- info.narSize = saved.s->size();
+ info.narSize = saved.s.size();
// Ignore optional legacy signature.
if (readInt(source) == 1)
readString(source);
// Can't use underlying source, which would have been exhausted
- auto source = StringSource { *saved.s };
+ auto source = StringSource(saved.s);
addToStore(info, source, NoRepair, checkSigs);
res.push_back(info.path);
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index 2cf35ec83..6b62311cf 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -106,7 +106,7 @@ struct curlFileTransfer : public FileTransfer
this->request.dataCallback(data);
}
} else
- this->result.data->append(data);
+ this->result.data.append(data);
})
{
if (!request.expectedETag.empty())
@@ -195,7 +195,7 @@ struct curlFileTransfer : public FileTransfer
std::smatch match;
if (std::regex_match(line, match, statusLine)) {
result.etag = "";
- result.data = std::make_shared<std::string>();
+ result.data.clear();
result.bodySize = 0;
statusMsg = trim(match[1]);
acceptRanges = false;
@@ -340,7 +340,7 @@ struct curlFileTransfer : public FileTransfer
if (writtenToSink)
curl_easy_setopt(req, CURLOPT_RESUME_FROM_LARGE, writtenToSink);
- result.data = std::make_shared<std::string>();
+ result.data.clear();
result.bodySize = 0;
}
@@ -434,21 +434,21 @@ struct curlFileTransfer : public FileTransfer
attempt++;
- std::shared_ptr<std::string> response;
+ std::optional<std::string> response;
if (errorSink)
- response = errorSink->s;
+ response = std::move(errorSink->s);
auto exc =
code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
- ? FileTransferError(Interrupted, response, "%s of '%s' was interrupted", request.verb(), request.uri)
+ ? FileTransferError(Interrupted, std::move(response), "%s of '%s' was interrupted", request.verb(), request.uri)
: httpStatus != 0
? FileTransferError(err,
- response,
+ std::move(response),
fmt("unable to %s '%s': HTTP error %d ('%s')",
request.verb(), request.uri, httpStatus, statusMsg)
+ (code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
)
: FileTransferError(err,
- response,
+ std::move(response),
fmt("unable to %s '%s': %s (%d)",
request.verb(), request.uri, curl_easy_strerror(code), code));
@@ -544,6 +544,8 @@ struct curlFileTransfer : public FileTransfer
stopWorkerThread();
});
+ unshareFilesystem();
+
std::map<CURL *, std::shared_ptr<TransferItem>> items;
bool quit = false;
@@ -703,7 +705,7 @@ struct curlFileTransfer : public FileTransfer
FileTransferResult res;
if (!s3Res.data)
throw FileTransferError(NotFound, nullptr, "S3 object '%s' does not exist", request.uri);
- res.data = s3Res.data;
+ res.data = std::move(*s3Res.data);
callback(std::move(res));
#else
throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri);
@@ -716,15 +718,24 @@ struct curlFileTransfer : public FileTransfer
}
};
+ref<curlFileTransfer> makeCurlFileTransfer()
+{
+ return make_ref<curlFileTransfer>();
+}
+
ref<FileTransfer> getFileTransfer()
{
- static ref<FileTransfer> fileTransfer = makeFileTransfer();
+ static ref<curlFileTransfer> fileTransfer = makeCurlFileTransfer();
+
+ if (fileTransfer->state_.lock()->quit)
+ fileTransfer = makeCurlFileTransfer();
+
return fileTransfer;
}
ref<FileTransfer> makeFileTransfer()
{
- return make_ref<curlFileTransfer>();
+ return makeCurlFileTransfer();
}
std::future<FileTransferResult> FileTransfer::enqueueFileTransfer(const FileTransferRequest & request)
@@ -848,7 +859,7 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink)
}
template<typename... Args>
-FileTransferError::FileTransferError(FileTransfer::Error error, std::shared_ptr<string> response, const Args & ... args)
+FileTransferError::FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args)
: Error(args...), error(error), response(response)
{
const auto hf = hintfmt(args...);
diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh
index 45d9ccf89..3e61b23b1 100644
--- a/src/libstore/filetransfer.hh
+++ b/src/libstore/filetransfer.hh
@@ -59,7 +59,7 @@ struct FileTransferRequest
unsigned int baseRetryTimeMs = 250;
ActivityId parentAct;
bool decompress = true;
- std::shared_ptr<std::string> data;
+ std::optional<std::string> data;
std::string mimeType;
std::function<void(std::string_view data)> dataCallback;
@@ -77,7 +77,7 @@ struct FileTransferResult
bool cached = false;
std::string etag;
std::string effectiveUri;
- std::shared_ptr<std::string> data;
+ std::string data;
uint64_t bodySize = 0;
};
@@ -119,10 +119,10 @@ class FileTransferError : public Error
{
public:
FileTransfer::Error error;
- std::shared_ptr<string> response; // intentionally optional
+ std::optional<string> response; // intentionally optional
template<typename... Args>
- FileTransferError(FileTransfer::Error error, std::shared_ptr<string> response, const Args & ... args);
+ FileTransferError(FileTransfer::Error error, std::optional<string> response, const Args & ... args);
virtual const char* sname() const override { return "FileTransferError"; }
};
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index 5a62c6529..e35199b3d 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -10,48 +10,22 @@
#include <regex>
#include <random>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/statvfs.h>
+#include <climits>
#include <errno.h>
#include <fcntl.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/types.h>
+#include <sys/un.h>
#include <unistd.h>
-#include <climits>
namespace nix {
-static string gcLockName = "gc.lock";
-static string gcRootsDir = "gcroots";
-
-
-/* Acquire the global GC lock. This is used to prevent new Nix
- processes from starting after the temporary root files have been
- read. To be precise: when they try to create a new temporary root
- file, they will block until the garbage collector has finished /
- yielded the GC lock. */
-AutoCloseFD LocalStore::openGCLock(LockType lockType)
-{
- Path fnGCLock = (format("%1%/%2%")
- % stateDir % gcLockName).str();
-
- debug(format("acquiring global GC lock '%1%'") % fnGCLock);
-
- AutoCloseFD fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
- if (!fdGCLock)
- throw SysError("opening global GC lock '%1%'", fnGCLock);
-
- if (!lockFile(fdGCLock.get(), lockType, false)) {
- printInfo("waiting for the big garbage collector lock...");
- lockFile(fdGCLock.get(), lockType, true);
- }
-
- /* !!! Restrict read permission on the GC root. Otherwise any
- process that can open the file for reading can DoS the
- collector. */
-
- return fdGCLock;
-}
+static std::string gcSocketPath = "/gc-socket/socket";
+static std::string gcRootsDir = "gcroots";
static void makeSymlink(const Path & link, const Path & target)
@@ -71,12 +45,6 @@ static void makeSymlink(const Path & link, const Path & target)
}
-void LocalStore::syncWithGC()
-{
- AutoCloseFD fdGCLock = openGCLock(ltRead);
-}
-
-
void LocalStore::addIndirectRoot(const Path & path)
{
string hash = hashString(htSHA1, path).to_string(Base32, false);
@@ -95,6 +63,12 @@ Path LocalFSStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot
"creating a garbage collector root (%1%) in the Nix store is forbidden "
"(are you running nix-build inside the store?)", gcRoot);
+ /* Register this root with the garbage collector, if it's
+ running. This should be superfluous since the caller should
+ have registered this root already, but let's be on the safe
+ side. */
+ addTempRoot(storePath);
+
/* Don't clobber the link if it already exists and doesn't
point to the Nix store. */
if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
@@ -102,11 +76,6 @@ Path LocalFSStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot
makeSymlink(gcRoot, printStorePath(storePath));
addIndirectRoot(gcRoot);
- /* Grab the global GC root, causing us to block while a GC is in
- progress. This prevents the set of permanent roots from
- increasing while a GC is in progress. */
- syncWithGC();
-
return gcRoot;
}
@@ -119,8 +88,6 @@ void LocalStore::addTempRoot(const StorePath & path)
if (!state->fdTempRoots) {
while (1) {
- AutoCloseFD fdGCLock = openGCLock(ltRead);
-
if (pathExists(fnTempRoots))
/* It *must* be stale, since there can be no two
processes with the same pid. */
@@ -128,10 +95,8 @@ void LocalStore::addTempRoot(const StorePath & path)
state->fdTempRoots = openLockFile(fnTempRoots, true);
- fdGCLock = -1;
-
- debug(format("acquiring read lock on '%1%'") % fnTempRoots);
- lockFile(state->fdTempRoots.get(), ltRead, true);
+ debug("acquiring write lock on '%s'", fnTempRoots);
+ lockFile(state->fdTempRoots.get(), ltWrite, true);
/* Check whether the garbage collector didn't get in our
way. */
@@ -147,24 +112,65 @@ void LocalStore::addTempRoot(const StorePath & path)
}
- /* Upgrade the lock to a write lock. This will cause us to block
- if the garbage collector is holding our lock. */
- debug(format("acquiring write lock on '%1%'") % fnTempRoots);
- lockFile(state->fdTempRoots.get(), ltWrite, true);
+ if (!state->fdGCLock)
+ state->fdGCLock = openGCLock();
+
+ restart:
+ FdLock gcLock(state->fdGCLock.get(), ltRead, false, "");
+
+ if (!gcLock.acquired) {
+ /* We couldn't get a shared global GC lock, so the garbage
+ collector is running. So we have to connect to the garbage
+ collector and inform it about our root. */
+ if (!state->fdRootsSocket) {
+ auto socketPath = stateDir.get() + gcSocketPath;
+ debug("connecting to '%s'", socketPath);
+ state->fdRootsSocket = createUnixDomainSocket();
+ try {
+ nix::connect(state->fdRootsSocket.get(), socketPath);
+ } catch (SysError & e) {
+ /* The garbage collector may have exited, so we need to
+ restart. */
+ if (e.errNo == ECONNREFUSED) {
+ debug("GC socket connection refused");
+ state->fdRootsSocket.close();
+ goto restart;
+ }
+ }
+ }
+ try {
+ debug("sending GC root '%s'", printStorePath(path));
+ writeFull(state->fdRootsSocket.get(), printStorePath(path) + "\n", false);
+ char c;
+ readFull(state->fdRootsSocket.get(), &c, 1);
+ assert(c == '1');
+ debug("got ack for GC root '%s'", printStorePath(path));
+ } catch (SysError & e) {
+ /* The garbage collector may have exited, so we need to
+ restart. */
+ if (e.errNo == EPIPE) {
+ debug("GC socket disconnected");
+ state->fdRootsSocket.close();
+ goto restart;
+ }
+ } catch (EndOfFile & e) {
+ debug("GC socket disconnected");
+ state->fdRootsSocket.close();
+ goto restart;
+ }
+ }
+
+ /* Append the store path to the temporary roots file. */
string s = printStorePath(path) + '\0';
writeFull(state->fdTempRoots.get(), s);
-
- /* Downgrade to a read lock. */
- debug(format("downgrading to read lock on '%1%'") % fnTempRoots);
- lockFile(state->fdTempRoots.get(), ltRead, true);
}
static std::string censored = "{censored}";
-void LocalStore::findTempRoots(FDs & fds, Roots & tempRoots, bool censor)
+void LocalStore::findTempRoots(Roots & tempRoots, bool censor)
{
/* Read the `temproots' directory for per-process temporary root
files. */
@@ -179,35 +185,25 @@ void LocalStore::findTempRoots(FDs & fds, Roots & tempRoots, bool censor)
pid_t pid = std::stoi(i.name);
debug(format("reading temporary root file '%1%'") % path);
- FDPtr fd(new AutoCloseFD(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666)));
- if (!*fd) {
+ AutoCloseFD fd(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666));
+ if (!fd) {
/* It's okay if the file has disappeared. */
if (errno == ENOENT) continue;
throw SysError("opening temporary roots file '%1%'", path);
}
- /* This should work, but doesn't, for some reason. */
- //FDPtr fd(new AutoCloseFD(openLockFile(path, false)));
- //if (*fd == -1) continue;
-
/* Try to acquire a write lock without blocking. This can
only succeed if the owning process has died. In that case
we don't care about its temporary roots. */
- if (lockFile(fd->get(), ltWrite, false)) {
+ if (lockFile(fd.get(), ltWrite, false)) {
printInfo("removing stale temporary roots file '%1%'", path);
unlink(path.c_str());
- writeFull(fd->get(), "d");
+ writeFull(fd.get(), "d");
continue;
}
- /* Acquire a read lock. This will prevent the owning process
- from upgrading to a write lock, therefore it will block in
- addTempRoot(). */
- debug(format("waiting for read lock on '%1%'") % path);
- lockFile(fd->get(), ltRead, true);
-
/* Read the entire file. */
- string contents = readFile(fd->get());
+ string contents = readFile(fd.get());
/* Extract the roots. */
string::size_type pos = 0, end;
@@ -218,8 +214,6 @@ void LocalStore::findTempRoots(FDs & fds, Roots & tempRoots, bool censor)
tempRoots[parseStorePath(root)].emplace(censor ? censored : fmt("{temp:%d}", pid));
pos = end + 1;
}
-
- fds.push_back(fd); /* keep open */
}
}
@@ -304,8 +298,7 @@ Roots LocalStore::findRoots(bool censor)
Roots roots;
findRootsNoTemp(roots, censor);
- FDs fds;
- findTempRoots(fds, roots, censor);
+ findTempRoots(roots, censor);
return roots;
}
@@ -341,6 +334,7 @@ static string quoteRegexChars(const string & raw)
return std::regex_replace(raw, specialRegex, R"(\$&)");
}
+#if __linux__
static void readFileRoots(const char * path, UncheckedRoots & roots)
{
try {
@@ -350,6 +344,7 @@ static void readFileRoots(const char * path, UncheckedRoots & roots)
throw;
}
}
+#endif
void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
{
@@ -431,7 +426,7 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
}
#endif
-#if defined(__linux__)
+#if __linux__
readFileRoots("/proc/sys/kernel/modprobe", unchecked);
readFileRoots("/proc/sys/kernel/fbsplash", unchecked);
readFileRoots("/proc/sys/kernel/poweroff_cmd", unchecked);
@@ -455,391 +450,408 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
struct GCLimitReached { };
-struct LocalStore::GCState
-{
- const GCOptions & options;
- GCResults & results;
- StorePathSet roots;
- StorePathSet tempRoots;
- StorePathSet dead;
- StorePathSet alive;
- bool gcKeepOutputs;
- bool gcKeepDerivations;
- uint64_t bytesInvalidated;
- bool moveToTrash = true;
- bool shouldDelete;
- GCState(const GCOptions & options, GCResults & results)
- : options(options), results(results), bytesInvalidated(0) { }
-};
-
-
-bool LocalStore::isActiveTempFile(const GCState & state,
- const Path & path, const string & suffix)
+void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
{
- return hasSuffix(path, suffix)
- && state.tempRoots.count(parseStorePath(string(path, 0, path.size() - suffix.size())));
-}
+ bool shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
+ bool gcKeepOutputs = settings.gcKeepOutputs;
+ bool gcKeepDerivations = settings.gcKeepDerivations;
+ StorePathSet roots, dead, alive;
-void LocalStore::deleteGarbage(GCState & state, const Path & path)
-{
- uint64_t bytesFreed;
- deletePath(path, bytesFreed);
- state.results.bytesFreed += bytesFreed;
-}
+ struct Shared
+ {
+ // The temp roots only store the hash part to make it easier to
+ // ignore suffixes like '.lock', '.chroot' and '.check'.
+ std::unordered_set<std::string> tempRoots;
+ // Hash part of the store path currently being deleted, if
+ // any.
+ std::optional<std::string> pending;
+ };
-void LocalStore::deletePathRecursive(GCState & state, const Path & path)
-{
- checkInterrupt();
-
- uint64_t size = 0;
-
- auto storePath = maybeParseStorePath(path);
- if (storePath && isValidPath(*storePath)) {
- StorePathSet referrers;
- queryReferrers(*storePath, referrers);
- for (auto & i : referrers)
- if (printStorePath(i) != path) deletePathRecursive(state, printStorePath(i));
- size = queryPathInfo(*storePath)->narSize;
- invalidatePathChecked(*storePath);
- }
+ Sync<Shared> _shared;
- Path realPath = realStoreDir + "/" + std::string(baseNameOf(path));
+ std::condition_variable wakeup;
- struct stat st;
- if (lstat(realPath.c_str(), &st)) {
- if (errno == ENOENT) return;
- throw SysError("getting status of %1%", realPath);
+ /* Using `--ignore-liveness' with `--delete' can have unintended
+ consequences if `keep-outputs' or `keep-derivations' are true
+ (the garbage collector will recurse into deleting the outputs
+ or derivers, respectively). So disable them. */
+ if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
+ gcKeepOutputs = false;
+ gcKeepDerivations = false;
}
- printInfo(format("deleting '%1%'") % path);
-
- state.results.paths.insert(path);
+ if (shouldDelete)
+ deletePath(reservedPath);
- /* If the path is not a regular file or symlink, move it to the
- trash directory. The move is to ensure that later (when we're
- not holding the global GC lock) we can delete the path without
- being afraid that the path has become alive again. Otherwise
- delete it right away. */
- if (state.moveToTrash && S_ISDIR(st.st_mode)) {
- // Estimate the amount freed using the narSize field. FIXME:
- // if the path was not valid, need to determine the actual
- // size.
- try {
- if (chmod(realPath.c_str(), st.st_mode | S_IWUSR) == -1)
- throw SysError("making '%1%' writable", realPath);
- Path tmp = trashDir + "/" + std::string(baseNameOf(path));
- if (rename(realPath.c_str(), tmp.c_str()))
- throw SysError("unable to rename '%1%' to '%2%'", realPath, tmp);
- state.bytesInvalidated += size;
- } catch (SysError & e) {
- if (e.errNo == ENOSPC) {
- printInfo(format("note: can't create move '%1%': %2%") % realPath % e.msg());
- deleteGarbage(state, realPath);
+ /* Acquire the global GC lock. Note: we don't use fdGCLock
+ here because then in auto-gc mode, another thread could
+ downgrade our exclusive lock. */
+ auto fdGCLock = openGCLock();
+ FdLock gcLock(fdGCLock.get(), ltWrite, true, "waiting for the big garbage collector lock...");
+
+ /* Start the server for receiving new roots. */
+ auto socketPath = stateDir.get() + gcSocketPath;
+ createDirs(dirOf(socketPath));
+ auto fdServer = createUnixDomainSocket(socketPath, 0666);
+
+ if (fcntl(fdServer.get(), F_SETFL, fcntl(fdServer.get(), F_GETFL) | O_NONBLOCK) == -1)
+ throw SysError("making socket '%1%' non-blocking", socketPath);
+
+ Pipe shutdownPipe;
+ shutdownPipe.create();
+
+ std::thread serverThread([&]() {
+ Sync<std::map<int, std::thread>> connections;
+
+ Finally cleanup([&]() {
+ debug("GC roots server shutting down");
+ while (true) {
+ auto item = remove_begin(*connections.lock());
+ if (!item) break;
+ auto & [fd, thread] = *item;
+ shutdown(fd, SHUT_RDWR);
+ thread.join();
}
- }
- } else
- deleteGarbage(state, realPath);
-
- if (state.results.bytesFreed + state.bytesInvalidated > state.options.maxFreed) {
- printInfo(format("deleted or invalidated more than %1% bytes; stopping") % state.options.maxFreed);
- throw GCLimitReached();
- }
-}
-
-
-bool LocalStore::canReachRoot(GCState & state, StorePathSet & visited, const StorePath & path)
-{
- if (visited.count(path)) return false;
-
- if (state.alive.count(path)) return true;
-
- if (state.dead.count(path)) return false;
-
- if (state.roots.count(path)) {
- debug("cannot delete '%1%' because it's a root", printStorePath(path));
- state.alive.insert(path);
- return true;
- }
-
- visited.insert(path);
-
- if (!isValidPath(path)) return false;
-
- StorePathSet incoming;
-
- /* Don't delete this path if any of its referrers are alive. */
- queryReferrers(path, incoming);
-
- /* If keep-derivations is set and this is a derivation, then
- don't delete the derivation if any of the outputs are alive. */
- if (state.gcKeepDerivations && path.isDerivation()) {
- for (auto & [name, maybeOutPath] : queryPartialDerivationOutputMap(path))
- if (maybeOutPath &&
- isValidPath(*maybeOutPath) &&
- queryPathInfo(*maybeOutPath)->deriver == path
- )
- incoming.insert(*maybeOutPath);
- }
-
- /* If keep-outputs is set, then don't delete this path if there
- are derivers of this path that are not garbage. */
- if (state.gcKeepOutputs) {
- auto derivers = queryValidDerivers(path);
- for (auto & i : derivers)
- incoming.insert(i);
- }
+ });
+
+ while (true) {
+ std::vector<struct pollfd> fds;
+ fds.push_back({.fd = shutdownPipe.readSide.get(), .events = POLLIN});
+ fds.push_back({.fd = fdServer.get(), .events = POLLIN});
+ auto count = poll(fds.data(), fds.size(), -1);
+ assert(count != -1);
+
+ if (fds[0].revents)
+ /* Parent is asking us to quit. */
+ break;
+
+ if (fds[1].revents) {
+ /* Accept a new connection. */
+ assert(fds[1].revents & POLLIN);
+ AutoCloseFD fdClient = accept(fdServer.get(), nullptr, nullptr);
+ if (!fdClient) continue;
+
+ debug("GC roots server accepted new client");
+
+ /* Process the connection in a separate thread. */
+ auto fdClient_ = fdClient.get();
+ std::thread clientThread([&, fdClient = std::move(fdClient)]() {
+ Finally cleanup([&]() {
+ auto conn(connections.lock());
+ auto i = conn->find(fdClient.get());
+ if (i != conn->end()) {
+ i->second.detach();
+ conn->erase(i);
+ }
+ });
+
+ /* On macOS, accepted sockets inherit the
+ non-blocking flag from the server socket, so
+ explicitly make it blocking. */
+ if (fcntl(fdClient.get(), F_SETFL, fcntl(fdClient.get(), F_GETFL) & ~O_NONBLOCK) == -1)
+ abort();
+
+ while (true) {
+ try {
+ auto path = readLine(fdClient.get());
+ auto storePath = maybeParseStorePath(path);
+ if (storePath) {
+ debug("got new GC root '%s'", path);
+ auto hashPart = std::string(storePath->hashPart());
+ auto shared(_shared.lock());
+ shared->tempRoots.insert(hashPart);
+ /* If this path is currently being
+ deleted, then we have to wait until
+ deletion is finished to ensure that
+ the client doesn't start
+ re-creating it before we're
+ done. FIXME: ideally we would use a
+ FD for this so we don't block the
+ poll loop. */
+ while (shared->pending == hashPart) {
+ debug("synchronising with deletion of path '%s'", path);
+ shared.wait(wakeup);
+ }
+ } else
+ printError("received garbage instead of a root from client");
+ writeFull(fdClient.get(), "1", false);
+ } catch (Error & e) {
+ debug("reading GC root from client: %s", e.msg());
+ break;
+ }
+ }
+ });
- for (auto & i : incoming)
- if (i != path)
- if (canReachRoot(state, visited, i)) {
- state.alive.insert(path);
- return true;
+ connections.lock()->insert({fdClient_, std::move(clientThread)});
}
+ }
+ });
- return false;
-}
-
-
-void LocalStore::tryToDelete(GCState & state, const Path & path)
-{
- checkInterrupt();
-
- auto realPath = realStoreDir + "/" + std::string(baseNameOf(path));
- if (realPath == linksDir || realPath == trashDir) return;
-
- //Activity act(*logger, lvlDebug, format("considering whether to delete '%1%'") % path);
-
- auto storePath = maybeParseStorePath(path);
-
- if (!storePath || !isValidPath(*storePath)) {
- /* A lock file belonging to a path that we're building right
- now isn't garbage. */
- if (isActiveTempFile(state, path, ".lock")) return;
+ Finally stopServer([&]() {
+ writeFull(shutdownPipe.writeSide.get(), "x", false);
+ wakeup.notify_all();
+ if (serverThread.joinable()) serverThread.join();
+ });
- /* Don't delete .chroot directories for derivations that are
- currently being built. */
- if (isActiveTempFile(state, path, ".chroot")) return;
+ /* Find the roots. Since we've grabbed the GC lock, the set of
+ permanent roots cannot increase now. */
+ printInfo("finding garbage collector roots...");
+ Roots rootMap;
+ if (!options.ignoreLiveness)
+ findRootsNoTemp(rootMap, true);
- /* Don't delete .check directories for derivations that are
- currently being built, because we may need to run
- diff-hook. */
- if (isActiveTempFile(state, path, ".check")) return;
- }
+ for (auto & i : rootMap) roots.insert(i.first);
- StorePathSet visited;
-
- if (storePath && canReachRoot(state, visited, *storePath)) {
- debug("cannot delete '%s' because it's still reachable", path);
- } else {
- /* No path we visited was a root, so everything is garbage.
- But we only delete ‘path’ and its referrers here so that
- ‘nix-store --delete’ doesn't have the unexpected effect of
- recursing into derivations and outputs. */
- for (auto & i : visited)
- state.dead.insert(i);
- if (state.shouldDelete)
- deletePathRecursive(state, path);
+ /* Read the temporary roots created before we acquired the global
+ GC root. Any new roots will be sent to our socket. */
+ Roots tempRoots;
+ findTempRoots(tempRoots, true);
+ for (auto & root : tempRoots) {
+ _shared.lock()->tempRoots.insert(std::string(root.first.hashPart()));
+ roots.insert(root.first);
}
-}
-
-/* Unlink all files in /nix/store/.links that have a link count of 1,
- which indicates that there are no other links and so they can be
- safely deleted. FIXME: race condition with optimisePath(): we
- might see a link count of 1 just before optimisePath() increases
- the link count. */
-void LocalStore::removeUnusedLinks(const GCState & state)
-{
- AutoCloseDir dir(opendir(linksDir.c_str()));
- if (!dir) throw SysError("opening directory '%1%'", linksDir);
+ /* Helper function that deletes a path from the store and throws
+ GCLimitReached if we've deleted enough garbage. */
+ auto deleteFromStore = [&](std::string_view baseName)
+ {
+ Path path = storeDir + "/" + std::string(baseName);
+ Path realPath = realStoreDir + "/" + std::string(baseName);
- int64_t actualSize = 0, unsharedSize = 0;
+ printInfo("deleting '%1%'", path);
- struct dirent * dirent;
- while (errno = 0, dirent = readdir(dir.get())) {
- checkInterrupt();
- string name = dirent->d_name;
- if (name == "." || name == "..") continue;
- Path path = linksDir + "/" + name;
+ results.paths.insert(path);
- auto st = lstat(path);
+ uint64_t bytesFreed;
+ deletePath(realPath, bytesFreed);
+ results.bytesFreed += bytesFreed;
- if (st.st_nlink != 1) {
- actualSize += st.st_size;
- unsharedSize += (st.st_nlink - 1) * st.st_size;
- continue;
+ if (results.bytesFreed > options.maxFreed) {
+ printInfo("deleted more than %d bytes; stopping", options.maxFreed);
+ throw GCLimitReached();
}
+ };
- printMsg(lvlTalkative, format("deleting unused link '%1%'") % path);
-
- if (unlink(path.c_str()) == -1)
- throw SysError("deleting '%1%'", path);
+ std::map<StorePath, StorePathSet> referrersCache;
- state.results.bytesFreed += st.st_size;
- }
+ /* Helper function that visits all paths reachable from `start`
+ via the referrer edges and, optionally, the deriver and
+ derivation-output edges. If none of those paths are roots, then
+ all visited paths are garbage and are deleted. */
+ auto deleteReferrersClosure = [&](const StorePath & start) {
+ StorePathSet visited;
+ std::queue<StorePath> todo;
- struct stat st;
- if (stat(linksDir.c_str(), &st) == -1)
- throw SysError("statting '%1%'", linksDir);
- int64_t overhead = st.st_blocks * 512ULL;
+ /* Wake up any GC client waiting for deletion of the paths in
+ 'visited' to finish. */
+ Finally releasePending([&]() {
+ auto shared(_shared.lock());
+ shared->pending.reset();
+ wakeup.notify_all();
+ });
- printInfo("note: currently hard linking saves %.2f MiB",
- ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
-}
+ auto enqueue = [&](const StorePath & path) {
+ if (visited.insert(path).second)
+ todo.push(path);
+ };
+ enqueue(start);
-void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
-{
- GCState state(options, results);
- state.gcKeepOutputs = settings.gcKeepOutputs;
- state.gcKeepDerivations = settings.gcKeepDerivations;
+ while (auto path = pop(todo)) {
+ checkInterrupt();
- /* Using `--ignore-liveness' with `--delete' can have unintended
- consequences if `keep-outputs' or `keep-derivations' are true
- (the garbage collector will recurse into deleting the outputs
- or derivers, respectively). So disable them. */
- if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
- state.gcKeepOutputs = false;
- state.gcKeepDerivations = false;
- }
+ /* Bail out if we've previously discovered that this path
+ is alive. */
+ if (alive.count(*path)) {
+ alive.insert(start);
+ return;
+ }
- state.shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
+ /* If we've previously deleted this path, we don't have to
+ handle it again. */
+ if (dead.count(*path)) continue;
- if (state.shouldDelete)
- deletePath(reservedPath);
+ auto markAlive = [&]()
+ {
+ alive.insert(*path);
+ alive.insert(start);
+ try {
+ StorePathSet closure;
+ computeFSClosure(*path, closure);
+ for (auto & p : closure)
+ alive.insert(p);
+ } catch (InvalidPath &) { }
+ };
+
+ /* If this is a root, bail out. */
+ if (roots.count(*path)) {
+ debug("cannot delete '%s' because it's a root", printStorePath(*path));
+ return markAlive();
+ }
- /* Acquire the global GC root. This prevents
- a) New roots from being added.
- b) Processes from creating new temporary root files. */
- AutoCloseFD fdGCLock = openGCLock(ltWrite);
+ if (options.action == GCOptions::gcDeleteSpecific
+ && !options.pathsToDelete.count(*path))
+ return;
- /* Find the roots. Since we've grabbed the GC lock, the set of
- permanent roots cannot increase now. */
- printInfo("finding garbage collector roots...");
- Roots rootMap;
- if (!options.ignoreLiveness)
- findRootsNoTemp(rootMap, true);
+ {
+ auto hashPart = std::string(path->hashPart());
+ auto shared(_shared.lock());
+ if (shared->tempRoots.count(hashPart)) {
+ debug("cannot delete '%s' because it's a temporary root", printStorePath(*path));
+ return markAlive();
+ }
+ shared->pending = hashPart;
+ }
- for (auto & i : rootMap) state.roots.insert(i.first);
+ if (isValidPath(*path)) {
- /* Read the temporary roots. This acquires read locks on all
- per-process temporary root files. So after this point no paths
- can be added to the set of temporary roots. */
- FDs fds;
- Roots tempRoots;
- findTempRoots(fds, tempRoots, true);
- for (auto & root : tempRoots) {
- state.tempRoots.insert(root.first);
- state.roots.insert(root.first);
- }
+ /* Visit the referrers of this path. */
+ auto i = referrersCache.find(*path);
+ if (i == referrersCache.end()) {
+ StorePathSet referrers;
+ queryReferrers(*path, referrers);
+ referrersCache.emplace(*path, std::move(referrers));
+ i = referrersCache.find(*path);
+ }
+ for (auto & p : i->second)
+ enqueue(p);
+
+ /* If keep-derivations is set and this is a
+ derivation, then visit the derivation outputs. */
+ if (gcKeepDerivations && path->isDerivation()) {
+ for (auto & [name, maybeOutPath] : queryPartialDerivationOutputMap(*path))
+ if (maybeOutPath &&
+ isValidPath(*maybeOutPath) &&
+ queryPathInfo(*maybeOutPath)->deriver == *path)
+ enqueue(*maybeOutPath);
+ }
- /* After this point the set of roots or temporary roots cannot
- increase, since we hold locks on everything. So everything
- that is not reachable from `roots' is garbage. */
+ /* If keep-outputs is set, then visit the derivers. */
+ if (gcKeepOutputs) {
+ auto derivers = queryValidDerivers(*path);
+ for (auto & i : derivers)
+ enqueue(i);
+ }
+ }
+ }
- if (state.shouldDelete) {
- if (pathExists(trashDir)) deleteGarbage(state, trashDir);
- try {
- createDirs(trashDir);
- } catch (SysError & e) {
- if (e.errNo == ENOSPC) {
- printInfo("note: can't create trash directory: %s", e.msg());
- state.moveToTrash = false;
+ for (auto & path : topoSortPaths(visited)) {
+ if (!dead.insert(path).second) continue;
+ if (shouldDelete) {
+ invalidatePathChecked(path);
+ deleteFromStore(path.to_string());
+ referrersCache.erase(path);
}
}
- }
+ };
- /* Now either delete all garbage paths, or just the specified
- paths (for gcDeleteSpecific). */
+ /* Synchronisation point for testing, see tests/gc-concurrent.sh. */
+ if (auto p = getEnv("_NIX_TEST_GC_SYNC"))
+ readFile(*p);
+ /* Either delete all garbage paths, or just the specified
+ paths (for gcDeleteSpecific). */
if (options.action == GCOptions::gcDeleteSpecific) {
for (auto & i : options.pathsToDelete) {
- tryToDelete(state, printStorePath(i));
- if (state.dead.find(i) == state.dead.end())
+ deleteReferrersClosure(i);
+ if (!dead.count(i))
throw Error(
- "cannot delete path '%1%' since it is still alive. "
- "To find out why use: "
+ "Cannot delete path '%1%' since it is still alive. "
+ "To find out why, use: "
"nix-store --query --roots",
printStorePath(i));
}
} else if (options.maxFreed > 0) {
- if (state.shouldDelete)
+ if (shouldDelete)
printInfo("deleting garbage...");
else
printInfo("determining live/dead paths...");
try {
-
AutoCloseDir dir(opendir(realStoreDir.get().c_str()));
if (!dir) throw SysError("opening directory '%1%'", realStoreDir);
- /* Read the store and immediately delete all paths that
- aren't valid. When using --max-freed etc., deleting
- invalid paths is preferred over deleting unreachable
- paths, since unreachable paths could become reachable
- again. We don't use readDirectory() here so that GCing
- can start faster. */
+ /* Read the store and delete all paths that are invalid or
+ unreachable. We don't use readDirectory() here so that
+ GCing can start faster. */
+ auto linksName = baseNameOf(linksDir);
Paths entries;
struct dirent * dirent;
while (errno = 0, dirent = readdir(dir.get())) {
checkInterrupt();
string name = dirent->d_name;
- if (name == "." || name == "..") continue;
- Path path = storeDir + "/" + name;
- auto storePath = maybeParseStorePath(path);
- if (storePath && isValidPath(*storePath))
- entries.push_back(path);
- else
- tryToDelete(state, path);
- }
-
- dir.reset();
-
- /* Now delete the unreachable valid paths. Randomise the
- order in which we delete entries to make the collector
- less biased towards deleting paths that come
- alphabetically first (e.g. /nix/store/000...). This
- matters when using --max-freed etc. */
- vector<Path> entries_(entries.begin(), entries.end());
- std::mt19937 gen(1);
- std::shuffle(entries_.begin(), entries_.end(), gen);
+ if (name == "." || name == ".." || name == linksName) continue;
- for (auto & i : entries_)
- tryToDelete(state, i);
+ if (auto storePath = maybeParseStorePath(storeDir + "/" + name))
+ deleteReferrersClosure(*storePath);
+ else
+ deleteFromStore(name);
+ }
} catch (GCLimitReached & e) {
}
}
- if (state.options.action == GCOptions::gcReturnLive) {
- for (auto & i : state.alive)
- state.results.paths.insert(printStorePath(i));
+ if (options.action == GCOptions::gcReturnLive) {
+ for (auto & i : alive)
+ results.paths.insert(printStorePath(i));
return;
}
- if (state.options.action == GCOptions::gcReturnDead) {
- for (auto & i : state.dead)
- state.results.paths.insert(printStorePath(i));
+ if (options.action == GCOptions::gcReturnDead) {
+ for (auto & i : dead)
+ results.paths.insert(printStorePath(i));
return;
}
- /* Allow other processes to add to the store from here on. */
- fdGCLock = -1;
- fds.clear();
-
- /* Delete the trash directory. */
- printInfo(format("deleting '%1%'") % trashDir);
- deleteGarbage(state, trashDir);
-
- /* Clean up the links directory. */
+ /* Unlink all files in /nix/store/.links that have a link count of 1,
+ which indicates that there are no other links and so they can be
+ safely deleted. FIXME: race condition with optimisePath(): we
+ might see a link count of 1 just before optimisePath() increases
+ the link count. */
if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
printInfo("deleting unused links...");
- removeUnusedLinks(state);
+
+ AutoCloseDir dir(opendir(linksDir.c_str()));
+ if (!dir) throw SysError("opening directory '%1%'", linksDir);
+
+ int64_t actualSize = 0, unsharedSize = 0;
+
+ struct dirent * dirent;
+ while (errno = 0, dirent = readdir(dir.get())) {
+ checkInterrupt();
+ string name = dirent->d_name;
+ if (name == "." || name == "..") continue;
+ Path path = linksDir + "/" + name;
+
+ auto st = lstat(path);
+
+ if (st.st_nlink != 1) {
+ actualSize += st.st_size;
+ unsharedSize += (st.st_nlink - 1) * st.st_size;
+ continue;
+ }
+
+ printMsg(lvlTalkative, format("deleting unused link '%1%'") % path);
+
+ if (unlink(path.c_str()) == -1)
+ throw SysError("deleting '%1%'", path);
+
+ results.bytesFreed += st.st_size;
+ }
+
+ struct stat st;
+ if (stat(linksDir.c_str(), &st) == -1)
+ throw SysError("statting '%1%'", linksDir);
+ int64_t overhead = st.st_blocks * 512ULL;
+
+ printInfo("note: currently hard linking saves %.2f MiB",
+ ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
}
/* While we're at it, vacuum the database. */
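
To restate the root-registration protocol introduced above: while a collection holds the GC lock, addTempRoot() connects to the collector's Unix domain socket under the state directory, writes the full store path followed by a newline, and waits for a single '1' byte acknowledging that the root was recorded, reconnecting if the collector has gone away. A standalone client-side sketch of that exchange (error handling simplified; the socket path is whatever <state>/gc-socket/socket resolves to):

    #include <cstring>
    #include <stdexcept>
    #include <string>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    // Register a store path as a temporary GC root with a running collector.
    static void sendGCRoot(const std::string & socketPath, const std::string & storePath)
    {
        int fd = socket(AF_UNIX, SOCK_STREAM, 0);
        if (fd == -1) throw std::runtime_error("cannot create socket");

        sockaddr_un addr{};
        addr.sun_family = AF_UNIX;
        std::strncpy(addr.sun_path, socketPath.c_str(), sizeof(addr.sun_path) - 1);

        if (connect(fd, (sockaddr *) &addr, sizeof(addr)) == -1) {
            close(fd);
            throw std::runtime_error("cannot connect to GC socket");
        }

        // Send the full store path, newline-terminated.
        std::string msg = storePath + "\n";
        if (write(fd, msg.data(), msg.size()) != (ssize_t) msg.size()) {
            close(fd);
            throw std::runtime_error("cannot send GC root");
        }

        // Wait for the one-byte '1' acknowledgement.
        char ack = 0;
        if (read(fd, &ack, 1) != 1 || ack != '1') {
            close(fd);
            throw std::runtime_error("no acknowledgement from garbage collector");
        }
        close(fd);
    }
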
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index d3b27d7be..81ca9cc0f 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -122,7 +122,7 @@ StringSet Settings::getDefaultSystemFeatures()
/* For backwards compatibility, accept some "features" that are
used in Nixpkgs to route builds to certain machines but don't
actually require anything special on the machines. */
- StringSet features{"nixos-test", "benchmark", "big-parallel", "recursive-nix"};
+ StringSet features{"nixos-test", "benchmark", "big-parallel"};
#if __linux__
if (access("/dev/kvm", R_OK | W_OK) == 0)
@@ -148,7 +148,8 @@ StringSet Settings::getDefaultExtraPlatforms()
// machines. Note that we can’t force processes from executing
// x86_64 in aarch64 environments or vice versa since they can
// always exec with their own binary preferences.
- if (pathExists("/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist")) {
+ if (pathExists("/Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist") ||
+ pathExists("/System/Library/LaunchDaemons/com.apple.oahd.plist")) {
if (std::string{SYSTEM} == "x86_64-darwin")
extraPlatforms.insert("aarch64-darwin");
else if (std::string{SYSTEM} == "aarch64-darwin")
@@ -159,21 +160,16 @@ StringSet Settings::getDefaultExtraPlatforms()
return extraPlatforms;
}
-bool Settings::isExperimentalFeatureEnabled(const std::string & name)
+bool Settings::isExperimentalFeatureEnabled(const ExperimentalFeature & feature)
{
auto & f = experimentalFeatures.get();
- return std::find(f.begin(), f.end(), name) != f.end();
+ return std::find(f.begin(), f.end(), feature) != f.end();
}
-MissingExperimentalFeature::MissingExperimentalFeature(std::string feature)
- : Error("experimental Nix feature '%1%' is disabled; use '--experimental-features %1%' to override", feature)
- , missingFeature(feature)
- {}
-
-void Settings::requireExperimentalFeature(const std::string & name)
+void Settings::requireExperimentalFeature(const ExperimentalFeature & feature)
{
- if (!isExperimentalFeatureEnabled(name))
- throw MissingExperimentalFeature(name);
+ if (!isExperimentalFeatureEnabled(feature))
+ throw MissingExperimentalFeature(feature);
}
bool Settings::isWSL1()
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 8784d5faf..f65893b10 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -3,6 +3,7 @@
#include "types.hh"
#include "config.hh"
#include "util.hh"
+#include "experimental-features.hh"
#include <map>
#include <limits>
@@ -20,7 +21,7 @@ struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
const std::string & name,
const std::string & description,
const std::set<std::string> & aliases = {})
- : BaseSetting<unsigned int>(def, name, description, aliases)
+ : BaseSetting<unsigned int>(def, true, name, description, aliases)
{
options->addSetting(this);
}
@@ -37,7 +38,7 @@ struct PluginFilesSetting : public BaseSetting<Paths>
const std::string & name,
const std::string & description,
const std::set<std::string> & aliases = {})
- : BaseSetting<Paths>(def, name, description, aliases)
+ : BaseSetting<Paths>(def, true, name, description, aliases)
{
options->addSetting(this);
}
@@ -45,15 +46,6 @@ struct PluginFilesSetting : public BaseSetting<Paths>
void set(const std::string & str, bool append = false) override;
};
-class MissingExperimentalFeature: public Error
-{
-public:
- std::string missingFeature;
-
- MissingExperimentalFeature(std::string feature);
- virtual const char* sname() const override { return "MissingExperimentalFeature"; }
-};
-
class Settings : public Config {
unsigned int getDefaultCores();
@@ -138,7 +130,9 @@ public:
{"build-max-jobs"}};
Setting<unsigned int> buildCores{
- this, getDefaultCores(), "cores",
+ this,
+ getDefaultCores(),
+ "cores",
R"(
Sets the value of the `NIX_BUILD_CORES` environment variable in the
invocation of builders. Builders can use this variable at their
@@ -149,7 +143,7 @@ public:
command line switch and defaults to `1`. The value `0` means that
the builder should use all available CPU cores in the system.
)",
- {"build-cores"}};
+ {"build-cores"}, false};
/* Read-only mode. Don't copy stuff to the store, don't change
the database. */
@@ -591,10 +585,11 @@ public:
platform and generate incompatible code, so you may wish to
cross-check the results of using this option against proper
natively-built versions of your derivations.
- )"};
+ )", {}, false};
Setting<StringSet> systemFeatures{
- this, getDefaultSystemFeatures(),
+ this,
+ getDefaultSystemFeatures(),
"system-features",
R"(
A set of system “features” supported by this machine, e.g. `kvm`.
@@ -610,7 +605,7 @@ public:
This setting by default includes `kvm` if `/dev/kvm` is accessible,
and the pseudo-features `nixos-test`, `benchmark` and `big-parallel`
that are used in Nixpkgs to route builds to specific machines.
- )"};
+ )", {}, false};
Setting<Strings> substituters{
this,
@@ -805,6 +800,15 @@ public:
may be useful in certain scenarios (e.g. to spin up containers or
set up userspace network interfaces in tests).
)"};
+
+ Setting<StringSet> ignoredAcls{
+ this, {"security.selinux", "system.nfs4_acl"}, "ignored-acls",
+ R"(
+ A list of ACLs that should be ignored. Normally, Nix attempts to
+ remove all ACLs from files and directories in the Nix store, but
+ some ACLs, such as `security.selinux` or `system.nfs4_acl`, can't
+ be removed even by root, so it's best to just ignore them.
+ )"};
#endif
Setting<Strings> hashedMirrors{
@@ -925,12 +929,12 @@ public:
value.
)"};
- Setting<Strings> experimentalFeatures{this, {}, "experimental-features",
+ Setting<std::set<ExperimentalFeature>> experimentalFeatures{this, {}, "experimental-features",
"Experimental Nix features to enable."};
- bool isExperimentalFeatureEnabled(const std::string & name);
+ bool isExperimentalFeatureEnabled(const ExperimentalFeature &);
- void requireExperimentalFeature(const std::string & name);
+ void requireExperimentalFeature(const ExperimentalFeature &);
Setting<bool> allowDirty{this, true, "allow-dirty",
"Whether to allow dirty Git/Mercurial trees."};
@@ -959,6 +963,16 @@ public:
Setting<bool> useRegistries{this, true, "use-registries",
"Whether to use flake registries to resolve flake references."};
+
+ Setting<bool> acceptFlakeConfig{this, false, "accept-flake-config",
+ "Whether to accept nix configuration from a flake without prompting."};
+
+ Setting<std::string> commitLockFileSummary{
+ this, "", "commit-lockfile-summary",
+ R"(
+ The commit summary to use when committing changed flake lock files. If
+ empty, the summary is generated based on the action performed.
+ )"};
};
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index 0a3afcd51..3cb5efdbf 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -57,8 +57,8 @@ public:
{
// FIXME: do this lazily?
if (auto cacheInfo = diskCache->cacheExists(cacheUri)) {
- wantMassQuery.setDefault(cacheInfo->wantMassQuery ? "true" : "false");
- priority.setDefault(fmt("%d", cacheInfo->priority));
+ wantMassQuery.setDefault(cacheInfo->wantMassQuery);
+ priority.setDefault(cacheInfo->priority);
} else {
try {
BinaryCacheStore::init();
@@ -126,7 +126,7 @@ protected:
const std::string & mimeType) override
{
auto req = makeRequest(path);
- req.data = std::make_shared<string>(StreamToSourceAdapter(istream).drain());
+ req.data = StreamToSourceAdapter(istream).drain();
req.mimeType = mimeType;
try {
getFileTransfer()->upload(req);
@@ -159,7 +159,7 @@ protected:
}
void getFile(const std::string & path,
- Callback<std::shared_ptr<std::string>> callback) noexcept override
+ Callback<std::optional<std::string>> callback) noexcept override
{
checkEnabled();
@@ -170,10 +170,10 @@ protected:
getFileTransfer()->enqueueFileTransfer(request,
{[callbackPtr, this](std::future<FileTransferResult> result) {
try {
- (*callbackPtr)(result.get().data);
+ (*callbackPtr)(std::move(result.get().data));
} catch (FileTransferError & e) {
if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
- return (*callbackPtr)(std::shared_ptr<std::string>());
+ return (*callbackPtr)({});
maybeDisable();
callbackPtr->rethrow();
} catch (...) {
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index 45eed5707..f8b2662af 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -82,9 +82,20 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION;
conn->to.flush();
- unsigned int magic = readInt(conn->from);
- if (magic != SERVE_MAGIC_2)
- throw Error("protocol mismatch with 'nix-store --serve' on '%s'", host);
+ StringSink saved;
+ try {
+ TeeSource tee(conn->from, saved);
+ unsigned int magic = readInt(tee);
+ if (magic != SERVE_MAGIC_2)
+ throw Error("'nix-store --serve' protocol mismatch from '%s'", host);
+ } catch (SerialisationError & e) {
+ /* In case the other side is waiting for our input,
+ close it. */
+ conn->sshConn->in.close();
+ auto msg = conn->from.drain();
+ throw Error("'nix-store --serve' protocol mismatch from '%s', got '%s'",
+ host, chomp(saved.s + msg));
+ }
conn->remoteVersion = readInt(conn->from);
if (GET_PROTOCOL_MAJOR(conn->remoteVersion) != 0x200)
throw Error("unsupported 'nix-store --serve' protocol version on '%s'", host);
@@ -216,7 +227,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor
StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method, HashType hashAlgo,
- PathFilter & filter, RepairFlag repair) override
+ PathFilter & filter, RepairFlag repair, const StorePathSet & references) override
{ unsupported("addToStore"); }
StorePath addTextToStore(const string & name, const string & s,
@@ -237,6 +248,10 @@ private:
conn.to
<< settings.buildRepeat
<< settings.enforceDeterminism;
+
+ if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 7) {
+ conn.to << ((int) settings.keepFailed);
+ }
}
public:
@@ -279,10 +294,10 @@ public:
for (auto & p : drvPaths) {
auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
std::visit(overloaded {
- [&](StorePathWithOutputs s) {
+ [&](const StorePathWithOutputs & s) {
ss.push_back(s.to_string(*this));
},
- [&](StorePath drvPath) {
+ [&](const StorePath & drvPath) {
throw Error("wanted to fetch '%s' but the legacy ssh protocol doesn't support merely substituting drv files via the build paths command. It would build them instead. Try using ssh-ng://", printStorePath(drvPath));
},
}, sOrDrvPath);
@@ -352,7 +367,8 @@ public:
return conn->remoteVersion;
}
- std::optional<const Realisation> queryRealisation(const DrvOutput&) override
+ void queryRealisationUncached(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override
// TODO: Implement
{ unsupported("queryRealisation"); }
};
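
The handshake change above reads the magic number through a TeeSource so that, when the remote side does not speak the serve protocol, whatever bytes it actually sent can be echoed in the error message. Reduced to a standalone sketch with a hypothetical TeeReader (not the Nix class, and the magic value below is illustrative):

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <stdexcept>
    #include <string>

    // Reads from an underlying stream while appending everything read to
    // 'saved', so error paths can report what was actually received.
    struct TeeReader
    {
        std::istream & in;
        std::string & saved;

        uint32_t readInt()
        {
            unsigned char buf[4];
            if (!in.read(reinterpret_cast<char *>(buf), 4))
                throw std::runtime_error("unexpected EOF");
            saved.append(reinterpret_cast<char *>(buf), 4);
            uint32_t v = 0;
            for (int i = 3; i >= 0; --i) v = (v << 8) | buf[i];
            return v;
        }
    };

    int main()
    {
        std::istringstream bogus("bash: nix-store: command not found\n");
        std::string saved;
        TeeReader tee{bogus, saved};
        const uint32_t SERVE_MAGIC = 0x390c9deb;   // illustrative value only
        if (tee.readInt() != SERVE_MAGIC)
            // The real code also drains the rest of the stream into the message.
            std::cerr << "protocol mismatch, got: '" << saved << "'\n";
    }
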
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
index f93111fce..f754770f9 100644
--- a/src/libstore/local-binary-cache-store.cc
+++ b/src/libstore/local-binary-cache-store.cc
@@ -96,6 +96,7 @@ void LocalBinaryCacheStore::init()
createDirs(binaryCacheDir + "/" + realisationsPrefix);
if (writeDebugInfo)
createDirs(binaryCacheDir + "/debuginfo");
+ createDirs(binaryCacheDir + "/log");
BinaryCacheStore::init();
}
diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc
index 6de13c73a..c933251db 100644
--- a/src/libstore/local-fs-store.cc
+++ b/src/libstore/local-fs-store.cc
@@ -87,34 +87,32 @@ void LocalFSStore::narFromPath(const StorePath & path, Sink & sink)
const string LocalFSStore::drvsLogDir = "drvs";
-
-
-std::shared_ptr<std::string> LocalFSStore::getBuildLog(const StorePath & path_)
+std::optional<std::string> LocalFSStore::getBuildLog(const StorePath & path_)
{
auto path = path_;
if (!path.isDerivation()) {
try {
auto info = queryPathInfo(path);
- if (!info->deriver) return nullptr;
+ if (!info->deriver) return std::nullopt;
path = *info->deriver;
} catch (InvalidPath &) {
- return nullptr;
+ return std::nullopt;
}
}
- auto baseName = std::string(baseNameOf(printStorePath(path)));
+ auto baseName = path.to_string();
for (int j = 0; j < 2; j++) {
Path logPath =
j == 0
- ? fmt("%s/%s/%s/%s", logDir, drvsLogDir, string(baseName, 0, 2), string(baseName, 2))
+ ? fmt("%s/%s/%s/%s", logDir, drvsLogDir, baseName.substr(0, 2), baseName.substr(2))
: fmt("%s/%s/%s", logDir, drvsLogDir, baseName);
Path logBz2Path = logPath + ".bz2";
if (pathExists(logPath))
- return std::make_shared<std::string>(readFile(logPath));
+ return readFile(logPath);
else if (pathExists(logBz2Path)) {
try {
@@ -124,7 +122,7 @@ std::shared_ptr<std::string> LocalFSStore::getBuildLog(const StorePath & path_)
}
- return nullptr;
+ return std::nullopt;
}
}
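
Since getBuildLog() now returns std::optional<std::string> instead of a shared_ptr, callers move from null-pointer checks to optional checks. A hypothetical caller (store object, derivation path and <iostream> assumed for illustration):

    // Illustrative only: 'store' is any LocalFSStore, 'drvPath' a StorePath.
    if (auto log = store.getBuildLog(drvPath))
        std::cout << *log;                        // dereferencing yields the log text
    else
        std::cerr << "no build log available\n";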
diff --git a/src/libstore/local-fs-store.hh b/src/libstore/local-fs-store.hh
index f8b19d00d..e44b27cc2 100644
--- a/src/libstore/local-fs-store.hh
+++ b/src/libstore/local-fs-store.hh
@@ -45,7 +45,8 @@ public:
return getRealStoreDir() + "/" + std::string(storePath, storeDir.size() + 1);
}
- std::shared_ptr<std::string> getBuildLog(const StorePath & path) override;
+ std::optional<std::string> getBuildLog(const StorePath & path) override;
+
};
}
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 747eb205e..1807940d8 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -8,6 +8,8 @@
#include "references.hh"
#include "callback.hh"
#include "topo-sort.hh"
+#include "finally.hh"
+#include "compression.hh"
#include <iostream>
#include <algorithm>
@@ -79,7 +81,7 @@ int getSchema(Path schemaPath)
void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
{
- const int nixCASchemaVersion = 2;
+ const int nixCASchemaVersion = 3;
int curCASchema = getSchema(schemaPath);
if (curCASchema != nixCASchemaVersion) {
if (curCASchema > nixCASchemaVersion) {
@@ -130,6 +132,17 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
txn.commit();
}
+ if (curCASchema < 3) {
+ SQLiteTxn txn(db);
+ // Apply new indices added in this schema update.
+ db.exec(R"(
+ -- used by QueryRealisationReferences
+ create index if not exists IndexRealisationsRefs on RealisationsRefs(referrer);
+ -- used by cascade deletion when ValidPaths is deleted
+ create index if not exists IndexRealisationsRefsOnOutputPath on Realisations(outputPath);
+ )");
+ txn.commit();
+ }
writeFile(schemaPath, fmt("%d", nixCASchemaVersion));
lockFile(lockFd.get(), ltRead, true);
}
@@ -145,7 +158,6 @@ LocalStore::LocalStore(const Params & params)
, linksDir(realStoreDir + "/.links")
, reservedPath(dbDir + "/reserved")
, schemaPath(dbDir + "/schema")
- , trashDir(realStoreDir + "/trash")
, tempRootsDir(stateDir + "/temproots")
, fnTempRoots(fmt("%s/%d", tempRootsDir, getpid()))
, locksHeld(tokenizeString<PathSet>(getEnv("NIX_HELD_LOCKS").value_or("")))
@@ -309,7 +321,7 @@ LocalStore::LocalStore(const Params & params)
else openDB(*state, false);
- if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
migrateCASchema(state->db, dbDir + "/ca-schema", globalLock);
}
@@ -339,7 +351,7 @@ LocalStore::LocalStore(const Params & params)
state->stmts->QueryPathFromHashPart.create(state->db,
"select path from ValidPaths where path >= ? limit 1;");
state->stmts->QueryValidPaths.create(state->db, "select path from ValidPaths");
- if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) {
state->stmts->RegisterRealisedOutput.create(state->db,
R"(
insert into Realisations (drvPath, outputName, outputPath, signatures)
@@ -386,6 +398,16 @@ LocalStore::LocalStore(const Params & params)
}
+AutoCloseFD LocalStore::openGCLock()
+{
+ Path fnGCLock = stateDir + "/gc.lock";
+ auto fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
+ if (!fdGCLock)
+ throw SysError("opening global GC lock '%1%'", fnGCLock);
+ return fdGCLock;
+}
+
+
LocalStore::~LocalStore()
{
std::shared_future<void> future;
@@ -495,9 +517,6 @@ void LocalStore::makeStoreWritable()
throw SysError("getting info about the Nix store mount point");
if (stat.f_flag & ST_RDONLY) {
- if (unshare(CLONE_NEWNS) == -1)
- throw SysError("setting up a private mount namespace");
-
if (mount(0, realStoreDir.get().c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
throw SysError("remounting %1% writable", realStoreDir);
}
@@ -583,9 +602,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
throw SysError("querying extended attributes of '%s'", path);
for (auto & eaName: tokenizeString<Strings>(std::string(eaBuf.data(), eaSize), std::string("\000", 1))) {
- /* Ignore SELinux security labels since these cannot be
- removed even by root. */
- if (eaName == "security.selinux") continue;
+ if (settings.ignoredAcls.get().count(eaName)) continue;
if (lremovexattr(path.c_str(), eaName.c_str()) == -1)
throw SysError("removing extended attribute '%s' from '%s'", eaName, path);
}
@@ -681,7 +698,7 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
std::optional<Hash> h;
for (auto & i : drv.outputs) {
std::visit(overloaded {
- [&](DerivationOutputInputAddressed doia) {
+ [&](const DerivationOutputInputAddressed & doia) {
if (!h) {
// somewhat expensive so we do lazily
auto temp = hashDerivationModulo(*this, drv, true);
@@ -693,14 +710,14 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
printStorePath(drvPath), printStorePath(doia.path), printStorePath(recomputed));
envHasRightPath(doia.path, i.first);
},
- [&](DerivationOutputCAFixed dof) {
+ [&](const DerivationOutputCAFixed & dof) {
StorePath path = makeFixedOutputPath(dof.hash.method, dof.hash.hash, drvName);
envHasRightPath(path, i.first);
},
- [&](DerivationOutputCAFloating _) {
+ [&](const DerivationOutputCAFloating &) {
/* Nothing to check */
},
- [&](DerivationOutputDeferred) {
+ [&](const DerivationOutputDeferred &) {
},
}, i.second.output);
}
@@ -708,7 +725,7 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs)
{
- settings.requireExperimentalFeature("ca-derivations");
+ settings.requireExperimentalFeature(Xp::CaDerivations);
if (checkSigs == NoCheckSigs || !realisationIsUntrusted(info))
registerDrvOutput(info);
else
@@ -717,7 +734,7 @@ void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag check
void LocalStore::registerDrvOutput(const Realisation & info)
{
- settings.requireExperimentalFeature("ca-derivations");
+ settings.requireExperimentalFeature(Xp::CaDerivations);
retrySQLite<void>([&]() {
auto state(_state.lock());
if (auto oldR = queryRealisation_(*state, info.id)) {
@@ -825,7 +842,7 @@ uint64_t LocalStore::addValidPath(State & state,
{
auto state_(Store::state.lock());
- state_->pathInfoCache.upsert(std::string(info.path.hashPart()),
+ state_->pathInfoCache.upsert(std::string(info.path.to_string()),
PathInfoCacheValue{ .value = std::make_shared<const ValidPathInfo>(info) });
}
@@ -1003,7 +1020,7 @@ LocalStore::queryPartialDerivationOutputMap(const StorePath & path_)
return outputs;
});
- if (!settings.isExperimentalFeatureEnabled("ca-derivations"))
+ if (!settings.isExperimentalFeatureEnabled(Xp::CaDerivations))
return outputs;
auto drv = readInvalidDerivation(path);
@@ -1071,14 +1088,19 @@ StorePathSet LocalStore::querySubstitutablePaths(const StorePathSet & paths)
}
+// FIXME: move this, it's not specific to LocalStore.
void LocalStore::querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos)
{
if (!settings.useSubstitutes) return;
for (auto & sub : getDefaultSubstituters()) {
for (auto & path : paths) {
+ if (infos.count(path.first))
+ // Choose first succeeding substituter.
+ continue;
+
auto subPath(path.first);
- // recompute store path so that we can use a different store root
+ // Recompute store path so that we can use a different store root.
if (path.second) {
subPath = makeFixedOutputPathFromCA(path.first.name(), *path.second);
if (sub->storeDir == storeDir)
@@ -1193,7 +1215,7 @@ void LocalStore::invalidatePath(State & state, const StorePath & path)
{
auto state_(Store::state.lock());
- state_->pathInfoCache.erase(std::string(path.hashPart()));
+ state_->pathInfoCache.erase(std::string(path.to_string()));
}
}
@@ -1239,11 +1261,6 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
deletePath(realPath);
- // text hashing has long been allowed to have non-self-references because it is used for drv files.
- bool refersToSelf = info.references.count(info.path) > 0;
- if (info.ca.has_value() && !info.references.empty() && !(std::holds_alternative<TextHash>(*info.ca) && !refersToSelf))
- settings.requireExperimentalFeature("ca-references");
-
/* While restoring the path from the NAR, compute the hash
of the NAR. */
HashSink hashSink(htSHA256);
@@ -1291,7 +1308,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
canonicalisePathMetaData(realPath, -1);
- optimisePath(realPath); // FIXME: combine with hashPath()
+ optimisePath(realPath, repair); // FIXME: combine with hashPath()
registerValidPath(info);
}
@@ -1302,7 +1319,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
- FileIngestionMethod method, HashType hashAlgo, RepairFlag repair)
+ FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references)
{
/* For computing the store path. */
auto hashSink = std::make_unique<HashSink>(hashAlgo);
@@ -1327,13 +1344,15 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
auto want = std::min(chunkSize, settings.narBufferSize - oldSize);
dump.resize(oldSize + want);
auto got = 0;
+ Finally cleanup([&]() {
+ dump.resize(oldSize + got);
+ });
try {
got = source.read(dump.data() + oldSize, want);
} catch (EndOfFile &) {
inMemory = true;
break;
}
- dump.resize(oldSize + got);
}
std::unique_ptr<AutoDelete> delTempDir;
@@ -1358,7 +1377,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
auto [hash, size] = hashSink->finish();
- auto dstPath = makeFixedOutputPath(method, hash, name);
+ auto dstPath = makeFixedOutputPath(method, hash, name, references);
addTempRoot(dstPath);
@@ -1401,10 +1420,11 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
canonicalisePathMetaData(realPath, -1); // FIXME: merge into restorePath
- optimisePath(realPath);
+ optimisePath(realPath, repair);
ValidPathInfo info { dstPath, narHash.first };
info.narSize = narHash.second;
+ info.references = references;
info.ca = FixedOutputHash { .method = method, .hash = hash };
registerValidPath(info);
}
@@ -1442,12 +1462,12 @@ StorePath LocalStore::addTextToStore(const string & name, const string & s,
StringSink sink;
dumpString(s, sink);
- auto narHash = hashString(htSHA256, *sink.s);
+ auto narHash = hashString(htSHA256, sink.s);
- optimisePath(realPath);
+ optimisePath(realPath, repair);
ValidPathInfo info { dstPath, narHash };
- info.narSize = sink.s->size();
+ info.narSize = sink.s.size();
info.references = references;
info.ca = TextHash { .hash = hash };
registerValidPath(info);
@@ -1505,7 +1525,8 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
/* Acquire the global GC lock to get a consistent snapshot of
existing and valid paths. */
- AutoCloseFD fdGCLock = openGCLock(ltWrite);
+ auto fdGCLock = openGCLock();
+ FdLock gcLock(fdGCLock.get(), ltRead, true, "waiting for the big garbage collector lock...");
StringSet store;
for (auto & i : readDirectory(realStoreDir)) store.insert(i.name);
@@ -1516,8 +1537,6 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
StorePathSet validPaths;
PathSet done;
- fdGCLock = -1;
-
for (auto & i : queryAllValidPaths())
verifyPath(printStorePath(i), store, done, validPaths, repair, errors);
@@ -1830,13 +1849,24 @@ std::optional<const Realisation> LocalStore::queryRealisation_(
return { res };
}
-std::optional<const Realisation>
-LocalStore::queryRealisation(const DrvOutput & id)
+void LocalStore::queryRealisationUncached(const DrvOutput & id,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
- return retrySQLite<std::optional<const Realisation>>([&]() {
- auto state(_state.lock());
- return queryRealisation_(*state, id);
- });
+ try {
+ auto maybeRealisation
+ = retrySQLite<std::optional<const Realisation>>([&]() {
+ auto state(_state.lock());
+ return queryRealisation_(*state, id);
+ });
+ if (maybeRealisation)
+ callback(
+ std::make_shared<const Realisation>(maybeRealisation.value()));
+ else
+ callback(nullptr);
+
+ } catch (...) {
+ callback.rethrow();
+ }
}
FixedOutputHash LocalStore::hashCAPath(
@@ -1869,4 +1899,24 @@ FixedOutputHash LocalStore::hashCAPath(
};
}
+void LocalStore::addBuildLog(const StorePath & drvPath, std::string_view log)
+{
+ assert(drvPath.isDerivation());
+
+ auto baseName = drvPath.to_string();
+
+ auto logPath = fmt("%s/%s/%s/%s.bz2", logDir, drvsLogDir, baseName.substr(0, 2), baseName.substr(2));
+
+ if (pathExists(logPath)) return;
+
+ createDirs(dirOf(logPath));
+
+ auto tmpFile = fmt("%s.tmp.%d", logPath, getpid());
+
+ writeFile(tmpFile, compress("bzip2", log));
+
+ if (rename(tmpFile.c_str(), logPath.c_str()) != 0)
+ throw SysError("renaming '%1%' to '%2%'", tmpFile, logPath);
+}
+
} // namespace nix
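
The Finally guard added to addToStoreFromDump() in the hunk above makes the buffer-trimming resize run on every exit path, including when read() throws something other than EndOfFile. A minimal sketch of the same scope-guard idiom, with a made-up buffer and chunk size (Finally comes from nix's finally.hh, Source from serialise.hh):

    // Illustrative only; not part of this commit.
    static void readChunk(Source & source, std::string & dump, size_t want)
    {
        size_t oldSize = dump.size(), got = 0;
        dump.resize(oldSize + want);
        Finally cleanup([&]() { dump.resize(oldSize + got); });  // runs on return *and* on throw
        got = source.read(dump.data() + oldSize, want);          // may throw EndOfFile
    }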
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index a01d48c4b..6d867d778 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -58,9 +58,15 @@ private:
struct Stmts;
std::unique_ptr<Stmts> stmts;
+ /* The global GC lock */
+ AutoCloseFD fdGCLock;
+
/* The file to which we write our temporary roots. */
AutoCloseFD fdTempRoots;
+ /* Connection to the garbage collector. */
+ AutoCloseFD fdRootsSocket;
+
/* The last time we checked whether to do an auto-GC, or an
auto-GC finished. */
std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;
@@ -87,7 +93,6 @@ public:
const Path linksDir;
const Path reservedPath;
const Path schemaPath;
- const Path trashDir;
const Path tempRootsDir;
const Path fnTempRoots;
@@ -140,7 +145,7 @@ public:
RepairFlag repair, CheckSigsFlag checkSigs) override;
StorePath addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method, HashType hashAlgo, RepairFlag repair) override;
+ FileIngestionMethod method, HashType hashAlgo, RepairFlag repair, const StorePathSet & references) override;
StorePath addTextToStore(const string & name, const string & s,
const StorePathSet & references, RepairFlag repair) override;
@@ -149,14 +154,11 @@ public:
void addIndirectRoot(const Path & path) override;
- void syncWithGC() override;
-
private:
- typedef std::shared_ptr<AutoCloseFD> FDPtr;
- typedef list<FDPtr> FDs;
+ void findTempRoots(Roots & roots, bool censor);
- void findTempRoots(FDs & fds, Roots & roots, bool censor);
+ AutoCloseFD openGCLock();
public:
@@ -170,8 +172,9 @@ public:
void optimiseStore() override;
- /* Optimise a single store path. */
- void optimisePath(const Path & path);
+ /* Optimise a single store path. Optionally, test the encountered
+ symlinks for corruption. */
+ void optimisePath(const Path & path, RepairFlag repair);
bool verifyStore(bool checkContents, RepairFlag repair) override;
@@ -205,7 +208,8 @@ public:
std::optional<const Realisation> queryRealisation_(State & state, const DrvOutput & id);
std::optional<std::pair<int64_t, Realisation>> queryRealisationCore_(State & state, const DrvOutput & id);
- std::optional<const Realisation> queryRealisation(const DrvOutput&) override;
+ void queryRealisationUncached(const DrvOutput&,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
private:
@@ -236,29 +240,12 @@ private:
PathSet queryValidPathsOld();
ValidPathInfo queryPathInfoOld(const Path & path);
- struct GCState;
-
- void deleteGarbage(GCState & state, const Path & path);
-
- void tryToDelete(GCState & state, const Path & path);
-
- bool canReachRoot(GCState & state, StorePathSet & visited, const StorePath & path);
-
- void deletePathRecursive(GCState & state, const Path & path);
-
- bool isActiveTempFile(const GCState & state,
- const Path & path, const string & suffix);
-
- AutoCloseFD openGCLock(LockType lockType);
-
void findRoots(const Path & path, unsigned char type, Roots & roots);
void findRootsNoTemp(Roots & roots, bool censor);
void findRuntimeRoots(Roots & roots, bool censor);
- void removeUnusedLinks(const GCState & state);
-
Path createTempDirInStore();
void checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv);
@@ -267,7 +254,7 @@ private:
InodeHash loadInodeHash();
Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
- void optimisePath_(Activity * act, OptimiseStats & stats, const Path & path, InodeHash & inodeHash);
+ void optimisePath_(Activity * act, OptimiseStats & stats, const Path & path, InodeHash & inodeHash, RepairFlag repair);
// Internal versions that are not wrapped in retry_sqlite.
bool isValidPath_(State & state, const StorePath & path);
@@ -293,6 +280,8 @@ private:
const std::string_view pathHash
);
+ void addBuildLog(const StorePath & drvPath, std::string_view log) override;
+
friend struct LocalDerivationGoal;
friend struct PathSubstitutionGoal;
friend struct SubstitutionGoal;
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
index 2fc334a82..b992bcbc0 100644
--- a/src/libstore/local.mk
+++ b/src/libstore/local.mk
@@ -8,7 +8,7 @@ libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc)
libstore_LIBS = libutil
-libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
+libstore_LDFLAGS += $(SQLITE3_LIBS) $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
ifdef HOST_LINUX
libstore_LDFLAGS += -ldl
endif
@@ -60,7 +60,7 @@ $(d)/build.cc:
clean-files += $(d)/schema.sql.gen.hh $(d)/ca-specific-schema.sql.gen.hh
-$(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644))
+$(eval $(call install-file-in, $(d)/nix-store.pc, $(libdir)/pkgconfig, 0644))
$(foreach i, $(wildcard src/libstore/builtins/*.hh), \
$(eval $(call install-file-in, $(i), $(includedir)/nix/builtins, 0644)))
diff --git a/src/libstore/machines.cc b/src/libstore/machines.cc
index 9843ccf04..b6270a81b 100644
--- a/src/libstore/machines.cc
+++ b/src/libstore/machines.cc
@@ -39,7 +39,8 @@ Machine::Machine(decltype(storeUri) storeUri,
sshPublicHostKey(sshPublicHostKey)
{}
-bool Machine::allSupported(const std::set<string> & features) const {
+bool Machine::allSupported(const std::set<string> & features) const
+{
return std::all_of(features.begin(), features.end(),
[&](const string & feature) {
return supportedFeatures.count(feature) ||
@@ -47,14 +48,16 @@ bool Machine::allSupported(const std::set<string> & features) const {
});
}
-bool Machine::mandatoryMet(const std::set<string> & features) const {
+bool Machine::mandatoryMet(const std::set<string> & features) const
+{
return std::all_of(mandatoryFeatures.begin(), mandatoryFeatures.end(),
[&](const string & feature) {
return features.count(feature);
});
}
-ref<Store> Machine::openStore() const {
+ref<Store> Machine::openStore() const
+{
Store::Params storeParams;
if (hasPrefix(storeUri, "ssh://")) {
storeParams["max-connections"] = "1";
@@ -83,53 +86,86 @@ ref<Store> Machine::openStore() const {
return nix::openStore(storeUri, storeParams);
}
-void parseMachines(const std::string & s, Machines & machines)
+static std::vector<std::string> expandBuilderLines(const std::string & builders)
{
- for (auto line : tokenizeString<std::vector<string>>(s, "\n;")) {
+ std::vector<std::string> result;
+ for (auto line : tokenizeString<std::vector<string>>(builders, "\n;")) {
trim(line);
line.erase(std::find(line.begin(), line.end(), '#'), line.end());
if (line.empty()) continue;
if (line[0] == '@') {
- auto file = trim(std::string(line, 1));
+ const std::string path = trim(std::string(line, 1));
+ std::string text;
try {
- parseMachines(readFile(file), machines);
+ text = readFile(path);
} catch (const SysError & e) {
if (e.errNo != ENOENT)
throw;
- debug("cannot find machines file '%s'", file);
+ debug("cannot find machines file '%s'", path);
}
+
+ const auto lines = expandBuilderLines(text);
+ result.insert(end(result), begin(lines), end(lines));
continue;
}
- auto tokens = tokenizeString<std::vector<string>>(line);
- auto sz = tokens.size();
- if (sz < 1)
- throw FormatError("bad machine specification '%s'", line);
+ result.emplace_back(line);
+ }
+ return result;
+}
- auto isSet = [&](size_t n) {
- return tokens.size() > n && tokens[n] != "" && tokens[n] != "-";
- };
+static Machine parseBuilderLine(const std::string & line)
+{
+ const auto tokens = tokenizeString<std::vector<string>>(line);
- machines.emplace_back(tokens[0],
- isSet(1) ? tokenizeString<std::vector<string>>(tokens[1], ",") : std::vector<string>{settings.thisSystem},
- isSet(2) ? tokens[2] : "",
- isSet(3) ? std::stoull(tokens[3]) : 1LL,
- isSet(4) ? std::stoull(tokens[4]) : 1LL,
- isSet(5) ? tokenizeString<std::set<string>>(tokens[5], ",") : std::set<string>{},
- isSet(6) ? tokenizeString<std::set<string>>(tokens[6], ",") : std::set<string>{},
- isSet(7) ? tokens[7] : "");
- }
+ auto isSet = [&](size_t fieldIndex) {
+ return tokens.size() > fieldIndex && tokens[fieldIndex] != "" && tokens[fieldIndex] != "-";
+ };
+
+ auto parseUnsignedIntField = [&](size_t fieldIndex) {
+ const auto result = string2Int<unsigned int>(tokens[fieldIndex]);
+ if (!result) {
+            throw FormatError("bad machine specification: failed to convert column #%lu of row '%s' to 'unsigned int'", fieldIndex, line);
+ }
+ return result.value();
+ };
+
+ auto ensureBase64 = [&](size_t fieldIndex) {
+ const auto & str = tokens[fieldIndex];
+ try {
+ base64Decode(str);
+ } catch (const Error & e) {
+            throw FormatError("bad machine specification: column #%lu of row '%s' is not a valid base64 string: %s", fieldIndex, line, e.what());
+ }
+ return str;
+ };
+
+ if (!isSet(0))
+        throw FormatError("bad machine specification: no store URL found in the first column of row '%s'", line);
+
+ return {
+ tokens[0],
+ isSet(1) ? tokenizeString<std::vector<string>>(tokens[1], ",") : std::vector<string>{settings.thisSystem},
+ isSet(2) ? tokens[2] : "",
+ isSet(3) ? parseUnsignedIntField(3) : 1U,
+ isSet(4) ? parseUnsignedIntField(4) : 1U,
+ isSet(5) ? tokenizeString<std::set<string>>(tokens[5], ",") : std::set<string>{},
+ isSet(6) ? tokenizeString<std::set<string>>(tokens[6], ",") : std::set<string>{},
+ isSet(7) ? ensureBase64(7) : ""
+ };
+}
+
+static Machines parseBuilderLines(const std::vector<std::string> & builders)
+{
+ Machines result;
+ std::transform(builders.begin(), builders.end(), std::back_inserter(result), parseBuilderLine);
+ return result;
}
Machines getMachines()
{
- static auto machines = [&]() {
- Machines machines;
- parseMachines(settings.builders, machines);
- return machines;
- }();
- return machines;
+ const auto builderLines = expandBuilderLines(settings.builders);
+ return parseBuilderLines(builderLines);
}
}
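
For reference, a builders line is whitespace-separated, with "-" or an empty field meaning "use the default"; the columns map onto the Machine constructor in order (store URI, systems, SSH identity file, max jobs, speed factor, supported features, mandatory features, base64 SSH host key). A hypothetical line and how parseBuilderLine() would interpret it:

    // "ssh://mac x86_64-darwin,aarch64-darwin /home/alice/.ssh/id_mac 4 2 big-parallel - -"
    //   tokens[0] store URI            -> "ssh://mac"
    //   tokens[1] systems              -> {"x86_64-darwin", "aarch64-darwin"}
    //   tokens[2] SSH identity file    -> "/home/alice/.ssh/id_mac"
    //   tokens[3] max jobs             -> 4        (parsed as unsigned int)
    //   tokens[4] speed factor         -> 2
    //   tokens[5] supported features   -> {"big-parallel"}
    //   tokens[6] mandatory features   -> {}       ("-" means unset)
    //   tokens[7] SSH public host key  -> ""       ("-" means unset; otherwise must be base64)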
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
index b4929b445..6409874ff 100644
--- a/src/libstore/misc.cc
+++ b/src/libstore/misc.cc
@@ -7,6 +7,7 @@
#include "topo-sort.hh"
#include "callback.hh"
#include "closure.hh"
+#include "filetransfer.hh"
namespace nix {
@@ -100,7 +101,8 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
downloadSize_ = narSize_ = 0;
- ThreadPool pool;
+ // FIXME: make async.
+ ThreadPool pool(fileTransferSettings.httpConnections);
struct State
{
@@ -166,7 +168,7 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
}
std::visit(overloaded {
- [&](DerivedPath::Built bfd) {
+ [&](const DerivedPath::Built & bfd) {
if (!isValidPath(bfd.drvPath)) {
// FIXME: we could try to substitute the derivation.
auto state(state_.lock());
@@ -199,7 +201,7 @@ void Store::queryMissing(const std::vector<DerivedPath> & targets,
mustBuildDrv(bfd.drvPath, *drv);
},
- [&](DerivedPath::Opaque bo) {
+ [&](const DerivedPath::Opaque & bo) {
if (isValidPath(bo.path)) return;
@@ -239,12 +241,11 @@ StorePaths Store::topoSortPaths(const StorePathSet & paths)
{
return topoSort(paths,
{[&](const StorePath & path) {
- StorePathSet references;
try {
- references = queryPathInfo(path)->references;
+ return queryPathInfo(path)->references;
} catch (InvalidPath &) {
+ return StorePathSet();
}
- return references;
}},
{[&](const StorePath & path, const StorePath & parent) {
return BuildError(
diff --git a/src/libstore/names.cc b/src/libstore/names.cc
index ce808accc..54c95055d 100644
--- a/src/libstore/names.cc
+++ b/src/libstore/names.cc
@@ -42,7 +42,7 @@ DrvName::~DrvName()
{ }
-bool DrvName::matches(DrvName & n)
+bool DrvName::matches(const DrvName & n)
{
if (name != "*") {
if (!regex) {
diff --git a/src/libstore/names.hh b/src/libstore/names.hh
index bc62aac93..3f861bc44 100644
--- a/src/libstore/names.hh
+++ b/src/libstore/names.hh
@@ -19,7 +19,7 @@ struct DrvName
DrvName(std::string_view s);
~DrvName();
- bool matches(DrvName & n);
+ bool matches(const DrvName & n);
private:
std::unique_ptr<Regex> regex;
diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc
index 784ebb719..7d27d7667 100644
--- a/src/libstore/nar-accessor.cc
+++ b/src/libstore/nar-accessor.cc
@@ -28,7 +28,7 @@ struct NarMember
struct NarAccessor : public FSAccessor
{
- std::shared_ptr<const std::string> nar;
+ std::optional<const std::string> nar;
GetNarBytes getNarBytes;
@@ -104,7 +104,7 @@ struct NarAccessor : public FSAccessor
}
};
- NarAccessor(ref<const std::string> nar) : nar(nar)
+ NarAccessor(std::string && _nar) : nar(_nar)
{
StringSource source(*nar);
NarIndexer indexer(*this, source);
@@ -224,9 +224,9 @@ struct NarAccessor : public FSAccessor
}
};
-ref<FSAccessor> makeNarAccessor(ref<const std::string> nar)
+ref<FSAccessor> makeNarAccessor(std::string && nar)
{
- return make_ref<NarAccessor>(nar);
+ return make_ref<NarAccessor>(std::move(nar));
}
ref<FSAccessor> makeNarAccessor(Source & source)
diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh
index 8af1272f6..c2241a04c 100644
--- a/src/libstore/nar-accessor.hh
+++ b/src/libstore/nar-accessor.hh
@@ -10,7 +10,7 @@ struct Source;
/* Return an object that provides access to the contents of a NAR
file. */
-ref<FSAccessor> makeNarAccessor(ref<const std::string> nar);
+ref<FSAccessor> makeNarAccessor(std::string && nar);
ref<FSAccessor> makeNarAccessor(Source & source);
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index d95e54af1..1833c954e 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -88,7 +88,7 @@ Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHa
void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
- const Path & path, InodeHash & inodeHash)
+ const Path & path, InodeHash & inodeHash, RepairFlag repair)
{
checkInterrupt();
@@ -110,7 +110,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
if (S_ISDIR(st.st_mode)) {
Strings names = readDirectoryIgnoringInodes(path, inodeHash);
for (auto & i : names)
- optimisePath_(act, stats, path + "/" + i, inodeHash);
+ optimisePath_(act, stats, path + "/" + i, inodeHash, repair);
return;
}
@@ -151,7 +151,20 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
/* Check if this is a known hash. */
Path linkPath = linksDir + "/" + hash.to_string(Base32, false);
- retry:
+ /* Maybe delete the link, if it has been corrupted. */
+ if (pathExists(linkPath)) {
+ auto stLink = lstat(linkPath);
+ if (st.st_size != stLink.st_size
+ || (repair && hash != hashPath(htSHA256, linkPath).first))
+ {
+ // XXX: Consider overwriting linkPath with our valid version.
+ warn("removing corrupted link '%s'", linkPath);
+ warn("There may be more corrupted paths."
+ "\nYou should run `nix-store --verify --check-contents --repair` to fix them all");
+ unlink(linkPath.c_str());
+ }
+ }
+
if (!pathExists(linkPath)) {
/* Nope, create a hard link in the links directory. */
if (link(path.c_str(), linkPath.c_str()) == 0) {
@@ -187,12 +200,6 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
return;
}
- if (st.st_size != stLink.st_size) {
- warn("removing corrupted link '%s'", linkPath);
- unlink(linkPath.c_str());
- goto retry;
- }
-
printMsg(lvlTalkative, format("linking '%1%' to '%2%'") % path % linkPath);
/* Make the containing directory writable, but only if it's not
@@ -260,7 +267,7 @@ void LocalStore::optimiseStore(OptimiseStats & stats)
if (!isValidPath(i)) continue; /* path was GC'ed, probably */
{
Activity act(*logger, lvlTalkative, actUnknown, fmt("optimising path '%s'", printStorePath(i)));
- optimisePath_(&act, stats, realStoreDir + "/" + std::string(i.to_string()), inodeHash);
+ optimisePath_(&act, stats, realStoreDir + "/" + std::string(i.to_string()), inodeHash, NoRepair);
}
done++;
act.progress(done, paths.size());
@@ -278,12 +285,12 @@ void LocalStore::optimiseStore()
stats.filesLinked);
}
-void LocalStore::optimisePath(const Path & path)
+void LocalStore::optimisePath(const Path & path, RepairFlag repair)
{
OptimiseStats stats;
InodeHash inodeHash;
- if (settings.autoOptimiseStore) optimisePath_(nullptr, stats, path, inodeHash);
+ if (settings.autoOptimiseStore) optimisePath_(nullptr, stats, path, inodeHash, repair);
}
diff --git a/src/libstore/path-with-outputs.cc b/src/libstore/path-with-outputs.cc
index 865d64cf2..e5a121e00 100644
--- a/src/libstore/path-with-outputs.cc
+++ b/src/libstore/path-with-outputs.cc
@@ -31,14 +31,14 @@ std::vector<DerivedPath> toDerivedPaths(const std::vector<StorePathWithOutputs>
std::variant<StorePathWithOutputs, StorePath> StorePathWithOutputs::tryFromDerivedPath(const DerivedPath & p)
{
return std::visit(overloaded {
- [&](DerivedPath::Opaque bo) -> std::variant<StorePathWithOutputs, StorePath> {
+ [&](const DerivedPath::Opaque & bo) -> std::variant<StorePathWithOutputs, StorePath> {
if (bo.path.isDerivation()) {
// drv path gets interpreted as "build", not "get drv file itself"
return bo.path;
}
return StorePathWithOutputs { bo.path };
},
- [&](DerivedPath::Built bfd) -> std::variant<StorePathWithOutputs, StorePath> {
+ [&](const DerivedPath::Built & bfd) -> std::variant<StorePathWithOutputs, StorePath> {
return StorePathWithOutputs { bfd.drvPath, bfd.outputs };
},
}, p.raw());
diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc
index 926f4ea1e..2da74e262 100644
--- a/src/libstore/pathlocks.cc
+++ b/src/libstore/pathlocks.cc
@@ -176,4 +176,17 @@ void PathLocks::setDeletion(bool deletePaths)
}
+FdLock::FdLock(int fd, LockType lockType, bool wait, std::string_view waitMsg)
+ : fd(fd)
+{
+ if (wait) {
+ if (!lockFile(fd, lockType, false)) {
+ printInfo("%s", waitMsg);
+ acquired = lockFile(fd, lockType, true);
+ }
+ } else
+ acquired = lockFile(fd, lockType, false);
+}
+
+
}
diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh
index 411da0222..919c8904c 100644
--- a/src/libstore/pathlocks.hh
+++ b/src/libstore/pathlocks.hh
@@ -35,4 +35,18 @@ public:
void setDeletion(bool deletePaths);
};
+struct FdLock
+{
+ int fd;
+ bool acquired = false;
+
+ FdLock(int fd, LockType lockType, bool wait, std::string_view waitMsg);
+
+ ~FdLock()
+ {
+ if (acquired)
+ lockFile(fd, ltNone, false);
+ }
+};
+
}
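
FdLock is a small RAII wrapper over lockFile(): the constructor takes the lock (printing waitMsg first if the lock would block and wait is true) and the destructor releases it. This is how verifyStore() in the local-store.cc hunk above holds the GC lock; the sketch below is a generic illustration with a hypothetical lock file.

    // Illustrative only; the lock path is made up.
    int fd = open("/tmp/example.lock", O_RDWR | O_CREAT | O_CLOEXEC, 0600);
    if (fd == -1) throw SysError("opening lock file");
    {
        FdLock lock(fd, ltRead, /*wait=*/true, "waiting for the example lock...");
        // ... read shared state while holding the lock ...
    }   // ~FdLock releases it via lockFile(fd, ltNone, false)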
diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc
index 84a21c0ba..73163424c 100644
--- a/src/libstore/profiles.cc
+++ b/src/libstore/profiles.cc
@@ -126,9 +126,9 @@ void deleteGeneration(const Path & profile, GenerationNumber gen)
static void deleteGeneration2(const Path & profile, GenerationNumber gen, bool dryRun)
{
if (dryRun)
- printInfo(format("would remove generation %1%") % gen);
+ notice("would remove profile version %1%", gen);
else {
- printInfo(format("removing generation %1%") % gen);
+ notice("removing profile version %1%", gen);
deleteGeneration(profile, gen);
}
}
@@ -142,7 +142,7 @@ void deleteGenerations(const Path & profile, const std::set<GenerationNumber> &
auto [gens, curGen] = findGenerations(profile);
if (gensToDelete.count(*curGen))
- throw Error("cannot delete current generation of profile %1%'", profile);
+        throw Error("cannot delete current version of profile '%1%'", profile);
for (auto & i : gens) {
if (!gensToDelete.count(i.number)) continue;
@@ -236,6 +236,37 @@ void switchLink(Path link, Path target)
}
+void switchGeneration(
+ const Path & profile,
+ std::optional<GenerationNumber> dstGen,
+ bool dryRun)
+{
+ PathLocks lock;
+ lockProfile(lock, profile);
+
+ auto [gens, curGen] = findGenerations(profile);
+
+ std::optional<Generation> dst;
+ for (auto & i : gens)
+ if ((!dstGen && i.number < curGen) ||
+ (dstGen && i.number == *dstGen))
+ dst = i;
+
+ if (!dst) {
+ if (dstGen)
+ throw Error("profile version %1% does not exist", *dstGen);
+ else
+ throw Error("no profile version older than the current (%1%) exists", curGen.value_or(0));
+ }
+
+ notice("switching profile from version %d to %d", curGen.value_or(0), dst->number);
+
+ if (dryRun) return;
+
+ switchLink(profile, dst->path);
+}
+
+
void lockProfile(PathLocks & lock, const Path & profile)
{
lock.lockPaths({profile}, (format("waiting for lock on profile '%1%'") % profile).str());
diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh
index be55a65d4..d100c970c 100644
--- a/src/libstore/profiles.hh
+++ b/src/libstore/profiles.hh
@@ -11,7 +11,7 @@ namespace nix {
class StorePath;
-typedef unsigned int GenerationNumber;
+typedef uint64_t GenerationNumber;
struct Generation
{
@@ -46,6 +46,13 @@ void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, b
void switchLink(Path link, Path target);
+/* Roll back a profile to the specified generation, or to the most
+ recent one older than the current. */
+void switchGeneration(
+ const Path & profile,
+ std::optional<GenerationNumber> dstGen,
+ bool dryRun);
+
/* Ensure exclusive access to a profile. Any command that modifies
the profile first acquires this lock. */
void lockProfile(PathLocks & lock, const Path & profile);
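
A brief usage sketch of the new switchGeneration() helper (the profile path is made up for illustration): passing std::nullopt rolls back to the most recent version older than the current one, while a concrete number switches to that version.

    // Illustrative only.
    Path profile = "/nix/var/nix/profiles/default";
    switchGeneration(profile, std::nullopt, false);   // roll back to the previous version
    switchGeneration(profile, 42, true);              // dry-run a switch to version 42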
diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc
index f871e6437..d63ec5ea2 100644
--- a/src/libstore/realisation.cc
+++ b/src/libstore/realisation.cc
@@ -78,7 +78,7 @@ Realisation Realisation::fromJSON(
auto fieldIterator = json.find(fieldName);
if (fieldIterator == json.end())
return std::nullopt;
- return *fieldIterator;
+ return {*fieldIterator};
};
auto getField = [&](std::string fieldName) -> std::string {
if (auto field = getOptionalField(fieldName))
diff --git a/src/libstore/references.cc b/src/libstore/references.cc
index 3a07c1411..91b3fc142 100644
--- a/src/libstore/references.cc
+++ b/src/libstore/references.cc
@@ -11,11 +11,13 @@
namespace nix {
-static unsigned int refLength = 32; /* characters */
+static size_t refLength = 32; /* characters */
-static void search(const unsigned char * s, size_t len,
- StringSet & hashes, StringSet & seen)
+static void search(
+ std::string_view s,
+ StringSet & hashes,
+ StringSet & seen)
{
static std::once_flag initialised;
static bool isBase32[256];
@@ -25,7 +27,7 @@ static void search(const unsigned char * s, size_t len,
isBase32[(unsigned char) base32Chars[i]] = true;
});
- for (size_t i = 0; i + refLength <= len; ) {
+ for (size_t i = 0; i + refLength <= s.size(); ) {
int j;
bool match = true;
for (j = refLength - 1; j >= 0; --j)
@@ -35,7 +37,7 @@ static void search(const unsigned char * s, size_t len,
break;
}
if (!match) continue;
- string ref((const char *) s + i, refLength);
+ std::string ref(s.substr(i, refLength));
if (hashes.erase(ref)) {
debug(format("found reference to '%1%' at offset '%2%'")
% ref % i);
@@ -46,69 +48,60 @@ static void search(const unsigned char * s, size_t len,
}
-struct RefScanSink : Sink
+void RefScanSink::operator () (std::string_view data)
{
- StringSet hashes;
- StringSet seen;
-
- string tail;
-
- RefScanSink() { }
-
- void operator () (std::string_view data) override
- {
- /* It's possible that a reference spans the previous and current
- fragment, so search in the concatenation of the tail of the
- previous fragment and the start of the current fragment. */
- string s = tail + std::string(data, 0, refLength);
- search((const unsigned char *) s.data(), s.size(), hashes, seen);
-
- search((const unsigned char *) data.data(), data.size(), hashes, seen);
-
- size_t tailLen = data.size() <= refLength ? data.size() : refLength;
- tail = std::string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen));
- tail.append({data.data() + data.size() - tailLen, tailLen});
- }
-};
+ /* It's possible that a reference spans the previous and current
+ fragment, so search in the concatenation of the tail of the
+ previous fragment and the start of the current fragment. */
+ auto s = tail;
+ auto tailLen = std::min(data.size(), refLength);
+ s.append(data.data(), tailLen);
+ search(s, hashes, seen);
+
+ search(data, hashes, seen);
+
+ auto rest = refLength - tailLen;
+ if (rest < tail.size())
+ tail = tail.substr(tail.size() - rest);
+ tail.append(data.data() + data.size() - tailLen, tailLen);
+}
-std::pair<PathSet, HashResult> scanForReferences(const string & path,
- const PathSet & refs)
+std::pair<StorePathSet, HashResult> scanForReferences(
+ const string & path,
+ const StorePathSet & refs)
{
HashSink hashSink { htSHA256 };
auto found = scanForReferences(hashSink, path, refs);
auto hash = hashSink.finish();
- return std::pair<PathSet, HashResult>(found, hash);
+ return std::pair<StorePathSet, HashResult>(found, hash);
}
-PathSet scanForReferences(Sink & toTee,
- const string & path, const PathSet & refs)
+StorePathSet scanForReferences(
+ Sink & toTee,
+ const Path & path,
+ const StorePathSet & refs)
{
- RefScanSink refsSink;
- TeeSink sink { refsSink, toTee };
- std::map<string, Path> backMap;
+ StringSet hashes;
+ std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
- auto baseName = std::string(baseNameOf(i));
- string::size_type pos = baseName.find('-');
- if (pos == string::npos)
- throw Error("bad reference '%1%'", i);
- string s = string(baseName, 0, pos);
- assert(s.size() == refLength);
- assert(backMap.find(s) == backMap.end());
- // parseHash(htSHA256, s);
- refsSink.hashes.insert(s);
- backMap[s] = i;
+ std::string hashPart(i.hashPart());
+ auto inserted = backMap.emplace(hashPart, i).second;
+ assert(inserted);
+ hashes.insert(hashPart);
}
/* Look for the hashes in the NAR dump of the path. */
+ RefScanSink refsSink(std::move(hashes));
+ TeeSink sink { refsSink, toTee };
dumpPath(path, sink);
/* Map the hashes found back to their store paths. */
- PathSet found;
- for (auto & i : refsSink.seen) {
- std::map<string, Path>::iterator j;
- if ((j = backMap.find(i)) == backMap.end()) abort();
+ StorePathSet found;
+ for (auto & i : refsSink.getResult()) {
+ auto j = backMap.find(i);
+ assert(j != backMap.end());
found.insert(j->second);
}
diff --git a/src/libstore/references.hh b/src/libstore/references.hh
index 4f12e6b21..a6119c861 100644
--- a/src/libstore/references.hh
+++ b/src/libstore/references.hh
@@ -1,13 +1,31 @@
#pragma once
-#include "types.hh"
#include "hash.hh"
+#include "path.hh"
namespace nix {
-std::pair<PathSet, HashResult> scanForReferences(const Path & path, const PathSet & refs);
+std::pair<StorePathSet, HashResult> scanForReferences(const Path & path, const StorePathSet & refs);
-PathSet scanForReferences(Sink & toTee, const Path & path, const PathSet & refs);
+StorePathSet scanForReferences(Sink & toTee, const Path & path, const StorePathSet & refs);
+
+class RefScanSink : public Sink
+{
+ StringSet hashes;
+ StringSet seen;
+
+ std::string tail;
+
+public:
+
+ RefScanSink(StringSet && hashes) : hashes(hashes)
+ { }
+
+ StringSet & getResult()
+ { return seen; }
+
+ void operator () (std::string_view data) override;
+};
struct RewritingSink : Sink
{
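
RefScanSink is now a public, reusable sink: it is seeded with the hash parts to look for, fed arbitrary chunks (a reference may straddle chunk boundaries thanks to the tail buffer), and getResult() reports which hashes were seen. A small sketch with a made-up hash part (real hash parts are 32 base32 characters); assumes <cassert> is included.

    // Illustrative only.
    StringSet candidates = { std::string(32, 'a') };           // hypothetical 32-char hash part
    RefScanSink scanner(std::move(candidates));
    scanner("/nix/store/" + std::string(10, 'a'));             // a reference split across...
    scanner(std::string(22, 'a') + "-foo-1.0");                // ...two chunks is still found
    assert(scanner.getResult().count(std::string(32, 'a')));   // the hash part was seen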
diff --git a/src/libstore/remote-fs-accessor.cc b/src/libstore/remote-fs-accessor.cc
index f43456f0b..0ce335646 100644
--- a/src/libstore/remote-fs-accessor.cc
+++ b/src/libstore/remote-fs-accessor.cc
@@ -22,9 +22,18 @@ Path RemoteFSAccessor::makeCacheFile(std::string_view hashPart, const std::strin
return fmt("%s/%s.%s", cacheDir, hashPart, ext);
}
-void RemoteFSAccessor::addToCache(std::string_view hashPart, const std::string & nar,
- ref<FSAccessor> narAccessor)
+ref<FSAccessor> RemoteFSAccessor::addToCache(std::string_view hashPart, std::string && nar)
{
+ if (cacheDir != "") {
+ try {
+ /* FIXME: do this asynchronously. */
+ writeFile(makeCacheFile(hashPart, "nar"), nar);
+ } catch (...) {
+ ignoreException();
+ }
+ }
+
+ auto narAccessor = makeNarAccessor(std::move(nar));
nars.emplace(hashPart, narAccessor);
if (cacheDir != "") {
@@ -33,14 +42,12 @@ void RemoteFSAccessor::addToCache(std::string_view hashPart, const std::string &
JSONPlaceholder jsonRoot(str);
listNar(jsonRoot, narAccessor, "", true);
writeFile(makeCacheFile(hashPart, "ls"), str.str());
-
- /* FIXME: do this asynchronously. */
- writeFile(makeCacheFile(hashPart, "nar"), nar);
-
} catch (...) {
ignoreException();
}
}
+
+ return narAccessor;
}
std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_, bool requireValidPath)
@@ -55,7 +62,6 @@ std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_, boo
auto i = nars.find(std::string(storePath.hashPart()));
if (i != nars.end()) return {i->second, restPath};
- StringSink sink;
std::string listing;
Path cacheFile;
@@ -86,19 +92,15 @@ std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_, boo
} catch (SysError &) { }
try {
- *sink.s = nix::readFile(cacheFile);
-
- auto narAccessor = makeNarAccessor(sink.s);
+ auto narAccessor = makeNarAccessor(nix::readFile(cacheFile));
nars.emplace(storePath.hashPart(), narAccessor);
return {narAccessor, restPath};
-
} catch (SysError &) { }
}
+ StringSink sink;
store->narFromPath(storePath, sink);
- auto narAccessor = makeNarAccessor(sink.s);
- addToCache(storePath.hashPart(), *sink.s, narAccessor);
- return {narAccessor, restPath};
+ return {addToCache(storePath.hashPart(), std::move(sink.s)), restPath};
}
FSAccessor::Stat RemoteFSAccessor::stat(const Path & path)
diff --git a/src/libstore/remote-fs-accessor.hh b/src/libstore/remote-fs-accessor.hh
index 594852d0e..99f5544ef 100644
--- a/src/libstore/remote-fs-accessor.hh
+++ b/src/libstore/remote-fs-accessor.hh
@@ -20,8 +20,7 @@ class RemoteFSAccessor : public FSAccessor
Path makeCacheFile(std::string_view hashPart, const std::string & ext);
- void addToCache(std::string_view hashPart, const std::string & nar,
- ref<FSAccessor> narAccessor);
+ ref<FSAccessor> addToCache(std::string_view hashPart, std::string && nar);
public:
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 140f39120..aac2965e0 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -5,7 +5,6 @@
#include "remote-store.hh"
#include "worker-protocol.hh"
#include "archive.hh"
-#include "affinity.hh"
#include "globals.hh"
#include "derivations.hh"
#include "pool.hh"
@@ -162,8 +161,19 @@ void RemoteStore::initConnection(Connection & conn)
try {
conn.to << WORKER_MAGIC_1;
conn.to.flush();
- unsigned int magic = readInt(conn.from);
- if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
+ StringSink saved;
+ try {
+ TeeSource tee(conn.from, saved);
+ unsigned int magic = readInt(tee);
+ if (magic != WORKER_MAGIC_2)
+ throw Error("protocol mismatch");
+ } catch (SerialisationError & e) {
+ /* In case the other side is waiting for our input, close
+ it. */
+ conn.closeWrite();
+ auto msg = conn.from.drain();
+ throw Error("protocol mismatch, got '%s'", chomp(saved.s + msg));
+ }
conn.from >> conn.daemonVersion;
if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
@@ -173,11 +183,8 @@ void RemoteStore::initConnection(Connection & conn)
conn.to << PROTOCOL_VERSION;
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 14) {
- int cpu = sameMachine() && settings.lockCPU ? lockToCurrentCPU() : -1;
- if (cpu != -1)
- conn.to << 1 << cpu;
- else
- conn.to << 0;
+ // Obsolete CPU affinity.
+ conn.to << 0;
}
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11)
@@ -222,6 +229,7 @@ void RemoteStore::setOptions(Connection & conn)
overrides.erase(settings.buildCores.name);
overrides.erase(settings.useSubstitutes.name);
overrides.erase(loggerSettings.showTrace.name);
+ overrides.erase(settings.experimentalFeatures.name);
conn.to << overrides.size();
for (auto & i : overrides)
conn.to << i.first << i.second.value;
@@ -278,6 +286,10 @@ ConnectionHandle RemoteStore::getConnection()
return ConnectionHandle(connections->get());
}
+void RemoteStore::setOptions()
+{
+ setOptions(*(getConnection().handle));
+}
bool RemoteStore::isValidPathUncached(const StorePath & path)
{
@@ -516,13 +528,13 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
if (repair) throw Error("repairing is not supported when building through the Nix daemon protocol < 1.25");
std::visit(overloaded {
- [&](TextHashMethod thm) -> void {
+ [&](const TextHashMethod & thm) -> void {
std::string s = dump.drain();
conn->to << wopAddTextToStore << name << s;
worker_proto::write(*this, conn->to, references);
conn.processStderr();
},
- [&](FixedOutputHashMethod fohm) -> void {
+ [&](const FixedOutputHashMethod & fohm) -> void {
conn->to
<< wopAddToStore
<< name
@@ -566,9 +578,8 @@ ref<const ValidPathInfo> RemoteStore::addCAToStore(
StorePath RemoteStore::addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method, HashType hashType, RepairFlag repair)
+ FileIngestionMethod method, HashType hashType, RepairFlag repair, const StorePathSet & references)
{
- StorePathSet references;
return addCAToStore(dump, name, FixedOutputHashMethod{ .fileIngestionMethod = method, .hashType = hashType }, references, repair)->path;
}
@@ -665,23 +676,41 @@ void RemoteStore::registerDrvOutput(const Realisation & info)
conn.processStderr();
}
-std::optional<const Realisation> RemoteStore::queryRealisation(const DrvOutput & id)
+void RemoteStore::queryRealisationUncached(const DrvOutput & id,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
auto conn(getConnection());
+
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 27) {
+ warn("the daemon is too old to support content-addressed derivations, please upgrade it to 2.4");
+ try {
+ callback(nullptr);
+ } catch (...) { return callback.rethrow(); }
+ }
+
conn->to << wopQueryRealisation;
conn->to << id.to_string();
conn.processStderr();
- if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
- auto outPaths = worker_proto::read(*this, conn->from, Phantom<std::set<StorePath>>{});
- if (outPaths.empty())
- return std::nullopt;
- return {Realisation{.id = id, .outPath = *outPaths.begin()}};
- } else {
- auto realisations = worker_proto::read(*this, conn->from, Phantom<std::set<Realisation>>{});
- if (realisations.empty())
- return std::nullopt;
- return *realisations.begin();
- }
+
+ auto real = [&]() -> std::shared_ptr<const Realisation> {
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
+ auto outPaths = worker_proto::read(
+ *this, conn->from, Phantom<std::set<StorePath>> {});
+ if (outPaths.empty())
+ return nullptr;
+ return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
+ } else {
+ auto realisations = worker_proto::read(
+ *this, conn->from, Phantom<std::set<Realisation>> {});
+ if (realisations.empty())
+ return nullptr;
+ return std::make_shared<const Realisation>(*realisations.begin());
+ }
+ }();
+
+ try {
+ callback(std::shared_ptr<const Realisation>(real));
+ } catch (...) { return callback.rethrow(); }
}
static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, const std::vector<DerivedPath> & reqs)
@@ -693,10 +722,10 @@ static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, cons
for (auto & p : reqs) {
auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
std::visit(overloaded {
- [&](StorePathWithOutputs s) {
+ [&](const StorePathWithOutputs & s) {
ss.push_back(s.to_string(store));
},
- [&](StorePath drvPath) {
+ [&](const StorePath & drvPath) {
throw Error("trying to request '%s', but daemon protocol %d.%d is too old (< 1.29) to request a derivation file",
store.printStorePath(drvPath),
GET_PROTOCOL_MAJOR(conn->daemonVersion),
@@ -785,15 +814,6 @@ void RemoteStore::addIndirectRoot(const Path & path)
}
-void RemoteStore::syncWithGC()
-{
- auto conn(getConnection());
- conn->to << wopSyncWithGC;
- conn.processStderr();
- readInt(conn->from);
-}
-
-
Roots RemoteStore::findRoots(bool censor)
{
auto conn(getConnection());
@@ -888,6 +908,18 @@ void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
}
+void RemoteStore::addBuildLog(const StorePath & drvPath, std::string_view log)
+{
+ auto conn(getConnection());
+ conn->to << wopAddBuildLog << drvPath.to_string();
+ StringSource source(log);
+ conn.withFramedSink([&](Sink & sink) {
+ source.drainInto(sink);
+ });
+ readInt(conn->from);
+}
+
+
void RemoteStore::connect()
{
auto conn(getConnection());
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 8901c79fc..4754ff45a 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -73,7 +73,7 @@ public:
/* Add a content-addressable store path. Does not support references. `dump` will be drained. */
StorePath addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override;
+ FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair, const StorePathSet & references = StorePathSet()) override;
void addToStore(const ValidPathInfo & info, Source & nar,
RepairFlag repair, CheckSigsFlag checkSigs) override;
@@ -88,7 +88,8 @@ public:
void registerDrvOutput(const Realisation & info) override;
- std::optional<const Realisation> queryRealisation(const DrvOutput &) override;
+ void queryRealisationUncached(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept override;
void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode, std::shared_ptr<Store> evalStore) override;
@@ -101,8 +102,6 @@ public:
void addIndirectRoot(const Path & path) override;
- void syncWithGC() override;
-
Roots findRoots(bool censor) override;
void collectGarbage(const GCOptions & options, GCResults & results) override;
@@ -117,6 +116,8 @@ public:
StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
uint64_t & downloadSize, uint64_t & narSize) override;
+ void addBuildLog(const StorePath & drvPath, std::string_view log) override;
+
void connect() override;
unsigned int getProtocol() override;
@@ -125,7 +126,6 @@ public:
struct Connection
{
- AutoCloseFD fd;
FdSink to;
FdSource from;
unsigned int daemonVersion;
@@ -133,6 +133,8 @@ public:
virtual ~Connection();
+ virtual void closeWrite() = 0;
+
std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0, bool flush = true);
};
@@ -148,6 +150,8 @@ protected:
virtual void setOptions(Connection & conn);
+ void setOptions() override;
+
ConnectionHandle getConnection();
friend struct ConnectionHandle;
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 6bfbee044..a024e971d 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -209,7 +209,7 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
S3Helper s3Helper;
S3BinaryCacheStoreImpl(
- const std::string & scheme,
+ const std::string & uriScheme,
const std::string & bucketName,
const Params & params)
: StoreConfig(params)
@@ -232,8 +232,8 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
void init() override
{
if (auto cacheInfo = diskCache->cacheExists(getUri())) {
- wantMassQuery.setDefault(cacheInfo->wantMassQuery ? "true" : "false");
- priority.setDefault(fmt("%d", cacheInfo->priority));
+ wantMassQuery.setDefault(cacheInfo->wantMassQuery);
+ priority.setDefault(cacheInfo->priority);
} else {
BinaryCacheStore::init();
diskCache->createCache(getUri(), storeDir, wantMassQuery, priority);
@@ -385,7 +385,7 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual
auto compress = [&](std::string compression)
{
auto compressed = nix::compress(compression, StreamToSourceAdapter(istream).drain());
- return std::make_shared<std::stringstream>(std::move(*compressed));
+ return std::make_shared<std::stringstream>(std::move(compressed));
};
if (narinfoCompression != "" && hasSuffix(path, ".narinfo"))
diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh
index 2042bffcf..3f55c74db 100644
--- a/src/libstore/s3.hh
+++ b/src/libstore/s3.hh
@@ -4,6 +4,8 @@
#include "ref.hh"
+#include <optional>
+
namespace Aws { namespace Client { class ClientConfiguration; } }
namespace Aws { namespace S3 { class S3Client; } }
@@ -20,7 +22,7 @@ struct S3Helper
struct FileTransferResult
{
- std::shared_ptr<std::string> data;
+ std::optional<std::string> data;
unsigned int durationMs;
};
diff --git a/src/libstore/sandbox-defaults.sb b/src/libstore/sandbox-defaults.sb
index 2bb1ea130..56b35c3fe 100644
--- a/src/libstore/sandbox-defaults.sb
+++ b/src/libstore/sandbox-defaults.sb
@@ -97,3 +97,8 @@
; This is used by /bin/sh on macOS 10.15 and later.
(allow file*
(literal "/private/var/select/sh"))
+
+; Allow Rosetta 2 to run x86_64 binaries on aarch64-darwin.
+(allow file-read*
+ (subpath "/Library/Apple/usr/libexec/oah")
+ (subpath "/System/Library/Apple/usr/libexec/oah"))
diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh
index 02d0810cc..3f76baa82 100644
--- a/src/libstore/serve-protocol.hh
+++ b/src/libstore/serve-protocol.hh
@@ -5,7 +5,7 @@ namespace nix {
#define SERVE_MAGIC_1 0x390c9deb
#define SERVE_MAGIC_2 0x5452eecb
-#define SERVE_PROTOCOL_VERSION (2 << 8 | 6)
+#define SERVE_PROTOCOL_VERSION (2 << 8 | 7)
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
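
For reference, the version word packs the major version into the high byte and the minor into the low byte, so this bump only changes the minor: (2 << 8 | 7) is 0x0207, GET_PROTOCOL_MAJOR yields 0x0200 and GET_PROTOCOL_MINOR yields 7, which is why the keep-failed flag in legacy-ssh-store.cc above is guarded by GET_PROTOCOL_MINOR(conn.remoteVersion) >= 7. Sanity-check assertions, not part of the commit:

    static_assert(SERVE_PROTOCOL_VERSION == 0x0207);
    static_assert(GET_PROTOCOL_MAJOR(SERVE_PROTOCOL_VERSION) == 0x0200);
    static_assert(GET_PROTOCOL_MINOR(SERVE_PROTOCOL_VERSION) == 7);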
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 447b4179b..1d6baf02d 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -1,4 +1,5 @@
#include "sqlite.hh"
+#include "globals.hh"
#include "util.hh"
#include <sqlite3.h>
@@ -27,8 +28,12 @@ namespace nix {
SQLite::SQLite(const Path & path, bool create)
{
+ // useSQLiteWAL also indicates what virtual file system we need. Using
+    // `unix-dotfile` is needed on NFS file systems and on the Windows
+    // Subsystem for Linux (WSL), where useSQLiteWAL should be false by default.
+ const char *vfs = settings.useSQLiteWAL ? 0 : "unix-dotfile";
if (sqlite3_open_v2(path.c_str(), &db,
- SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
+ SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), vfs) != SQLITE_OK)
throw Error("cannot open SQLite database '%s'", path);
if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
index f2caf2aeb..bb03daef4 100644
--- a/src/libstore/ssh-store.cc
+++ b/src/libstore/ssh-store.cc
@@ -57,6 +57,11 @@ private:
struct Connection : RemoteStore::Connection
{
std::unique_ptr<SSHMaster::Connection> sshConn;
+
+ void closeWrite() override
+ {
+ sshConn->in.close();
+ }
};
ref<RemoteStore::Connection> openConnection() override;
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index bb9f0967d..84767e917 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -199,10 +199,10 @@ StorePath Store::makeFixedOutputPathFromCA(std::string_view name, ContentAddress
{
// New template
return std::visit(overloaded {
- [&](TextHash th) {
+ [&](const TextHash & th) {
return makeTextPath(name, th.hash, references);
},
- [&](FixedOutputHash fsh) {
+ [&](const FixedOutputHash & fsh) {
return makeFixedOutputPath(fsh.method, fsh.hash, name, references, hasSelfReference);
}
}, ca);
@@ -237,7 +237,7 @@ StorePath Store::computeStorePathForText(const string & name, const string & s,
StorePath Store::addToStore(const string & name, const Path & _srcPath,
- FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
+ FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair, const StorePathSet & references)
{
Path srcPath(absPath(_srcPath));
auto source = sinkToSource([&](Sink & sink) {
@@ -246,7 +246,7 @@ StorePath Store::addToStore(const string & name, const Path & _srcPath,
else
readFile(srcPath, sink);
});
- return addToStoreFromDump(*source, name, method, hashAlgo, repair);
+ return addToStoreFromDump(*source, name, method, hashAlgo, repair, references);
}
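
`addToStore` and `addToStoreFromDump` gain a trailing `references` parameter (defaulting to an empty set in the header further down), so existing callers compile unchanged while new callers can register already-known references at insertion time. A hedged usage sketch; the helper name is made up, and a `Store &` plus a precomputed `StorePathSet` are assumed:

    #include "store-api.hh"

    using namespace nix;

    // Hypothetical helper: ingest ./result and record that it refers to `refs`.
    StorePath importResult(Store & store, const StorePathSet & refs)
    {
        return store.addToStore("result", "./result",
            FileIngestionMethod::Recursive, htSHA256,
            defaultPathFilter, NoRepair, refs);
    }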
@@ -355,8 +355,13 @@ ValidPathInfo Store::addToStoreSlow(std::string_view name, const Path & srcPath,
StringSet StoreConfig::getDefaultSystemFeatures()
{
auto res = settings.systemFeatures.get();
- if (settings.isExperimentalFeatureEnabled("ca-derivations"))
+
+ if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations))
res.insert("ca-derivations");
+
+ if (settings.isExperimentalFeatureEnabled(Xp::RecursiveNix))
+ res.insert("recursive-nix");
+
return res;
}
@@ -414,11 +419,9 @@ StorePathSet Store::queryDerivationOutputs(const StorePath & path)
bool Store::isValidPath(const StorePath & storePath)
{
- std::string hashPart(storePath.hashPart());
-
{
auto state_(state.lock());
- auto res = state_->pathInfoCache.get(hashPart);
+ auto res = state_->pathInfoCache.get(std::string(storePath.to_string()));
if (res && res->isKnownNow()) {
stats.narInfoReadAverted++;
return res->didExist();
@@ -426,11 +429,11 @@ bool Store::isValidPath(const StorePath & storePath)
}
if (diskCache) {
- auto res = diskCache->lookupNarInfo(getUri(), hashPart);
+ auto res = diskCache->lookupNarInfo(getUri(), std::string(storePath.hashPart()));
if (res.first != NarInfoDiskCache::oUnknown) {
stats.narInfoReadAverted++;
auto state_(state.lock());
- state_->pathInfoCache.upsert(hashPart,
+ state_->pathInfoCache.upsert(std::string(storePath.to_string()),
res.first == NarInfoDiskCache::oInvalid ? PathInfoCacheValue{} : PathInfoCacheValue { .value = res.second });
return res.first == NarInfoDiskCache::oValid;
}
@@ -440,7 +443,7 @@ bool Store::isValidPath(const StorePath & storePath)
if (diskCache && !valid)
// FIXME: handle valid = true case.
- diskCache->upsertNarInfo(getUri(), hashPart, 0);
+ diskCache->upsertNarInfo(getUri(), std::string(storePath.hashPart()), 0);
return valid;
}
@@ -487,13 +490,11 @@ static bool goodStorePath(const StorePath & expected, const StorePath & actual)
void Store::queryPathInfo(const StorePath & storePath,
Callback<ref<const ValidPathInfo>> callback) noexcept
{
- std::string hashPart;
+ auto hashPart = std::string(storePath.hashPart());
try {
- hashPart = storePath.hashPart();
-
{
- auto res = state.lock()->pathInfoCache.get(hashPart);
+ auto res = state.lock()->pathInfoCache.get(std::string(storePath.to_string()));
if (res && res->isKnownNow()) {
stats.narInfoReadAverted++;
if (!res->didExist())
@@ -508,7 +509,7 @@ void Store::queryPathInfo(const StorePath & storePath,
stats.narInfoReadAverted++;
{
auto state_(state.lock());
- state_->pathInfoCache.upsert(hashPart,
+ state_->pathInfoCache.upsert(std::string(storePath.to_string()),
res.first == NarInfoDiskCache::oInvalid ? PathInfoCacheValue{} : PathInfoCacheValue{ .value = res.second });
if (res.first == NarInfoDiskCache::oInvalid ||
!goodStorePath(storePath, res.second->path))
@@ -523,7 +524,7 @@ void Store::queryPathInfo(const StorePath & storePath,
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
queryPathInfoUncached(storePath,
- {[this, storePathS{printStorePath(storePath)}, hashPart, callbackPtr](std::future<std::shared_ptr<const ValidPathInfo>> fut) {
+ {[this, storePath, hashPart, callbackPtr](std::future<std::shared_ptr<const ValidPathInfo>> fut) {
try {
auto info = fut.get();
@@ -533,14 +534,12 @@ void Store::queryPathInfo(const StorePath & storePath,
{
auto state_(state.lock());
- state_->pathInfoCache.upsert(hashPart, PathInfoCacheValue { .value = info });
+ state_->pathInfoCache.upsert(std::string(storePath.to_string()), PathInfoCacheValue { .value = info });
}
- auto storePath = parseStorePath(storePathS);
-
if (!info || !goodStorePath(storePath, info->path)) {
stats.narInfoMissing++;
- throw InvalidPath("path '%s' is not valid", storePathS);
+ throw InvalidPath("path '%s' is not valid", printStorePath(storePath));
}
(*callbackPtr)(ref<const ValidPathInfo>(info));
@@ -548,6 +547,74 @@ void Store::queryPathInfo(const StorePath & storePath,
}});
}
+void Store::queryRealisation(const DrvOutput & id,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept
+{
+
+ try {
+ if (diskCache) {
+ auto [cacheOutcome, maybeCachedRealisation]
+ = diskCache->lookupRealisation(getUri(), id);
+ switch (cacheOutcome) {
+ case NarInfoDiskCache::oValid:
+ debug("Returning a cached realisation for %s", id.to_string());
+ callback(maybeCachedRealisation);
+ return;
+ case NarInfoDiskCache::oInvalid:
+ debug(
+ "Returning a cached missing realisation for %s",
+ id.to_string());
+ callback(nullptr);
+ return;
+ case NarInfoDiskCache::oUnknown:
+ break;
+ }
+ }
+ } catch (...) {
+ return callback.rethrow();
+ }
+
+ auto callbackPtr
+ = std::make_shared<decltype(callback)>(std::move(callback));
+
+ queryRealisationUncached(
+ id,
+ { [this, id, callbackPtr](
+ std::future<std::shared_ptr<const Realisation>> fut) {
+ try {
+ auto info = fut.get();
+
+ if (diskCache) {
+ if (info)
+ diskCache->upsertRealisation(getUri(), *info);
+ else
+ diskCache->upsertAbsentRealisation(getUri(), id);
+ }
+
+ (*callbackPtr)(std::shared_ptr<const Realisation>(info));
+
+ } catch (...) {
+ callbackPtr->rethrow();
+ }
+ } });
+}
+
+std::shared_ptr<const Realisation> Store::queryRealisation(const DrvOutput & id)
+{
+ using RealPtr = std::shared_ptr<const Realisation>;
+ std::promise<RealPtr> promise;
+
+ queryRealisation(id,
+ {[&](std::future<RealPtr> result) {
+ try {
+ promise.set_value(result.get());
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ }});
+
+ return promise.get_future().get();
+}
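
The new synchronous overload is a thin promise/future bridge over the callback-based one: the callback fulfils a `std::promise` (value or exception), and the caller blocks on the matching future, which rethrows on failure. A stripped-down, runnable sketch of the same pattern, with plain standard-library types standing in for `DrvOutput`, `Realisation` and `Callback`:

    #include <functional>
    #include <future>
    #include <memory>
    #include <string>

    // Callback-style API, standing in for queryRealisation(id, callback).
    void lookupAsync(const std::string & key,
        std::function<void(std::future<std::shared_ptr<const std::string>>)> callback)
    {
        std::promise<std::shared_ptr<const std::string>> p;
        p.set_value(std::make_shared<const std::string>("value for " + key));
        callback(p.get_future());
    }

    // Blocking wrapper built on top of it, mirroring the structure above.
    std::shared_ptr<const std::string> lookupSync(const std::string & key)
    {
        std::promise<std::shared_ptr<const std::string>> promise;
        lookupAsync(key, [&](std::future<std::shared_ptr<const std::string>> result) {
            try {
                promise.set_value(result.get());                      // forward the value
            } catch (...) {
                promise.set_exception(std::current_exception());      // forward the exception
            }
        });
        return promise.get_future().get();                            // rethrows on failure
    }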
void Store::substitutePaths(const StorePathSet & paths)
{
@@ -860,7 +927,7 @@ std::map<StorePath, StorePath> copyPaths(
for (auto & path : paths) {
storePaths.insert(path.path());
if (auto realisation = std::get_if<Realisation>(&path.raw)) {
- settings.requireExperimentalFeature("ca-derivations");
+ settings.requireExperimentalFeature(Xp::CaDerivations);
toplevelRealisations.insert(*realisation);
}
}
@@ -892,7 +959,7 @@ std::map<StorePath, StorePath> copyPaths(
// Don't fail if the remote doesn't support CA derivations, as it might
// not be within our control to change that, and we might still want
// to at least copy the output paths.
- if (e.missingFeature == "ca-derivations")
+ if (e.missingFeature == Xp::CaDerivations)
ignoreException();
else
throw;
@@ -1012,7 +1079,7 @@ std::map<StorePath, StorePath> copyPaths(
nrFailed++;
if (!settings.keepGoing)
throw e;
- logger->log(lvlError, fmt("could not copy %s: %s", dstStore.printStorePath(storePath), e.what()));
+ printMsg(lvlError, "could not copy %s: %s", dstStore.printStorePath(storePath), e.what());
showProgress();
return;
}
@@ -1129,10 +1196,10 @@ bool ValidPathInfo::isContentAddressed(const Store & store) const
if (! ca) return false;
auto caPath = std::visit(overloaded {
- [&](TextHash th) {
+ [&](const TextHash & th) {
return store.makeTextPath(path.name(), th.hash, references);
},
- [&](FixedOutputHash fsh) {
+ [&](const FixedOutputHash & fsh) {
auto refs = references;
bool hasSelfReference = false;
if (refs.count(path)) {
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 81279c90b..8306509f3 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -151,9 +151,33 @@ struct BuildResult
DependencyFailed,
LogLimitExceeded,
NotDeterministic,
+ ResolvesToAlreadyValid,
} status = MiscFailure;
std::string errorMsg;
+ std::string toString() const {
+ auto strStatus = [&]() {
+ switch (status) {
+ case Built: return "Built";
+ case Substituted: return "Substituted";
+ case AlreadyValid: return "AlreadyValid";
+ case PermanentFailure: return "PermanentFailure";
+ case InputRejected: return "InputRejected";
+ case OutputRejected: return "OutputRejected";
+ case TransientFailure: return "TransientFailure";
+ case CachedFailure: return "CachedFailure";
+ case TimedOut: return "TimedOut";
+ case MiscFailure: return "MiscFailure";
+ case DependencyFailed: return "DependencyFailed";
+ case LogLimitExceeded: return "LogLimitExceeded";
+ case NotDeterministic: return "NotDeterministic";
+ case ResolvesToAlreadyValid: return "ResolvesToAlreadyValid";
+ default: return "Unknown";
+ };
+ }();
+ return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg);
+ }
+
/* How many times this build was performed. */
unsigned int timesBuilt = 0;
@@ -170,7 +194,7 @@ struct BuildResult
time_t startTime = 0, stopTime = 0;
bool success() {
- return status == Built || status == Substituted || status == AlreadyValid;
+ return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;
}
};
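
`toString()` renders the status name plus the error message when one is set, and `success()` now also accepts the new `ResolvesToAlreadyValid` status (a resolved derivation whose outputs turned out to be valid already). A hedged usage sketch, assuming a `BuildResult` obtained from a build call such as `Store::buildDerivation`:

    #include "store-api.hh"

    #include <iostream>
    #include <string>

    // Report the outcome of one build.
    void report(nix::BuildResult & res, const std::string & what)
    {
        if (res.success())
            std::cerr << what << ": " << res.toString()
                      << " (built " << res.timesBuilt << " time(s))\n";
        else
            std::cerr << "build of " << what << " failed: " << res.toString() << "\n";
    }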
@@ -232,7 +256,6 @@ protected:
struct State
{
- // FIXME: fix key
LRUCache<std::string, PathInfoCacheValue> pathInfoCache;
};
@@ -370,6 +393,14 @@ public:
void queryPathInfo(const StorePath & path,
Callback<ref<const ValidPathInfo>> callback) noexcept;
+ /* Query the information about a realisation. */
+ std::shared_ptr<const Realisation> queryRealisation(const DrvOutput &);
+
+ /* Asynchronous version of queryRealisation(). */
+ void queryRealisation(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept;
+
+
/* Check whether the given valid path info is sufficiently attested, by
either being signed by a trusted public key or content-addressed, in
order to be included in the given store.
@@ -394,11 +425,11 @@ protected:
virtual void queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept = 0;
+ virtual void queryRealisationUncached(const DrvOutput &,
+ Callback<std::shared_ptr<const Realisation>> callback) noexcept = 0;
public:
- virtual std::optional<const Realisation> queryRealisation(const DrvOutput &) = 0;
-
/* Queries the set of incoming FS references for a store path.
The result is not cleared. */
virtual void queryReferrers(const StorePath & path, StorePathSet & referrers)
@@ -430,9 +461,10 @@ public:
virtual StorePathSet querySubstitutablePaths(const StorePathSet & paths) { return {}; };
/* Query substitute info (i.e. references, derivers and download
- sizes) of a map of paths to their optional ca values. If a path
- does not have substitute info, it's omitted from the resulting
- ‘infos’ map. */
+ sizes) of a map of paths to their optional ca values. The info
+ of the first succeeding substituter for each path will be
+ returned. If a path does not have substitute info, it's omitted
+ from the resulting ‘infos’ map. */
virtual void querySubstitutablePathInfos(const StorePathCAMap & paths,
SubstitutablePathInfos & infos) { return; };
@@ -452,7 +484,7 @@ public:
libutil/archive.hh). */
virtual StorePath addToStore(const string & name, const Path & srcPath,
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
- PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair);
+ PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair, const StorePathSet & references = StorePathSet());
/* Copy the contents of a path to the store and register the
validity of the resulting path, using a constant amount of
@@ -468,7 +500,8 @@ public:
`dump` may be drained */
// FIXME: remove?
virtual StorePath addToStoreFromDump(Source & dump, const string & name,
- FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair)
+ FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair,
+ const StorePathSet & references = StorePathSet())
{ unsupported("addToStoreFromDump"); }
/* Like addToStore, but the contents written to the output path is
@@ -560,26 +593,6 @@ public:
virtual void addIndirectRoot(const Path & path)
{ unsupported("addIndirectRoot"); }
- /* Acquire the global GC lock, then immediately release it. This
- function must be called after registering a new permanent root,
- but before exiting. Otherwise, it is possible that a running
- garbage collector doesn't see the new root and deletes the
- stuff we've just built. By acquiring the lock briefly, we
- ensure that either:
-
- - The collector is already running, and so we block until the
- collector is finished. The collector will know about our
- *temporary* locks, which should include whatever it is we
- want to register as a permanent lock.
-
- - The collector isn't running, or it's just started but hasn't
- acquired the GC lock yet. In that case we get and release
- the lock right away, then exit. The collector scans the
- permanent root and sees ours.
-
- In either case the permanent root is seen by the collector. */
- virtual void syncWithGC() { };
-
/* Find the roots of the garbage collector. Each root is a pair
(link, storepath) where `link' is the path of the symlink
outside of the Nix store that point to `storePath'. If
@@ -711,8 +724,11 @@ public:
/* Return the build log of the specified store path, if available,
or null otherwise. */
- virtual std::shared_ptr<std::string> getBuildLog(const StorePath & path)
- { return nullptr; }
+ virtual std::optional<std::string> getBuildLog(const StorePath & path)
+ { return std::nullopt; }
+
+ virtual void addBuildLog(const StorePath & path, std::string_view log)
+ { unsupported("addBuildLog"); }
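
`getBuildLog()` now returns `std::optional<std::string>` instead of a nullable `std::shared_ptr`, and `addBuildLog()` gives stores a hook for uploading a log (unsupported by default, and exposed over the daemon protocol as wopAddBuildLog below). A small caller sketch, assuming a `Store &` and a `StorePath`:

    #include "store-api.hh"

    #include <iostream>

    // Print the build log for `path` if this store can produce one.
    void dumpLog(nix::Store & store, const nix::StorePath & path)
    {
        if (auto log = store.getBuildLog(path))
            std::cout << *log;
        else
            std::cerr << "no build log available for " << store.printStorePath(path) << "\n";
    }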
/* Hack to allow long-running processes like hydra-queue-runner to
occasionally flush their path info cache. */
@@ -744,6 +760,11 @@ public:
virtual void createUser(const std::string & userName, uid_t userId)
{ }
+ /*
+ * Synchronises the options of the client with those of the daemon
+ * (a no-op when there’s no daemon)
+ */
+ virtual void setOptions() { }
protected:
Stats stats;
diff --git a/src/libstore/tests/local.mk b/src/libstore/tests/local.mk
new file mode 100644
index 000000000..f74295d97
--- /dev/null
+++ b/src/libstore/tests/local.mk
@@ -0,0 +1,15 @@
+check: libstore-tests_RUN
+
+programs += libstore-tests
+
+libstore-tests_DIR := $(d)
+
+libstore-tests_INSTALL_DIR :=
+
+libstore-tests_SOURCES := $(wildcard $(d)/*.cc)
+
+libstore-tests_CXXFLAGS += -I src/libstore -I src/libutil
+
+libstore-tests_LIBS = libstore libutil
+
+libstore-tests_LDFLAGS := $(GTEST_LIBS)
diff --git a/src/libstore/tests/machines.cc b/src/libstore/tests/machines.cc
new file mode 100644
index 000000000..f51052b14
--- /dev/null
+++ b/src/libstore/tests/machines.cc
@@ -0,0 +1,169 @@
+#include "machines.hh"
+#include "globals.hh"
+
+#include <gmock/gmock-matchers.h>
+
+using testing::Contains;
+using testing::ElementsAre;
+using testing::EndsWith;
+using testing::Eq;
+using testing::Field;
+using testing::SizeIs;
+
+using nix::absPath;
+using nix::FormatError;
+using nix::getMachines;
+using nix::Machine;
+using nix::Machines;
+using nix::pathExists;
+using nix::Settings;
+using nix::settings;
+
+class Environment : public ::testing::Environment {
+ public:
+ void SetUp() override { settings.thisSystem = "TEST_ARCH-TEST_OS"; }
+};
+
+testing::Environment* const foo_env =
+ testing::AddGlobalTestEnvironment(new Environment);
+
+TEST(machines, getMachinesWithEmptyBuilders) {
+ settings.builders = "";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(0));
+}
+
+TEST(machines, getMachinesUriOnly) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(1));
+ EXPECT_THAT(actual[0], Field(&Machine::storeUri, Eq("ssh://nix@scratchy.labs.cs.uu.nl")));
+ EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("TEST_ARCH-TEST_OS")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshKey, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(1)));
+ EXPECT_THAT(actual[0], Field(&Machine::speedFactor, Eq(1)));
+ EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::sshPublicHostKey, SizeIs(0)));
+}
+
+TEST(machines, getMachinesDefaults) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - - - - - -";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(1));
+ EXPECT_THAT(actual[0], Field(&Machine::storeUri, Eq("ssh://nix@scratchy.labs.cs.uu.nl")));
+ EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("TEST_ARCH-TEST_OS")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshKey, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(1)));
+ EXPECT_THAT(actual[0], Field(&Machine::speedFactor, Eq(1)));
+ EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, SizeIs(0)));
+ EXPECT_THAT(actual[0], Field(&Machine::sshPublicHostKey, SizeIs(0)));
+}
+
+TEST(machines, getMachinesWithNewLineSeparator) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl\nnix@itchy.labs.cs.uu.nl";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(2));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl"))));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@itchy.labs.cs.uu.nl"))));
+}
+
+TEST(machines, getMachinesWithSemicolonSeparator) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl ; nix@itchy.labs.cs.uu.nl";
+ Machines actual = getMachines();
+ EXPECT_THAT(actual, SizeIs(2));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl"))));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@itchy.labs.cs.uu.nl"))));
+}
+
+TEST(machines, getMachinesWithCorrectCompleteSingleBuilder) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl i686-linux "
+ "/home/nix/.ssh/id_scratchy_auto 8 3 kvm "
+ "benchmark SSH+HOST+PUBLIC+KEY+BASE64+ENCODED==";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(1));
+ EXPECT_THAT(actual[0], Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl")));
+ EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("i686-linux")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshKey, Eq("/home/nix/.ssh/id_scratchy_auto")));
+ EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(8)));
+ EXPECT_THAT(actual[0], Field(&Machine::speedFactor, Eq(3)));
+ EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, ElementsAre("kvm")));
+ EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, ElementsAre("benchmark")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshPublicHostKey, Eq("SSH+HOST+PUBLIC+KEY+BASE64+ENCODED==")));
+}
+
+TEST(machines,
+ getMachinesWithCorrectCompleteSingleBuilderWithTabColumnDelimiter) {
+ settings.builders =
+ "nix@scratchy.labs.cs.uu.nl\ti686-linux\t/home/nix/.ssh/"
+ "id_scratchy_auto\t8\t3\tkvm\tbenchmark\tSSH+HOST+PUBLIC+"
+ "KEY+BASE64+ENCODED==";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(1));
+ EXPECT_THAT(actual[0], Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl")));
+ EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("i686-linux")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshKey, Eq("/home/nix/.ssh/id_scratchy_auto")));
+ EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(8)));
+ EXPECT_THAT(actual[0], Field(&Machine::speedFactor, Eq(3)));
+ EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, ElementsAre("kvm")));
+ EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, ElementsAre("benchmark")));
+ EXPECT_THAT(actual[0], Field(&Machine::sshPublicHostKey, Eq("SSH+HOST+PUBLIC+KEY+BASE64+ENCODED==")));
+}
+
+TEST(machines, getMachinesWithMultiOptions) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl Arch1,Arch2 - - - "
+ "SupportedFeature1,SupportedFeature2 "
+ "MandatoryFeature1,MandatoryFeature2";
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(1));
+ EXPECT_THAT(actual[0], Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl")));
+ EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("Arch1", "Arch2")));
+ EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, ElementsAre("SupportedFeature1", "SupportedFeature2")));
+ EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, ElementsAre("MandatoryFeature1", "MandatoryFeature2")));
+}
+
+TEST(machines, getMachinesWithIncorrectFormat) {
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - eight";
+ EXPECT_THROW(getMachines(), FormatError);
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - -1";
+ EXPECT_THROW(getMachines(), FormatError);
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - 8 three";
+ EXPECT_THROW(getMachines(), FormatError);
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - 8 -3";
+ EXPECT_THROW(getMachines(), FormatError);
+ settings.builders = "nix@scratchy.labs.cs.uu.nl - - 8 3 - - BAD_BASE64";
+ EXPECT_THROW(getMachines(), FormatError);
+}
+
+TEST(machines, getMachinesWithCorrectFileReference) {
+ auto path = absPath("src/libstore/tests/test-data/machines.valid");
+ ASSERT_TRUE(pathExists(path));
+
+ settings.builders = std::string("@") + path;
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(3));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl"))));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@itchy.labs.cs.uu.nl"))));
+ EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@poochie.labs.cs.uu.nl"))));
+}
+
+TEST(machines, getMachinesWithCorrectFileReferenceToEmptyFile) {
+ auto path = "/dev/null";
+ ASSERT_TRUE(pathExists(path));
+
+ settings.builders = std::string("@") + path;
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(0));
+}
+
+TEST(machines, getMachinesWithIncorrectFileReference) {
+ settings.builders = std::string("@") + absPath("/not/a/file");
+ Machines actual = getMachines();
+ ASSERT_THAT(actual, SizeIs(0));
+}
+
+TEST(machines, getMachinesWithCorrectFileReferenceToIncorrectFile) {
+ settings.builders = std::string("@") + absPath("src/libstore/tests/test-data/machines.bad_format");
+ EXPECT_THROW(getMachines(), FormatError);
+}
diff --git a/src/libstore/tests/references.cc b/src/libstore/tests/references.cc
new file mode 100644
index 000000000..d91d1cedd
--- /dev/null
+++ b/src/libstore/tests/references.cc
@@ -0,0 +1,45 @@
+#include "references.hh"
+
+#include <gtest/gtest.h>
+
+namespace nix {
+
+TEST(references, scan)
+{
+ std::string hash1 = "dc04vv14dak1c1r48qa0m23vr9jy8sm0";
+ std::string hash2 = "zc842j0rz61mjsp3h3wp5ly71ak6qgdn";
+
+ {
+ RefScanSink scanner(StringSet{hash1});
+ auto s = "foobar";
+ scanner(s);
+ ASSERT_EQ(scanner.getResult(), StringSet{});
+ }
+
+ {
+ RefScanSink scanner(StringSet{hash1});
+ auto s = "foobar" + hash1 + "xyzzy";
+ scanner(s);
+ ASSERT_EQ(scanner.getResult(), StringSet{hash1});
+ }
+
+ {
+ RefScanSink scanner(StringSet{hash1, hash2});
+ auto s = "foobar" + hash1 + "xyzzy" + hash2;
+ scanner(((std::string_view) s).substr(0, 10));
+ scanner(((std::string_view) s).substr(10, 5));
+ scanner(((std::string_view) s).substr(15, 5));
+ scanner(((std::string_view) s).substr(20));
+ ASSERT_EQ(scanner.getResult(), StringSet({hash1, hash2}));
+ }
+
+ {
+ RefScanSink scanner(StringSet{hash1, hash2});
+ auto s = "foobar" + hash1 + "xyzzy" + hash2;
+ for (auto & i : s)
+ scanner(std::string(1, i));
+ ASSERT_EQ(scanner.getResult(), StringSet({hash1, hash2}));
+ }
+}
+
+}
diff --git a/src/libstore/tests/test-data/machines.bad_format b/src/libstore/tests/test-data/machines.bad_format
new file mode 100644
index 000000000..7255a1216
--- /dev/null
+++ b/src/libstore/tests/test-data/machines.bad_format
@@ -0,0 +1 @@
+nix@scratchy.labs.cs.uu.nl - - eight
diff --git a/src/libstore/tests/test-data/machines.valid b/src/libstore/tests/test-data/machines.valid
new file mode 100644
index 000000000..1a6c8017c
--- /dev/null
+++ b/src/libstore/tests/test-data/machines.valid
@@ -0,0 +1,3 @@
+nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 1 kvm
+nix@itchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 2
+nix@poochie.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 1 2 kvm benchmark c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFDQVFDWWV5R1laNTNzd1VjMUZNSHBWL1BCcXlKaFR5S1JoRkpWWVRpRHlQN2h5c1JGa0w4VDlLOGdhL2Y2L3c3QjN2SjNHSFRIUFkybENiUEdZbGNLd2h6M2ZRbFNNOEViNi95b3ZLajdvM1FsMEx5Y0dzdGJvRmcwWkZKNldncUxsR0ltS0NobUlxOGZ3TW5ZTWUxbnRQeTBUZFZjSU1tOTV3YzF3SjBMd2c3cEVMRmtHazdkeTVvYnM4a3lGZ0pORDVRSmFwQWJjeWp4Z1QzdzdMcktNZ2xzeWhhd01JNVpkMGZsQTVudW5OZ3pid3plYVhLaUsyTW0vdGJXYTU1YTd4QmNYdHpIZGlPSWdSajJlRWxaMGh5bk10YjBmcklsdmxIcEtLaVFaZ3pQdCtIVXQ2bXpRMkRVME52MGYyYnNSU0krOGpJU2pQcmdlcVVHRldMUzVIUTg2N2xSMlpiaWtyclhZNTdqbVFEZk5DRHY1VFBHZU9UekFEd2pjMDc2aFZ3VFJCd3VTZFhtaWNxTS95b3lrWitkV1dnZ25MenE5QU1tdlNZcDhmZkZDcS9CSDBZNUFXWTFHay9vS3hMVTNaOWt3ZDd2UWNFQWFCQ2dxdnVZRGdTaHE1RlhndDM3OVZESWtEL05ZSTg2QXVvajVDRmVNTzlRM2pJSlRadlh6c1VldjVoSnA2djcxSVh5ODVtbTY5R20zcXdicVE1SjVQZDU1Um56SitpaW5BNjZxTEFSc0Y4amNsSnd5ekFXclBoYU9DRVY2bjVMeVhVazhzMW9EVVR4V1pWN25rVkFTbHJ0MllGcjN5dzdjRTRXQVhsemhHcDhocmdLMVVkMUlyeDVnZWRaSnBWcy9uNWVybmJFMUxmb2x5UHUvRUFIWlh6VGd4dHVDUFNobXc9PQo=
diff --git a/src/libstore/uds-remote-store.cc b/src/libstore/uds-remote-store.cc
index cac4fa036..5c38323cd 100644
--- a/src/libstore/uds-remote-store.cc
+++ b/src/libstore/uds-remote-store.cc
@@ -45,30 +45,20 @@ std::string UDSRemoteStore::getUri()
}
+void UDSRemoteStore::Connection::closeWrite()
+{
+ shutdown(fd.get(), SHUT_WR);
+}
+
+
ref<RemoteStore::Connection> UDSRemoteStore::openConnection()
{
auto conn = make_ref<Connection>();
/* Connect to a daemon that does the privileged work for us. */
- conn->fd = socket(PF_UNIX, SOCK_STREAM
- #ifdef SOCK_CLOEXEC
- | SOCK_CLOEXEC
- #endif
- , 0);
- if (!conn->fd)
- throw SysError("cannot create Unix domain socket");
- closeOnExec(conn->fd.get());
-
- string socketPath = path ? *path : settings.nixDaemonSocketFile;
-
- struct sockaddr_un addr;
- addr.sun_family = AF_UNIX;
- if (socketPath.size() + 1 >= sizeof(addr.sun_path))
- throw Error("socket path '%1%' is too long", socketPath);
- strcpy(addr.sun_path, socketPath.c_str());
-
- if (::connect(conn->fd.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1)
- throw SysError("cannot connect to daemon at '%1%'", socketPath);
+ conn->fd = createUnixDomainSocket();
+
+ nix::connect(conn->fd.get(), path ? *path : settings.nixDaemonSocketFile);
conn->from.fd = conn->fd.get();
conn->to.fd = conn->fd.get();
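
The hand-rolled `socket()`/`sockaddr_un`/`connect()` sequence visible in the removed lines moves behind the shared `createUnixDomainSocket()` and `nix::connect()` helpers, and the new `closeWrite()` half-closes the socket with `shutdown(SHUT_WR)` so the daemon sees end-of-file on the request stream while the reply remains readable. A minimal POSIX sketch of that pattern, independent of the Nix helpers and based on the removed code:

    #include <cstring>
    #include <stdexcept>
    #include <string>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    // Connect to a Unix domain socket at `path` (roughly what the removed code did inline).
    int connectUnix(const std::string & path)
    {
        int fd = socket(PF_UNIX, SOCK_STREAM, 0);
        if (fd < 0) throw std::runtime_error("cannot create Unix domain socket");

        sockaddr_un addr{};
        addr.sun_family = AF_UNIX;
        if (path.size() + 1 >= sizeof(addr.sun_path))
            throw std::runtime_error("socket path '" + path + "' is too long");
        strcpy(addr.sun_path, path.c_str());

        if (connect(fd, (sockaddr *) &addr, sizeof(addr)) == -1)
            throw std::runtime_error("cannot connect to '" + path + "'");
        return fd;
    }

    // Signal EOF on the request stream while keeping the reply readable.
    void closeWrite(int fd)
    {
        shutdown(fd, SHUT_WR);
    }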
diff --git a/src/libstore/uds-remote-store.hh b/src/libstore/uds-remote-store.hh
index ddc7716cd..f8dfcca70 100644
--- a/src/libstore/uds-remote-store.hh
+++ b/src/libstore/uds-remote-store.hh
@@ -40,6 +40,12 @@ public:
private:
+ struct Connection : RemoteStore::Connection
+ {
+ AutoCloseFD fd;
+ void closeWrite() override;
+ };
+
ref<RemoteStore::Connection> openConnection() override;
std::optional<std::string> path;
};
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index 93cf546d2..ecf42a5d0 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -56,6 +56,7 @@ typedef enum {
wopRegisterDrvOutput = 42,
wopQueryRealisation = 43,
wopAddMultipleToStore = 44,
+ wopAddBuildLog = 45,
} WorkerOp;