aboutsummaryrefslogtreecommitdiff
path: root/src/libstore
diff options
context:
space:
mode:
Diffstat (limited to 'src/libstore')
-rw-r--r--src/libstore/binary-cache-store.cc407
-rw-r--r--src/libstore/binary-cache-store.hh133
-rw-r--r--src/libstore/build.cc3935
-rw-r--r--src/libstore/builtins.cc71
-rw-r--r--src/libstore/builtins.hh9
-rw-r--r--src/libstore/crypto.cc126
-rw-r--r--src/libstore/crypto.hh54
-rw-r--r--src/libstore/derivations.cc444
-rw-r--r--src/libstore/derivations.hh122
-rw-r--r--src/libstore/download.cc708
-rw-r--r--src/libstore/download.hh79
-rw-r--r--src/libstore/export-import.cc106
-rw-r--r--src/libstore/fs-accessor.hh30
-rw-r--r--src/libstore/gc.cc851
-rw-r--r--src/libstore/globals.cc104
-rw-r--r--src/libstore/globals.hh318
-rw-r--r--src/libstore/http-binary-cache-store.cc115
-rw-r--r--src/libstore/legacy-ssh-store.cc256
-rw-r--r--src/libstore/local-binary-cache-store.cc107
-rw-r--r--src/libstore/local-fs-store.cc130
-rw-r--r--src/libstore/local-store.cc1345
-rw-r--r--src/libstore/local-store.hh289
-rw-r--r--src/libstore/local.mk41
-rw-r--r--src/libstore/misc.cc277
-rw-r--r--src/libstore/nar-accessor.cc142
-rw-r--r--src/libstore/nar-accessor.hh11
-rw-r--r--src/libstore/nar-info-disk-cache.cc270
-rw-r--r--src/libstore/nar-info-disk-cache.hh31
-rw-r--r--src/libstore/nar-info.cc116
-rw-r--r--src/libstore/nar-info.hh24
-rw-r--r--src/libstore/nix-store.pc.in9
-rw-r--r--src/libstore/optimise-store.cc275
-rw-r--r--src/libstore/pathlocks.cc216
-rw-r--r--src/libstore/pathlocks.hh46
-rw-r--r--src/libstore/profiles.cc236
-rw-r--r--src/libstore/profiles.hh65
-rw-r--r--src/libstore/references.cc122
-rw-r--r--src/libstore/references.hh11
-rw-r--r--src/libstore/remote-fs-accessor.cc57
-rw-r--r--src/libstore/remote-fs-accessor.hh29
-rw-r--r--src/libstore/remote-store.cc655
-rw-r--r--src/libstore/remote-store.hh142
-rw-r--r--src/libstore/s3-binary-cache-store.cc339
-rw-r--r--src/libstore/s3-binary-cache-store.hh33
-rw-r--r--src/libstore/s3.hh33
-rw-r--r--src/libstore/sandbox-defaults.sb.in63
-rw-r--r--src/libstore/schema.sql42
-rw-r--r--src/libstore/serve-protocol.hh23
-rw-r--r--src/libstore/sqlite.cc197
-rw-r--r--src/libstore/sqlite.hh114
-rw-r--r--src/libstore/ssh-store.cc104
-rw-r--r--src/libstore/ssh.cc102
-rw-r--r--src/libstore/ssh.hh49
-rw-r--r--src/libstore/store-api.cc836
-rw-r--r--src/libstore/store-api.hh729
-rw-r--r--src/libstore/worker-protocol.hh66
56 files changed, 15244 insertions, 0 deletions
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
new file mode 100644
index 000000000..b536c6c00
--- /dev/null
+++ b/src/libstore/binary-cache-store.cc
@@ -0,0 +1,407 @@
+#include "archive.hh"
+#include "binary-cache-store.hh"
+#include "compression.hh"
+#include "derivations.hh"
+#include "fs-accessor.hh"
+#include "globals.hh"
+#include "nar-info.hh"
+#include "sync.hh"
+#include "remote-fs-accessor.hh"
+#include "nar-info-disk-cache.hh"
+#include "nar-accessor.hh"
+#include "json.hh"
+
+#include <chrono>
+
+#include <future>
+
+namespace nix {
+
+/* Given requests for a path /nix/store/<x>/<y>, this accessor will
+ first download the NAR for /nix/store/<x> from the binary cache,
+ build a NAR accessor for that NAR, and use that to access <y>. */
+struct BinaryCacheStoreAccessor : public FSAccessor
+{
+ ref<BinaryCacheStore> store;
+
+ std::map<Path, ref<FSAccessor>> nars;
+
+ BinaryCacheStoreAccessor(ref<BinaryCacheStore> store)
+ : store(store)
+ {
+ }
+
+ std::pair<ref<FSAccessor>, Path> fetch(const Path & path_)
+ {
+ auto path = canonPath(path_);
+
+ auto storePath = store->toStorePath(path);
+ std::string restPath = std::string(path, storePath.size());
+
+ if (!store->isValidPath(storePath))
+ throw InvalidPath(format("path ‘%1%’ is not a valid store path") % storePath);
+
+ auto i = nars.find(storePath);
+ if (i != nars.end()) return {i->second, restPath};
+
+ StringSink sink;
+ store->narFromPath(storePath, sink);
+
+ auto accessor = makeNarAccessor(sink.s);
+ nars.emplace(storePath, accessor);
+ return {accessor, restPath};
+ }
+
+ Stat stat(const Path & path) override
+ {
+ auto res = fetch(path);
+ return res.first->stat(res.second);
+ }
+
+ StringSet readDirectory(const Path & path) override
+ {
+ auto res = fetch(path);
+ return res.first->readDirectory(res.second);
+ }
+
+ std::string readFile(const Path & path) override
+ {
+ auto res = fetch(path);
+ return res.first->readFile(res.second);
+ }
+
+ std::string readLink(const Path & path) override
+ {
+ auto res = fetch(path);
+ return res.first->readLink(res.second);
+ }
+};
+
+BinaryCacheStore::BinaryCacheStore(const Params & params)
+ : Store(params)
+{
+ if (secretKeyFile != "")
+ secretKey = std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile)));
+
+ StringSink sink;
+ sink << narVersionMagic1;
+ narMagic = *sink.s;
+}
+
+void BinaryCacheStore::init()
+{
+ std::string cacheInfoFile = "nix-cache-info";
+
+ auto cacheInfo = getFile(cacheInfoFile);
+ if (!cacheInfo) {
+ upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info");
+ } else {
+ for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) {
+ size_t colon = line.find(':');
+ if (colon == std::string::npos) continue;
+ auto name = line.substr(0, colon);
+ auto value = trim(line.substr(colon + 1, std::string::npos));
+ if (name == "StoreDir") {
+ if (value != storeDir)
+ throw Error(format("binary cache ‘%s’ is for Nix stores with prefix ‘%s’, not ‘%s’")
+ % getUri() % value % storeDir);
+ } else if (name == "WantMassQuery") {
+ wantMassQuery_ = value == "1";
+ } else if (name == "Priority") {
+ string2Int(value, priority);
+ }
+ }
+ }
+}
+
+void BinaryCacheStore::notImpl()
+{
+ throw Error("operation not implemented for binary cache stores");
+}
+
+std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
+{
+ std::promise<std::shared_ptr<std::string>> promise;
+ getFile(path,
+ [&](std::shared_ptr<std::string> result) {
+ promise.set_value(result);
+ },
+ [&](std::exception_ptr exc) {
+ promise.set_exception(exc);
+ });
+ return promise.get_future().get();
+}
+
+Path BinaryCacheStore::narInfoFileFor(const Path & storePath)
+{
+ assertStorePath(storePath);
+ return storePathToHash(storePath) + ".narinfo";
+}
+
+void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs, std::shared_ptr<FSAccessor> accessor)
+{
+ if (!repair && isValidPath(info.path)) return;
+
+ /* Verify that all references are valid. This may do some .narinfo
+ reads, but typically they'll already be cached. */
+ for (auto & ref : info.references)
+ try {
+ if (ref != info.path)
+ queryPathInfo(ref);
+ } catch (InvalidPath &) {
+ throw Error(format("cannot add ‘%s’ to the binary cache because the reference ‘%s’ is not valid")
+ % info.path % ref);
+ }
+
+ auto narInfoFile = narInfoFileFor(info.path);
+
+ assert(nar->compare(0, narMagic.size(), narMagic) == 0);
+
+ auto narInfo = make_ref<NarInfo>(info);
+
+ narInfo->narSize = nar->size();
+ narInfo->narHash = hashString(htSHA256, *nar);
+
+ if (info.narHash && info.narHash != narInfo->narHash)
+ throw Error(format("refusing to copy corrupted path ‘%1%’ to binary cache") % info.path);
+
+ auto accessor_ = std::dynamic_pointer_cast<BinaryCacheStoreAccessor>(accessor);
+
+ /* Optionally write a JSON file containing a listing of the
+ contents of the NAR. */
+ if (writeNARListing) {
+ std::ostringstream jsonOut;
+
+ {
+ JSONObject jsonRoot(jsonOut);
+ jsonRoot.attr("version", 1);
+
+ auto narAccessor = makeNarAccessor(nar);
+
+ if (accessor_)
+ accessor_->nars.emplace(info.path, narAccessor);
+
+ std::function<void(const Path &, JSONPlaceholder &)> recurse;
+
+ recurse = [&](const Path & path, JSONPlaceholder & res) {
+ auto st = narAccessor->stat(path);
+
+ auto obj = res.object();
+
+ switch (st.type) {
+ case FSAccessor::Type::tRegular:
+ obj.attr("type", "regular");
+ obj.attr("size", st.fileSize);
+ if (st.isExecutable)
+ obj.attr("executable", true);
+ break;
+ case FSAccessor::Type::tDirectory:
+ obj.attr("type", "directory");
+ {
+ auto res2 = obj.object("entries");
+ for (auto & name : narAccessor->readDirectory(path)) {
+ auto res3 = res2.placeholder(name);
+ recurse(path + "/" + name, res3);
+ }
+ }
+ break;
+ case FSAccessor::Type::tSymlink:
+ obj.attr("type", "symlink");
+ obj.attr("target", narAccessor->readLink(path));
+ break;
+ default:
+ abort();
+ }
+ };
+
+ {
+ auto res = jsonRoot.placeholder("root");
+ recurse("", res);
+ }
+ }
+
+ upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(), "application/json");
+ }
+
+ else {
+ if (accessor_)
+ accessor_->nars.emplace(info.path, makeNarAccessor(nar));
+ }
+
+ /* Compress the NAR. */
+ narInfo->compression = compression;
+ auto now1 = std::chrono::steady_clock::now();
+ auto narCompressed = compress(compression, *nar);
+ auto now2 = std::chrono::steady_clock::now();
+ narInfo->fileHash = hashString(htSHA256, *narCompressed);
+ narInfo->fileSize = narCompressed->size();
+
+ auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+ printMsg(lvlTalkative, format("copying path ‘%1%’ (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache")
+ % narInfo->path % narInfo->narSize
+ % ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0)
+ % duration);
+
+ /* Atomically write the NAR file. */
+ narInfo->url = "nar/" + printHash32(narInfo->fileHash) + ".nar"
+ + (compression == "xz" ? ".xz" :
+ compression == "bzip2" ? ".bz2" :
+ compression == "br" ? ".br" :
+ "");
+ if (repair || !fileExists(narInfo->url)) {
+ stats.narWrite++;
+ upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar");
+ } else
+ stats.narWriteAverted++;
+
+ stats.narWriteBytes += nar->size();
+ stats.narWriteCompressedBytes += narCompressed->size();
+ stats.narWriteCompressionTimeMs += duration;
+
+ /* Atomically write the NAR info file.*/
+ if (secretKey) narInfo->sign(*secretKey);
+
+ upsertFile(narInfoFile, narInfo->to_string(), "text/x-nix-narinfo");
+
+ auto hashPart = storePathToHash(narInfo->path);
+
+ {
+ auto state_(state.lock());
+ state_->pathInfoCache.upsert(hashPart, std::shared_ptr<NarInfo>(narInfo));
+ }
+
+ if (diskCache)
+ diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo));
+
+ stats.narInfoWrite++;
+}
+
+bool BinaryCacheStore::isValidPathUncached(const Path & storePath)
+{
+ // FIXME: this only checks whether a .narinfo with a matching hash
+ // part exists. So ‘f4kb...-foo’ matches ‘f4kb...-bar’, even
+ // though they shouldn't. Not easily fixed.
+ return fileExists(narInfoFileFor(storePath));
+}
+
+void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
+{
+ auto info = queryPathInfo(storePath).cast<const NarInfo>();
+
+ auto nar = getFile(info->url);
+
+ if (!nar) throw Error(format("file ‘%s’ missing from binary cache") % info->url);
+
+ stats.narRead++;
+ stats.narReadCompressedBytes += nar->size();
+
+ /* Decompress the NAR. FIXME: would be nice to have the remote
+ side do this. */
+ try {
+ nar = decompress(info->compression, *nar);
+ } catch (UnknownCompressionMethod &) {
+ throw Error(format("binary cache path ‘%s’ uses unknown compression method ‘%s’")
+ % storePath % info->compression);
+ }
+
+ stats.narReadBytes += nar->size();
+
+ printMsg(lvlTalkative, format("exporting path ‘%1%’ (%2% bytes)") % storePath % nar->size());
+
+ assert(nar->size() % 8 == 0);
+
+ sink((unsigned char *) nar->c_str(), nar->size());
+}
+
+void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure)
+{
+ auto narInfoFile = narInfoFileFor(storePath);
+
+ getFile(narInfoFile,
+ [=](std::shared_ptr<std::string> data) {
+ if (!data) return success(0);
+
+ stats.narInfoRead++;
+
+ callSuccess(success, failure, (std::shared_ptr<ValidPathInfo>)
+ std::make_shared<NarInfo>(*this, *data, narInfoFile));
+ },
+ failure);
+}
+
+Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
+ bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
+{
+ // FIXME: some cut&paste from LocalStore::addToStore().
+
+ /* Read the whole path into memory. This is not a very scalable
+ method for very large paths, but `copyPath' is mainly used for
+ small files. */
+ StringSink sink;
+ Hash h;
+ if (recursive) {
+ dumpPath(srcPath, sink, filter);
+ h = hashString(hashAlgo, *sink.s);
+ } else {
+ auto s = readFile(srcPath);
+ dumpString(s, sink);
+ h = hashString(hashAlgo, s);
+ }
+
+ ValidPathInfo info;
+ info.path = makeFixedOutputPath(recursive, h, name);
+
+ addToStore(info, sink.s, repair, false, 0);
+
+ return info.path;
+}
+
+Path BinaryCacheStore::addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair)
+{
+ ValidPathInfo info;
+ info.path = computeStorePathForText(name, s, references);
+ info.references = references;
+
+ if (repair || !isValidPath(info.path)) {
+ StringSink sink;
+ dumpString(s, sink);
+ addToStore(info, sink.s, repair, false, 0);
+ }
+
+ return info.path;
+}
+
+ref<FSAccessor> BinaryCacheStore::getFSAccessor()
+{
+ return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
+}
+
+std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path & path)
+{
+ Path drvPath;
+
+ if (isDerivation(path))
+ drvPath = path;
+ else {
+ try {
+ auto info = queryPathInfo(path);
+ // FIXME: add a "Log" field to .narinfo
+ if (info->deriver == "") return nullptr;
+ drvPath = info->deriver;
+ } catch (InvalidPath &) {
+ return nullptr;
+ }
+ }
+
+ auto logPath = "log/" + baseNameOf(drvPath);
+
+ debug("fetching build log from binary cache ‘%s/%s’", getUri(), logPath);
+
+ return getFile(logPath);
+}
+
+}
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
new file mode 100644
index 000000000..5c2d0acfd
--- /dev/null
+++ b/src/libstore/binary-cache-store.hh
@@ -0,0 +1,133 @@
+#pragma once
+
+#include "crypto.hh"
+#include "store-api.hh"
+
+#include "pool.hh"
+
+#include <atomic>
+
+namespace nix {
+
+struct NarInfo;
+
+class BinaryCacheStore : public Store
+{
+public:
+
+ const Setting<std::string> compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
+ const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
+ const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"};
+
+private:
+
+ std::unique_ptr<SecretKey> secretKey;
+
+protected:
+
+ BinaryCacheStore(const Params & params);
+
+ [[noreturn]] void notImpl();
+
+public:
+
+ virtual bool fileExists(const std::string & path) = 0;
+
+ virtual void upsertFile(const std::string & path,
+ const std::string & data,
+ const std::string & mimeType) = 0;
+
+ /* Return the contents of the specified file, or null if it
+ doesn't exist. */
+ virtual void getFile(const std::string & path,
+ std::function<void(std::shared_ptr<std::string>)> success,
+ std::function<void(std::exception_ptr exc)> failure) = 0;
+
+ std::shared_ptr<std::string> getFile(const std::string & path);
+
+protected:
+
+ bool wantMassQuery_ = false;
+ int priority = 50;
+
+public:
+
+ virtual void init();
+
+private:
+
+ std::string narMagic;
+
+ std::string narInfoFileFor(const Path & storePath);
+
+public:
+
+ bool isValidPathUncached(const Path & path) override;
+
+ PathSet queryAllValidPaths() override
+ { notImpl(); }
+
+ void queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override;
+
+ void queryReferrers(const Path & path,
+ PathSet & referrers) override
+ { notImpl(); }
+
+ PathSet queryDerivationOutputs(const Path & path) override
+ { notImpl(); }
+
+ StringSet queryDerivationOutputNames(const Path & path) override
+ { notImpl(); }
+
+ Path queryPathFromHashPart(const string & hashPart) override
+ { notImpl(); }
+
+ bool wantMassQuery() override { return wantMassQuery_; }
+
+ void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs,
+ std::shared_ptr<FSAccessor> accessor) override;
+
+ Path addToStore(const string & name, const Path & srcPath,
+ bool recursive, HashType hashAlgo,
+ PathFilter & filter, bool repair) override;
+
+ Path addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair) override;
+
+ void narFromPath(const Path & path, Sink & sink) override;
+
+ void buildPaths(const PathSet & paths, BuildMode buildMode) override
+ { notImpl(); }
+
+ BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode) override
+ { notImpl(); }
+
+ void ensurePath(const Path & path) override
+ { notImpl(); }
+
+ void addTempRoot(const Path & path) override
+ { notImpl(); }
+
+ void addIndirectRoot(const Path & path) override
+ { notImpl(); }
+
+ Roots findRoots() override
+ { notImpl(); }
+
+ void collectGarbage(const GCOptions & options, GCResults & results) override
+ { notImpl(); }
+
+ ref<FSAccessor> getFSAccessor() override;
+
+ void addSignatures(const Path & storePath, const StringSet & sigs) override
+ { notImpl(); }
+
+ std::shared_ptr<std::string> getBuildLog(const Path & path) override;
+
+};
+
+}
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
new file mode 100644
index 000000000..9bf1ab5aa
--- /dev/null
+++ b/src/libstore/build.cc
@@ -0,0 +1,3935 @@
+#include "references.hh"
+#include "pathlocks.hh"
+#include "globals.hh"
+#include "local-store.hh"
+#include "util.hh"
+#include "archive.hh"
+#include "affinity.hh"
+#include "builtins.hh"
+#include "finally.hh"
+#include "compression.hh"
+#include "json.hh"
+
+#include <algorithm>
+#include <iostream>
+#include <map>
+#include <sstream>
+#include <thread>
+#include <future>
+#include <chrono>
+
+#include <limits.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/utsname.h>
+#include <sys/select.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <cstring>
+
+#include <pwd.h>
+#include <grp.h>
+
+/* Includes required for chroot support. */
+#if __linux__
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <netinet/ip.h>
+#include <sys/personality.h>
+#include <sys/mman.h>
+#include <sched.h>
+#include <sys/param.h>
+#include <sys/mount.h>
+#include <sys/syscall.h>
+#define pivot_root(new_root, put_old) (syscall(SYS_pivot_root, new_root, put_old))
+#endif
+
+#if HAVE_STATVFS
+#include <sys/statvfs.h>
+#endif
+
+
+namespace nix {
+
+using std::map;
+
+
+static string pathNullDevice = "/dev/null";
+
+
+/* Forward definition. */
+class Worker;
+struct HookInstance;
+
+
+/* A pointer to a goal. */
+class Goal;
+class DerivationGoal;
+typedef std::shared_ptr<Goal> GoalPtr;
+typedef std::weak_ptr<Goal> WeakGoalPtr;
+
+struct CompareGoalPtrs {
+ bool operator() (const GoalPtr & a, const GoalPtr & b);
+};
+
+/* Set of goals. */
+typedef set<GoalPtr, CompareGoalPtrs> Goals;
+typedef list<WeakGoalPtr> WeakGoals;
+
+/* A map of paths to goals (and the other way around). */
+typedef map<Path, WeakGoalPtr> WeakGoalMap;
+
+
+
+class Goal : public std::enable_shared_from_this<Goal>
+{
+public:
+ typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode;
+
+protected:
+
+ /* Backlink to the worker. */
+ Worker & worker;
+
+ /* Goals that this goal is waiting for. */
+ Goals waitees;
+
+ /* Goals waiting for this one to finish. Must use weak pointers
+ here to prevent cycles. */
+ WeakGoals waiters;
+
+ /* Number of goals we are/were waiting for that have failed. */
+ unsigned int nrFailed;
+
+ /* Number of substitution goals we are/were waiting for that
+ failed because there are no substituters. */
+ unsigned int nrNoSubstituters;
+
+ /* Number of substitution goals we are/were waiting for that
+ failed because othey had unsubstitutable references. */
+ unsigned int nrIncompleteClosure;
+
+ /* Name of this goal for debugging purposes. */
+ string name;
+
+ /* Whether the goal is finished. */
+ ExitCode exitCode;
+
+ Goal(Worker & worker) : worker(worker)
+ {
+ nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;
+ exitCode = ecBusy;
+ }
+
+ virtual ~Goal()
+ {
+ trace("goal destroyed");
+ }
+
+public:
+ virtual void work() = 0;
+
+ void addWaitee(GoalPtr waitee);
+
+ virtual void waiteeDone(GoalPtr waitee, ExitCode result);
+
+ virtual void handleChildOutput(int fd, const string & data)
+ {
+ abort();
+ }
+
+ virtual void handleEOF(int fd)
+ {
+ abort();
+ }
+
+ void trace(const format & f);
+
+ string getName()
+ {
+ return name;
+ }
+
+ ExitCode getExitCode()
+ {
+ return exitCode;
+ }
+
+ /* Callback in case of a timeout. It should wake up its waiters,
+ get rid of any running child processes that are being monitored
+ by the worker (important!), etc. */
+ virtual void timedOut() = 0;
+
+ virtual string key() = 0;
+
+protected:
+ void amDone(ExitCode result);
+};
+
+
+bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) {
+ string s1 = a->key();
+ string s2 = b->key();
+ return s1 < s2;
+}
+
+
+typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;
+
+
+/* A mapping used to remember for each child process to what goal it
+ belongs, and file descriptors for receiving log data and output
+ path creation commands. */
+struct Child
+{
+ WeakGoalPtr goal;
+ Goal * goal2; // ugly hackery
+ set<int> fds;
+ bool respectTimeouts;
+ bool inBuildSlot;
+ steady_time_point lastOutput; /* time we last got output on stdout/stderr */
+ steady_time_point timeStarted;
+};
+
+
+/* The worker class. */
+class Worker
+{
+private:
+
+ /* Note: the worker should only have strong pointers to the
+ top-level goals. */
+
+ /* The top-level goals of the worker. */
+ Goals topGoals;
+
+ /* Goals that are ready to do some work. */
+ WeakGoals awake;
+
+ /* Goals waiting for a build slot. */
+ WeakGoals wantingToBuild;
+
+ /* Child processes currently running. */
+ std::list<Child> children;
+
+ /* Number of build slots occupied. This includes local builds and
+ substitutions but not remote builds via the build hook. */
+ unsigned int nrLocalBuilds;
+
+ /* Maps used to prevent multiple instantiations of a goal for the
+ same derivation / path. */
+ WeakGoalMap derivationGoals;
+ WeakGoalMap substitutionGoals;
+
+ /* Goals waiting for busy paths to be unlocked. */
+ WeakGoals waitingForAnyGoal;
+
+ /* Goals sleeping for a few seconds (polling a lock). */
+ WeakGoals waitingForAWhile;
+
+ /* Last time the goals in `waitingForAWhile' where woken up. */
+ steady_time_point lastWokenUp;
+
+ /* Cache for pathContentsGood(). */
+ std::map<Path, bool> pathContentsGoodCache;
+
+public:
+
+ /* Set if at least one derivation had a BuildError (i.e. permanent
+ failure). */
+ bool permanentFailure;
+
+ /* Set if at least one derivation had a timeout. */
+ bool timedOut;
+
+ LocalStore & store;
+
+ std::unique_ptr<HookInstance> hook;
+
+ Worker(LocalStore & store);
+ ~Worker();
+
+ /* Make a goal (with caching). */
+ GoalPtr makeDerivationGoal(const Path & drvPath, const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
+ std::shared_ptr<DerivationGoal> makeBasicDerivationGoal(const Path & drvPath,
+ const BasicDerivation & drv, BuildMode buildMode = bmNormal);
+ GoalPtr makeSubstitutionGoal(const Path & storePath, bool repair = false);
+
+ /* Remove a dead goal. */
+ void removeGoal(GoalPtr goal);
+
+ /* Wake up a goal (i.e., there is something for it to do). */
+ void wakeUp(GoalPtr goal);
+
+ /* Return the number of local build and substitution processes
+ currently running (but not remote builds via the build
+ hook). */
+ unsigned int getNrLocalBuilds();
+
+ /* Registers a running child process. `inBuildSlot' means that
+ the process counts towards the jobs limit. */
+ void childStarted(GoalPtr goal, const set<int> & fds,
+ bool inBuildSlot, bool respectTimeouts);
+
+ /* Unregisters a running child process. `wakeSleepers' should be
+ false if there is no sense in waking up goals that are sleeping
+ because they can't run yet (e.g., there is no free build slot,
+ or the hook would still say `postpone'). */
+ void childTerminated(Goal * goal, bool wakeSleepers = true);
+
+ /* Put `goal' to sleep until a build slot becomes available (which
+ might be right away). */
+ void waitForBuildSlot(GoalPtr goal);
+
+ /* Wait for any goal to finish. Pretty indiscriminate way to
+ wait for some resource that some other goal is holding. */
+ void waitForAnyGoal(GoalPtr goal);
+
+ /* Wait for a few seconds and then retry this goal. Used when
+ waiting for a lock held by another process. This kind of
+ polling is inefficient, but POSIX doesn't really provide a way
+ to wait for multiple locks in the main select() loop. */
+ void waitForAWhile(GoalPtr goal);
+
+ /* Loop until the specified top-level goals have finished. */
+ void run(const Goals & topGoals);
+
+ /* Wait for input to become available. */
+ void waitForInput();
+
+ unsigned int exitStatus();
+
+ /* Check whether the given valid path exists and has the right
+ contents. */
+ bool pathContentsGood(const Path & path);
+
+ void markContentsGood(const Path & path);
+};
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+void addToWeakGoals(WeakGoals & goals, GoalPtr p)
+{
+ // FIXME: necessary?
+ // FIXME: O(n)
+ for (auto & i : goals)
+ if (i.lock() == p) return;
+ goals.push_back(p);
+}
+
+
+void Goal::addWaitee(GoalPtr waitee)
+{
+ waitees.insert(waitee);
+ addToWeakGoals(waitee->waiters, shared_from_this());
+}
+
+
+void Goal::waiteeDone(GoalPtr waitee, ExitCode result)
+{
+ assert(waitees.find(waitee) != waitees.end());
+ waitees.erase(waitee);
+
+ trace(format("waitee ‘%1%’ done; %2% left") %
+ waitee->name % waitees.size());
+
+ if (result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure) ++nrFailed;
+
+ if (result == ecNoSubstituters) ++nrNoSubstituters;
+
+ if (result == ecIncompleteClosure) ++nrIncompleteClosure;
+
+ if (waitees.empty() || (result == ecFailed && !settings.keepGoing)) {
+
+ /* If we failed and keepGoing is not set, we remove all
+ remaining waitees. */
+ for (auto & goal : waitees) {
+ WeakGoals waiters2;
+ for (auto & j : goal->waiters)
+ if (j.lock() != shared_from_this()) waiters2.push_back(j);
+ goal->waiters = waiters2;
+ }
+ waitees.clear();
+
+ worker.wakeUp(shared_from_this());
+ }
+}
+
+
+void Goal::amDone(ExitCode result)
+{
+ trace("done");
+ assert(exitCode == ecBusy);
+ assert(result == ecSuccess || result == ecFailed || result == ecNoSubstituters || result == ecIncompleteClosure);
+ exitCode = result;
+ for (auto & i : waiters) {
+ GoalPtr goal = i.lock();
+ if (goal) goal->waiteeDone(shared_from_this(), result);
+ }
+ waiters.clear();
+ worker.removeGoal(shared_from_this());
+}
+
+
+void Goal::trace(const format & f)
+{
+ debug(format("%1%: %2%") % name % f);
+}
+
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+/* Common initialisation performed in child processes. */
+static void commonChildInit(Pipe & logPipe)
+{
+ restoreSignals();
+
+ /* Put the child in a separate session (and thus a separate
+ process group) so that it has no controlling terminal (meaning
+ that e.g. ssh cannot open /dev/tty) and it doesn't receive
+ terminal signals. */
+ if (setsid() == -1)
+ throw SysError(format("creating a new session"));
+
+ /* Dup the write side of the logger pipe into stderr. */
+ if (dup2(logPipe.writeSide.get(), STDERR_FILENO) == -1)
+ throw SysError("cannot pipe standard error into log file");
+
+ /* Dup stderr to stdout. */
+ if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1)
+ throw SysError("cannot dup stderr into stdout");
+
+ /* Reroute stdin to /dev/null. */
+ int fdDevNull = open(pathNullDevice.c_str(), O_RDWR);
+ if (fdDevNull == -1)
+ throw SysError(format("cannot open ‘%1%’") % pathNullDevice);
+ if (dup2(fdDevNull, STDIN_FILENO) == -1)
+ throw SysError("cannot dup null device into stdin");
+ close(fdDevNull);
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+class UserLock
+{
+private:
+ /* POSIX locks suck. If we have a lock on a file, and we open and
+ close that file again (without closing the original file
+ descriptor), we lose the lock. So we have to be *very* careful
+ not to open a lock file on which we are holding a lock. */
+ static Sync<PathSet> lockedPaths_;
+
+ Path fnUserLock;
+ AutoCloseFD fdUserLock;
+
+ string user;
+ uid_t uid;
+ gid_t gid;
+ std::vector<gid_t> supplementaryGIDs;
+
+public:
+ UserLock();
+ ~UserLock();
+
+ void kill();
+
+ string getUser() { return user; }
+ uid_t getUID() { assert(uid); return uid; }
+ uid_t getGID() { assert(gid); return gid; }
+ std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }
+
+ bool enabled() { return uid != 0; }
+
+};
+
+
+Sync<PathSet> UserLock::lockedPaths_;
+
+
+UserLock::UserLock()
+{
+ assert(settings.buildUsersGroup != "");
+
+ /* Get the members of the build-users-group. */
+ struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
+ if (!gr)
+ throw Error(format("the group ‘%1%’ specified in ‘build-users-group’ does not exist")
+ % settings.buildUsersGroup);
+ gid = gr->gr_gid;
+
+ /* Copy the result of getgrnam. */
+ Strings users;
+ for (char * * p = gr->gr_mem; *p; ++p) {
+ debug(format("found build user ‘%1%’") % *p);
+ users.push_back(*p);
+ }
+
+ if (users.empty())
+ throw Error(format("the build users group ‘%1%’ has no members")
+ % settings.buildUsersGroup);
+
+ /* Find a user account that isn't currently in use for another
+ build. */
+ for (auto & i : users) {
+ debug(format("trying user ‘%1%’") % i);
+
+ struct passwd * pw = getpwnam(i.c_str());
+ if (!pw)
+ throw Error(format("the user ‘%1%’ in the group ‘%2%’ does not exist")
+ % i % settings.buildUsersGroup);
+
+ createDirs(settings.nixStateDir + "/userpool");
+
+ fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
+
+ {
+ auto lockedPaths(lockedPaths_.lock());
+ if (lockedPaths->count(fnUserLock))
+ /* We already have a lock on this one. */
+ continue;
+ lockedPaths->insert(fnUserLock);
+ }
+
+ try {
+
+ AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
+ if (!fd)
+ throw SysError(format("opening user lock ‘%1%’") % fnUserLock);
+
+ if (lockFile(fd.get(), ltWrite, false)) {
+ fdUserLock = std::move(fd);
+ user = i;
+ uid = pw->pw_uid;
+
+ /* Sanity check... */
+ if (uid == getuid() || uid == geteuid())
+ throw Error(format("the Nix user should not be a member of ‘%1%’")
+ % settings.buildUsersGroup);
+
+#if __linux__
+ /* Get the list of supplementary groups of this build user. This
+ is usually either empty or contains a group such as "kvm". */
+ supplementaryGIDs.resize(10);
+ int ngroups = supplementaryGIDs.size();
+ int err = getgrouplist(pw->pw_name, pw->pw_gid,
+ supplementaryGIDs.data(), &ngroups);
+ if (err == -1)
+ throw Error(format("failed to get list of supplementary groups for ‘%1%’") % pw->pw_name);
+
+ supplementaryGIDs.resize(ngroups);
+#endif
+
+ return;
+ }
+
+ } catch (...) {
+ lockedPaths_.lock()->erase(fnUserLock);
+ }
+ }
+
+ throw Error(format("all build users are currently in use; "
+ "consider creating additional users and adding them to the ‘%1%’ group")
+ % settings.buildUsersGroup);
+}
+
+
+UserLock::~UserLock()
+{
+ auto lockedPaths(lockedPaths_.lock());
+ assert(lockedPaths->count(fnUserLock));
+ lockedPaths->erase(fnUserLock);
+}
+
+
+void UserLock::kill()
+{
+ killUser(uid);
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
/* A running build-hook process (see $NIX_BUILD_HOOK) together with
   the pipes used to communicate with it. */
struct HookInstance
{
    /* Pipes for talking to the build hook. */
    Pipe toHook;

    /* Pipe for the hook's standard output/error. */
    Pipe fromHook;

    /* Pipe for the builder's standard output/error. */
    Pipe builderOut;

    /* The process ID of the hook. */
    Pid pid;

    HookInstance();

    ~HookInstance();
};
+
+
/* Start the build hook named by $NIX_BUILD_HOOK.  A relative hook
   path is resolved under <libexec>/nix/.  The child gets the to-hook
   pipe on stdin and must write the builder's output to fd 4; the
   hook's own stdout/stderr go through fromHook (via commonChildInit). */
HookInstance::HookInstance()
{
    debug("starting build hook");

    Path buildHook = getEnv("NIX_BUILD_HOOK");
    /* Relative hook names are looked up in the Nix libexec directory. */
    if (string(buildHook, 0, 1) != "/") buildHook = settings.nixLibexecDir + "/nix/" + buildHook;
    buildHook = canonPath(buildHook);

    /* Create a pipe to get the output of the child. */
    fromHook.create();

    /* Create the communication pipes. */
    toHook.create();

    /* Create a pipe to get the output of the builder. */
    builderOut.create();

    /* Fork the hook. */
    pid = startProcess([&]() {

        commonChildInit(fromHook);

        if (chdir("/") == -1) throw SysError("changing into /");

        /* Dup the communication pipes. */
        if (dup2(toHook.readSide.get(), STDIN_FILENO) == -1)
            throw SysError("dupping to-hook read side");

        /* Use fd 4 for the builder's stdout/stderr. */
        if (dup2(builderOut.writeSide.get(), 4) == -1)
            throw SysError("dupping builder's stdout/stderr");

        /* argv: hook name, local system type, and the two timeout
           settings, all as strings. */
        Strings args = {
            baseNameOf(buildHook),
            settings.thisSystem,
            (format("%1%") % settings.maxSilentTime).str(),
            (format("%1%") % settings.buildTimeout).str()
        };

        execv(buildHook.c_str(), stringsToCharPtrs(args).data());

        /* Only reached if execv() failed. */
        throw SysError(format("executing ‘%1%’") % buildHook);
    });

    /* Put the hook in its own process group so we can signal it as a
       unit. */
    pid.setSeparatePG(true);
    /* Close the child's ends of the pipes in the parent. */
    fromHook.writeSide = -1;
    toHook.readSide = -1;
}
+
+
HookInstance::~HookInstance()
{
    /* Close our write end first (signalling EOF to the hook), then
       kill the process if it is still around.  Never throw from a
       destructor. */
    try {
        toHook.writeSide = -1;
        if (pid != -1) pid.kill();
    } catch (...) {
        ignoreException();
    }
}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
/* A set of textual from -> to replacements (used for hash rewriting). */
typedef std::map<std::string, std::string> StringRewrites;


/* Return `s' with every occurrence of each rewrite key replaced by
   its value.  The scan position is advanced past each inserted
   replacement, so a replacement that itself contains the search key
   (e.g. "foo" -> "foofoo") cannot cause an infinite loop or be
   rewritten again.  Rewrites are applied one key at a time, in map
   (lexicographic) order. */
std::string rewriteStrings(std::string s, const StringRewrites & rewrites)
{
    for (auto & i : rewrites) {
        size_t j = 0;
        while ((j = s.find(i.first, j)) != std::string::npos) {
            s.replace(j, i.first.size(), i.second);
            j += i.second.size(); // skip over the replacement text
        }
    }
    return s;
}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
/* Reply from the build hook: it accepts the job, declines it, or
   asks us to try again later. */
typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;

class SubstitutionGoal;
+
+class DerivationGoal : public Goal
+{
+private:
+ /* Whether to use an on-disk .drv file. */
+ bool useDerivation;
+
+ /* The path of the derivation. */
+ Path drvPath;
+
+ /* The specific outputs that we need to build. Empty means all of
+ them. */
+ StringSet wantedOutputs;
+
+ /* Whether additional wanted outputs have been added. */
+ bool needRestart = false;
+
+ /* Whether to retry substituting the outputs after building the
+ inputs. */
+ bool retrySubstitution = false;
+
+ /* The derivation stored at drvPath. */
+ std::unique_ptr<BasicDerivation> drv;
+
+ /* The remainder is state held during the build. */
+
+ /* Locks on the output paths. */
+ PathLocks outputLocks;
+
+ /* All input paths (that is, the union of FS closures of the
+ immediate input paths). */
+ PathSet inputPaths;
+
+ /* Referenceable paths (i.e., input and output paths). */
+ PathSet allPaths;
+
+ /* Outputs that are already valid. If we're repairing, these are
+ the outputs that are valid *and* not corrupt. */
+ PathSet validPaths;
+
+ /* Outputs that are corrupt or not valid. */
+ PathSet missingPaths;
+
+ /* User selected for running the builder. */
+ std::unique_ptr<UserLock> buildUser;
+
+ /* The process ID of the builder. */
+ Pid pid;
+
+ /* The temporary directory. */
+ Path tmpDir;
+
+ /* The path of the temporary directory in the sandbox. */
+ Path tmpDirInSandbox;
+
+ /* File descriptor for the log file. */
+ AutoCloseFD fdLogFile;
+ std::shared_ptr<BufferedSink> logFileSink, logSink;
+
+ /* Number of bytes received from the builder's stdout/stderr. */
+ unsigned long logSize;
+
+ /* The most recent log lines. */
+ std::list<std::string> logTail;
+
+ std::string currentLogLine;
+ size_t currentLogLinePos = 0; // to handle carriage return
+
+ /* Pipe for the builder's standard output/error. */
+ Pipe builderOut;
+
+ /* Pipe for synchronising updates to the builder user namespace. */
+ Pipe userNamespaceSync;
+
+ /* The build hook. */
+ std::unique_ptr<HookInstance> hook;
+
+ /* Whether we're currently doing a chroot build. */
+ bool useChroot = false;
+
+ Path chrootRootDir;
+
+ /* RAII object to delete the chroot directory. */
+ std::shared_ptr<AutoDelete> autoDelChroot;
+
+ /* Whether this is a fixed-output derivation. */
+ bool fixedOutput;
+
+ /* Whether to run the build in a private network namespace. */
+ bool privateNetwork = false;
+
+ typedef void (DerivationGoal::*GoalState)();
+ GoalState state;
+
+ /* Stuff we need to pass to initChild(). */
+ struct ChrootPath {
+ Path source;
+ bool optional;
+ ChrootPath(Path source = "", bool optional = false)
+ : source(source), optional(optional)
+ { }
+ };
+ typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path
+ DirsInChroot dirsInChroot;
+
+ typedef map<string, string> Environment;
+ Environment env;
+
+#if __APPLE__
+ typedef string SandboxProfile;
+ SandboxProfile additionalSandboxProfile;
+ AutoDelete autoDelSandbox;
+#endif
+
+ /* Hash rewriting. */
+ StringRewrites inputRewrites, outputRewrites;
+ typedef map<Path, Path> RedirectedOutputs;
+ RedirectedOutputs redirectedOutputs;
+
+ BuildMode buildMode;
+
+ /* If we're repairing without a chroot, there may be outputs that
+ are valid but corrupt. So we redirect these outputs to
+ temporary paths. */
+ PathSet redirectedBadOutputs;
+
+ BuildResult result;
+
+ /* The current round, if we're building multiple times. */
+ unsigned int curRound = 1;
+
+ unsigned int nrRounds;
+
+ /* Path registration info from the previous round, if we're
+ building multiple times. Since this contains the hash, it
+ allows us to compare whether two rounds produced the same
+ result. */
+ ValidPathInfos prevInfos;
+
+ const uid_t sandboxUid = 1000;
+ const gid_t sandboxGid = 100;
+
+ const static Path homeDir;
+
+public:
+ DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs,
+ Worker & worker, BuildMode buildMode = bmNormal);
+ DerivationGoal(const Path & drvPath, const BasicDerivation & drv,
+ Worker & worker, BuildMode buildMode = bmNormal);
+ ~DerivationGoal();
+
+ void timedOut() override;
+
+ string key() override
+ {
+ /* Ensure that derivations get built in order of their name,
+ i.e. a derivation named "aardvark" always comes before
+ "baboon". And substitution goals always happen before
+ derivation goals (due to "b$"). */
+ return "b$" + storePathToName(drvPath) + "$" + drvPath;
+ }
+
+ void work() override;
+
+ Path getDrvPath()
+ {
+ return drvPath;
+ }
+
+ /* Add wanted outputs to an already existing derivation goal. */
+ void addWantedOutputs(const StringSet & outputs);
+
+ BuildResult getResult() { return result; }
+
+private:
+ /* The states. */
+ void getDerivation();
+ void loadDerivation();
+ void haveDerivation();
+ void outputsSubstituted();
+ void closureRepaired();
+ void inputsRealised();
+ void tryToBuild();
+ void buildDone();
+
+ /* Is the build hook willing to perform the build? */
+ HookReply tryBuildHook();
+
+ /* Start building a derivation. */
+ void startBuilder();
+
+ /* Fill in the environment for the builder. */
+ void initEnv();
+
+ /* Write a JSON file containing the derivation attributes. */
+ void writeStructuredAttrs();
+
+ /* Make a file owned by the builder. */
+ void chownToBuilder(const Path & path);
+
+ /* Handle the exportReferencesGraph attribute. */
+ void doExportReferencesGraph();
+
+ /* Run the builder's process. */
+ void runChild();
+
+ friend int childEntry(void *);
+
+ /* Check that the derivation outputs all exist and register them
+ as valid. */
+ void registerOutputs();
+
+ /* Open a log file and a pipe to it. */
+ Path openLogFile();
+
+ /* Close the log file. */
+ void closeLogFile();
+
+ /* Delete the temporary directory, if we have one. */
+ void deleteTmpDir(bool force);
+
+ /* Callback used by the worker to write to the log. */
+ void handleChildOutput(int fd, const string & data) override;
+ void handleEOF(int fd) override;
+ void flushLine();
+
+ /* Return the set of (in)valid paths. */
+ PathSet checkPathValidity(bool returnValid, bool checkHash);
+
+ /* Abort the goal if `path' failed to build. */
+ bool pathFailed(const Path & path);
+
+ /* Forcibly kill the child process, if any. */
+ void killChild();
+
+ Path addHashRewrite(const Path & path);
+
+ void repairClosure();
+
+ void done(BuildResult::Status status, const string & msg = "");
+};
+
+
/* Definition of the static homeDir member.  Presumably this
   deliberately non-existent path is used as the builder's HOME —
   confirm in initEnv(). */
const Path DerivationGoal::homeDir = "/homeless-shelter";
+
+
/* Goal for a derivation stored on disk at `drvPath'.  The first
   state ensures the .drv file exists (substituting it if needed). */
DerivationGoal::DerivationGoal(const Path & drvPath, const StringSet & wantedOutputs,
    Worker & worker, BuildMode buildMode)
    : Goal(worker)
    , useDerivation(true)
    , drvPath(drvPath)
    , wantedOutputs(wantedOutputs)
    , buildMode(buildMode)
{
    state = &DerivationGoal::getDerivation;
    name = (format("building of ‘%1%’") % drvPath).str();
    trace("created");
}
+
+
+DerivationGoal::DerivationGoal(const Path & drvPath, const BasicDerivation & drv,
+ Worker & worker, BuildMode buildMode)
+ : Goal(worker)
+ , useDerivation(false)
+ , drvPath(drvPath)
+ , buildMode(buildMode)
+{
+ this->drv = std::unique_ptr<BasicDerivation>(new BasicDerivation(drv));
+ state = &DerivationGoal::haveDerivation;
+ name = (format("building of %1%") % showPaths(drv.outputPaths())).str();
+ trace("created");
+
+ /* Prevent the .chroot directory from being
+ garbage-collected. (See isActiveTempFile() in gc.cc.) */
+ worker.store.addTempRoot(drvPath);
+}
+
+
DerivationGoal::~DerivationGoal()
{
    /* Careful: we should never ever throw an exception from a
       destructor.  Each cleanup step is best-effort and independent
       of the others. */
    try { killChild(); } catch (...) { ignoreException(); }
    try { deleteTmpDir(false); } catch (...) { ignoreException(); }
    try { closeLogFile(); } catch (...) { ignoreException(); }
}
+
+
/* Forcibly terminate the builder (and the hook, if any) and tell the
   worker the child is gone. */
void DerivationGoal::killChild()
{
    if (pid != -1) {
        worker.childTerminated(this);

        if (buildUser) {
            /* If we're using a build user, then there is a tricky
               race condition: if we kill the build user before the
               child has done its setuid() to the build user uid, then
               it won't be killed, and we'll potentially lock up in
               pid.wait(). So also send a conventional kill to the
               child. */
            ::kill(-pid, SIGKILL); /* ignore the result */
            buildUser->kill();
            pid.wait();
        } else
            pid.kill();

        assert(pid == -1);
    }

    /* Also drop the hook instance, killing the hook process (see
       HookInstance's destructor). */
    hook.reset();
}
+
+
/* Called by the worker when this goal timed out: kill the builder
   and finish with a TimedOut result. */
void DerivationGoal::timedOut()
{
    killChild();
    done(BuildResult::TimedOut);
}
+
+
void DerivationGoal::work()
{
    /* Dispatch to the current state handler; the handlers themselves
       advance `state'. */
    (this->*state)();
}
+
+
+void DerivationGoal::addWantedOutputs(const StringSet & outputs)
+{
+ /* If we already want all outputs, there is nothing to do. */
+ if (wantedOutputs.empty()) return;
+
+ if (outputs.empty()) {
+ wantedOutputs.clear();
+ needRestart = true;
+ } else
+ for (auto & i : outputs)
+ if (wantedOutputs.find(i) == wantedOutputs.end()) {
+ wantedOutputs.insert(i);
+ needRestart = true;
+ }
+}
+
+
void DerivationGoal::getDerivation()
{
    trace("init");

    /* The first thing to do is to make sure that the derivation
       exists. If it doesn't, it may be created through a
       substitute. */
    if (buildMode == bmNormal && worker.store.isValidPath(drvPath)) {
        loadDerivation();
        return;
    }

    /* Substitute the .drv file itself; once that goal finishes we
       resume in loadDerivation(). */
    addWaitee(worker.makeSubstitutionGoal(drvPath));

    state = &DerivationGoal::loadDerivation;
}
+
+
+void DerivationGoal::loadDerivation()
+{
+ trace("loading derivation");
+
+ if (nrFailed != 0) {
+ printError(format("cannot build missing derivation ‘%1%’") % drvPath);
+ done(BuildResult::MiscFailure);
+ return;
+ }
+
+ /* `drvPath' should already be a root, but let's be on the safe
+ side: if the user forgot to make it a root, we wouldn't want
+ things being garbage collected while we're busy. */
+ worker.store.addTempRoot(drvPath);
+
+ assert(worker.store.isValidPath(drvPath));
+
+ /* Get the derivation. */
+ drv = std::unique_ptr<BasicDerivation>(new Derivation(worker.store.derivationFromPath(drvPath)));
+
+ haveDerivation();
+}
+
+
/* We have the parsed derivation; decide whether the outputs are
   already valid, can be substituted, or must be built. */
void DerivationGoal::haveDerivation()
{
    trace("have derivation");

    /* Protect the outputs from garbage collection while we work. */
    for (auto & i : drv->outputs)
        worker.store.addTempRoot(i.second.path);

    /* Check what outputs paths are not already valid. */
    PathSet invalidOutputs = checkPathValidity(false, buildMode == bmRepair);

    /* If they are all valid, then we're done. */
    if (invalidOutputs.size() == 0 && buildMode == bmNormal) {
        done(BuildResult::AlreadyValid);
        return;
    }

    /* Reject doing a hash build of anything other than a fixed-output
       derivation. */
    if (buildMode == bmHash) {
        /* Note: operator[] below is safe because the preceding find()
           check short-circuits when "out" is absent. */
        if (drv->outputs.size() != 1 ||
            drv->outputs.find("out") == drv->outputs.end() ||
            drv->outputs["out"].hashAlgo == "")
            throw Error(format("cannot do a hash build of non-fixed-output derivation ‘%1%’") % drvPath);
    }

    /* We are first going to try to create the invalid output paths
       through substitutes. If that doesn't work, we'll build
       them. */
    if (settings.useSubstitutes && drv->substitutesAllowed())
        for (auto & i : invalidOutputs)
            addWaitee(worker.makeSubstitutionGoal(i, buildMode == bmRepair));

    if (waitees.empty()) /* to prevent hang (no wake-up event) */
        outputsSubstituted();
    else
        state = &DerivationGoal::outputsSubstituted;
}
+
+
/* State entered when all substitution goals for the outputs have
   finished (successfully or not).  Either we're done, we repair, or
   we fall back to building, which first requires realising the
   inputs. */
void DerivationGoal::outputsSubstituted()
{
    trace("all outputs substituted (maybe)");

    /* Genuine substituter failures (beyond "no substituter" and
       "incomplete closure") are fatal unless --fallback is given. */
    if (nrFailed > 0 && nrFailed > nrNoSubstituters + nrIncompleteClosure && !settings.tryFallback) {
        done(BuildResult::TransientFailure, (format("some substitutes for the outputs of derivation ‘%1%’ failed (usually happens due to networking issues); try ‘--fallback’ to build derivation from source ") % drvPath).str());
        return;
    }

    /* If the substitutes form an incomplete closure, then we should
       build the dependencies of this derivation, but after that, we
       can still use the substitutes for this derivation itself. */
    if (nrIncompleteClosure > 0 && !retrySubstitution) retrySubstitution = true;

    /* Reset the failure counters for the next wave of waitees. */
    nrFailed = nrNoSubstituters = nrIncompleteClosure = 0;

    /* addWantedOutputs() widened our output set while we were
       substituting; redo the validity check from the top. */
    if (needRestart) {
        needRestart = false;
        haveDerivation();
        return;
    }

    unsigned int nrInvalid = checkPathValidity(false, buildMode == bmRepair).size();
    if (buildMode == bmNormal && nrInvalid == 0) {
        done(BuildResult::Substituted);
        return;
    }
    if (buildMode == bmRepair && nrInvalid == 0) {
        repairClosure();
        return;
    }
    if (buildMode == bmCheck && nrInvalid > 0)
        throw Error(format("some outputs of ‘%1%’ are not valid, so checking is not possible") % drvPath);

    /* Otherwise, at least one of the output paths could not be
       produced using a substitute. So we have to build instead. */

    /* Make sure checkPathValidity() from now on checks all
       outputs. */
    wantedOutputs = PathSet();

    /* The inputs must be built before we can build this goal. */
    if (useDerivation)
        for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs)
            addWaitee(worker.makeDerivationGoal(i.first, i.second, buildMode == bmRepair ? bmRepair : bmNormal));

    for (auto & i : drv->inputSrcs) {
        if (worker.store.isValidPath(i)) continue;
        if (!settings.useSubstitutes)
            throw Error(format("dependency of ‘%1%’ of ‘%2%’ does not exist, and substitution is disabled")
                % i % drvPath);
        addWaitee(worker.makeSubstitutionGoal(i));
    }

    if (waitees.empty()) /* to prevent hang (no wake-up event) */
        inputsRealised();
    else
        state = &DerivationGoal::inputsRealised;
}
+
+
void DerivationGoal::repairClosure()
{
    /* If we're repairing, we now know that our own outputs are valid.
       Now check whether the other paths in the outputs closure are
       good. If not, then start derivation goals for the derivations
       that produced those outputs. */

    /* Get the output closure. */
    PathSet outputClosure;
    for (auto & i : drv->outputs) {
        if (!wantOutput(i.first, wantedOutputs)) continue;
        worker.store.computeFSClosure(i.second.path, outputClosure);
    }

    /* Filter out our own outputs (which we have already checked). */
    for (auto & i : drv->outputs)
        outputClosure.erase(i.second.path);

    /* Get all dependencies of this derivation so that we know which
       derivation is responsible for which path in the output
       closure. */
    PathSet inputClosure;
    if (useDerivation) worker.store.computeFSClosure(drvPath, inputClosure);
    std::map<Path, Path> outputsToDrv;
    for (auto & i : inputClosure)
        if (isDerivation(i)) {
            Derivation drv = worker.store.derivationFromPath(i);
            for (auto & j : drv.outputs)
                outputsToDrv[j.second.path] = i;
        }

    /* Check each path (slow!). */
    PathSet broken;
    for (auto & i : outputClosure) {
        if (worker.pathContentsGood(i)) continue;
        printError(format("found corrupted or missing path ‘%1%’ in the output closure of ‘%2%’") % i % drvPath);
        /* If we don't know which derivation produced this path, fall
           back to substituting it (in repair mode). */
        Path drvPath2 = outputsToDrv[i];
        if (drvPath2 == "")
            addWaitee(worker.makeSubstitutionGoal(i, true));
        else
            addWaitee(worker.makeDerivationGoal(drvPath2, PathSet(), bmRepair));
    }

    /* Nothing was broken: the closure is already valid. */
    if (waitees.empty()) {
        done(BuildResult::AlreadyValid);
        return;
    }

    state = &DerivationGoal::closureRepaired;
}
+
+
/* State entered when all repair goals started by repairClosure()
   have finished. */
void DerivationGoal::closureRepaired()
{
    trace("closure repaired");
    if (nrFailed > 0)
        throw Error(format("some paths in the output closure of derivation ‘%1%’ could not be repaired") % drvPath);
    done(BuildResult::AlreadyValid);
}
+
+
/* State entered when all input derivations and input sources have
   been realised; compute the input closure and move on to building. */
void DerivationGoal::inputsRealised()
{
    trace("all inputs realised");

    if (nrFailed != 0) {
        if (!useDerivation)
            throw Error(format("some dependencies of ‘%1%’ are missing") % drvPath);
        printError(
            format("cannot build derivation ‘%1%’: %2% dependencies couldn't be built")
            % drvPath % nrFailed);
        done(BuildResult::DependencyFailed);
        return;
    }

    /* Substitution earlier failed only because of an incomplete
       closure; now that the inputs exist, try substituting again. */
    if (retrySubstitution) {
        haveDerivation();
        return;
    }

    /* Gather information necessary for computing the closure and/or
       running the build hook. */

    /* The outputs are referenceable paths. */
    for (auto & i : drv->outputs) {
        debug(format("building path ‘%1%’") % i.second.path);
        allPaths.insert(i.second.path);
    }

    /* Determine the full set of input paths. */

    /* First, the input derivations. */
    if (useDerivation)
        for (auto & i : dynamic_cast<Derivation *>(drv.get())->inputDrvs) {
            /* Add the relevant output closures of the input derivation
               `i' as input paths. Only add the closures of output paths
               that are specified as inputs. */
            assert(worker.store.isValidPath(i.first));
            Derivation inDrv = worker.store.derivationFromPath(i.first);
            for (auto & j : i.second)
                if (inDrv.outputs.find(j) != inDrv.outputs.end())
                    worker.store.computeFSClosure(inDrv.outputs[j].path, inputPaths);
                else
                    throw Error(
                        format("derivation ‘%1%’ requires non-existent output ‘%2%’ from input derivation ‘%3%’")
                        % drvPath % j % i.first);
        }

    /* Second, the input sources. */
    worker.store.computeFSClosure(drv->inputSrcs, inputPaths);

    debug(format("added input paths %1%") % showPaths(inputPaths));

    allPaths.insert(inputPaths.begin(), inputPaths.end());

    /* Is this a fixed-output derivation?  It is iff every output
       declares an expected hash. */
    fixedOutput = true;
    for (auto & i : drv->outputs)
        if (i.second.hash == "") fixedOutput = false;

    /* Don't repeat fixed-output derivations since they're already
       verified by their output hash.*/
    nrRounds = fixedOutput ? 1 : settings.buildRepeat + 1;

    /* Okay, try to build. Note that here we don't wait for a build
       slot to become available, since we don't need one if there is a
       build hook. */
    state = &DerivationGoal::tryToBuild;
    worker.wakeUp(shared_from_this());

    result = BuildResult();
}
+
+
/* Try to obtain the output locks and start the actual build, either
   via the build hook or locally.  May put the goal back to sleep if
   locks or build slots are unavailable. */
void DerivationGoal::tryToBuild()
{
    trace("trying to build");

    /* Check for the possibility that some other goal in this process
       has locked the output since we checked in haveDerivation().
       (It can't happen between here and the lockPaths() call below
       because we're not allowing multi-threading.) If so, put this
       goal to sleep until another goal finishes, then try again. */
    for (auto & i : drv->outputs)
        if (pathIsLockedByMe(worker.store.toRealPath(i.second.path))) {
            debug(format("putting derivation ‘%1%’ to sleep because ‘%2%’ is locked by another goal")
                % drvPath % i.second.path);
            worker.waitForAnyGoal(shared_from_this());
            return;
        }

    /* Obtain locks on all output paths. The locks are automatically
       released when we exit this function or Nix crashes. If we
       can't acquire the lock, then continue; hopefully some other
       goal can start a build, and if not, the main loop will sleep a
       few seconds and then retry this goal. */
    PathSet lockFiles;
    for (auto & outPath : drv->outputPaths())
        lockFiles.insert(worker.store.toRealPath(outPath));

    if (!outputLocks.lockPaths(lockFiles, "", false)) {
        worker.waitForAWhile(shared_from_this());
        return;
    }

    /* Now check again whether the outputs are valid. This is because
       another process may have started building in parallel. After
       it has finished and released the locks, we can (and should)
       reuse its results. (Strictly speaking the first check can be
       omitted, but that would be less efficient.) Note that since we
       now hold the locks on the output paths, no other process can
       build this derivation, so no further checks are necessary. */
    validPaths = checkPathValidity(true, buildMode == bmRepair);
    if (buildMode != bmCheck && validPaths.size() == drv->outputs.size()) {
        debug(format("skipping build of derivation ‘%1%’, someone beat us to it") % drvPath);
        outputLocks.setDeletion(true);
        done(BuildResult::AlreadyValid);
        return;
    }

    /* In check mode we rebuild everything; otherwise only the
       not-yet-valid outputs are missing. */
    missingPaths = drv->outputPaths();
    if (buildMode != bmCheck)
        for (auto & i : validPaths) missingPaths.erase(i);

    /* If any of the outputs already exist but are not valid, delete
       them. */
    for (auto & i : drv->outputs) {
        Path path = i.second.path;
        if (worker.store.isValidPath(path)) continue;
        debug(format("removing invalid path ‘%1%’") % path);
        deletePath(worker.store.toRealPath(path));
    }

    /* Don't do a remote build if the derivation has the attribute
       `preferLocalBuild' set. Also, check and repair modes are only
       supported for local builds. */
    bool buildLocally = buildMode != bmNormal || drv->willBuildLocally();

    /* Is the build hook willing to accept this job? */
    if (!buildLocally) {
        switch (tryBuildHook()) {
            case rpAccept:
                /* Yes, it has started doing so. Wait until we get
                   EOF from the hook. */
                result.startTime = time(0); // inexact
                state = &DerivationGoal::buildDone;
                return;
            case rpPostpone:
                /* Not now; wait until at least one child finishes or
                   the wake-up timeout expires. */
                worker.waitForAWhile(shared_from_this());
                outputLocks.unlock();
                return;
            case rpDecline:
                /* We should do it ourselves. */
                break;
        }
    }

    /* Make sure that we are allowed to start a build. If this
       derivation prefers to be done locally, do it even if
       maxBuildJobs is 0. */
    unsigned int curBuilds = worker.getNrLocalBuilds();
    if (curBuilds >= settings.maxBuildJobs && !(buildLocally && curBuilds == 0)) {
        worker.waitForBuildSlot(shared_from_this());
        outputLocks.unlock();
        return;
    }

    try {

        /* Okay, we have to build. */
        startBuilder();

    } catch (BuildError & e) {
        /* Failure to even start the builder is a permanent failure
           (e.g. unsupported platform, bad attributes). */
        printError(e.msg());
        outputLocks.unlock();
        buildUser.reset();
        worker.permanentFailure = true;
        done(BuildResult::InputRejected, e.msg());
        return;
    }

    /* This state will be reached when we get EOF on the child's
       log pipe. */
    state = &DerivationGoal::buildDone;
}
+
+
+void replaceValidPath(const Path & storePath, const Path tmpPath)
+{
+ /* We can't atomically replace storePath (the original) with
+ tmpPath (the replacement), so we have to move it out of the
+ way first. We'd better not be interrupted here, because if
+ we're repairing (say) Glibc, we end up with a broken system. */
+ Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % rand()).str();
+ if (pathExists(storePath))
+ rename(storePath.c_str(), oldPath.c_str());
+ if (rename(tmpPath.c_str(), storePath.c_str()) == -1)
+ throw SysError(format("moving ‘%1%’ to ‘%2%’") % tmpPath % storePath);
+ deletePath(oldPath);
+}
+
+
/* BuildError subclass signalling a non-deterministic build; mapped
   to BuildResult::NotDeterministic in buildDone(). */
MakeError(NotDeterministic, BuildError)
+
+
/* State entered when we get EOF on the builder's (or hook's) log
   pipe: reap the child, check its exit status, register the outputs,
   and translate failures into a BuildResult status. */
void DerivationGoal::buildDone()
{
    trace("build done");

    /* Release the build user at the end of this function. We don't do
       it right away because we don't want another build grabbing this
       uid and then messing around with our output. */
    Finally releaseBuildUser([&]() { buildUser.reset(); });

    /* Since we got an EOF on the logger pipe, the builder is presumed
       to have terminated. In fact, the builder could also have
       simply have closed its end of the pipe, so just to be sure,
       kill it. */
    int status = hook ? hook->pid.kill() : pid.kill();

    debug(format("builder process for ‘%1%’ finished") % drvPath);

    result.timesBuilt++;
    result.stopTime = time(0);

    /* So the child is gone now. */
    worker.childTerminated(this);

    /* Close the read side of the logger pipe. */
    if (hook) {
        hook->builderOut.readSide = -1;
        hook->fromHook.readSide = -1;
    }
    else builderOut.readSide = -1;

    /* Close the log file. */
    closeLogFile();

    /* When running under a build user, make sure that all processes
       running under that uid are gone. This is to prevent a
       malicious user from leaving behind a process that keeps files
       open and modifies them after they have been chown'ed to
       root. */
    if (buildUser) buildUser->kill();

    bool diskFull = false;

    try {

        /* Check the exit status. */
        if (!statusOk(status)) {

            /* Heuristically check whether the build failure may have
               been caused by a disk full condition. We have no way
               of knowing whether the build actually got an ENOSPC.
               So instead, check if the disk is (nearly) full now. If
               so, we don't mark this build as a permanent failure. */
#if HAVE_STATVFS
            unsigned long long required = 8ULL * 1024 * 1024; // FIXME: make configurable
            struct statvfs st;
            if (statvfs(worker.store.realStoreDir.c_str(), &st) == 0 &&
                (unsigned long long) st.f_bavail * st.f_bsize < required)
                diskFull = true;
            if (statvfs(tmpDir.c_str(), &st) == 0 &&
                (unsigned long long) st.f_bavail * st.f_bsize < required)
                diskFull = true;
#endif

            deleteTmpDir(false);

            /* Move paths out of the chroot for easier debugging of
               build failures. */
            if (useChroot && buildMode == bmNormal)
                for (auto & i : missingPaths)
                    if (pathExists(chrootRootDir + i))
                        rename((chrootRootDir + i).c_str(), i.c_str());

            std::string msg = (format("builder for ‘%1%’ %2%")
                % drvPath % statusToString(status)).str();

            /* Include the tail of the build log in the error message
               unless the user already saw the full log. */
            if (!settings.verboseBuild && !logTail.empty()) {
                msg += (format("; last %d log lines:") % logTail.size()).str();
                for (auto & line : logTail)
                    msg += "\n  " + line;
            }

            if (diskFull)
                msg += "\nnote: build failure may have been caused by lack of free disk space";

            throw BuildError(msg);
        }

        /* Compute the FS closure of the outputs and register them as
           being valid. */
        registerOutputs();

        if (buildMode == bmCheck) {
            done(BuildResult::Built);
            return;
        }

        /* Delete unused redirected outputs (when doing hash rewriting). */
        for (auto & i : redirectedOutputs)
            deletePath(i.second);

        /* Delete the chroot (if we were using one). */
        autoDelChroot.reset(); /* this runs the destructor */

        deleteTmpDir(true);

        /* Repeat the build if necessary. */
        if (curRound++ < nrRounds) {
            outputLocks.unlock();
            state = &DerivationGoal::tryToBuild;
            worker.wakeUp(shared_from_this());
            return;
        }

        /* It is now safe to delete the lock files, since all future
           lockers will see that the output paths are valid; they will
           not create new lock files with the same names as the old
           (unlinked) lock files. */
        outputLocks.setDeletion(true);
        outputLocks.unlock();

    } catch (BuildError & e) {
        if (!hook)
            printError(e.msg());
        outputLocks.unlock();

        /* Translate the failure into a BuildResult status.  For hook
           builds, exit code 101 indicates a timeout and exit code
           100 an "expected" build failure; anything else is treated
           as miscellaneous. */
        BuildResult::Status st = BuildResult::MiscFailure;

        if (hook && WIFEXITED(status) && WEXITSTATUS(status) == 101)
            st = BuildResult::TimedOut;

        else if (hook && (!WIFEXITED(status) || WEXITSTATUS(status) != 100)) {
        }

        else {
            st =
                dynamic_cast<NotDeterministic*>(&e) ? BuildResult::NotDeterministic :
                statusOk(status) ? BuildResult::OutputRejected :
                fixedOutput || diskFull ? BuildResult::TransientFailure :
                BuildResult::PermanentFailure;
        }

        done(st, e.msg());
        return;
    }

    done(BuildResult::Built);
}
+
+
/* Ask the build hook whether it wants to perform this build.  On
   rpAccept, the hook has been told the input/output sets and the
   goal should wait for it; on rpDecline/rpPostpone the caller builds
   locally or retries later. */
HookReply DerivationGoal::tryBuildHook()
{
    if (!settings.useBuildHook || getEnv("NIX_BUILD_HOOK") == "" || !useDerivation) return rpDecline;

    /* The worker keeps a single idle hook instance around for reuse. */
    if (!worker.hook)
        worker.hook = std::make_unique<HookInstance>();

    try {

        /* Tell the hook about system features (beyond the system type)
           required from the build machine. (The hook could parse the
           drv file itself, but this is easier.) */
        Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures"));
        for (auto & i : features) checkStoreName(i); /* !!! abuse */

        /* Send the request to the hook.  The first field says whether
           we still have a local build slot free. */
        writeLine(worker.hook->toHook.writeSide.get(), (format("%1% %2% %3% %4%")
            % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0")
            % drv->platform % drvPath % concatStringsSep(",", features)).str());

        /* Read the first line of input, which should be a word indicating
           whether the hook wishes to perform the build.  Lines not
           prefixed with "# " are hook log output and are passed
           through to stderr. */
        string reply;
        while (true) {
            string s = readLine(worker.hook->fromHook.readSide.get());
            if (string(s, 0, 2) == "# ") {
                reply = string(s, 2);
                break;
            }
            s += "\n";
            writeToStderr(s);
        }

        debug(format("hook reply is ‘%1%’") % reply);

        if (reply == "decline" || reply == "postpone")
            return reply == "decline" ? rpDecline : rpPostpone;
        else if (reply != "accept")
            throw Error(format("bad hook reply ‘%1%’") % reply);

    } catch (SysError & e) {
        /* EPIPE means the hook died; fall back to a local build
           rather than failing the goal. */
        if (e.errNo == EPIPE) {
            printError("build hook died unexpectedly: %s",
                chomp(drainFD(worker.hook->fromHook.readSide.get())));
            worker.hook = 0;
            return rpDecline;
        } else
            throw;
    }

    printMsg(lvlTalkative, format("using hook to build path(s) %1%") % showPaths(missingPaths));

    /* Take ownership of the hook instance for the duration of this
       build. */
    hook = std::move(worker.hook);

    /* Tell the hook all the inputs that have to be copied to the
       remote system. This unfortunately has to contain the entire
       derivation closure to ensure that the validity invariant holds
       on the remote system. (I.e., it's unfortunate that we have to
       list it since the remote system *probably* already has it.) */
    PathSet allInputs;
    allInputs.insert(inputPaths.begin(), inputPaths.end());
    worker.store.computeFSClosure(drvPath, allInputs);

    string s;
    for (auto & i : allInputs) { s += i; s += ' '; }
    writeLine(hook->toHook.writeSide.get(), s);

    /* Tell the hooks the missing outputs that have to be copied back
       from the remote system. */
    s = "";
    for (auto & i : missingPaths) { s += i; s += ' '; }
    writeLine(hook->toHook.writeSide.get(), s);

    /* Signal EOF on the request channel: the hook now has everything
       it needs. */
    hook->toHook.writeSide = -1;

    /* Create the log file and pipe. */
    Path logFile = openLogFile();

    /* Have the worker watch both the hook's own output and the
       remote builder's output. */
    set<int> fds;
    fds.insert(hook->fromHook.readSide.get());
    fds.insert(hook->builderOut.readSide.get());
    worker.childStarted(shared_from_this(), fds, false, false);

    return rpAccept;
}
+
+
+void chmod_(const Path & path, mode_t mode)
+{
+ if (chmod(path.c_str(), mode) == -1)
+ throw SysError(format("setting permissions on ‘%1%’") % path);
+}
+
+
/* Entry point for the child process: `arg' is the DerivationGoal
   that spawned it (declared a friend of DerivationGoal above). */
int childEntry(void * arg)
{
    ((DerivationGoal *) arg)->runChild();
    return 1;
}
+
+
+/* Set up everything needed to run the builder locally and start the
+   child process: decide whether to use a sandbox (chroot), allocate a
+   build user, create the temporary build directory, construct the
+   builder's environment, prepare the chroot tree (Linux) or sandbox
+   profile input (Darwin), run the configured pre-build hook, and
+   finally fork/clone the child that executes runChild().  Any setup
+   failure is reported by throwing. */
+void DerivationGoal::startBuilder()
+{
+    auto f = format(
+        buildMode == bmRepair ? "repairing path(s) %1%" :
+        buildMode == bmCheck ? "checking path(s) %1%" :
+        nrRounds > 1 ? "building path(s) %1% (round %2%/%3%)" :
+        "building path(s) %1%");
+    /* Allow %2%/%3% to go unused when we're not doing multiple
+       rounds. */
+    f.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit);
+    printInfo(f % showPaths(missingPaths) % curRound % nrRounds);
+
+    /* Right platform? */
+    if (!drv->canBuildLocally()) {
+        throw Error(
+            format("a ‘%1%’ is required to build ‘%3%’, but I am a ‘%2%’")
+            % drv->platform % settings.thisSystem % drvPath);
+    }
+
+#if __APPLE__
+    additionalSandboxProfile = get(drv->env, "__sandboxProfile");
+#endif
+
+    /* Are we doing a chroot build? */
+    {
+        if (settings.sandboxMode == smEnabled) {
+            if (get(drv->env, "__noChroot") == "1")
+                throw Error(format("derivation ‘%1%’ has ‘__noChroot’ set, "
+                    "but that's not allowed when ‘build-use-sandbox’ is ‘true’") % drvPath);
+#if __APPLE__
+            if (additionalSandboxProfile != "")
+                throw Error(format("derivation ‘%1%’ specifies a sandbox profile, "
+                    "but this is only allowed when ‘build-use-sandbox’ is ‘relaxed’") % drvPath);
+#endif
+            useChroot = true;
+        }
+        else if (settings.sandboxMode == smDisabled)
+            useChroot = false;
+        else if (settings.sandboxMode == smRelaxed)
+            useChroot = !fixedOutput && get(drv->env, "__noChroot") != "1";
+    }
+
+    /* When the logical store dir differs from the physical one, paths
+       only resolve inside a chroot, so force one. */
+    if (worker.store.storeDir != worker.store.realStoreDir)
+        useChroot = true;
+
+    /* If `build-users-group' is not empty, then we have to build as
+       one of the members of that group. */
+    if (settings.buildUsersGroup != "" && getuid() == 0) {
+        buildUser = std::make_unique<UserLock>();
+
+        /* Make sure that no other processes are executing under this
+           uid. */
+        buildUser->kill();
+    }
+
+    /* Create a temporary directory where the build will take
+       place. */
+    auto drvName = storePathToName(drvPath);
+    tmpDir = createTempDir("", "nix-build-" + drvName, false, false, 0700);
+
+    /* In a sandbox, for determinism, always use the same temporary
+       directory. */
+    tmpDirInSandbox = useChroot ? canonPath("/tmp", true) + "/nix-build-" + drvName + "-0" : tmpDir;
+    chownToBuilder(tmpDir);
+
+    /* Substitute output placeholders with the actual output paths. */
+    for (auto & output : drv->outputs)
+        inputRewrites[hashPlaceholder(output.first)] = output.second.path;
+
+    /* Construct the environment passed to the builder. */
+    initEnv();
+
+    writeStructuredAttrs();
+
+    /* Handle exportReferencesGraph(), if set. */
+    doExportReferencesGraph();
+
+    if (useChroot) {
+
+        /* Allow a user-configurable set of directories from the
+           host file system. */
+        PathSet dirs = settings.sandboxPaths;
+        PathSet dirs2 = settings.extraSandboxPaths;
+        dirs.insert(dirs2.begin(), dirs2.end());
+
+        dirsInChroot.clear();
+
+        /* Entries are "path", "path?" (optional) or "target=source"
+           mappings. */
+        for (auto i : dirs) {
+            if (i.empty()) continue;
+            bool optional = false;
+            if (i[i.size() - 1] == '?') {
+                optional = true;
+                i.pop_back();
+            }
+            size_t p = i.find('=');
+            if (p == string::npos)
+                dirsInChroot[i] = {i, optional};
+            else
+                dirsInChroot[string(i, 0, p)] = {string(i, p + 1), optional};
+        }
+        dirsInChroot[tmpDirInSandbox] = tmpDir;
+
+        /* Add the closure of store paths to the chroot. */
+        PathSet closure;
+        for (auto & i : dirsInChroot)
+            try {
+                if (worker.store.isInStore(i.second.source))
+                    worker.store.computeFSClosure(worker.store.toStorePath(i.second.source), closure);
+            } catch (Error & e) {
+                throw Error(format("while processing ‘build-sandbox-paths’: %s") % e.what());
+            }
+        for (auto & i : closure)
+            dirsInChroot[i] = i;
+
+        PathSet allowedPaths = settings.allowedImpureHostPrefixes;
+
+        /* This works like the above, except on a per-derivation level */
+        Strings impurePaths = tokenizeString<Strings>(get(drv->env, "__impureHostDeps"));
+
+        for (auto & i : impurePaths) {
+            bool found = false;
+            /* Note: we're not resolving symlinks here to prevent
+               giving a non-root user info about inaccessible
+               files. */
+            Path canonI = canonPath(i);
+            /* If only we had a trie to do this more efficiently :) luckily, these are generally going to be pretty small */
+            for (auto & a : allowedPaths) {
+                Path canonA = canonPath(a);
+                if (canonI == canonA || isInDir(canonI, canonA)) {
+                    found = true;
+                    break;
+                }
+            }
+            if (!found)
+                throw Error(format("derivation ‘%1%’ requested impure path ‘%2%’, but it was not in allowed-impure-host-deps") % drvPath % i);
+
+            dirsInChroot[i] = i;
+        }
+
+#if __linux__
+        /* Create a temporary directory in which we set up the chroot
+           environment using bind-mounts.  We put it in the Nix store
+           to ensure that we can create hard-links to non-directory
+           inputs in the fake Nix store in the chroot (see below). */
+        chrootRootDir = worker.store.toRealPath(drvPath) + ".chroot";
+        deletePath(chrootRootDir);
+
+        /* Clean up the chroot directory automatically. */
+        autoDelChroot = std::make_shared<AutoDelete>(chrootRootDir);
+
+        printMsg(lvlChatty, format("setting up chroot environment in ‘%1%’") % chrootRootDir);
+
+        if (mkdir(chrootRootDir.c_str(), 0750) == -1)
+            throw SysError(format("cannot create ‘%1%’") % chrootRootDir);
+
+        if (buildUser && chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1)
+            throw SysError(format("cannot change ownership of ‘%1%’") % chrootRootDir);
+
+        /* Create a writable /tmp in the chroot.  Many builders need
+           this.  (Of course they should really respect $TMPDIR
+           instead.) */
+        Path chrootTmpDir = chrootRootDir + "/tmp";
+        createDirs(chrootTmpDir);
+        chmod_(chrootTmpDir, 01777);
+
+        /* Create a /etc/passwd with entries for the build user and the
+           nobody account.  The latter is kind of a hack to support
+           Samba-in-QEMU. */
+        createDirs(chrootRootDir + "/etc");
+
+        writeFile(chrootRootDir + "/etc/passwd",
+            (format(
+                "root:x:0:0:Nix build user:/:/noshell\n"
+                "nixbld:x:%1%:%2%:Nix build user:/:/noshell\n"
+                "nobody:x:65534:65534:Nobody:/:/noshell\n") % sandboxUid % sandboxGid).str());
+
+        /* Declare the build user's group so that programs get a consistent
+           view of the system (e.g., "id -gn"). */
+        writeFile(chrootRootDir + "/etc/group",
+            (format(
+                "root:x:0:\n"
+                "nixbld:!:%1%:\n"
+                "nogroup:x:65534:\n") % sandboxGid).str());
+
+        /* Create /etc/hosts with localhost entry. */
+        if (!fixedOutput)
+            writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n");
+
+        /* Make the closure of the inputs available in the chroot,
+           rather than the whole Nix store.  This prevents any access
+           to undeclared dependencies.  Directories are bind-mounted,
+           while other inputs are hard-linked (since only directories
+           can be bind-mounted).  !!! As an extra security
+           precaution, make the fake Nix store only writable by the
+           build user. */
+        Path chrootStoreDir = chrootRootDir + worker.store.storeDir;
+        createDirs(chrootStoreDir);
+        chmod_(chrootStoreDir, 01775);
+
+        if (buildUser && chown(chrootStoreDir.c_str(), 0, buildUser->getGID()) == -1)
+            throw SysError(format("cannot change ownership of ‘%1%’") % chrootStoreDir);
+
+        for (auto & i : inputPaths) {
+            Path r = worker.store.toRealPath(i);
+            struct stat st;
+            if (lstat(r.c_str(), &st))
+                throw SysError(format("getting attributes of path ‘%1%’") % i);
+            if (S_ISDIR(st.st_mode))
+                dirsInChroot[i] = r;
+            else {
+                Path p = chrootRootDir + i;
+                if (link(r.c_str(), p.c_str()) == -1) {
+                    /* Hard-linking fails if we exceed the maximum
+                       link count on a file (e.g. 32000 of ext3),
+                       which is quite possible after a `nix-store
+                       --optimise'. */
+                    if (errno != EMLINK)
+                        throw SysError(format("linking ‘%1%’ to ‘%2%’") % p % i);
+                    /* Fall back to copying via a NAR round-trip. */
+                    StringSink sink;
+                    dumpPath(r, sink);
+                    StringSource source(*sink.s);
+                    restorePath(p, source);
+                }
+            }
+        }
+
+        /* If we're repairing, checking or rebuilding part of a
+           multiple-outputs derivation, it's possible that we're
+           rebuilding a path that is in settings.dirsInChroot
+           (typically the dependencies of /bin/sh).  Throw them
+           out. */
+        for (auto & i : drv->outputs)
+            dirsInChroot.erase(i.second.path);
+
+#elif __APPLE__
+        /* We don't really have any parent prep work to do (yet?)
+           All work happens in the child, instead. */
+#else
+        throw Error("sandboxing builds is not supported on this platform");
+#endif
+    }
+
+    else {
+
+        if (pathExists(homeDir))
+            throw Error(format("directory ‘%1%’ exists; please remove it") % homeDir);
+
+        /* We're not doing a chroot build, but we have some valid
+           output paths.  Since we can't just overwrite or delete
+           them, we have to do hash rewriting: i.e. in the
+           environment/arguments passed to the build, we replace the
+           hashes of the valid outputs with unique dummy strings;
+           after the build, we discard the redirected outputs
+           corresponding to the valid outputs, and rewrite the
+           contents of the new outputs to replace the dummy strings
+           with the actual hashes. */
+        if (validPaths.size() > 0)
+            for (auto & i : validPaths)
+                addHashRewrite(i);
+
+        /* If we're repairing, then we don't want to delete the
+           corrupt outputs in advance.  So rewrite them as well. */
+        if (buildMode == bmRepair)
+            for (auto & i : missingPaths)
+                if (worker.store.isValidPath(i) && pathExists(i)) {
+                    addHashRewrite(i);
+                    redirectedBadOutputs.insert(i);
+                }
+    }
+
+    /* Run the pre-build hook, a simple line-oriented protocol: the
+       hook may print "extra-sandbox-paths" (or the older
+       "extra-chroot-dirs") followed by additional "path" or
+       "target=source" lines, terminated by an empty line. */
+    if (settings.preBuildHook != "") {
+        printMsg(lvlChatty, format("executing pre-build hook ‘%1%’")
+            % settings.preBuildHook);
+        auto args = useChroot ? Strings({drvPath, chrootRootDir}) :
+            Strings({ drvPath });
+        enum BuildHookState {
+            stBegin,
+            stExtraChrootDirs
+        };
+        auto state = stBegin;
+        auto lines = runProgram(settings.preBuildHook, false, args);
+        auto lastPos = std::string::size_type{0};
+        for (auto nlPos = lines.find('\n'); nlPos != string::npos;
+                nlPos = lines.find('\n', lastPos)) {
+            auto line = std::string{lines, lastPos, nlPos - lastPos};
+            lastPos = nlPos + 1;
+            if (state == stBegin) {
+                if (line == "extra-sandbox-paths" || line == "extra-chroot-dirs") {
+                    state = stExtraChrootDirs;
+                } else {
+                    throw Error(format("unknown pre-build hook command ‘%1%’")
+                        % line);
+                }
+            } else if (state == stExtraChrootDirs) {
+                if (line == "") {
+                    state = stBegin;
+                } else {
+                    auto p = line.find('=');
+                    if (p == string::npos)
+                        dirsInChroot[line] = line;
+                    else
+                        dirsInChroot[string(line, 0, p)] = string(line, p + 1);
+                }
+            }
+        }
+    }
+
+    /* Run the builder. */
+    printMsg(lvlChatty, format("executing builder ‘%1%’") % drv->builder);
+
+    /* Create the log file. */
+    Path logFile = openLogFile();
+
+    /* Create a pipe to get the output of the builder. */
+    builderOut.create();
+
+    result.startTime = time(0);
+
+    /* Fork a child to build the package. */
+    ProcessOptions options;
+
+#if __linux__
+    if (useChroot) {
+        /* Set up private namespaces for the build:
+
+           - The PID namespace causes the build to start as PID 1.
+             Processes outside of the chroot are not visible to those
+             on the inside, but processes inside the chroot are
+             visible from the outside (though with different PIDs).
+
+           - The private mount namespace ensures that all the bind
+             mounts we do will only show up in this process and its
+             children, and will disappear automatically when we're
+             done.
+
+           - The private network namespace ensures that the builder
+             cannot talk to the outside world (or vice versa).  It
+             only has a private loopback interface.  (Fixed-output
+             derivations are not run in a private network namespace
+             to allow functions like fetchurl to work.)
+
+           - The IPC namespace prevents the builder from communicating
+             with outside processes using SysV IPC mechanisms (shared
+             memory, message queues, semaphores).  It also ensures
+             that all IPC objects are destroyed when the builder
+             exits.
+
+           - The UTS namespace ensures that builders see a hostname of
+             localhost rather than the actual hostname.
+
+           We use a helper process to do the clone() to work around
+           clone() being broken in multi-threaded programs due to
+           at-fork handlers not being run.  Note that we use
+           CLONE_PARENT to ensure that the real builder is parented to
+           us.
+        */
+
+        if (!fixedOutput)
+            privateNetwork = true;
+
+        userNamespaceSync.create();
+
+        options.allowVfork = false;
+
+        Pid helper = startProcess([&]() {
+
+            /* Drop additional groups here because we can't do it
+               after we've created the new user namespace.  FIXME:
+               this means that if we're not root in the parent
+               namespace, we can't drop additional groups; they will
+               be mapped to nogroup in the child namespace.  There does
+               not seem to be a workaround for this.  (But who can tell
+               from reading user_namespaces(7)?)
+               See also https://lwn.net/Articles/621612/. */
+            if (getuid() == 0 && setgroups(0, 0) == -1)
+                throw SysError("setgroups failed");
+
+            size_t stackSize = 1 * 1024 * 1024;
+            char * stack = (char *) mmap(0, stackSize,
+                PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+            if (stack == MAP_FAILED) throw SysError("allocating stack");
+
+            int flags = CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWIPC | CLONE_NEWUTS | CLONE_PARENT | SIGCHLD;
+            if (privateNetwork)
+                flags |= CLONE_NEWNET;
+
+            pid_t child = clone(childEntry, stack + stackSize, flags, this);
+            if (child == -1 && errno == EINVAL)
+                /* Fallback for older kernels (reportedly pre-3.13)
+                   where CLONE_NEWPID and CLONE_PARENT are not allowed
+                   together. */
+                child = clone(childEntry, stack + stackSize, flags & ~CLONE_NEWPID, this);
+            if (child == -1) throw SysError("cloning builder process");
+
+            writeFull(builderOut.writeSide.get(), std::to_string(child) + "\n");
+            _exit(0);
+        }, options);
+
+        if (helper.wait() != 0)
+            throw Error("unable to start build process");
+
+        userNamespaceSync.readSide = -1;
+
+        /* The helper reports the cloned builder's PID (in our
+           namespace) over the builderOut pipe. */
+        pid_t tmp;
+        if (!string2Int<pid_t>(readLine(builderOut.readSide.get()), tmp)) abort();
+        pid = tmp;
+
+        /* Set the UID/GID mapping of the builder's user namespace
+           such that the sandbox user maps to the build user, or to
+           the calling user (if build users are disabled). */
+        uid_t hostUid = buildUser ? buildUser->getUID() : getuid();
+        uid_t hostGid = buildUser ? buildUser->getGID() : getgid();
+
+        writeFile("/proc/" + std::to_string(pid) + "/uid_map",
+            (format("%d %d 1") % sandboxUid % hostUid).str());
+
+        /* Writing "deny" to setgroups is required before gid_map can
+           be written on modern kernels. */
+        writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
+
+        writeFile("/proc/" + std::to_string(pid) + "/gid_map",
+            (format("%d %d 1") % sandboxGid % hostGid).str());
+
+        /* Signal the builder that we've updated its user
+           namespace. */
+        writeFull(userNamespaceSync.writeSide.get(), "1");
+        userNamespaceSync.writeSide = -1;
+
+    } else
+#endif
+    {
+        options.allowVfork = !buildUser && !drv->isBuiltin();
+        pid = startProcess([&]() {
+            runChild();
+        }, options);
+    }
+
+    /* parent */
+    pid.setSeparatePG(true);
+    builderOut.writeSide = -1;
+    worker.childStarted(shared_from_this(), {builderOut.readSide.get()}, true, true);
+
+    /* Check if setting up the build environment failed.  The child
+       sends "\1\n" on success, or "\1<error message>" on failure
+       (see runChild()). */
+    while (true) {
+        string msg = readLine(builderOut.readSide.get());
+        if (string(msg, 0, 1) == "\1") {
+            if (msg.size() == 1) break;
+            throw Error(string(msg, 1));
+        }
+        debug(msg);
+    }
+}
+
+
+/* Populate 'env', the environment passed to the builder: dummy PATH
+   and HOME, NIX_STORE, NIX_BUILD_CORES, the derivation's own
+   environment bindings (written to files instead for names listed in
+   'passAsFile'), TMPDIR and friends pointing at the build directory,
+   and — for fixed-output derivations only — the impure variables
+   named in 'impureEnvVars'. */
+void DerivationGoal::initEnv()
+{
+    env.clear();
+
+    /* Most shells initialise PATH to some default (/bin:/usr/bin:...) when
+       PATH is not set.  We don't want this, so we fill it in with some dummy
+       value. */
+    env["PATH"] = "/path-not-set";
+
+    /* Set HOME to a non-existing path to prevent certain programs from using
+       /etc/passwd (or NIS, or whatever) to locate the home directory (for
+       example, wget looks for ~/.wgetrc).  I.e., these tools use /etc/passwd
+       if HOME is not set, but they will just assume that the settings file
+       they are looking for does not exist if HOME is set but points to some
+       non-existing path. */
+    env["HOME"] = homeDir;
+
+    /* Tell the builder where the Nix store is.  Usually they
+       shouldn't care, but this is useful for purity checking (e.g.,
+       the compiler or linker might only want to accept paths to files
+       in the store or in the build directory). */
+    env["NIX_STORE"] = worker.store.storeDir;
+
+    /* The maximum number of cores to utilize for parallel building. */
+    env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str();
+
+    /* In non-structured mode, add all bindings specified in the
+       derivation via the environments, except those listed in the
+       passAsFile attribute.  Those are passed as file names pointing
+       to temporary files containing the contents.  Note that
+       passAsFile is ignored in structure mode because it's not
+       needed (attributes are not passed through the environment, so
+       there is no size constraint). */
+    if (!drv->env.count("__json")) {
+
+        StringSet passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile"));
+        int fileNr = 0;
+        for (auto & i : drv->env) {
+            if (passAsFile.find(i.first) == passAsFile.end()) {
+                env[i.first] = i.second;
+            } else {
+                /* Write the value to $tmpDir/.attr-N and expose its
+                   sandbox-relative path as <name>Path instead. */
+                string fn = ".attr-" + std::to_string(fileNr++);
+                Path p = tmpDir + "/" + fn;
+                writeFile(p, i.second);
+                chownToBuilder(p);
+                env[i.first + "Path"] = tmpDirInSandbox + "/" + fn;
+            }
+        }
+
+    }
+
+    /* For convenience, set an environment pointing to the top build
+       directory. */
+    env["NIX_BUILD_TOP"] = tmpDirInSandbox;
+
+    /* Also set TMPDIR and variants to point to this directory. */
+    env["TMPDIR"] = env["TEMPDIR"] = env["TMP"] = env["TEMP"] = tmpDirInSandbox;
+
+    /* Explicitly set PWD to prevent problems with chroot builds.  In
+       particular, dietlibc cannot figure out the cwd because the
+       inode of the current directory doesn't appear in .. (because
+       getdents returns the inode of the mount point). */
+    env["PWD"] = tmpDirInSandbox;
+
+    /* Compatibility hack with Nix <= 0.7: if this is a fixed-output
+       derivation, tell the builder, so that for instance `fetchurl'
+       can skip checking the output.  On older Nixes, this environment
+       variable won't be set, so `fetchurl' will do the check. */
+    if (fixedOutput) env["NIX_OUTPUT_CHECKED"] = "1";
+
+    /* *Only* if this is a fixed-output derivation, propagate the
+       values of the environment variables specified in the
+       `impureEnvVars' attribute to the builder.  This allows for
+       instance environment variables for proxy configuration such as
+       `http_proxy' to be easily passed to downloaders like
+       `fetchurl'.  Passing such environment variables from the caller
+       to the builder is generally impure, but the output of
+       fixed-output derivations is by definition pure (since we
+       already know the cryptographic hash of the output). */
+    if (fixedOutput) {
+        Strings varNames = tokenizeString<Strings>(get(drv->env, "impureEnvVars"));
+        for (auto & i : varNames) env[i] = getEnv(i);
+    }
+}
+
+
+/* If the derivation carries a '__json' attribute (structured-attrs
+   mode), write its value — with output placeholders rewritten to the
+   real output paths — to $tmpDir/.attrs.json for the builder to
+   consume.  No-op otherwise.
+   NOTE(review): unlike the passAsFile files written in initEnv(),
+   this file is not chownToBuilder()'d — verify the build user can
+   read it. */
+void DerivationGoal::writeStructuredAttrs()
+{
+    auto json = drv->env.find("__json");
+    if (json == drv->env.end()) return;
+
+    writeFile(tmpDir + "/.attrs.json", rewriteStrings(json->second, inputRewrites));
+}
+
+
+/* Make 'path' owned by the build user/group, if a dedicated build
+   user is in use; no-op otherwise.  Throws SysError on chown()
+   failure. */
+void DerivationGoal::chownToBuilder(const Path & path)
+{
+    if (!buildUser) return;
+    if (chown(path.c_str(), buildUser->getUID(), buildUser->getGID()) == -1)
+        throw SysError(format("cannot change ownership of ‘%1%’") % path);
+}
+
+
+/* Implement the 'exportReferencesGraph' attribute: for each
+   (name, path) pair, write the closure of 'path' — extended with the
+   outputs of any derivations in that closure — to a file 'name' in
+   the build directory, either in --register-validity format or, in
+   structured-attrs (__json) mode, as a JSON path-info dump. */
+void DerivationGoal::doExportReferencesGraph()
+{
+    /* The `exportReferencesGraph' feature allows the references graph
+       to be passed to a builder.  This attribute should be a list of
+       pairs [name1 path1 name2 path2 ...].  The references graph of
+       each `pathN' will be stored in a text file `nameN' in the
+       temporary build directory.  The text files have the format used
+       by `nix-store --register-validity'.  However, the deriver
+       fields are left empty. */
+    string s = get(drv->env, "exportReferencesGraph");
+    Strings ss = tokenizeString<Strings>(s);
+    if (ss.size() % 2 != 0)
+        throw BuildError(format("odd number of tokens in ‘exportReferencesGraph’: ‘%1%’") % s);
+    for (Strings::iterator i = ss.begin(); i != ss.end(); ) {
+        string fileName = *i++;
+        checkStoreName(fileName); /* !!! abuse of this function */
+
+        /* Check that the store path is valid. */
+        Path storePath = *i++;
+        if (!worker.store.isInStore(storePath))
+            throw BuildError(format("‘exportReferencesGraph’ contains a non-store path ‘%1%’")
+                % storePath);
+        storePath = worker.store.toStorePath(storePath);
+        if (!worker.store.isValidPath(storePath))
+            throw BuildError(format("‘exportReferencesGraph’ contains an invalid path ‘%1%’")
+                % storePath);
+
+        /* If there are derivations in the graph, then include their
+           outputs as well.  This is useful if you want to do things
+           like passing all build-time dependencies of some path to a
+           derivation that builds a NixOS DVD image. */
+        PathSet paths, paths2;
+        worker.store.computeFSClosure(storePath, paths);
+        paths2 = paths;
+
+        /* Iterate over a copy since computeFSClosure() below mutates
+           'paths'. */
+        for (auto & j : paths2) {
+            if (isDerivation(j)) {
+                Derivation drv = worker.store.derivationFromPath(j);
+                for (auto & k : drv.outputs)
+                    worker.store.computeFSClosure(k.second.path, paths);
+            }
+        }
+
+        if (!drv->env.count("__json")) {
+
+            /* Write closure info to <fileName>. */
+            writeFile(tmpDir + "/" + fileName,
+                worker.store.makeValidityRegistration(paths, false, false));
+
+        } else {
+
+            /* Write a more comprehensive JSON serialisation to
+               <fileName>. */
+            std::ostringstream str;
+            {
+                JSONPlaceholder jsonRoot(str, true);
+                worker.store.pathInfoToJSON(jsonRoot, paths, false, true);
+            }
+            writeFile(tmpDir + "/" + fileName, str.str());
+
+        }
+    }
+}
+
+
+/* Body of the child process that becomes the builder.  Runs after
+   fork()/clone() — must not make SQLite calls or touch other parent
+   state.  Finishes sandbox setup (loopback, bind mounts, procfs,
+   pivot_root and uid/gid switch on Linux; a sandbox-exec profile on
+   Darwin), fills in environment and arguments, drops privileges, and
+   execs the builder (or runs a builtin like builtin:fetchurl).
+   Setup errors are reported to the parent via stderr prefixed with
+   '\1'; success is signalled with "\1\n". */
+void DerivationGoal::runChild()
+{
+    /* Warning: in the child we should absolutely not make any SQLite
+       calls! */
+
+    try { /* child */
+
+        commonChildInit(builderOut);
+
+        bool setUser = true;
+
+        /* Make the contents of netrc available to builtin:fetchurl
+           (which may run under a different uid and/or in a sandbox). */
+        std::string netrcData;
+        try {
+            if (drv->isBuiltin() && drv->builder == "builtin:fetchurl")
+                netrcData = readFile(settings.netrcFile);
+        } catch (SysError &) { }
+
+#if __linux__
+        if (useChroot) {
+
+            /* Wait for the parent to initialise our uid/gid mapping
+               (see startBuilder()). */
+            userNamespaceSync.writeSide = -1;
+
+            if (drainFD(userNamespaceSync.readSide.get()) != "1")
+                throw Error("user namespace initialisation failed");
+
+            userNamespaceSync.readSide = -1;
+
+            if (privateNetwork) {
+
+                /* Initialise the loopback interface. */
+                AutoCloseFD fd(socket(PF_INET, SOCK_DGRAM, IPPROTO_IP));
+                if (!fd) throw SysError("cannot open IP socket");
+
+                /* NOTE(review): 'ifr' is not zero-initialised; only
+                   ifr_name and ifr_flags are set before the ioctl.
+                   Consider memset()'ing it to avoid passing stack
+                   garbage in the remaining fields. */
+                struct ifreq ifr;
+                strcpy(ifr.ifr_name, "lo");
+                ifr.ifr_flags = IFF_UP | IFF_LOOPBACK | IFF_RUNNING;
+                if (ioctl(fd.get(), SIOCSIFFLAGS, &ifr) == -1)
+                    throw SysError("cannot set loopback interface flags");
+            }
+
+            /* Set the hostname etc. to fixed values. */
+            char hostname[] = "localhost";
+            if (sethostname(hostname, sizeof(hostname)) == -1)
+                throw SysError("cannot set host name");
+            char domainname[] = "(none)"; // kernel default
+            if (setdomainname(domainname, sizeof(domainname)) == -1)
+                throw SysError("cannot set domain name");
+
+            /* Make all filesystems private.  This is necessary
+               because subtrees may have been mounted as "shared"
+               (MS_SHARED).  (Systemd does this, for instance.)  Even
+               though we have a private mount namespace, mounting
+               filesystems on top of a shared subtree still propagates
+               outside of the namespace.  Making a subtree private is
+               local to the namespace, though, so setting MS_PRIVATE
+               does not affect the outside world. */
+            if (mount(0, "/", 0, MS_REC|MS_PRIVATE, 0) == -1) {
+                throw SysError("unable to make ‘/’ private mount");
+            }
+
+            /* Bind-mount chroot directory to itself, to treat it as a
+               different filesystem from /, as needed for pivot_root. */
+            if (mount(chrootRootDir.c_str(), chrootRootDir.c_str(), 0, MS_BIND, 0) == -1)
+                throw SysError(format("unable to bind mount ‘%1%’") % chrootRootDir);
+
+            /* Set up a nearly empty /dev, unless the user asked to
+               bind-mount the host /dev. */
+            Strings ss;
+            if (dirsInChroot.find("/dev") == dirsInChroot.end()) {
+                createDirs(chrootRootDir + "/dev/shm");
+                createDirs(chrootRootDir + "/dev/pts");
+                ss.push_back("/dev/full");
+#ifdef __linux__
+                if (pathExists("/dev/kvm"))
+                    ss.push_back("/dev/kvm");
+#endif
+                ss.push_back("/dev/null");
+                ss.push_back("/dev/random");
+                ss.push_back("/dev/tty");
+                ss.push_back("/dev/urandom");
+                ss.push_back("/dev/zero");
+                createSymlink("/proc/self/fd", chrootRootDir + "/dev/fd");
+                createSymlink("/proc/self/fd/0", chrootRootDir + "/dev/stdin");
+                createSymlink("/proc/self/fd/1", chrootRootDir + "/dev/stdout");
+                createSymlink("/proc/self/fd/2", chrootRootDir + "/dev/stderr");
+            }
+
+            /* Fixed-output derivations typically need to access the
+               network, so give them access to /etc/resolv.conf and so
+               on. */
+            if (fixedOutput) {
+                ss.push_back("/etc/resolv.conf");
+                ss.push_back("/etc/nsswitch.conf");
+                ss.push_back("/etc/services");
+                ss.push_back("/etc/hosts");
+                if (pathExists("/var/run/nscd/socket"))
+                    ss.push_back("/var/run/nscd/socket");
+            }
+
+            for (auto & i : ss) dirsInChroot.emplace(i, i);
+
+            /* Bind-mount all the directories from the "host"
+               filesystem that we want in the chroot
+               environment. */
+            for (auto & i : dirsInChroot) {
+                struct stat st;
+                Path source = i.second.source;
+                Path target = chrootRootDir + i.first;
+                if (source == "/proc") continue; // backwards compatibility
+                debug(format("bind mounting ‘%1%’ to ‘%2%’") % source % target);
+                if (stat(source.c_str(), &st) == -1) {
+                    if (i.second.optional && errno == ENOENT)
+                        continue;
+                    else
+                        throw SysError(format("getting attributes of path ‘%1%’") % source);
+                }
+                if (S_ISDIR(st.st_mode))
+                    createDirs(target);
+                else {
+                    /* A regular file is needed as a mount point for a
+                       file bind mount. */
+                    createDirs(dirOf(target));
+                    writeFile(target, "");
+                }
+                if (mount(source.c_str(), target.c_str(), "", MS_BIND | MS_REC, 0) == -1)
+                    throw SysError(format("bind mount from ‘%1%’ to ‘%2%’ failed") % source % target);
+            }
+
+            /* Bind a new instance of procfs on /proc. */
+            createDirs(chrootRootDir + "/proc");
+            if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1)
+                throw SysError("mounting /proc");
+
+            /* Mount a new tmpfs on /dev/shm to ensure that whatever
+               the builder puts in /dev/shm is cleaned up automatically. */
+            if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0,
+                    fmt("size=%s", settings.sandboxShmSize).c_str()) == -1)
+                throw SysError("mounting /dev/shm");
+
+            /* Mount a new devpts on /dev/pts.  Note that this
+               requires the kernel to be compiled with
+               CONFIG_DEVPTS_MULTIPLE_INSTANCES=y (which is the case
+               if /dev/ptx/ptmx exists). */
+            if (pathExists("/dev/pts/ptmx") &&
+                !pathExists(chrootRootDir + "/dev/ptmx")
+                && !dirsInChroot.count("/dev/pts"))
+            {
+                if (mount("none", (chrootRootDir + "/dev/pts").c_str(), "devpts", 0, "newinstance,mode=0620") == -1)
+                    throw SysError("mounting /dev/pts");
+                createSymlink("/dev/pts/ptmx", chrootRootDir + "/dev/ptmx");
+
+                /* Make sure /dev/pts/ptmx is world-writable.  With some
+                   Linux versions, it is created with permissions 0. */
+                chmod_(chrootRootDir + "/dev/pts/ptmx", 0666);
+            }
+
+            /* Do the chroot(). */
+            if (chdir(chrootRootDir.c_str()) == -1)
+                throw SysError(format("cannot change directory to ‘%1%’") % chrootRootDir);
+
+            if (mkdir("real-root", 0) == -1)
+                throw SysError("cannot create real-root directory");
+
+            if (pivot_root(".", "real-root") == -1)
+                throw SysError(format("cannot pivot old root directory onto ‘%1%’") % (chrootRootDir + "/real-root"));
+
+            if (chroot(".") == -1)
+                throw SysError(format("cannot change root directory to ‘%1%’") % chrootRootDir);
+
+            /* Detach and remove the old root so the builder cannot
+               see anything outside the chroot. */
+            if (umount2("real-root", MNT_DETACH) == -1)
+                throw SysError("cannot unmount real root filesystem");
+
+            if (rmdir("real-root") == -1)
+                throw SysError("cannot remove real-root directory");
+
+            /* Switch to the sandbox uid/gid in the user namespace,
+               which corresponds to the build user or calling user in
+               the parent namespace. */
+            if (setgid(sandboxGid) == -1)
+                throw SysError("setgid failed");
+            if (setuid(sandboxUid) == -1)
+                throw SysError("setuid failed");
+
+            setUser = false;
+        }
+#endif
+
+        /* NOTE(review): the error message reports tmpDir but the
+           chdir target is tmpDirInSandbox. */
+        if (chdir(tmpDirInSandbox.c_str()) == -1)
+            throw SysError(format("changing into ‘%1%’") % tmpDir);
+
+        /* Close all other file descriptors. */
+        closeMostFDs(set<int>());
+
+#if __linux__
+        /* Change the personality to 32-bit if we're doing an
+           i686-linux build on an x86_64-linux machine. */
+        struct utsname utsbuf;
+        uname(&utsbuf);
+        if (drv->platform == "i686-linux" &&
+            (settings.thisSystem == "x86_64-linux" ||
+             (!strcmp(utsbuf.sysname, "Linux") && !strcmp(utsbuf.machine, "x86_64")))) {
+            if (personality(PER_LINUX32) == -1)
+                throw SysError("cannot set i686-linux personality");
+        }
+
+        /* Impersonate a Linux 2.6 machine to get some determinism in
+           builds that depend on the kernel version. */
+        if ((drv->platform == "i686-linux" || drv->platform == "x86_64-linux") && settings.impersonateLinux26) {
+            int cur = personality(0xffffffff);
+            if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
+        }
+
+        /* Disable address space randomization for improved
+           determinism. */
+        int cur = personality(0xffffffff);
+        if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
+#endif
+
+        /* Disable core dumps by default. */
+        struct rlimit limit = { 0, RLIM_INFINITY };
+        setrlimit(RLIMIT_CORE, &limit);
+
+        // FIXME: set other limits to deterministic values?
+
+        /* Fill in the environment, applying the placeholder→output
+           rewrites. */
+        Strings envStrs;
+        for (auto & i : env)
+            envStrs.push_back(rewriteStrings(i.first + "=" + i.second, inputRewrites));
+
+        /* If we are running in `build-users' mode, then switch to the
+           user we allocated above.  Make sure that we drop all root
+           privileges.  Note that above we have closed all file
+           descriptors except std*, so that's safe.  Also note that
+           setuid() when run as root sets the real, effective and
+           saved UIDs. */
+        if (setUser && buildUser) {
+            /* Preserve supplementary groups of the build user, to allow
+               admins to specify groups such as "kvm".  */
+            if (!buildUser->getSupplementaryGIDs().empty() &&
+                setgroups(buildUser->getSupplementaryGIDs().size(),
+                          buildUser->getSupplementaryGIDs().data()) == -1)
+                throw SysError("cannot set supplementary groups of build user");
+
+            if (setgid(buildUser->getGID()) == -1 ||
+                getgid() != buildUser->getGID() ||
+                getegid() != buildUser->getGID())
+                throw SysError("setgid failed");
+
+            if (setuid(buildUser->getUID()) == -1 ||
+                getuid() != buildUser->getUID() ||
+                geteuid() != buildUser->getUID())
+                throw SysError("setuid failed");
+        }
+
+        /* Fill in the arguments. */
+        Strings args;
+
+        const char *builder = "invalid";
+
+        string sandboxProfile;
+        if (drv->isBuiltin()) {
+            ;
+#if __APPLE__
+        } else if (useChroot) {
+            /* Lots and lots and lots of file functions freak out if they can't stat their full ancestry */
+            PathSet ancestry;
+
+            /* We build the ancestry before adding all inputPaths to the store because we know they'll
+               all have the same parents (the store), and there might be lots of inputs.  This isn't
+               particularly efficient... I doubt it'll be a bottleneck in practice */
+            for (auto & i : dirsInChroot) {
+                Path cur = i.first;
+                while (cur.compare("/") != 0) {
+                    cur = dirOf(cur);
+                    ancestry.insert(cur);
+                }
+            }
+
+            /* And we want the store in there regardless of how empty dirsInChroot.  We include the innermost
+               path component this time, since it's typically /nix/store and we care about that. */
+            Path cur = worker.store.storeDir;
+            while (cur.compare("/") != 0) {
+                ancestry.insert(cur);
+                cur = dirOf(cur);
+            }
+
+            /* Add all our input paths to the chroot */
+            for (auto & i : inputPaths)
+                dirsInChroot[i] = i;
+
+            /* This has to appear before import statements */
+            sandboxProfile += "(version 1)\n";
+
+            /* Violations will go to the syslog if you set this. Unfortunately the destination does not appear to be configurable */
+            if (settings.darwinLogSandboxViolations) {
+                sandboxProfile += "(deny default)\n";
+            } else {
+                sandboxProfile += "(deny default (with no-log))\n";
+            }
+
+            /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms
+               to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */
+            Path globalTmpDir = canonPath(getEnv("TMPDIR", "/tmp"), true);
+
+            /* They don't like trailing slashes on subpath directives */
+            if (globalTmpDir.back() == '/') globalTmpDir.pop_back();
+
+            /* Our rwx outputs */
+            sandboxProfile += "(allow file-read* file-write* process-exec\n";
+            for (auto & i : missingPaths) {
+                sandboxProfile += (format("\t(subpath \"%1%\")\n") % i.c_str()).str();
+            }
+            sandboxProfile += ")\n";
+
+            /* Our inputs (transitive dependencies and any impurities computed above)
+
+               without file-write* allowed, access() incorrectly returns EPERM
+             */
+            sandboxProfile += "(allow file-read* file-write* process-exec\n";
+            for (auto & i : dirsInChroot) {
+                if (i.first != i.second.source)
+                    throw Error(format(
+                        "can't map '%1%' to '%2%': mismatched impure paths not supported on Darwin")
+                        % i.first % i.second.source);
+
+                string path = i.first;
+                struct stat st;
+                if (lstat(path.c_str(), &st)) {
+                    if (i.second.optional && errno == ENOENT)
+                        continue;
+                    throw SysError(format("getting attributes of path ‘%1%’") % path);
+                }
+                if (S_ISDIR(st.st_mode))
+                    sandboxProfile += (format("\t(subpath \"%1%\")\n") % path).str();
+                else
+                    sandboxProfile += (format("\t(literal \"%1%\")\n") % path).str();
+            }
+            sandboxProfile += ")\n";
+
+            /* Allow file-read* on full directory hierarchy to self. Allows realpath() */
+            sandboxProfile += "(allow file-read*\n";
+            for (auto & i : ancestry) {
+                sandboxProfile += (format("\t(literal \"%1%\")\n") % i.c_str()).str();
+            }
+            sandboxProfile += ")\n";
+
+            sandboxProfile += additionalSandboxProfile;
+
+            debug("Generated sandbox profile:");
+            debug(sandboxProfile);
+
+            /* Write the profile next to the .drv and run the real
+               builder under sandbox-exec. */
+            Path sandboxFile = drvPath + ".sb";
+            deletePath(sandboxFile);
+            autoDelSandbox.reset(sandboxFile, false);
+
+            writeFile(sandboxFile, sandboxProfile);
+
+            builder = "/usr/bin/sandbox-exec";
+            args.push_back("sandbox-exec");
+            args.push_back("-f");
+            args.push_back(sandboxFile);
+            args.push_back("-D");
+            args.push_back("_GLOBAL_TMP_DIR=" + globalTmpDir);
+            args.push_back(drv->builder);
+#endif
+        } else {
+            builder = drv->builder.c_str();
+            string builderBasename = baseNameOf(drv->builder);
+            args.push_back(builderBasename);
+        }
+
+        for (auto & i : drv->args)
+            args.push_back(rewriteStrings(i, inputRewrites));
+
+        /* Indicate that we managed to set up the build environment. */
+        writeFull(STDERR_FILENO, string("\1\n"));
+
+        /* Execute the program.  This should not return. */
+        if (drv->isBuiltin()) {
+            try {
+                if (drv->builder == "builtin:fetchurl")
+                    builtinFetchurl(*drv, netrcData);
+                else
+                    throw Error(format("unsupported builtin function ‘%1%’") % string(drv->builder, 8));
+                _exit(0);
+            } catch (std::exception & e) {
+                writeFull(STDERR_FILENO, "error: " + string(e.what()) + "\n");
+                _exit(1);
+            }
+        }
+
+        execve(builder, stringsToCharPtrs(args).data(), stringsToCharPtrs(envStrs).data());
+
+        throw SysError(format("executing ‘%1%’") % drv->builder);
+
+    } catch (std::exception & e) {
+        /* Report the setup failure to the parent (see the "\1" check
+           in startBuilder()) and die. */
+        writeFull(STDERR_FILENO, "\1while setting up the build environment: " + string(e.what()) + "\n");
+        _exit(1);
+    }
+}
+
+
+/* Parse a list of reference specifiers. Each element must either be
+ a store path, or the symbolic name of the output of the derivation
+ (such as `out'). */
+PathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv, string attr)
+{
+ PathSet result;
+ Paths paths = tokenizeString<Paths>(attr);
+ for (auto & i : paths) {
+ if (store.isStorePath(i))
+ result.insert(i);
+ else if (drv.outputs.find(i) != drv.outputs.end())
+ result.insert(drv.outputs.find(i)->second.path);
+ else throw BuildError(
+ format("derivation contains an illegal reference specifier ‘%1%’") % i);
+ }
+ return result;
+}
+
+
/* Check the outputs produced by the build (or hook), canonicalise
   their metadata, scan them for references, enforce the
   allowed/disallowed reference attributes, handle multi-round
   determinism checking, and finally register them as valid paths. */
void DerivationGoal::registerOutputs()
{
    /* When using a build hook, the build hook can register the output
       as valid (by doing `nix-store --import'). If so we don't have
       to do anything here. */
    if (hook) {
        bool allValid = true;
        for (auto & i : drv->outputs)
            if (!worker.store.isValidPath(i.second.path)) allValid = false;
        if (allValid) return;
    }

    ValidPathInfos infos;

    /* Set of inodes seen during calls to canonicalisePathMetaData()
       for this build's outputs. This needs to be shared between
       outputs to allow hard links between outputs. */
    InodesSeen inodesSeen;

    Path checkSuffix = ".check";
    bool runDiffHook = settings.runDiffHook;
    bool keepPreviousRound = settings.keepFailed || runDiffHook;

    /* Check whether the output paths were created, and grep each
       output path to determine what other paths it references. Also make all
       output paths read-only. */
    for (auto & i : drv->outputs) {
        Path path = i.second.path;
        /* Only process outputs that were actually missing before the
           build started. */
        if (missingPaths.find(path) == missingPaths.end()) continue;

        ValidPathInfo info;

        Path actualPath = path;
        if (useChroot) {
            actualPath = chrootRootDir + path;
            if (pathExists(actualPath)) {
                /* Move output paths from the chroot to the Nix store. */
                if (buildMode == bmRepair)
                    replaceValidPath(path, actualPath);
                else
                    if (buildMode != bmCheck && rename(actualPath.c_str(), worker.store.toRealPath(path).c_str()) == -1)
                        throw SysError(format("moving build output ‘%1%’ from the sandbox to the Nix store") % path);
            }
            /* In check mode keep inspecting the copy inside the
               chroot; otherwise inspect the real store location. */
            if (buildMode != bmCheck) actualPath = worker.store.toRealPath(path);
        } else {
            /* Non-chroot build: the output may have been redirected to
               a temporary location (see addHashRewrite()). */
            Path redirected = redirectedOutputs[path];
            if (buildMode == bmRepair
                && redirectedBadOutputs.find(path) != redirectedBadOutputs.end()
                && pathExists(redirected))
                replaceValidPath(path, redirected);
            if (buildMode == bmCheck && redirected != "")
                actualPath = redirected;
        }

        struct stat st;
        if (lstat(actualPath.c_str(), &st) == -1) {
            if (errno == ENOENT)
                throw BuildError(
                    format("builder for ‘%1%’ failed to produce output path ‘%2%’")
                    % drvPath % path);
            throw SysError(format("getting attributes of path ‘%1%’") % actualPath);
        }

#ifndef __CYGWIN__
        /* Check that the output is not group or world writable, as
           that means that someone else can have interfered with the
           build. Also, the output should be owned by the build
           user. */
        if ((!S_ISLNK(st.st_mode) && (st.st_mode & (S_IWGRP | S_IWOTH))) ||
            (buildUser && st.st_uid != buildUser->getUID()))
            throw BuildError(format("suspicious ownership or permission on ‘%1%’; rejecting this build output") % path);
#endif

        /* Apply hash rewriting if necessary. */
        bool rewritten = false;
        if (!outputRewrites.empty()) {
            printError(format("warning: rewriting hashes in ‘%1%’; cross fingers") % path);

            /* Canonicalise first. This ensures that the path we're
               rewriting doesn't contain a hard link to /etc/shadow or
               something like that. */
            canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, inodesSeen);

            /* FIXME: this is in-memory. */
            StringSink sink;
            dumpPath(actualPath, sink);
            deletePath(actualPath);
            sink.s = make_ref<std::string>(rewriteStrings(*sink.s, outputRewrites));
            StringSource source(*sink.s);
            restorePath(actualPath, source);

            rewritten = true;
        }

        /* Check that fixed-output derivations produced the right
           outputs (i.e., the content hash should match the specified
           hash). */
        if (i.second.hash != "") {

            bool recursive; Hash h;
            i.second.parseHashInfo(recursive, h);

            if (!recursive) {
                /* The output path should be a regular file without
                   execute permission. */
                if (!S_ISREG(st.st_mode) || (st.st_mode & S_IXUSR) != 0)
                    throw BuildError(
                        format("output path ‘%1%’ should be a non-executable regular file") % path);
            }

            /* Check the hash. In hash mode, move the path produced by
               the derivation to its content-addressed location. */
            Hash h2 = recursive ? hashPath(h.type, actualPath).first : hashFile(h.type, actualPath);
            if (buildMode == bmHash) {
                Path dest = worker.store.makeFixedOutputPath(recursive, h2, drv->env["name"]);
                printError(format("build produced path ‘%1%’ with %2% hash ‘%3%’")
                    % dest % printHashType(h.type) % printHash16or32(h2));
                /* NOTE(review): this early return skips registration of
                   any remaining outputs — presumably fine because
                   fixed-output derivations have a single output, but
                   worth confirming. */
                if (worker.store.isValidPath(dest))
                    return;
                Path actualDest = worker.store.toRealPath(dest);
                if (actualPath != actualDest) {
                    PathLocks outputLocks({actualDest});
                    deletePath(actualDest);
                    if (rename(actualPath.c_str(), actualDest.c_str()) == -1)
                        throw SysError(format("moving ‘%1%’ to ‘%2%’") % actualPath % dest);
                }
                path = dest;
                actualPath = actualDest;
            } else {
                if (h != h2)
                    throw BuildError(
                        format("output path ‘%1%’ has %2% hash ‘%3%’ when ‘%4%’ was expected")
                        % path % i.second.hashAlgo % printHash16or32(h2) % printHash16or32(h));
            }

            info.ca = makeFixedOutputCA(recursive, h2);
        }

        /* Get rid of all weird permissions. This also checks that
           all files are owned by the build user, if applicable.
           (Skip the ownership check if the path was rewritten above,
           since restorePath() recreated it as us.) */
        canonicalisePathMetaData(actualPath,
            buildUser && !rewritten ? buildUser->getUID() : -1, inodesSeen);

        /* For this output path, find the references to other paths
           contained in it. Compute the SHA-256 NAR hash at the same
           time. The hash is stored in the database so that we can
           verify later on whether nobody has messed with the store. */
        Activity act(*logger, lvlTalkative, format("scanning for references inside ‘%1%’") % path);
        HashResult hash;
        PathSet references = scanForReferences(actualPath, allPaths, hash);

        if (buildMode == bmCheck) {
            if (!worker.store.isValidPath(path)) continue;
            auto info = *worker.store.queryPathInfo(path);
            /* Compare the freshly built NAR hash against the one
               recorded in the database to detect non-determinism. */
            if (hash.first != info.narHash) {
                if (settings.keepFailed) {
                    Path dst = worker.store.toRealPath(path + checkSuffix);
                    deletePath(dst);
                    if (rename(actualPath.c_str(), dst.c_str()))
                        throw SysError(format("renaming ‘%1%’ to ‘%2%’") % actualPath % dst);
                    throw Error(format("derivation ‘%1%’ may not be deterministic: output ‘%2%’ differs from ‘%3%’")
                        % drvPath % path % dst);
                } else
                    throw Error(format("derivation ‘%1%’ may not be deterministic: output ‘%2%’ differs")
                        % drvPath % path);
            }

            /* Since we verified the build, it's now ultimately
               trusted. */
            if (!info.ultimate) {
                info.ultimate = true;
                worker.store.signPathInfo(info);
                worker.store.registerValidPaths({info});
            }

            continue;
        }

        /* For debugging, print out the referenced and unreferenced
           paths. */
        for (auto & i : inputPaths) {
            PathSet::iterator j = references.find(i);
            if (j == references.end())
                debug(format("unreferenced input: ‘%1%’") % i);
            else
                debug(format("referenced input: ‘%1%’") % i);
        }

        /* Enforce `allowedReferences' and friends. 'allowed' selects
           whitelist vs blacklist semantics; 'recursive' extends the
           check to the full closure of the references. */
        auto checkRefs = [&](const string & attrName, bool allowed, bool recursive) {
            if (drv->env.find(attrName) == drv->env.end()) return;

            PathSet spec = parseReferenceSpecifiers(worker.store, *drv, get(drv->env, attrName));

            PathSet used;
            if (recursive) {
                /* Our requisites are the union of the closures of our references. */
                for (auto & i : references)
                    /* Don't call computeFSClosure on ourselves. */
                    if (path != i)
                        worker.store.computeFSClosure(i, used);
            } else
                used = references;

            PathSet badPaths;

            for (auto & i : used)
                if (allowed) {
                    if (spec.find(i) == spec.end())
                        badPaths.insert(i);
                } else {
                    if (spec.find(i) != spec.end())
                        badPaths.insert(i);
                }

            if (!badPaths.empty()) {
                string badPathsStr;
                for (auto & i : badPaths) {
                    badPathsStr += "\n\t";
                    badPathsStr += i;
                }
                throw BuildError(format("output ‘%1%’ is not allowed to refer to the following paths:%2%") % actualPath % badPathsStr);
            }
        };

        checkRefs("allowedReferences", true, false);
        checkRefs("allowedRequisites", true, true);
        checkRefs("disallowedReferences", false, false);
        checkRefs("disallowedRequisites", false, true);

        /* Only deduplicate/optimise on the final round. */
        if (curRound == nrRounds) {
            worker.store.optimisePath(actualPath); // FIXME: combine with scanForReferences()
            worker.markContentsGood(path);
        }

        info.path = path;
        info.narHash = hash.first;
        info.narSize = hash.second;
        info.references = references;
        info.deriver = drvPath;
        info.ultimate = true;
        worker.store.signPathInfo(info);

        infos.push_back(info);
    }

    if (buildMode == bmCheck) return;

    /* Compare the result with the previous round, and report which
       path is different, if any.*/
    if (curRound > 1 && prevInfos != infos) {
        assert(prevInfos.size() == infos.size());
        for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
            if (!(*i == *j)) {
                result.isNonDeterministic = true;
                Path prev = i->path + checkSuffix;
                bool prevExists = keepPreviousRound && pathExists(prev);
                auto msg = prevExists
                    ? fmt("output ‘%1%’ of ‘%2%’ differs from ‘%3%’ from previous round", i->path, drvPath, prev)
                    : fmt("output ‘%1%’ of ‘%2%’ differs from previous round", i->path, drvPath);

                auto diffHook = settings.diffHook;
                if (prevExists && diffHook != "" && runDiffHook) {
                    try {
                        auto diff = runProgram(diffHook, true, {prev, i->path});
                        if (diff != "")
                            printError(chomp(diff));
                    } catch (Error & error) {
                        /* A failing diff hook is informational only;
                           don't abort the build over it. */
                        printError("diff hook execution failed: %s", error.what());
                    }
                }

                if (settings.enforceDeterminism)
                    throw NotDeterministic(msg);

                printError(msg);
                curRound = nrRounds; // we know enough, bail out early
            }
    }

    /* If this is the first round of several, then move the output out
       of the way.
       NOTE(review): 'prev' and 'dst' are computed identically here;
       the deletePath(prev) simply clears any stale .check copy before
       the rename — redundant but harmless. */
    if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && keepPreviousRound) {
        for (auto & i : drv->outputs) {
            Path prev = i.second.path + checkSuffix;
            deletePath(prev);
            Path dst = i.second.path + checkSuffix;
            if (rename(i.second.path.c_str(), dst.c_str()))
                throw SysError(format("renaming ‘%1%’ to ‘%2%’") % i.second.path % dst);
        }
    }

    /* More rounds to go: stash the infos for comparison and rebuild. */
    if (curRound < nrRounds) {
        prevInfos = infos;
        return;
    }

    /* Remove the .check directories if we're done. FIXME: keep them
       if the result was not determistic? */
    if (curRound == nrRounds) {
        for (auto & i : drv->outputs) {
            Path prev = i.second.path + checkSuffix;
            deletePath(prev);
        }
    }

    /* Register each output path as valid, and register the sets of
       paths referenced by each of them. If there are cycles in the
       outputs, this will fail. */
    worker.store.registerValidPaths(infos);
}
+
+
/* Create the build-log file for this derivation and set up the sink
   chain (optionally compressing with bzip2). Returns the log file
   name, or "" if log keeping is disabled. */
Path DerivationGoal::openLogFile()
{
    logSize = 0;

    if (!settings.keepLog) return "";

    string baseName = baseNameOf(drvPath);

    /* Create a log file. Logs are sharded into subdirectories named
       after the first two characters of the derivation base name to
       keep directory sizes manageable. */
    Path dir = fmt("%s/%s/%s/", worker.store.logDir, worker.store.drvsLogDir, string(baseName, 0, 2));
    createDirs(dir);

    Path logFileName = fmt("%s/%s%s", dir, string(baseName, 2),
        settings.compressLog ? ".bz2" : "");

    fdLogFile = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0666);
    /* AutoCloseFD's boolean conversion reports whether the fd is valid. */
    if (!fdLogFile) throw SysError(format("creating log file ‘%1%’") % logFileName);

    logFileSink = std::make_shared<FdSink>(fdLogFile.get());

    if (settings.compressLog)
        logSink = std::shared_ptr<CompressionSink>(makeCompressionSink("bzip2", *logFileSink));
    else
        logSink = logFileSink;

    return logFileName;
}
+
+
+void DerivationGoal::closeLogFile()
+{
+ auto logSink2 = std::dynamic_pointer_cast<CompressionSink>(logSink);
+ if (logSink2) logSink2->finish();
+ if (logFileSink) logFileSink->flush();
+ logSink = logFileSink = 0;
+ fdLogFile = -1;
+}
+
+
/* Delete the temporary build directory, unless the user asked to keep
   failed builds; 'force' overrides that and always deletes. */
void DerivationGoal::deleteTmpDir(bool force)
{
    if (tmpDir != "") {
        /* Don't keep temporary directories for builtins because they
           might have privileged stuff (like a copy of netrc). */
        if (settings.keepFailed && !force && !drv->isBuiltin()) {
            /* NOTE(review): the format string references only %2%;
               drvPath (%1%) is fed but never printed. */
            printError(
                format("note: keeping build directory ‘%2%’")
                % drvPath % tmpDir);
            /* Make the kept directory world-readable so the user can
               inspect it. */
            chmod(tmpDir.c_str(), 0755);
        }
        else
            deletePath(tmpDir);
        tmpDir = "";
    }
}
+
+
/* Process output arriving from the builder (or build hook): enforce
   the log-size limit, maintain the current log line for progress
   display, and append the raw data to the log sink. */
void DerivationGoal::handleChildOutput(int fd, const string & data)
{
    if ((hook && fd == hook->builderOut.readSide.get()) ||
        (!hook && fd == builderOut.readSide.get()))
    {
        logSize += data.size();
        /* Kill builds that exceed the configured maximum log size. */
        if (settings.maxLogSize && logSize > settings.maxLogSize) {
            printError(
                format("%1% killed after writing more than %2% bytes of log output")
                % getName() % settings.maxLogSize);
            killChild();
            done(BuildResult::LogLimitExceeded);
            return;
        }

        /* Reassemble lines: carriage return rewinds to the start of
           the current line (progress bars), newline flushes it. */
        for (auto c : data)
            if (c == '\r')
                currentLogLinePos = 0;
            else if (c == '\n')
                flushLine();
            else {
                if (currentLogLinePos >= currentLogLine.size())
                    currentLogLine.resize(currentLogLinePos + 1);
                currentLogLine[currentLogLinePos++] = c;
            }

        /* The raw (unprocessed) data goes to the log file. */
        if (logSink) (*logSink)(data);
    }

    if (hook && fd == hook->fromHook.readSide.get())
        printError(data); // FIXME?
}
+
+
/* Called when the builder's log pipe reaches EOF: flush any partial
   log line and schedule this goal to observe the child's exit. */
void DerivationGoal::handleEOF(int fd)
{
    if (!currentLogLine.empty()) flushLine();
    worker.wakeUp(shared_from_this());
}
+
+
/* Emit the accumulated log line: print it directly in verbose-build
   mode, otherwise keep it in the bounded tail buffer shown on
   failure. */
void DerivationGoal::flushLine()
{
    if (settings.verboseBuild &&
        (settings.printRepeatedBuilds || curRound == 1))
        printError(filterANSIEscapes(currentLogLine, true));
    else {
        logTail.push_back(currentLogLine);
        /* Keep at most settings.logLines lines of tail. */
        if (logTail.size() > settings.logLines) logTail.pop_front();
    }
    currentLogLine = "";
    currentLogLinePos = 0;
}
+
+
/* Return the wanted output paths that are (returnValid == true) or
   are not (returnValid == false) valid in the store; with checkHash
   set, validity additionally requires the contents hash to check
   out. */
PathSet DerivationGoal::checkPathValidity(bool returnValid, bool checkHash)
{
    PathSet result;
    for (auto & i : drv->outputs) {
        /* Only consider outputs the caller asked for. */
        if (!wantOutput(i.first, wantedOutputs)) continue;
        bool good =
            worker.store.isValidPath(i.second.path) &&
            (!checkHash || worker.pathContentsGood(i.second.path));
        if (good == returnValid) result.insert(i.second.path);
    }
    return result;
}
+
+
/* Set up a hash rewrite for 'path': compute a same-length surrogate
   store path whose hash part is derived from drvPath+path, record the
   forward (input) and reverse (output) rewrites, and return the
   surrogate. Used to redirect outputs during repair/check builds. */
Path DerivationGoal::addHashRewrite(const Path & path)
{
    /* h1 = the 32-character hash part of the original path. */
    string h1 = string(path, worker.store.storeDir.size() + 1, 32);
    /* h2 = a deterministic replacement hash, unique per (drv, path). */
    string h2 = string(printHash32(hashString(htSHA256, "rewrite:" + drvPath + ":" + path)), 0, 32);
    Path p = worker.store.storeDir + "/" + h2 + string(path, worker.store.storeDir.size() + 33);
    deletePath(p);
    /* Rewriting is done in-place on file contents, so the paths must
       have identical lengths. */
    assert(path.size() == p.size());
    inputRewrites[h1] = h2;
    outputRewrites[h2] = h1;
    redirectedOutputs[path] = p;
    return p;
}
+
+
/* Record the final build result, notify waiters via amDone(), and
   propagate timeout/permanent-failure status to the worker so it can
   pick the right exit code. */
void DerivationGoal::done(BuildResult::Status status, const string & msg)
{
    result.status = status;
    result.errorMsg = msg;
    amDone(result.success() ? ecSuccess : ecFailed);
    if (result.status == BuildResult::TimedOut)
        worker.timedOut = true;
    if (result.status == BuildResult::PermanentFailure)
        worker.permanentFailure = true;
}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
/* A goal that realises a store path by copying it from a substituter
   (e.g. a binary cache) instead of building it locally. */
class SubstitutionGoal : public Goal
{
    friend class Worker;

private:
    /* The store path that should be realised through a substitute. */
    Path storePath;

    /* The remaining substituters. */
    std::list<ref<Store>> subs;

    /* The current substituter. */
    std::shared_ptr<Store> sub;

    /* Whether any substituter can realise this path. */
    bool hasSubstitute;

    /* Path info returned by the substituter's query info operation. */
    std::shared_ptr<const ValidPathInfo> info;

    /* Pipe for the substituter's standard output. */
    Pipe outPipe;

    /* The substituter thread. */
    std::thread thr;

    /* Used to transfer an exception from the substituter thread back
       to finished(). */
    std::promise<void> promise;

    /* Whether to try to repair a valid path. */
    bool repair;

    /* Location where we're downloading the substitute. Differs from
       storePath when doing a repair. */
    Path destPath;

    /* Pointer-to-member state machine: each state is a method, driven
       by work(). */
    typedef void (SubstitutionGoal::*GoalState)();
    GoalState state;

public:
    SubstitutionGoal(const Path & storePath, Worker & worker, bool repair = false);
    ~SubstitutionGoal();

    /* Substitutions are started with respectTimeouts == false, so
       this should never be called — abort if it is. */
    void timedOut() { abort(); };

    string key()
    {
        /* "a$" ensures substitution goals happen before derivation
           goals. */
        return "a$" + storePathToName(storePath) + "$" + storePath;
    }

    void work();

    /* The states. */
    void init();
    void tryNext();
    void gotInfo();
    void referencesValid();
    void tryToRun();
    void finished();

    /* Callback used by the worker to write to the log. */
    void handleChildOutput(int fd, const string & data);
    void handleEOF(int fd);

    Path getStorePath() { return storePath; }
};
+
+
/* Construct a substitution goal for 'storePath'; the state machine
   starts in init(). */
SubstitutionGoal::SubstitutionGoal(const Path & storePath, Worker & worker, bool repair)
    : Goal(worker)
    , hasSubstitute(false)
    , repair(repair)
{
    this->storePath = storePath;
    state = &SubstitutionGoal::init;
    name = (format("substitution of ‘%1%’") % storePath).str();
    trace("created");
}
+
+
/* Join the substituter thread (if running) and deregister the child
   from the worker; destructors must not throw, so swallow errors. */
SubstitutionGoal::~SubstitutionGoal()
{
    try {
        if (thr.joinable()) {
            // FIXME: signal worker thread to quit.
            thr.join();
            worker.childTerminated(this);
        }
    } catch (...) {
        ignoreException();
    }
}
+
+
/* Dispatch to the current state of the goal's state machine. */
void SubstitutionGoal::work()
{
    (this->*state)();
}
+
+
/* Initial state: protect the path from GC, short-circuit if it is
   already valid, then start iterating over the substituters. */
void SubstitutionGoal::init()
{
    trace("init");

    /* Prevent the path from being garbage-collected while we work. */
    worker.store.addTempRoot(storePath);

    /* If the path already exists we're done. */
    if (!repair && worker.store.isValidPath(storePath)) {
        amDone(ecSuccess);
        return;
    }

    if (settings.readOnlyMode)
        throw Error(format("cannot substitute path ‘%1%’ - no write access to the Nix store") % storePath);

    subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();

    tryNext();
}
+
+
/* Pop the next substituter off the list and query it for the path;
   skip substituters with a different store dir, invalid paths, and
   (when signatures are required) unsigned paths. Once a usable
   substituter is found, first realise the path's references. */
void SubstitutionGoal::tryNext()
{
    trace("trying next substituter");

    if (subs.size() == 0) {
        /* None left. Terminate this goal and let someone else deal
           with it. */
        debug(format("path ‘%1%’ is required, but there is no substituter that can build it") % storePath);

        /* Hack: don't indicate failure if there were no substituters.
           In that case the calling derivation should just do a
           build. */
        amDone(hasSubstitute ? ecFailed : ecNoSubstituters);
        return;
    }

    sub = subs.front();
    subs.pop_front();

    /* A substituter for a different store directory cannot provide
       paths for ours. */
    if (sub->storeDir != worker.store.storeDir) {
        tryNext();
        return;
    }

    try {
        // FIXME: make async
        info = sub->queryPathInfo(storePath);
    } catch (InvalidPath &) {
        tryNext();
        return;
    }

    hasSubstitute = true;

    /* Bail out early if this substituter lacks a valid
       signature. LocalStore::addToStore() also checks for this, but
       only after we've downloaded the path. */
    if (worker.store.requireSigs && !info->checkSignatures(worker.store, worker.store.publicKeys)) {
        printInfo(format("warning: substituter ‘%s’ does not have a valid signature for path ‘%s’")
            % sub->getUri() % storePath);
        tryNext();
        return;
    }

    /* To maintain the closure invariant, we first have to realise the
       paths referenced by this one. */
    for (auto & i : info->references)
        if (i != storePath) /* ignore self-references */
            addWaitee(worker.makeSubstitutionGoal(i));

    if (waitees.empty()) /* to prevent hang (no wake-up event) */
        referencesValid();
    else
        state = &SubstitutionGoal::referencesValid;
}
+
+
/* Called once all reference sub-goals have finished; on success,
   advance to tryToRun() via the worker queue. */
void SubstitutionGoal::referencesValid()
{
    trace("all references realised");

    if (nrFailed > 0) {
        debug(format("some references of path ‘%1%’ could not be realised") % storePath);
        /* Report an incomplete closure (rather than plain failure) if
           the cause was missing substitutes. */
        amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
        return;
    }

    for (auto & i : info->references)
        if (i != storePath) /* ignore self-references */
            assert(worker.store.isValidPath(i));

    state = &SubstitutionGoal::tryToRun;
    worker.wakeUp(shared_from_this());
}
+
+
+void SubstitutionGoal::tryToRun()
+{
+ trace("trying to run");
+
+ /* Make sure that we are allowed to start a build. Note that even
+ if maxBuildJobs == 0 (no local builds allowed), we still allow
+ a substituter to run. This is because substitutions cannot be
+ distributed to another machine via the build hook. */
+ if (worker.getNrLocalBuilds() >= std::min(1U, (unsigned int) settings.maxBuildJobs)) {
+ worker.waitForBuildSlot(shared_from_this());
+ return;
+ }
+
+ printInfo(format("fetching path ‘%1%’...") % storePath);
+
+ outPipe.create();
+
+ promise = std::promise<void>();
+
+ thr = std::thread([this]() {
+ try {
+ /* Wake up the worker loop when we're done. */
+ Finally updateStats([this]() { outPipe.writeSide = -1; });
+
+ copyStorePath(ref<Store>(sub), ref<Store>(worker.store.shared_from_this()),
+ storePath, repair);
+
+ promise.set_value();
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ });
+
+ worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
+
+ state = &SubstitutionGoal::finished;
+}
+
+
/* Final state: join the copy thread and collect its outcome; on
   failure fall back to the next substituter, on success mark the
   path's contents good and finish. */
void SubstitutionGoal::finished()
{
    trace("substitute finished");

    thr.join();
    worker.childTerminated(this);

    try {
        /* Rethrows any exception captured in the copy thread. */
        promise.get_future().get();
    } catch (Error & e) {
        printInfo(e.msg());

        /* Try the next substitute. */
        state = &SubstitutionGoal::tryNext;
        worker.wakeUp(shared_from_this());
        return;
    }

    worker.markContentsGood(storePath);

    printMsg(lvlChatty,
        format("substitution of path ‘%1%’ succeeded") % storePath);

    amDone(ecSuccess);
}
+
+
/* The substituter produces no log output on its pipe; the pipe is
   only used to signal completion, so there is nothing to do here. */
void SubstitutionGoal::handleChildOutput(int fd, const string & data)
{
}
+
+
/* EOF on outPipe means the copy thread has finished; wake the goal so
   finished() runs on the worker loop. */
void SubstitutionGoal::handleEOF(int fd)
{
    if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
/* Guard against instantiating more than one Worker at a time (the
   goal machinery assumes a single worker per process). */
static bool working = false;


Worker::Worker(LocalStore & store)
    : store(store)
{
    /* Debugging: prevent recursive workers. */
    if (working) abort();
    working = true;
    nrLocalBuilds = 0;
    lastWokenUp = steady_time_point::min();
    permanentFailure = false;
    timedOut = false;
}
+
+
Worker::~Worker()
{
    working = false;

    /* Explicitly get rid of all strong pointers now. After this all
       goals that refer to this worker should be gone. (Otherwise we
       are in trouble, since goals may call childTerminated() etc. in
       their destructors). */
    topGoals.clear();
}
+
+
/* Return the (possibly cached) derivation goal for 'path'. If one
   already exists, merge the wanted outputs into it instead of
   creating a duplicate. */
GoalPtr Worker::makeDerivationGoal(const Path & path,
    const StringSet & wantedOutputs, BuildMode buildMode)
{
    GoalPtr goal = derivationGoals[path].lock();
    if (!goal) {
        goal = std::make_shared<DerivationGoal>(path, wantedOutputs, *this, buildMode);
        derivationGoals[path] = goal;
        wakeUp(goal);
    } else
        /* Entries in derivationGoals are always DerivationGoals, so
           the cast cannot fail here. */
        (dynamic_cast<DerivationGoal *>(goal.get()))->addWantedOutputs(wantedOutputs);
    return goal;
}
+
+
/* Create a goal for an in-memory derivation (not read from a .drv
   file). These are never cached/deduplicated. */
std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const Path & drvPath,
    const BasicDerivation & drv, BuildMode buildMode)
{
    auto goal = std::make_shared<DerivationGoal>(drvPath, drv, *this, buildMode);
    wakeUp(goal);
    return goal;
}
+
+
/* Return the (possibly cached) substitution goal for 'path'. */
GoalPtr Worker::makeSubstitutionGoal(const Path & path, bool repair)
{
    GoalPtr goal = substitutionGoals[path].lock();
    if (!goal) {
        goal = std::make_shared<SubstitutionGoal>(path, *this, repair);
        substitutionGoals[path] = goal;
        wakeUp(goal);
    }
    return goal;
}
+
+
+static void removeGoal(GoalPtr goal, WeakGoalMap & goalMap)
+{
+ /* !!! inefficient */
+ for (WeakGoalMap::iterator i = goalMap.begin();
+ i != goalMap.end(); )
+ if (i->second.lock() == goal) {
+ WeakGoalMap::iterator j = i; ++j;
+ goalMap.erase(i);
+ i = j;
+ }
+ else ++i;
+}
+
+
/* Remove a finished goal from all bookkeeping maps, cancel remaining
   goals if a top-level goal failed (without --keep-going), and wake
   goals that were waiting for any goal to finish. */
void Worker::removeGoal(GoalPtr goal)
{
    nix::removeGoal(goal, derivationGoals);
    nix::removeGoal(goal, substitutionGoals);
    if (topGoals.find(goal) != topGoals.end()) {
        topGoals.erase(goal);
        /* If a top-level goal failed, then kill all other goals
           (unless keepGoing was set). */
        if (goal->getExitCode() == Goal::ecFailed && !settings.keepGoing)
            topGoals.clear();
    }

    /* Wake up goals waiting for any goal to finish.
       NOTE(review): the inner 'goal' shadows the parameter; harmless,
       but easy to misread. */
    for (auto & i : waitingForAnyGoal) {
        GoalPtr goal = i.lock();
        if (goal) wakeUp(goal);
    }

    waitingForAnyGoal.clear();
}
+
+
/* Schedule a goal to have its work() called on the next pass of the
   main loop. */
void Worker::wakeUp(GoalPtr goal)
{
    goal->trace("woken up");
    addToWeakGoals(awake, goal);
}
+
+
/* Number of children currently occupying a local build slot. */
unsigned Worker::getNrLocalBuilds()
{
    return nrLocalBuilds;
}
+
+
/* Register a newly started child process (or thread) with the worker
   so its file descriptors are monitored by waitForInput(). */
void Worker::childStarted(GoalPtr goal, const set<int> & fds,
    bool inBuildSlot, bool respectTimeouts)
{
    Child child;
    child.goal = goal;
    /* Raw pointer kept alongside the weak pointer for identity
       comparison in childTerminated(). */
    child.goal2 = goal.get();
    child.fds = fds;
    child.timeStarted = child.lastOutput = steady_time_point::clock::now();
    child.inBuildSlot = inBuildSlot;
    child.respectTimeouts = respectTimeouts;
    children.emplace_back(child);
    if (inBuildSlot) nrLocalBuilds++;
}
+
+
/* Deregister a terminated child, free its build slot, and (unless
   wakeSleepers is false) wake goals that were waiting for a slot. */
void Worker::childTerminated(Goal * goal, bool wakeSleepers)
{
    auto i = std::find_if(children.begin(), children.end(),
        [&](const Child & child) { return child.goal2 == goal; });
    /* The child may already have been removed (e.g. by killChild()). */
    if (i == children.end()) return;

    if (i->inBuildSlot) {
        assert(nrLocalBuilds > 0);
        nrLocalBuilds--;
    }

    children.erase(i);

    if (wakeSleepers) {

        /* Wake up goals waiting for a build slot. */
        for (auto & j : wantingToBuild) {
            GoalPtr goal = j.lock();
            if (goal) wakeUp(goal);
        }

        wantingToBuild.clear();
    }
}
+
+
/* Give the goal a build slot immediately if one is free, otherwise
   park it until a child terminates. */
void Worker::waitForBuildSlot(GoalPtr goal)
{
    debug("wait for build slot");
    if (getNrLocalBuilds() < settings.maxBuildJobs)
        wakeUp(goal); /* we can do it right away */
    else
        addToWeakGoals(wantingToBuild, goal);
}
+
+
/* Park the goal until any other goal finishes (see removeGoal()). */
void Worker::waitForAnyGoal(GoalPtr goal)
{
    debug("wait for any goal");
    addToWeakGoals(waitingForAnyGoal, goal);
}
+
+
/* Park the goal for a polling interval (used while waiting for path
   locks held by other processes). */
void Worker::waitForAWhile(GoalPtr goal)
{
    debug("wait for a while");
    addToWeakGoals(waitingForAWhile, goal);
}
+
+
/* The main event loop: repeatedly run all awake goals, then block in
   waitForInput() until children produce output or timeouts expire.
   Terminates when all top-level goals have finished. */
void Worker::run(const Goals & _topGoals)
{
    for (auto & i : _topGoals) topGoals.insert(i);

    Activity act(*logger, lvlDebug, "entered goal loop");

    while (1) {

        checkInterrupt();

        /* Call every wake goal (in the ordering established by
           CompareGoalPtrs). */
        while (!awake.empty() && !topGoals.empty()) {
            /* Take a strong-pointer snapshot first, since running a
               goal may modify 'awake'. */
            Goals awake2;
            for (auto & i : awake) {
                GoalPtr goal = i.lock();
                if (goal) awake2.insert(goal);
            }
            awake.clear();
            for (auto & goal : awake2) {
                checkInterrupt();
                goal->work();
                if (topGoals.empty()) break; // stuff may have been cancelled
            }
        }

        if (topGoals.empty()) break;

        /* Wait for input. */
        if (!children.empty() || !waitingForAWhile.empty())
            waitForInput();
        else {
            /* Nothing running and nothing awake: with maxBuildJobs == 0
               and no build hook there is no way to make progress. */
            if (awake.empty() && 0 == settings.maxBuildJobs) throw Error(
                "unable to start any build; either increase ‘--max-jobs’ "
                "or enable distributed builds");
            assert(!awake.empty());
        }
    }

    /* If --keep-going is not set, it's possible that the main goal
       exited while some of its subgoals were still active. But if
       --keep-going *is* set, then they must all be finished now. */
    assert(!settings.keepGoing || awake.empty());
    assert(!settings.keepGoing || wantingToBuild.empty());
    assert(!settings.keepGoing || children.empty());
}
+
+
/* Block in select() on all children's log pipes, honouring
   max-silent-time / build-timeout deadlines and the lock-polling
   interval; then dispatch any readable data, EOFs, and timeouts to
   the owning goals. */
void Worker::waitForInput()
{
    printMsg(lvlVomit, "waiting for children");

    /* Process output from the file descriptors attached to the
       children, namely log output and output path creation commands.
       We also use this to detect child termination: if we get EOF on
       the logger pipe of a build, we assume that the builder has
       terminated. */

    bool useTimeout = false;
    struct timeval timeout;
    timeout.tv_usec = 0;
    auto before = steady_time_point::clock::now();

    /* If we're monitoring for silence on stdout/stderr, or if there
       is a build timeout, then wait for input until the first
       deadline for any child. */
    auto nearest = steady_time_point::max(); // nearest deadline
    for (auto & i : children) {
        if (!i.respectTimeouts) continue;
        if (0 != settings.maxSilentTime)
            nearest = std::min(nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime));
        if (0 != settings.buildTimeout)
            nearest = std::min(nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout));
    }
    if (nearest != steady_time_point::max()) {
        /* Clamp to at least 1 second to avoid a busy loop. */
        timeout.tv_sec = std::max(1L, (long) std::chrono::duration_cast<std::chrono::seconds>(nearest - before).count());
        useTimeout = true;
    }

    /* If we are polling goals that are waiting for a lock, then wake
       up after a few seconds at most. */
    if (!waitingForAWhile.empty()) {
        useTimeout = true;
        if (lastWokenUp == steady_time_point::min())
            printError("waiting for locks or build slots...");
        /* Clamp lastWokenUp so it is never in the future. */
        if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before;
        timeout.tv_sec = std::max(1L,
            (long) std::chrono::duration_cast<std::chrono::seconds>(
                lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count());
    } else lastWokenUp = steady_time_point::min();

    if (useTimeout)
        vomit("sleeping %d seconds", timeout.tv_sec);

    /* Use select() to wait for the input side of any logger pipe to
       become `available'. Note that `available' (i.e., non-blocking)
       includes EOF. */
    fd_set fds;
    FD_ZERO(&fds);
    int fdMax = 0;
    for (auto & i : children) {
        for (auto & j : i.fds) {
            FD_SET(j, &fds);
            if (j >= fdMax) fdMax = j + 1;
        }
    }

    if (select(fdMax, &fds, 0, 0, useTimeout ? &timeout : 0) == -1) {
        /* EINTR just means a signal arrived (e.g. SIGINT); the caller
           loops around and checkInterrupt() handles it. */
        if (errno == EINTR) return;
        throw SysError("waiting for input");
    }

    auto after = steady_time_point::clock::now();

    /* Process all available file descriptors. FIXME: this is
       O(children * fds). */
    decltype(children)::iterator i;
    for (auto j = children.begin(); j != children.end(); j = i) {
        /* Save the successor first: handleEOF()/timedOut() may erase
           the current child from 'children'. */
        i = std::next(j);

        checkInterrupt();

        GoalPtr goal = j->goal.lock();
        assert(goal);

        /* Copy the fd set, since handleEOF() mutates j->fds. */
        set<int> fds2(j->fds);
        for (auto & k : fds2) {
            if (FD_ISSET(k, &fds)) {
                unsigned char buffer[4096];
                ssize_t rd = read(k, buffer, sizeof(buffer));
                if (rd == -1) {
                    if (errno != EINTR)
                        throw SysError(format("reading from %1%")
                            % goal->getName());
                } else if (rd == 0) {
                    debug(format("%1%: got EOF") % goal->getName());
                    goal->handleEOF(k);
                    j->fds.erase(k);
                } else {
                    printMsg(lvlVomit, format("%1%: read %2% bytes")
                        % goal->getName() % rd);
                    string data((char *) buffer, rd);
                    j->lastOutput = after;
                    goal->handleChildOutput(k, data);
                }
            }
        }

        /* Kill children that have been silent too long. */
        if (goal->getExitCode() == Goal::ecBusy &&
            0 != settings.maxSilentTime &&
            j->respectTimeouts &&
            after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime))
        {
            printError(
                format("%1% timed out after %2% seconds of silence")
                % goal->getName() % settings.maxSilentTime);
            goal->timedOut();
        }

        /* Kill children that have exceeded the overall build timeout. */
        else if (goal->getExitCode() == Goal::ecBusy &&
            0 != settings.buildTimeout &&
            j->respectTimeouts &&
            after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout))
        {
            printError(
                format("%1% timed out after %2% seconds")
                % goal->getName() % settings.buildTimeout);
            goal->timedOut();
        }
    }

    /* Once the polling interval has elapsed, wake all lock-waiting
       goals so they can retry. */
    if (!waitingForAWhile.empty() && lastWokenUp + std::chrono::seconds(settings.pollInterval) <= after) {
        lastWokenUp = after;
        for (auto & i : waitingForAWhile) {
            GoalPtr goal = i.lock();
            if (goal) wakeUp(goal);
        }
        waitingForAWhile.clear();
    }
}
+
+
+/* Map the worker's failure state to a process exit status: 101 if any
+   goal timed out, 100 on a permanent failure, 1 otherwise.  Only
+   meaningful once at least one goal has failed. */
+unsigned int Worker::exitStatus()
+{
+    return timedOut ? 101 : (permanentFailure ? 100 : 1);
+}
+
+
+/* Check whether the contents of a registered path still match its
+   recorded NAR hash.  Results are memoised in pathContentsGoodCache.
+   A path whose recorded hash equals the default-constructed SHA-256
+   hash (presumably a placeholder for "hash unknown") is always
+   considered good. */
+bool Worker::pathContentsGood(const Path & path)
+{
+    std::map<Path, bool>::iterator i = pathContentsGoodCache.find(path);
+    if (i != pathContentsGoodCache.end()) return i->second;
+    printInfo(format("checking path ‘%1%’...") % path);
+    auto info = store.queryPathInfo(path);
+    bool res;
+    if (!pathExists(path))
+        res = false;
+    else {
+        /* Re-hash the on-disk contents using the same hash type as the
+           recorded hash so the two are comparable. */
+        HashResult current = hashPath(info->narHash.type, path);
+        Hash nullHash(htSHA256);
+        res = info->narHash == nullHash || info->narHash == current.first;
+    }
+    pathContentsGoodCache[path] = res;
+    if (!res) printError(format("path ‘%1%’ is corrupted or missing!") % path);
+    return res;
+}
+
+
+/* Record that ‘path’ is known to have valid contents, so a later
+   pathContentsGood() call returns true without re-hashing. */
+void Worker::markContentsGood(const Path & path)
+{
+    pathContentsGoodCache[path] = true;
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+/* Entry point for building a set of derivations and/or substituting
+   store paths.  Each element of ‘drvPaths’ may carry a "!out1,out2"
+   suffix selecting specific derivation outputs.  Throws an Error
+   listing every failed path, with the worker's exit status. */
+void LocalStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
+{
+    Worker worker(*this);
+
+    Goals goals;
+    for (auto & i : drvPaths) {
+        DrvPathWithOutputs i2 = parseDrvPathWithOutputs(i);
+        if (isDerivation(i2.first))
+            goals.insert(worker.makeDerivationGoal(i2.first, i2.second, buildMode));
+        else
+            goals.insert(worker.makeSubstitutionGoal(i, buildMode));
+    }
+
+    worker.run(goals);
+
+    /* Collect the paths of all goals that did not succeed. */
+    PathSet failed;
+    for (auto & i : goals) {
+        if (i->getExitCode() != Goal::ecSuccess) {
+            DerivationGoal * i2 = dynamic_cast<DerivationGoal *>(i.get());
+            if (i2) failed.insert(i2->getDrvPath());
+            /* NOTE(review): assumes every non-DerivationGoal here is a
+               SubstitutionGoal — the dynamic_cast result is dereferenced
+               without a null check. */
+            else failed.insert(dynamic_cast<SubstitutionGoal *>(i.get())->getStorePath());
+        }
+    }
+
+    if (!failed.empty())
+        throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
+}
+
+
+/* Build a single in-memory derivation and return its BuildResult
+   rather than throwing: any Error raised while running the worker is
+   converted into a MiscFailure result carrying the error message. */
+BuildResult LocalStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+    BuildMode buildMode)
+{
+    Worker worker(*this);
+    auto goal = worker.makeBasicDerivationGoal(drvPath, drv, buildMode);
+
+    BuildResult result;
+
+    try {
+        worker.run(Goals{goal});
+        result = goal->getResult();
+    } catch (Error & e) {
+        result.status = BuildResult::MiscFailure;
+        result.errorMsg = e.msg();
+    }
+
+    return result;
+}
+
+
+/* Make sure that ‘path’ is valid, substituting it from a substituter
+   if necessary.  Throws if substitution fails. */
+void LocalStore::ensurePath(const Path & path)
+{
+    /* If the path is already valid, we're done. */
+    if (isValidPath(path)) return;
+
+    Worker worker(*this);
+    GoalPtr goal = worker.makeSubstitutionGoal(path);
+    Goals goals = {goal};
+
+    worker.run(goals);
+
+    if (goal->getExitCode() != Goal::ecSuccess)
+        throw Error(worker.exitStatus(), "path ‘%s’ does not exist and cannot be created", path);
+}
+
+
+/* Repair a corrupted store path.  First try to re-substitute it (the
+   ‘true’ argument requests repair mode); if that fails and the path
+   has a valid deriver, rebuild the deriver in repair mode instead.
+   Throws if neither option is available. */
+void LocalStore::repairPath(const Path & path)
+{
+    Worker worker(*this);
+    GoalPtr goal = worker.makeSubstitutionGoal(path, true);
+    Goals goals = {goal};
+
+    worker.run(goals);
+
+    if (goal->getExitCode() != Goal::ecSuccess) {
+        /* Since substituting the path didn't work, if we have a valid
+           deriver, then rebuild the deriver. */
+        auto deriver = queryPathInfo(path)->deriver;
+        if (deriver != "" && isValidPath(deriver)) {
+            goals.clear();
+            goals.insert(worker.makeDerivationGoal(deriver, StringSet(), bmRepair));
+            worker.run(goals);
+        } else
+            throw Error(worker.exitStatus(), "cannot repair path ‘%s’", path);
+    }
+}
+
+
+}
diff --git a/src/libstore/builtins.cc b/src/libstore/builtins.cc
new file mode 100644
index 000000000..c5dbd57f8
--- /dev/null
+++ b/src/libstore/builtins.cc
@@ -0,0 +1,71 @@
+#include "builtins.hh"
+#include "download.hh"
+#include "store-api.hh"
+#include "archive.hh"
+#include "compression.hh"
+
+namespace nix {
+
+/* Builder for ‘builtin:fetchurl’ derivations.  Downloads the
+   derivation's ‘url’ attribute — first trying a hash-addressed mirror
+   for flat fixed-output downloads — optionally unpacks the result,
+   and writes it to the derivation's ‘out’ path.  Runs in a forked
+   child process (see the downloader note below). */
+void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
+{
+    /* Make the host's netrc data available. Too bad curl requires
+       this to be stored in a file. It would be nice if we could just
+       pass a pointer to the data.
+       NOTE(review): the file is written to the relative path "netrc",
+       i.e. into the process's current working directory — confirm the
+       caller has chdir'ed somewhere private before this runs. */
+    if (netrcData != "") {
+        settings.netrcFile = "netrc";
+        writeFile(settings.netrcFile, netrcData, 0600);
+    }
+
+    /* Look up a required attribute in the derivation's environment. */
+    auto getAttr = [&](const string & name) {
+        auto i = drv.env.find(name);
+        if (i == drv.env.end()) throw Error(format("attribute ‘%s’ missing") % name);
+        return i->second;
+    };
+
+    auto fetch = [&](const string & url) {
+        /* No need to do TLS verification, because we check the hash of
+           the result anyway. */
+        DownloadRequest request(url);
+        request.verifyTLS = false;
+
+        /* Show a progress indicator, even though stderr is not a tty. */
+        request.showProgress = DownloadRequest::yes;
+
+        /* Note: have to use a fresh downloader here because we're in
+           a forked process. */
+        auto data = makeDownloader()->download(request);
+        assert(data.data);
+
+        return data.data;
+    };
+
+    std::shared_ptr<std::string> data;
+
+    /* For flat fixed-output downloads, first try the content-addressed
+       mirror keyed on the expected hash; fall back to the original URL
+       on any error. */
+    try {
+        if (getAttr("outputHashMode") == "flat")
+            data = fetch("http://tarballs.nixos.org/" + getAttr("outputHashAlgo") + "/" + getAttr("outputHash"));
+    } catch (Error & e) {
+        debug(e.what());
+    }
+
+    if (!data) data = fetch(getAttr("url"));
+
+    Path storePath = getAttr("out");
+
+    /* If ‘unpack = 1’, the payload is a serialised archive: decompress
+       xz transparently (detected via the xz magic bytes) and restore it
+       as a directory tree; otherwise store the raw bytes. */
+    auto unpack = drv.env.find("unpack");
+    if (unpack != drv.env.end() && unpack->second == "1") {
+        if (string(*data, 0, 6) == string("\xfd" "7zXZ\0", 6))
+            data = decompress("xz", *data);
+        StringSource source(*data);
+        restorePath(storePath, source);
+    } else
+        writeFile(storePath, *data);
+
+    /* ‘executable = 1’ marks the (single-file) output executable. */
+    auto executable = drv.env.find("executable");
+    if (executable != drv.env.end() && executable->second == "1") {
+        if (chmod(storePath.c_str(), 0755) == -1)
+            throw SysError(format("making ‘%1%’ executable") % storePath);
+    }
+}
+
+}
diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh
new file mode 100644
index 000000000..0cc6ba31f
--- /dev/null
+++ b/src/libstore/builtins.hh
@@ -0,0 +1,9 @@
+#pragma once
+
+#include "derivations.hh"
+
+namespace nix {
+
+void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData);
+
+}
diff --git a/src/libstore/crypto.cc b/src/libstore/crypto.cc
new file mode 100644
index 000000000..f56a6adab
--- /dev/null
+++ b/src/libstore/crypto.cc
@@ -0,0 +1,126 @@
+#include "crypto.hh"
+#include "util.hh"
+#include "globals.hh"
+
+#if HAVE_SODIUM
+#include <sodium.h>
+#endif
+
+namespace nix {
+
+/* Split ‘name:payload’ into (name, payload).  Returns a pair of empty
+   strings when there is no colon or the name part is empty; callers
+   treat that as a parse failure. */
+static std::pair<std::string, std::string> split(const string & s)
+{
+    size_t colon = s.find(':');
+    if (colon == std::string::npos || colon == 0)
+        return {"", ""};
+    return {std::string(s, 0, colon), std::string(s, colon + 1)};
+}
+
+/* Parse a key in ‘<name>:<key-in-base64>’ format.  After construction
+   the ‘key’ field holds the decoded binary key material. */
+Key::Key(const string & s)
+{
+    auto ss = split(s);
+
+    name = ss.first;
+    key = ss.second;
+
+    if (name == "" || key == "")
+        throw Error("secret key is corrupt");
+
+    key = base64Decode(key);
+}
+
+/* A secret (signing) key.  With libsodium available, the decoded key
+   must be exactly crypto_sign_SECRETKEYBYTES long; without it no size
+   check is possible. */
+SecretKey::SecretKey(const string & s)
+    : Key(s)
+{
+#if HAVE_SODIUM
+    if (key.size() != crypto_sign_SECRETKEYBYTES)
+        throw Error("secret key is not valid");
+#endif
+}
+
+#if !HAVE_SODIUM
+/* Fallback for builds without libsodium: every signing/verification
+   entry point fails with a clear error instead of silently no-oping. */
+[[noreturn]] static void noSodium()
+{
+    throw Error("Nix was not compiled with libsodium, required for signed binary cache support");
+}
+#endif
+
+/* Produce a detached Ed25519 signature over ‘data’, returned as
+   ‘<key-name>:<signature-in-base64>’ (the format verifyDetached()
+   expects). */
+std::string SecretKey::signDetached(const std::string & data) const
+{
+#if HAVE_SODIUM
+    unsigned char sig[crypto_sign_BYTES];
+    unsigned long long sigLen;
+    crypto_sign_detached(sig, &sigLen, (unsigned char *) data.data(), data.size(),
+        (unsigned char *) key.data());
+    return name + ":" + base64Encode(std::string((char *) sig, sigLen));
+#else
+    noSodium();
+#endif
+}
+
+/* Derive the public key (same name) from this secret key. */
+PublicKey SecretKey::toPublicKey() const
+{
+#if HAVE_SODIUM
+    unsigned char pk[crypto_sign_PUBLICKEYBYTES];
+    crypto_sign_ed25519_sk_to_pk(pk, (unsigned char *) key.data());
+    return PublicKey(name, std::string((char *) pk, crypto_sign_PUBLICKEYBYTES));
+#else
+    noSodium();
+#endif
+}
+
+/* Parse a public key from ‘<name>:<key-in-base64>’; with libsodium,
+   the decoded key must be exactly crypto_sign_PUBLICKEYBYTES long. */
+PublicKey::PublicKey(const string & s)
+    : Key(s)
+{
+#if HAVE_SODIUM
+    if (key.size() != crypto_sign_PUBLICKEYBYTES)
+        throw Error("public key is not valid");
+#endif
+}
+
+/* Verify the detached signature ‘sig’ (format ‘name:base64’) over
+   ‘data’.  Returns false if the named key is unknown; throws if the
+   decoded signature has the wrong length. */
+bool verifyDetached(const std::string & data, const std::string & sig,
+    const PublicKeys & publicKeys)
+{
+#if HAVE_SODIUM
+    auto ss = split(sig);
+
+    /* Look up the public key by the name embedded in the signature. */
+    auto key = publicKeys.find(ss.first);
+    if (key == publicKeys.end()) return false;
+
+    auto sig2 = base64Decode(ss.second);
+    if (sig2.size() != crypto_sign_BYTES)
+        throw Error("signature is not valid");
+
+    return crypto_sign_verify_detached((unsigned char *) sig2.data(),
+        (unsigned char *) data.data(), data.size(),
+        (unsigned char *) key->second.key.data()) == 0;
+#else
+    noSodium();
+#endif
+}
+
+/* Build the default trusted-key set: the configured binary-cache
+   public keys, plus the public halves of any readable configured
+   secret keys.  Keyed by key name; later duplicates are ignored by
+   map::emplace. */
+PublicKeys getDefaultPublicKeys()
+{
+    PublicKeys publicKeys;
+
+    // FIXME: filter duplicates
+
+    for (auto s : settings.binaryCachePublicKeys.get()) {
+        PublicKey key(s);
+        publicKeys.emplace(key.name, key);
+    }
+
+    for (auto secretKeyFile : settings.secretKeyFiles.get()) {
+        try {
+            SecretKey secretKey(readFile(secretKeyFile));
+            publicKeys.emplace(secretKey.name, secretKey.toPublicKey());
+        } catch (SysError & e) {
+            /* Ignore unreadable key files. That's normal in a
+               multi-user installation. */
+        }
+    }
+
+    return publicKeys;
+}
+
+}
diff --git a/src/libstore/crypto.hh b/src/libstore/crypto.hh
new file mode 100644
index 000000000..9110af3aa
--- /dev/null
+++ b/src/libstore/crypto.hh
@@ -0,0 +1,54 @@
+#pragma once
+
+#include "types.hh"
+
+#include <map>
+
+namespace nix {
+
+/* A named cryptographic key.  ‘key’ holds the raw (base64-decoded)
+   key bytes; ‘name’ identifies the key (e.g. a cache signing-key
+   name). */
+struct Key
+{
+    std::string name;
+    std::string key;
+
+    /* Construct Key from a string in the format
+       ‘<name>:<key-in-base64>’. */
+    Key(const std::string & s);
+
+protected:
+    Key(const std::string & name, const std::string & key)
+        : name(name), key(key) { }
+};
+
+struct PublicKey;
+
+/* An Ed25519 secret (signing) key. */
+struct SecretKey : Key
+{
+    SecretKey(const std::string & s);
+
+    /* Return a detached signature of the given string. */
+    std::string signDetached(const std::string & s) const;
+
+    /* Derive the corresponding public key (same name). */
+    PublicKey toPublicKey() const;
+};
+
+/* An Ed25519 public (verification) key. */
+struct PublicKey : Key
+{
+    PublicKey(const std::string & data);
+
+private:
+    /* Used by SecretKey::toPublicKey() to build a key from raw bytes
+       without going through the string parser. */
+    PublicKey(const std::string & name, const std::string & key)
+        : Key(name, key) { }
+    friend struct SecretKey;
+};
+
+typedef std::map<std::string, PublicKey> PublicKeys;
+
+/* Return true iff ‘sig’ is a correct signature over ‘data’ using one
+ of the given public keys. */
+bool verifyDetached(const std::string & data, const std::string & sig,
+ const PublicKeys & publicKeys);
+
+PublicKeys getDefaultPublicKeys();
+
+}
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
new file mode 100644
index 000000000..0c6ceb9f6
--- /dev/null
+++ b/src/libstore/derivations.cc
@@ -0,0 +1,444 @@
+#include "derivations.hh"
+#include "store-api.hh"
+#include "globals.hh"
+#include "util.hh"
+#include "worker-protocol.hh"
+#include "fs-accessor.hh"
+#include "istringstream_nocopy.hh"
+
+namespace nix {
+
+
+/* Decode this output's hashAlgo/hash pair: a leading "r:" on the
+   algorithm marks a recursive hash, the remainder names the hash
+   algorithm.  Throws if the algorithm is unknown. */
+void DerivationOutput::parseHashInfo(bool & recursive, Hash & hash) const
+{
+    recursive = false;
+    string algo = hashAlgo;
+
+    if (string(algo, 0, 2) == "r:") {
+        recursive = true;
+        algo = string(algo, 2);
+    }
+
+    HashType hashType = parseHashType(algo);
+    if (hashType == htUnknown)
+        throw Error(format("unknown hash algorithm ‘%1%’") % algo);
+
+    hash = parseHash(hashType, this->hash);
+}
+
+
+/* Return the store path of the output named ‘id’; throws if the
+   derivation has no such output. */
+Path BasicDerivation::findOutput(const string & id) const
+{
+    auto i = outputs.find(id);
+    if (i == outputs.end())
+        throw Error(format("derivation has no output ‘%1%’") % id);
+    return i->second.path;
+}
+
+
+/* True iff the derivation explicitly prefers a local build
+   (preferLocalBuild = "1") and this machine is able to build it. */
+bool BasicDerivation::willBuildLocally() const
+{
+    return get(env, "preferLocalBuild") == "1" && canBuildLocally();
+}
+
+
+/* True unless the derivation opts out via allowSubstitutes = "0";
+   substitution is allowed by default. */
+bool BasicDerivation::substitutesAllowed() const
+{
+    return get(env, "allowSubstitutes", "1") == "1";
+}
+
+
+/* True iff the builder is a built-in function ("builtin:<name>")
+   rather than an external program. */
+bool BasicDerivation::isBuiltin() const
+{
+    return string(builder, 0, 8) == "builtin:";
+}
+
+
+/* Whether this machine can execute the derivation's builder: either
+   the platforms match, the builder is built-in, or the derivation's
+   platform is a compatible sub-architecture of ours. */
+bool BasicDerivation::canBuildLocally() const
+{
+    return platform == settings.thisSystem
+        || isBuiltin()
+#if __linux__
+        || (platform == "i686-linux" && settings.thisSystem == "x86_64-linux")
+        || (platform == "armv6l-linux" && settings.thisSystem == "armv7l-linux")
+        || (platform == "armv5tel-linux" && (settings.thisSystem == "armv7l-linux" || settings.thisSystem == "armv6l-linux"))
+#elif __FreeBSD__
+        || (platform == "i686-linux" && settings.thisSystem == "x86_64-freebsd")
+        || (platform == "i686-linux" && settings.thisSystem == "i686-freebsd")
+#endif
+        ;
+}
+
+
+/* Serialise ‘drv’ and add it to the store as ‘<name>.drv’, with its
+   input sources and input derivation paths as references.  In
+   read-only mode only the would-be store path is computed; nothing is
+   written. */
+Path writeDerivation(ref<Store> store,
+    const Derivation & drv, const string & name, bool repair)
+{
+    PathSet references;
+    references.insert(drv.inputSrcs.begin(), drv.inputSrcs.end());
+    for (auto & i : drv.inputDrvs)
+        references.insert(i.first);
+    /* Note that the outputs of a derivation are *not* references
+       (that can be missing (of course) and should not necessarily be
+       held during a garbage collection). */
+    string suffix = name + drvExtension;
+    string contents = drv.unparse();
+    return settings.readOnlyMode
+        ? store->computeStorePathForText(suffix, contents, references)
+        : store->addTextToStore(suffix, contents, references, repair);
+}
+
+
+/* Read string `s' from stream `str' and throw FormatError if the
+   stream does not contain exactly that string next.
+   NOTE(review): ‘char s2[s.size()]’ is a variable-length array — a
+   GCC/Clang extension, not ISO C++; a std::string buffer would be
+   portable.  A short read is also not detected before the compare. */
+static void expect(std::istream & str, const string & s)
+{
+    char s2[s.size()];
+    str.read(s2, s.size());
+    if (string(s2, s.size()) != s)
+        throw FormatError(format("expected string ‘%1%’") % s);
+}
+
+
+/* Read a double-quoted, C-style-escaped string from stream `str'.
+   Supports \n, \r and \t; any other escaped character (including
+   ‘\"’ and ‘\\’) is taken literally. */
+static string parseString(std::istream & str)
+{
+    string res;
+    expect(str, "\"");
+    int c;
+    while ((c = str.get()) != '"')
+        if (c == '\\') {
+            c = str.get();
+            if (c == 'n') res += '\n';
+            else if (c == 'r') res += '\r';
+            else if (c == 't') res += '\t';
+            else res += c;
+        }
+        else res += c;
+    return res;
+}
+
+
+/* Read a quoted string and check that it is an absolute path. */
+static Path parsePath(std::istream & str)
+{
+    string s = parseString(str);
+    if (s.size() == 0 || s[0] != '/')
+        throw FormatError(format("bad path ‘%1%’ in derivation") % s);
+    return s;
+}
+
+
+/* List-separator helper: consume a ',' (another element follows,
+   return false) or ']' (end of list, return true).  Any other next
+   character is left unconsumed and treated as "not end of list"; the
+   caller's subsequent parse will then raise the error. */
+static bool endOfList(std::istream & str)
+{
+    if (str.peek() == ',') {
+        str.get();
+        return false;
+    }
+    if (str.peek() == ']') {
+        str.get();
+        return true;
+    }
+    return false;
+}
+
+
+/* Parse a list of quoted strings up to the closing ']'.  When
+   ‘arePaths’ is set, each element is additionally validated as an
+   absolute path. */
+static StringSet parseStrings(std::istream & str, bool arePaths)
+{
+    StringSet res;
+    while (!endOfList(str))
+        res.insert(arePaths ? parsePath(str) : parseString(str));
+    return res;
+}
+
+
+/* Parse the textual (ATerm-style) representation of a derivation, the
+   inverse of Derivation::unparse():
+     Derive([outputs],[inputDrvs],[inputSrcs],platform,builder,[args],[env]) */
+static Derivation parseDerivation(const string & s)
+{
+    Derivation drv;
+    istringstream_nocopy str(s);
+    expect(str, "Derive([");
+
+    /* Parse the list of outputs: (name, path, hashAlgo, hash). */
+    while (!endOfList(str)) {
+        DerivationOutput out;
+        expect(str, "("); string id = parseString(str);
+        expect(str, ","); out.path = parsePath(str);
+        expect(str, ","); out.hashAlgo = parseString(str);
+        expect(str, ","); out.hash = parseString(str);
+        expect(str, ")");
+        drv.outputs[id] = out;
+    }
+
+    /* Parse the list of input derivations: (drvPath, [outputNames]). */
+    expect(str, ",[");
+    while (!endOfList(str)) {
+        expect(str, "(");
+        Path drvPath = parsePath(str);
+        expect(str, ",[");
+        drv.inputDrvs[drvPath] = parseStrings(str, false);
+        expect(str, ")");
+    }
+
+    expect(str, ",["); drv.inputSrcs = parseStrings(str, true);
+    expect(str, ","); drv.platform = parseString(str);
+    expect(str, ","); drv.builder = parseString(str);
+
+    /* Parse the builder arguments. */
+    expect(str, ",[");
+    while (!endOfList(str))
+        drv.args.push_back(parseString(str));
+
+    /* Parse the environment variables: (name, value) pairs. */
+    expect(str, ",[");
+    while (!endOfList(str)) {
+        expect(str, "("); string name = parseString(str);
+        expect(str, ","); string value = parseString(str);
+        expect(str, ")");
+        drv.env[name] = value;
+    }
+
+    expect(str, ")");
+    return drv;
+}
+
+
+/* Read and parse the derivation stored in the file ‘drvPath’,
+   rewrapping parse errors with the offending path. */
+Derivation readDerivation(const Path & drvPath)
+{
+    try {
+        return parseDerivation(readFile(drvPath));
+    } catch (FormatError & e) {
+        throw Error(format("error parsing derivation ‘%1%’: %2%") % drvPath % e.msg());
+    }
+}
+
+
+/* Read a derivation through the store: validates that ‘drvPath’ is a
+   store path, makes it present (substituting if needed), then reads
+   it via the store's FS accessor so this also works for non-local
+   stores. */
+Derivation Store::derivationFromPath(const Path & drvPath)
+{
+    assertStorePath(drvPath);
+    ensurePath(drvPath);
+    auto accessor = getFSAccessor();
+    try {
+        return parseDerivation(accessor->readFile(drvPath));
+    } catch (FormatError & e) {
+        throw Error(format("error parsing derivation ‘%1%’: %2%") % drvPath % e.msg());
+    }
+}
+
+
+/* Append the quoted, escaped form of ‘s’ to ‘res’ (inverse of
+   parseString): backslash, double quote, newline, CR and tab are
+   escaped.
+   NOTE(review): iterating via c_str() stops at the first NUL byte, so
+   a string containing an embedded ‘\0’ would be silently truncated. */
+static void printString(string & res, const string & s)
+{
+    res += '"';
+    for (const char * i = s.c_str(); *i; i++)
+        if (*i == '\"' || *i == '\\') { res += "\\"; res += *i; }
+        else if (*i == '\n') res += "\\n";
+        else if (*i == '\r') res += "\\r";
+        else if (*i == '\t') res += "\\t";
+        else res += *i;
+    res += '"';
+}
+
+
+/* Append a comma-separated, bracketed list of quoted strings
+   ‘[a,b,...]’ to ‘res’, one element per iterator position. */
+template<class ForwardIterator>
+static void printStrings(string & res, ForwardIterator i, ForwardIterator j)
+{
+    res += '[';
+    bool first = true;
+    for ( ; i != j; ++i) {
+        if (first) first = false; else res += ',';
+        printString(res, *i);
+    }
+    res += ']';
+}
+
+
+/* Serialise this derivation into its textual form,
+     Derive([outputs],[inputDrvs],[inputSrcs],platform,builder,[args],[env]),
+   the exact inverse of parseDerivation(). */
+string Derivation::unparse() const
+{
+    string s;
+    s.reserve(65536);  // avoid repeated reallocation for typical sizes
+    s += "Derive([";
+
+    /* Outputs: (name, path, hashAlgo, hash). */
+    bool first = true;
+    for (auto & i : outputs) {
+        if (first) first = false; else s += ',';
+        s += '('; printString(s, i.first);
+        s += ','; printString(s, i.second.path);
+        s += ','; printString(s, i.second.hashAlgo);
+        s += ','; printString(s, i.second.hash);
+        s += ')';
+    }
+
+    /* Input derivations: (drvPath, [outputNames]). */
+    s += "],[";
+    first = true;
+    for (auto & i : inputDrvs) {
+        if (first) first = false; else s += ',';
+        s += '('; printString(s, i.first);
+        s += ','; printStrings(s, i.second.begin(), i.second.end());
+        s += ')';
+    }
+
+    s += "],";
+    printStrings(s, inputSrcs.begin(), inputSrcs.end());
+
+    s += ','; printString(s, platform);
+    s += ','; printString(s, builder);
+    s += ','; printStrings(s, args.begin(), args.end());
+
+    /* Environment: (name, value) pairs. */
+    s += ",[";
+    first = true;
+    for (auto & i : env) {
+        if (first) first = false; else s += ',';
+        s += '('; printString(s, i.first);
+        s += ','; printString(s, i.second);
+        s += ')';
+    }
+
+    s += "])";
+
+    return s;
+}
+
+
+/* A file is a derivation iff its name ends in ".drv". */
+bool isDerivation(const string & fileName)
+{
+    return hasSuffix(fileName, drvExtension);
+}
+
+
+/* A derivation is fixed-output iff it has exactly one output, named
+   "out", with a declared expected hash. */
+bool BasicDerivation::isFixedOutput() const
+{
+    return outputs.size() == 1 &&
+        outputs.begin()->first == "out" &&
+        outputs.begin()->second.hash != "";
+}
+
+
+DrvHashes drvHashes;
+
+
+/* Returns the hash of a derivation modulo fixed-output
+ subderivations. A fixed-output derivation is a derivation with one
+ output (`out') for which an expected hash and hash algorithm are
+ specified (using the `outputHash' and `outputHashAlgo'
+ attributes). We don't want changes to such derivations to
+ propagate upwards through the dependency graph, changing output
+ paths everywhere.
+
+ For instance, if we change the url in a call to the `fetchurl'
+ function, we do not want to rebuild everything depending on it
+ (after all, (the hash of) the file being downloaded is unchanged).
+ So the *output paths* should not change. On the other hand, the
+ *derivation paths* should change to reflect the new dependency
+ graph.
+
+ That's what this function does: it returns a hash which is just the
+ hash of the derivation ATerm, except that any input derivation
+ paths have been replaced by the result of a recursive call to this
+ function, and that for fixed-output derivations we return a hash of
+ its output path. */
+Hash hashDerivationModulo(Store & store, Derivation drv)
+{
+    /* Return a fixed hash for fixed-output derivations, derived only
+       from the declared output hash and path — not from the inputs. */
+    if (drv.isFixedOutput()) {
+        DerivationOutputs::const_iterator i = drv.outputs.begin();
+        return hashString(htSHA256, "fixed:out:"
+            + i->second.hashAlgo + ":"
+            + i->second.hash + ":"
+            + i->second.path);
+    }
+
+    /* For other derivations, replace the inputs paths with recursive
+       calls to this function.  Results are memoised in the global
+       drvHashes map. */
+    DerivationInputs inputs2;
+    for (auto & i : drv.inputDrvs) {
+        Hash h = drvHashes[i.first];
+        if (!h) {
+            assert(store.isValidPath(i.first));
+            Derivation drv2 = readDerivation(i.first);
+            h = hashDerivationModulo(store, drv2);
+            drvHashes[i.first] = h;
+        }
+        inputs2[printHash(h)] = i.second;
+    }
+    drv.inputDrvs = inputs2;
+
+    return hashString(htSHA256, drv.unparse());
+}
+
+
+/* Split "path!out1,out2" into (path, {out1, out2}); a plain path
+   yields an empty output set (meaning "all outputs"). */
+DrvPathWithOutputs parseDrvPathWithOutputs(const string & s)
+{
+    size_t n = s.find("!");
+    return n == s.npos
+        ? DrvPathWithOutputs(s, std::set<string>())
+        : DrvPathWithOutputs(string(s, 0, n), tokenizeString<std::set<string> >(string(s, n + 1), ","));
+}
+
+
+/* Inverse of parseDrvPathWithOutputs: append "!out1,out2" unless the
+   output set is empty. */
+Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs)
+{
+    return outputs.empty()
+        ? drvPath
+        : drvPath + "!" + concatStringsSep(",", outputs);
+}
+
+
+/* An empty ‘wanted’ set selects every output. */
+bool wantOutput(const string & output, const std::set<string> & wanted)
+{
+    return wanted.empty() || wanted.find(output) != wanted.end();
+}
+
+
+/* Collect the store paths of all outputs of this derivation. */
+PathSet BasicDerivation::outputPaths() const
+{
+    PathSet paths;
+    for (auto & i : outputs)
+        paths.insert(i.second.path);
+    return paths;
+}
+
+
+/* Deserialise a BasicDerivation from the worker-protocol wire format
+   (the inverse of operator<< below); output paths are validated as
+   store paths.  Returns ‘in’ for chaining. */
+Source & readDerivation(Source & in, Store & store, BasicDerivation & drv)
+{
+    drv.outputs.clear();
+    auto nr = readNum<size_t>(in);
+    for (size_t n = 0; n < nr; n++) {
+        auto name = readString(in);
+        DerivationOutput o;
+        in >> o.path >> o.hashAlgo >> o.hash;
+        store.assertStorePath(o.path);
+        drv.outputs[name] = o;
+    }
+
+    drv.inputSrcs = readStorePaths<PathSet>(store, in);
+    in >> drv.platform >> drv.builder;
+    drv.args = readStrings<Strings>(in);
+
+    /* Environment variables: count followed by key/value pairs. */
+    nr = readNum<size_t>(in);
+    for (size_t n = 0; n < nr; n++) {
+        auto key = readString(in);
+        auto value = readString(in);
+        drv.env[key] = value;
+    }
+
+    return in;
+}
+
+
+/* Serialise a BasicDerivation in the worker-protocol wire format (the
+   inverse of readDerivation(Source &, ...) above). */
+Sink & operator << (Sink & out, const BasicDerivation & drv)
+{
+    out << drv.outputs.size();
+    for (auto & i : drv.outputs)
+        out << i.first << i.second.path << i.second.hashAlgo << i.second.hash;
+    out << drv.inputSrcs << drv.platform << drv.builder << drv.args;
+    out << drv.env.size();
+    for (auto & i : drv.env)
+        out << i.first << i.second;
+    return out;
+}
+
+
+/* Return a deterministic placeholder string for the given output
+   name: "/" followed by the base-32 SHA-256 of "nix-output:<name>".
+   The leading "/" makes it syntactically path-like but distinct from
+   any real store path. */
+std::string hashPlaceholder(const std::string & outputName)
+{
+    // FIXME: memoize?
+    return "/" + printHash32(hashString(htSHA256, "nix-output:" + outputName));
+}
+
+
+}
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
new file mode 100644
index 000000000..9717a81e4
--- /dev/null
+++ b/src/libstore/derivations.hh
@@ -0,0 +1,122 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+
+#include <map>
+
+
+namespace nix {
+
+
+/* Extension of derivations in the Nix store. */
+const string drvExtension = ".drv";
+
+
+/* Abstract syntax of derivations. */
+
+/* One output of a derivation: its store path plus, for fixed-output
+   derivations, the expected hash and its algorithm. */
+struct DerivationOutput
+{
+    Path path;        /* store path of this output */
+    string hashAlgo;  /* hash used for expected hash computation;
+                         "r:<algo>" marks a recursive hash */
+    string hash;      /* expected hash, may be null */
+    DerivationOutput()
+    {
+    }
+    DerivationOutput(Path path, string hashAlgo, string hash)
+    {
+        this->path = path;
+        this->hashAlgo = hashAlgo;
+        this->hash = hash;
+    }
+    /* Decode hashAlgo/hash into a recursive flag and a parsed Hash. */
+    void parseHashInfo(bool & recursive, Hash & hash) const;
+};
+
+typedef std::map<string, DerivationOutput> DerivationOutputs;
+
+/* For inputs that are sub-derivations, we specify exactly which
+ output IDs we are interested in. */
+typedef std::map<Path, StringSet> DerivationInputs;
+
+typedef std::map<string, string> StringPairs;
+
+/* A derivation without its input-derivation edges: everything needed
+   to run the builder (see Derivation below for the full graph node). */
+struct BasicDerivation
+{
+    DerivationOutputs outputs; /* keyed on symbolic IDs */
+    PathSet inputSrcs; /* inputs that are sources */
+    string platform;   /* system type the builder must run on */
+    Path builder;      /* program to execute, or "builtin:<name>" */
+    Strings args;      /* command-line arguments for the builder */
+    StringPairs env;   /* environment passed to the builder */
+
+    virtual ~BasicDerivation() { };
+
+    /* Return the path corresponding to the output identifier `id' in
+       the given derivation. */
+    Path findOutput(const string & id) const;
+
+    /* Whether the derivation prefers (and is able) to build locally. */
+    bool willBuildLocally() const;
+
+    /* Whether substitution is allowed (it is unless opted out). */
+    bool substitutesAllowed() const;
+
+    /* Whether the builder is a "builtin:" function. */
+    bool isBuiltin() const;
+
+    /* Whether this machine can execute the builder. */
+    bool canBuildLocally() const;
+
+    /* Return true iff this is a fixed-output derivation. */
+    bool isFixedOutput() const;
+
+    /* Return the output paths of a derivation. */
+    PathSet outputPaths() const;
+
+};
+
+/* A full derivation: a BasicDerivation plus its edges to input
+   derivations (and which of their outputs are needed). */
+struct Derivation : BasicDerivation
+{
+    DerivationInputs inputDrvs; /* inputs that are sub-derivations */
+
+    /* Serialise to the textual "Derive(...)" representation. */
+    std::string unparse() const;
+};
+
+
+class Store;
+
+
+/* Write a derivation to the Nix store, and return its path. */
+Path writeDerivation(ref<Store> store,
+ const Derivation & drv, const string & name, bool repair = false);
+
+/* Read a derivation from a file. */
+Derivation readDerivation(const Path & drvPath);
+
+/* Check whether a file name ends with the extension for
+ derivations. */
+bool isDerivation(const string & fileName);
+
+Hash hashDerivationModulo(Store & store, Derivation drv);
+
+/* Memoisation of hashDerivationModulo(). */
+typedef std::map<Path, Hash> DrvHashes;
+
+extern DrvHashes drvHashes; // FIXME: global, not thread-safe
+
+/* Split a string specifying a derivation and a set of outputs
+ (/nix/store/hash-foo!out1,out2,...) into the derivation path and
+ the outputs. */
+typedef std::pair<string, std::set<string> > DrvPathWithOutputs;
+DrvPathWithOutputs parseDrvPathWithOutputs(const string & s);
+
+Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs);
+
+bool wantOutput(const string & output, const std::set<string> & wanted);
+
+struct Source;
+struct Sink;
+
+Source & readDerivation(Source & in, Store & store, BasicDerivation & drv);
+Sink & operator << (Sink & out, const BasicDerivation & drv);
+
+std::string hashPlaceholder(const std::string & outputName);
+
+}
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
new file mode 100644
index 000000000..4d502219e
--- /dev/null
+++ b/src/libstore/download.cc
@@ -0,0 +1,708 @@
+#include "download.hh"
+#include "util.hh"
+#include "globals.hh"
+#include "hash.hh"
+#include "store-api.hh"
+#include "archive.hh"
+#include "s3.hh"
+#include "compression.hh"
+
+#ifdef ENABLE_S3
+#include <aws/core/client/ClientConfiguration.h>
+#endif
+
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <curl/curl.h>
+
+#include <queue>
+#include <iostream>
+#include <thread>
+#include <cmath>
+#include <random>
+
+namespace nix {
+
/* Return the current wall-clock time as fractional seconds since the
   epoch (microsecond resolution). */
double getTime()
{
    struct timeval now;
    gettimeofday(&now, nullptr);
    return now.tv_sec + (now.tv_usec / 1000000.0);
}
+
/* Expand the ‘channel:<name>’ shorthand into the corresponding
   nixos.org channel tarball URL; all other URIs pass through
   unchanged. */
std::string resolveUri(const std::string & uri)
{
    static const std::string prefix = "channel:";
    if (uri.compare(0, prefix.size(), prefix) != 0)
        return uri;
    return "https://nixos.org/channels/" + uri.substr(prefix.size()) + "/nixexprs.tar.xz";
}
+
+ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data)
+{
+ if (encoding == "")
+ return data;
+ else if (encoding == "br")
+ return decompress(encoding, *data);
+ else
+ throw Error("unsupported Content-Encoding ‘%s’", encoding);
+}
+
+struct CurlDownloader : public Downloader
+{
+ CURLM * curlm = 0;
+
+ std::random_device rd;
+ std::mt19937 mt19937;
+
+ bool enableHttp2;
+
+ struct DownloadItem : public std::enable_shared_from_this<DownloadItem>
+ {
+ CurlDownloader & downloader;
+ DownloadRequest request;
+ DownloadResult result;
+ bool done = false; // whether either the success or failure function has been called
+ std::function<void(const DownloadResult &)> success;
+ std::function<void(std::exception_ptr exc)> failure;
+ CURL * req = 0;
+ bool active = false; // whether the handle has been added to the multi object
+ std::string status;
+
+ bool showProgress = false;
+ double prevProgressTime{0}, startTime{0};
+ unsigned int moveBack{1};
+
+ unsigned int attempt = 0;
+
+ /* Don't start this download until the specified time point
+ has been reached. */
+ std::chrono::steady_clock::time_point embargo;
+
+ struct curl_slist * requestHeaders = 0;
+
+ std::string encoding;
+
+ DownloadItem(CurlDownloader & downloader, const DownloadRequest & request)
+ : downloader(downloader), request(request)
+ {
+ showProgress =
+ request.showProgress == DownloadRequest::yes ||
+ (request.showProgress == DownloadRequest::automatic && isatty(STDERR_FILENO));
+
+ if (!request.expectedETag.empty())
+ requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
+ }
+
+ ~DownloadItem()
+ {
+ if (req) {
+ if (active)
+ curl_multi_remove_handle(downloader.curlm, req);
+ curl_easy_cleanup(req);
+ }
+ if (requestHeaders) curl_slist_free_all(requestHeaders);
+ try {
+ if (!done)
+ fail(DownloadError(Interrupted, format("download of ‘%s’ was interrupted") % request.uri));
+ } catch (...) {
+ ignoreException();
+ }
+ }
+
+ template<class T>
+ void fail(const T & e)
+ {
+ assert(!done);
+ done = true;
+ callFailure(failure, std::make_exception_ptr(e));
+ }
+
+ size_t writeCallback(void * contents, size_t size, size_t nmemb)
+ {
+ size_t realSize = size * nmemb;
+ result.data->append((char *) contents, realSize);
+ return realSize;
+ }
+
+ static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
+ {
+ return ((DownloadItem *) userp)->writeCallback(contents, size, nmemb);
+ }
+
+ size_t headerCallback(void * contents, size_t size, size_t nmemb)
+ {
+ size_t realSize = size * nmemb;
+ std::string line((char *) contents, realSize);
+ printMsg(lvlVomit, format("got header for ‘%s’: %s") % request.uri % trim(line));
+ if (line.compare(0, 5, "HTTP/") == 0) { // new response starts
+ result.etag = "";
+ auto ss = tokenizeString<vector<string>>(line, " ");
+ status = ss.size() >= 2 ? ss[1] : "";
+ result.data = std::make_shared<std::string>();
+ encoding = "";
+ } else {
+ auto i = line.find(':');
+ if (i != string::npos) {
+ string name = toLower(trim(string(line, 0, i)));
+ if (name == "etag") {
+ result.etag = trim(string(line, i + 1));
+ /* Hack to work around a GitHub bug: it sends
+ ETags, but ignores If-None-Match. So if we get
+ the expected ETag on a 200 response, then shut
+ down the connection because we already have the
+ data. */
+ if (result.etag == request.expectedETag && status == "200") {
+ debug(format("shutting down on 200 HTTP response with expected ETag"));
+ return 0;
+ }
+ } else if (name == "content-encoding")
+ encoding = trim(string(line, i + 1));;
+ }
+ }
+ return realSize;
+ }
+
+ static size_t headerCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
+ {
+ return ((DownloadItem *) userp)->headerCallback(contents, size, nmemb);
+ }
+
+ int progressCallback(double dltotal, double dlnow)
+ {
+ if (showProgress) {
+ double now = getTime();
+ if (prevProgressTime <= now - 1) {
+ string s = (format(" [%1$.0f/%2$.0f KiB, %3$.1f KiB/s]")
+ % (dlnow / 1024.0)
+ % (dltotal / 1024.0)
+ % (now == startTime ? 0 : dlnow / 1024.0 / (now - startTime))).str();
+ std::cerr << "\e[" << moveBack << "D" << s;
+ moveBack = s.size();
+ std::cerr.flush();
+ prevProgressTime = now;
+ }
+ }
+ return _isInterrupted;
+ }
+
+ static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
+ {
+ return ((DownloadItem *) userp)->progressCallback(dltotal, dlnow);
+ }
+
+ static int debugCallback(CURL * handle, curl_infotype type, char * data, size_t size, void * userptr)
+ {
+ if (type == CURLINFO_TEXT)
+ vomit("curl: %s", chomp(std::string(data, size)));
+ return 0;
+ }
+
+ void init()
+ {
+ // FIXME: handle parallel downloads.
+ if (showProgress) {
+ std::cerr << (format("downloading ‘%1%’... ") % request.uri);
+ std::cerr.flush();
+ startTime = getTime();
+ }
+
+ if (!req) req = curl_easy_init();
+
+ curl_easy_reset(req);
+
+ if (verbosity >= lvlVomit) {
+ curl_easy_setopt(req, CURLOPT_VERBOSE, 1);
+ curl_easy_setopt(req, CURLOPT_DEBUGFUNCTION, DownloadItem::debugCallback);
+ }
+
+ curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str());
+ curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L);
+ curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
+ curl_easy_setopt(req, CURLOPT_USERAGENT, ("curl/" LIBCURL_VERSION " Nix/" + nixVersion).c_str());
+ #if LIBCURL_VERSION_NUM >= 0x072b00
+ curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1);
+ #endif
+ #if LIBCURL_VERSION_NUM >= 0x072f00
+ if (downloader.enableHttp2)
+ curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
+ #endif
+ curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, DownloadItem::writeCallbackWrapper);
+ curl_easy_setopt(req, CURLOPT_WRITEDATA, this);
+ curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, DownloadItem::headerCallbackWrapper);
+ curl_easy_setopt(req, CURLOPT_HEADERDATA, this);
+
+ curl_easy_setopt(req, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper);
+ curl_easy_setopt(req, CURLOPT_PROGRESSDATA, this);
+ curl_easy_setopt(req, CURLOPT_NOPROGRESS, 0);
+
+ curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders);
+
+ if (request.head)
+ curl_easy_setopt(req, CURLOPT_NOBODY, 1);
+
+ if (request.verifyTLS)
+ curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
+ else {
+ curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0);
+ curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
+ }
+
+ curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, settings.connectTimeout.get());
+
+ /* If no file exist in the specified path, curl continues to work
+ anyway as if netrc support was disabled. */
+ curl_easy_setopt(req, CURLOPT_NETRC_FILE, settings.netrcFile.get().c_str());
+ curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL);
+
+ result.data = std::make_shared<std::string>();
+ }
+
+ void finish(CURLcode code)
+ {
+ if (showProgress)
+ //std::cerr << "\e[" << moveBack << "D\e[K\n";
+ std::cerr << "\n";
+
+ long httpStatus = 0;
+ curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);
+
+ char * effectiveUrlCStr;
+ curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUrlCStr);
+ if (effectiveUrlCStr)
+ result.effectiveUrl = effectiveUrlCStr;
+
+ debug(format("finished download of ‘%s’; curl status = %d, HTTP status = %d, body = %d bytes")
+ % request.uri % code % httpStatus % (result.data ? result.data->size() : 0));
+
+ if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) {
+ code = CURLE_OK;
+ httpStatus = 304;
+ }
+
+ if (code == CURLE_OK &&
+ (httpStatus == 200 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
+ {
+ result.cached = httpStatus == 304;
+ done = true;
+
+ try {
+ result.data = decodeContent(encoding, ref<std::string>(result.data));
+ callSuccess(success, failure, const_cast<const DownloadResult &>(result));
+ } catch (...) {
+ done = true;
+ callFailure(failure, std::current_exception());
+ }
+ } else {
+ Error err =
+ (httpStatus == 404 || code == CURLE_FILE_COULDNT_READ_FILE) ? NotFound :
+ httpStatus == 403 ? Forbidden :
+ (httpStatus == 408 || httpStatus == 500 || httpStatus == 503
+ || httpStatus == 504 || httpStatus == 522 || httpStatus == 524
+ || code == CURLE_COULDNT_RESOLVE_HOST
+ || code == CURLE_RECV_ERROR
+
+ // this seems to occur occasionally for retriable reasons, and shows up in an error like this:
+ // curl: (23) Failed writing body (315 != 16366)
+ || code == CURLE_WRITE_ERROR
+
+ // this is a generic SSL failure that in some cases (e.g., certificate error) is permanent but also appears in transient cases, so we consider it retryable
+ || code == CURLE_SSL_CONNECT_ERROR
+#if LIBCURL_VERSION_NUM >= 0x073200
+ || code == CURLE_HTTP2
+ || code == CURLE_HTTP2_STREAM
+#endif
+ ) ? Transient :
+ Misc;
+
+ attempt++;
+
+ auto exc =
+ code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
+ ? DownloadError(Interrupted, format("download of ‘%s’ was interrupted") % request.uri)
+ : httpStatus != 0
+ ? DownloadError(err, format("unable to download ‘%s’: HTTP error %d (curl error: %s)") % request.uri % httpStatus % curl_easy_strerror(code))
+ : DownloadError(err, format("unable to download ‘%s’: %s (%d)") % request.uri % curl_easy_strerror(code) % code);
+
+ /* If this is a transient error, then maybe retry the
+ download after a while. */
+ if (err == Transient && attempt < request.tries) {
+ int ms = request.baseRetryTimeMs * std::pow(2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(downloader.mt19937));
+ printError(format("warning: %s; retrying in %d ms") % exc.what() % ms);
+ embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms);
+ downloader.enqueueItem(shared_from_this());
+ }
+ else
+ fail(exc);
+ }
+ }
+ };
+
+ struct State
+ {
+ struct EmbargoComparator {
+ bool operator() (const std::shared_ptr<DownloadItem> & i1, const std::shared_ptr<DownloadItem> & i2) {
+ return i1->embargo > i2->embargo;
+ }
+ };
+ bool quit = false;
+ std::priority_queue<std::shared_ptr<DownloadItem>, std::vector<std::shared_ptr<DownloadItem>>, EmbargoComparator> incoming;
+ };
+
+ Sync<State> state_;
+
+ /* We can't use a std::condition_variable to wake up the curl
+ thread, because it only monitors file descriptors. So use a
+ pipe instead. */
+ Pipe wakeupPipe;
+
+ std::thread workerThread;
+
+ CurlDownloader()
+ : mt19937(rd())
+ {
+ static std::once_flag globalInit;
+ std::call_once(globalInit, curl_global_init, CURL_GLOBAL_ALL);
+
+ curlm = curl_multi_init();
+
+ #if LIBCURL_VERSION_NUM >= 0x072b00 // correct?
+ curl_multi_setopt(curlm, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
+ #endif
+ curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS,
+ settings.binaryCachesParallelConnections.get());
+
+ enableHttp2 = settings.enableHttp2;
+
+ wakeupPipe.create();
+ fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK);
+
+ workerThread = std::thread([&]() { workerThreadEntry(); });
+ }
+
+ ~CurlDownloader()
+ {
+ stopWorkerThread();
+
+ workerThread.join();
+
+ if (curlm) curl_multi_cleanup(curlm);
+ }
+
+ void stopWorkerThread()
+ {
+ /* Signal the worker thread to exit. */
+ {
+ auto state(state_.lock());
+ state->quit = true;
+ }
+ writeFull(wakeupPipe.writeSide.get(), " ", false);
+ }
+
+ void workerThreadMain()
+ {
+ /* Cause this thread to be notified on SIGINT. */
+ auto callback = createInterruptCallback([&]() {
+ stopWorkerThread();
+ });
+
+ std::map<CURL *, std::shared_ptr<DownloadItem>> items;
+
+ bool quit = false;
+
+ std::chrono::steady_clock::time_point nextWakeup;
+
+ while (!quit) {
+ checkInterrupt();
+
+ /* Let curl do its thing. */
+ int running;
+ CURLMcode mc = curl_multi_perform(curlm, &running);
+ if (mc != CURLM_OK)
+ throw nix::Error(format("unexpected error from curl_multi_perform(): %s") % curl_multi_strerror(mc));
+
+ /* Set the promises of any finished requests. */
+ CURLMsg * msg;
+ int left;
+ while ((msg = curl_multi_info_read(curlm, &left))) {
+ if (msg->msg == CURLMSG_DONE) {
+ auto i = items.find(msg->easy_handle);
+ assert(i != items.end());
+ i->second->finish(msg->data.result);
+ curl_multi_remove_handle(curlm, i->second->req);
+ i->second->active = false;
+ items.erase(i);
+ }
+ }
+
+ /* Wait for activity, including wakeup events. */
+ int numfds = 0;
+ struct curl_waitfd extraFDs[1];
+ extraFDs[0].fd = wakeupPipe.readSide.get();
+ extraFDs[0].events = CURL_WAIT_POLLIN;
+ extraFDs[0].revents = 0;
+ auto sleepTimeMs =
+ nextWakeup != std::chrono::steady_clock::time_point()
+ ? std::max(0, (int) std::chrono::duration_cast<std::chrono::milliseconds>(nextWakeup - std::chrono::steady_clock::now()).count())
+ : 1000000000;
+ vomit("download thread waiting for %d ms", sleepTimeMs);
+ mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds);
+ if (mc != CURLM_OK)
+ throw nix::Error(format("unexpected error from curl_multi_wait(): %s") % curl_multi_strerror(mc));
+
+ nextWakeup = std::chrono::steady_clock::time_point();
+
+ /* Add new curl requests from the incoming requests queue,
+ except for requests that are embargoed (waiting for a
+ retry timeout to expire). */
+ if (extraFDs[0].revents & CURL_WAIT_POLLIN) {
+ char buf[1024];
+ auto res = read(extraFDs[0].fd, buf, sizeof(buf));
+ if (res == -1 && errno != EINTR)
+ throw SysError("reading curl wakeup socket");
+ }
+
+ std::vector<std::shared_ptr<DownloadItem>> incoming;
+ auto now = std::chrono::steady_clock::now();
+
+ {
+ auto state(state_.lock());
+ while (!state->incoming.empty()) {
+ auto item = state->incoming.top();
+ if (item->embargo <= now) {
+ incoming.push_back(item);
+ state->incoming.pop();
+ } else {
+ if (nextWakeup == std::chrono::steady_clock::time_point()
+ || item->embargo < nextWakeup)
+ nextWakeup = item->embargo;
+ break;
+ }
+ }
+ quit = state->quit;
+ }
+
+ for (auto & item : incoming) {
+ debug(format("starting download of %s") % item->request.uri);
+ item->init();
+ curl_multi_add_handle(curlm, item->req);
+ item->active = true;
+ items[item->req] = item;
+ }
+ }
+
+ debug("download thread shutting down");
+ }
+
+ void workerThreadEntry()
+ {
+ try {
+ workerThreadMain();
+ } catch (nix::Interrupted & e) {
+ } catch (std::exception & e) {
+ printError(format("unexpected error in download thread: %s") % e.what());
+ }
+
+ {
+ auto state(state_.lock());
+ while (!state->incoming.empty()) state->incoming.pop();
+ state->quit = true;
+ }
+ }
+
+ void enqueueItem(std::shared_ptr<DownloadItem> item)
+ {
+ {
+ auto state(state_.lock());
+ if (state->quit)
+ throw nix::Error("cannot enqueue download request because the download thread is shutting down");
+ state->incoming.push(item);
+ }
+ writeFull(wakeupPipe.writeSide.get(), " ");
+ }
+
+ void enqueueDownload(const DownloadRequest & request,
+ std::function<void(const DownloadResult &)> success,
+ std::function<void(std::exception_ptr exc)> failure) override
+ {
+ /* Ugly hack to support s3:// URIs. */
+ if (hasPrefix(request.uri, "s3://")) {
+ // FIXME: do this on a worker thread
+ sync2async<DownloadResult>(success, failure, [&]() -> DownloadResult {
+#ifdef ENABLE_S3
+ S3Helper s3Helper(Aws::Region::US_EAST_1); // FIXME: make configurable
+ auto slash = request.uri.find('/', 5);
+ if (slash == std::string::npos)
+ throw nix::Error("bad S3 URI ‘%s’", request.uri);
+ std::string bucketName(request.uri, 5, slash - 5);
+ std::string key(request.uri, slash + 1);
+ // FIXME: implement ETag
+ auto s3Res = s3Helper.getObject(bucketName, key);
+ DownloadResult res;
+ if (!s3Res.data)
+ throw DownloadError(NotFound, fmt("S3 object ‘%s’ does not exist", request.uri));
+ res.data = s3Res.data;
+ return res;
+#else
+ throw nix::Error("cannot download ‘%s’ because Nix is not built with S3 support", request.uri);
+#endif
+ });
+ return;
+ }
+
+ auto item = std::make_shared<DownloadItem>(*this, request);
+ item->success = success;
+ item->failure = failure;
+ enqueueItem(item);
+ }
+};
+
+ref<Downloader> getDownloader()
+{
+ static std::shared_ptr<Downloader> downloader;
+ static std::once_flag downloaderCreated;
+ std::call_once(downloaderCreated, [&]() { downloader = makeDownloader(); });
+ return ref<Downloader>(downloader);
+}
+
+ref<Downloader> makeDownloader()
+{
+ return make_ref<CurlDownloader>();
+}
+
+std::future<DownloadResult> Downloader::enqueueDownload(const DownloadRequest & request)
+{
+ auto promise = std::make_shared<std::promise<DownloadResult>>();
+ enqueueDownload(request,
+ [promise](const DownloadResult & result) { promise->set_value(result); },
+ [promise](std::exception_ptr exc) { promise->set_exception(exc); });
+ return promise->get_future();
+}
+
+DownloadResult Downloader::download(const DownloadRequest & request)
+{
+ return enqueueDownload(request).get();
+}
+
/* Download ‘url_’, caching both the result (in the Nix store) and
   the server's ETag (in ~/.cache/nix/tarballs) so that repeated
   calls within ‘tarball-ttl’ seconds are free, and later calls can
   revalidate via If-None-Match instead of re-downloading. */
Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpack, string name, const Hash & expectedHash, string * effectiveUrl)
{
    auto url = resolveUri(url_);

    /* Derive a store name from the last URL component if none given. */
    if (name == "") {
        auto p = url.rfind('/');
        if (p != string::npos) name = string(url, p + 1);
    }

    /* If the expected fixed-output path already exists, we're done. */
    Path expectedStorePath;
    if (expectedHash) {
        expectedStorePath = store->makeFixedOutputPath(unpack, expectedHash, name);
        if (store->isValidPath(expectedStorePath))
            return expectedStorePath;
    }

    Path cacheDir = getCacheDir() + "/nix/tarballs";
    createDirs(cacheDir);

    string urlHash = printHash32(hashString(htSHA256, url));

    Path dataFile = cacheDir + "/" + urlHash + ".info"; // three lines: url, etag, last-checked timestamp
    Path fileLink = cacheDir + "/" + urlHash + "-file"; // symlink to the downloaded store path

    Path storePath;

    string expectedETag;

    int ttl = settings.tarballTtl;
    bool skip = false; // whether the cached copy is fresh enough to use without contacting the server

    if (pathExists(fileLink) && pathExists(dataFile)) {
        storePath = readLink(fileLink);
        store->addTempRoot(storePath); // protect from GC while we check validity
        if (store->isValidPath(storePath)) {
            auto ss = tokenizeString<vector<string>>(readFile(dataFile), "\n");
            if (ss.size() >= 3 && ss[0] == url) {
                time_t lastChecked;
                if (string2Int(ss[2], lastChecked) && lastChecked + ttl >= time(0)) {
                    /* Within TTL: no network access needed. */
                    skip = true;
                    if (effectiveUrl)
                        *effectiveUrl = url_;
                } else if (!ss[1].empty()) {
                    debug(format("verifying previous ETag ‘%1%’") % ss[1]);
                    expectedETag = ss[1];
                }
            }
        } else
            storePath = "";
    }

    if (!skip) {

        try {
            DownloadRequest request(url);
            request.expectedETag = expectedETag;
            auto res = download(request);
            if (effectiveUrl)
                *effectiveUrl = res.effectiveUrl;

            /* res.cached (HTTP 304) means the existing store path is
               still good; only add fresh data to the store. */
            if (!res.cached) {
                ValidPathInfo info;
                StringSink sink;
                dumpString(*res.data, sink);
                Hash hash = hashString(expectedHash ? expectedHash.type : htSHA256, *res.data);
                info.path = store->makeFixedOutputPath(false, hash, name);
                info.narHash = hashString(htSHA256, *sink.s);
                info.ca = makeFixedOutputCA(false, hash);
                store->addToStore(info, sink.s, false, true);
                storePath = info.path;
            }

            assert(!storePath.empty());
            replaceSymlink(storePath, fileLink);

            /* Record url/etag/timestamp for the next revalidation. */
            writeFile(dataFile, url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n");
        } catch (DownloadError & e) {
            /* Network failure: fall back to a stale cached copy if we
               have one. */
            if (storePath.empty()) throw;
            printError(format("warning: %1%; using cached result") % e.msg());
        }
    }

    if (unpack) {
        /* The unpacked tree is cached separately, keyed on the packed
           store path's basename. */
        Path unpackedLink = cacheDir + "/" + baseNameOf(storePath) + "-unpacked";
        Path unpackedStorePath;
        if (pathExists(unpackedLink)) {
            unpackedStorePath = readLink(unpackedLink);
            store->addTempRoot(unpackedStorePath);
            if (!store->isValidPath(unpackedStorePath))
                unpackedStorePath = "";
        }
        if (unpackedStorePath.empty()) {
            printInfo(format("unpacking ‘%1%’...") % url);
            Path tmpDir = createTempDir();
            AutoDelete autoDelete(tmpDir, true);
            // FIXME: this requires GNU tar for decompression.
            runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"});
            unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, false);
        }
        replaceSymlink(unpackedStorePath, unpackedLink);
        storePath = unpackedStorePath;
    }

    if (expectedStorePath != "" && storePath != expectedStorePath)
        throw nix::Error(format("hash mismatch in file downloaded from ‘%s’") % url);

    return storePath;
}
+
+
/* Heuristically decide whether ‘s’ is a URI we know how to fetch:
   either the ‘channel:’ shorthand or one of the supported schemes. */
bool isUri(const std::string & s)
{
    if (s.compare(0, 8, "channel:") == 0) return true;
    auto pos = s.find("://");
    if (pos == std::string::npos) return false;
    auto scheme = s.substr(0, pos);
    return scheme == "http" || scheme == "https" || scheme == "file"
        || scheme == "channel" || scheme == "git" || scheme == "s3";
}
+
+
+}
diff --git a/src/libstore/download.hh b/src/libstore/download.hh
new file mode 100644
index 000000000..62f3860b9
--- /dev/null
+++ b/src/libstore/download.hh
@@ -0,0 +1,79 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+
+#include <string>
+#include <future>
+
+namespace nix {
+
/* Parameters for a single download. */
struct DownloadRequest
{
    std::string uri;
    std::string expectedETag;           // if non-empty, sent as If-None-Match for revalidation
    bool verifyTLS = true;              // if false, peer/host certificate checks are disabled
    enum { yes, no, automatic } showProgress = yes; // ‘automatic’: show only when stderr is a TTY
    bool head = false;                  // perform a HEAD request (no response body)
    size_t tries = 5;                   // total attempts allowed for transient errors
    unsigned int baseRetryTimeMs = 250; // initial retry delay; doubled (with jitter) per attempt

    DownloadRequest(const std::string & uri) : uri(uri) { }
};
+
/* Outcome of a completed download. */
struct DownloadResult
{
    bool cached = false;               // true on HTTP 304: the expected ETag matched, ‘data’ was not re-fetched
    std::string etag;                  // ETag reported by the server, if any
    std::string effectiveUrl;          // final URL after following redirects
    std::shared_ptr<std::string> data; // decoded response body
};
+
+class Store;
+
+struct Downloader
+{
+ /* Enqueue a download request, returning a future to the result of
+ the download. The future may throw a DownloadError
+ exception. */
+ virtual void enqueueDownload(const DownloadRequest & request,
+ std::function<void(const DownloadResult &)> success,
+ std::function<void(std::exception_ptr exc)> failure) = 0;
+
+ std::future<DownloadResult> enqueueDownload(const DownloadRequest & request);
+
+ /* Synchronously download a file. */
+ DownloadResult download(const DownloadRequest & request);
+
+ /* Check if the specified file is already in ~/.cache/nix/tarballs
+ and is more recent than ‘tarball-ttl’ seconds. Otherwise,
+ use the recorded ETag to verify if the server has a more
+ recent version, and if so, download it to the Nix store. */
+ Path downloadCached(ref<Store> store, const string & uri, bool unpack, string name = "",
+ const Hash & expectedHash = Hash(), string * effectiveUri = nullptr);
+
+ enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
+};
+
+/* Return a shared Downloader object. Using this object is preferred
+ because it enables connection reuse and HTTP/2 multiplexing. */
+ref<Downloader> getDownloader();
+
+/* Return a new Downloader object. */
+ref<Downloader> makeDownloader();
+
/* Exception thrown on download failure; carries the failure
   category so callers can distinguish e.g. NotFound from transient
   network errors. */
class DownloadError : public Error
{
public:
    Downloader::Error error;
    DownloadError(Downloader::Error error, const FormatOrString & fs)
        : Error(fs), error(error)
    { }
};
+
+bool isUri(const string & s);
+
+/* Decode data according to the Content-Encoding header. */
+ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data);
+
+}
diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc
new file mode 100644
index 000000000..2b8ab063e
--- /dev/null
+++ b/src/libstore/export-import.cc
@@ -0,0 +1,106 @@
+#include "store-api.hh"
+#include "archive.hh"
+#include "worker-protocol.hh"
+
+#include <algorithm>
+
+namespace nix {
+
/* A Sink decorator that forwards all data to an underlying sink
   while also feeding it through a SHA-256 HashSink, so callers can
   obtain a hash of everything written so far. */
struct HashAndWriteSink : Sink
{
    Sink & writeSink;   // the wrapped destination sink
    HashSink hashSink;  // accumulates SHA-256 over the same bytes
    HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
    {
    }
    virtual void operator () (const unsigned char * data, size_t len)
    {
        writeSink(data, len);
        hashSink(data, len);
    }
    /* Hash of all data written so far. */
    Hash currentHash()
    {
        return hashSink.currentHash().first;
    }
};
+
+void Store::exportPaths(const Paths & paths, Sink & sink)
+{
+ Paths sorted = topoSortPaths(PathSet(paths.begin(), paths.end()));
+ std::reverse(sorted.begin(), sorted.end());
+
+ std::string doneLabel("paths exported");
+ logger->incExpected(doneLabel, sorted.size());
+
+ for (auto & path : sorted) {
+ Activity act(*logger, lvlInfo, format("exporting path ‘%s’") % path);
+ sink << 1;
+ exportPath(path, sink);
+ logger->incProgress(doneLabel);
+ }
+
+ sink << 0;
+}
+
/* Serialise a single store path: its NAR followed by metadata
   (magic, path, references, deriver, and a 0 meaning "no
   signature"), verifying the NAR hash against the recorded one. */
void Store::exportPath(const Path & path, Sink & sink)
{
    auto info = queryPathInfo(path);

    /* Tee the NAR through a hashing sink so we can verify it below. */
    HashAndWriteSink hashAndWriteSink(sink);

    narFromPath(path, hashAndWriteSink);

    /* Refuse to export paths that have changed. This prevents
       filesystem corruption from spreading to other machines.
       Don't complain if the stored hash is zero (unknown). */
    Hash hash = hashAndWriteSink.currentHash();
    if (hash != info->narHash && info->narHash != Hash(info->narHash.type))
        throw Error(format("hash of path ‘%1%’ has changed from ‘%2%’ to ‘%3%’!") % path
            % printHash(info->narHash) % printHash(hash));

    hashAndWriteSink << exportMagic << path << info->references << info->deriver << 0;
}
+
/* Read a stream of exported paths (as produced by exportPaths())
   and add each one to this store. Returns the imported paths in the
   order they appeared in the stream. The field reads below must
   match the write order in exportPath() exactly. */
Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor, bool dontCheckSigs)
{
    Paths res;
    while (true) {
        /* Each path is preceded by a 1; a 0 terminates the stream. */
        auto n = readNum<uint64_t>(source);
        if (n == 0) break;
        if (n != 1) throw Error("input doesn't look like something created by ‘nix-store --export’");

        /* Extract the NAR from the source. */
        TeeSink tee(source);
        parseDump(tee, tee.source);

        uint32_t magic = readInt(source);
        if (magic != exportMagic)
            throw Error("Nix archive cannot be imported; wrong format");

        ValidPathInfo info;

        info.path = readStorePath(*this, source);

        Activity act(*logger, lvlInfo, format("importing path ‘%s’") % info.path);

        info.references = readStorePaths<PathSet>(*this, source);

        info.deriver = readString(source);
        if (info.deriver != "") assertStorePath(info.deriver);

        /* Hash the NAR bytes captured by the TeeSink while parsing. */
        info.narHash = hashString(htSHA256, *tee.source.data);
        info.narSize = tee.source.data->size();

        // Ignore optional legacy signature.
        if (readInt(source) == 1)
            readString(source);

        addToStore(info, tee.source.data, false, dontCheckSigs, accessor);

        res.push_back(info.path);
    }

    return res;
}
+
+}
diff --git a/src/libstore/fs-accessor.hh b/src/libstore/fs-accessor.hh
new file mode 100644
index 000000000..a67e0775b
--- /dev/null
+++ b/src/libstore/fs-accessor.hh
@@ -0,0 +1,30 @@
+#pragma once
+
+#include "types.hh"
+
+namespace nix {
+
/* An abstract class for accessing a filesystem-like structure, such
   as a (possibly remote) Nix store or the contents of a NAR file. */
class FSAccessor
{
public:
    enum Type { tMissing, tRegular, tSymlink, tDirectory };

    struct Stat
    {
        Type type;
        uint64_t fileSize; // regular files only
        bool isExecutable; // regular files only
    };

    /* Return metadata for ‘path’. NOTE(review): presumably reports
       nonexistent paths as type == tMissing rather than throwing —
       confirm against the concrete implementations. */
    virtual Stat stat(const Path & path) = 0;

    /* List the entries of the directory ‘path’. */
    virtual StringSet readDirectory(const Path & path) = 0;

    /* Return the contents of the regular file ‘path’. */
    virtual std::string readFile(const Path & path) = 0;

    /* Return the target of the symlink ‘path’. */
    virtual std::string readLink(const Path & path) = 0;
};
+
+}
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
new file mode 100644
index 000000000..3e7e42cbc
--- /dev/null
+++ b/src/libstore/gc.cc
@@ -0,0 +1,851 @@
+#include "derivations.hh"
+#include "globals.hh"
+#include "local-store.hh"
+
+#include <algorithm>
+#include <functional>
+#include <queue>
+#include <random>
+#include <regex>
+#include <vector>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <climits>
+
+namespace nix {
+
+
+static string gcLockName = "gc.lock";
+static string tempRootsDir = "temproots";
+static string gcRootsDir = "gcroots";
+
+
+/* Acquire the global GC lock.  This is used to prevent new Nix
+   processes from starting after the temporary root files have been
+   read.  To be precise: when they try to create a new temporary root
+   file, they will block until the garbage collector has finished /
+   yielded the GC lock.  Returns the (released) lock file descriptor;
+   the caller owns it and the lock is dropped when it is closed. */
+int LocalStore::openGCLock(LockType lockType)
+{
+    Path fnGCLock = (format("%1%/%2%")
+        % stateDir % gcLockName).str();
+
+    debug(format("acquiring global GC lock ‘%1%’") % fnGCLock);
+
+    AutoCloseFD fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
+    if (!fdGCLock)
+        throw SysError(format("opening global GC lock ‘%1%’") % fnGCLock);
+
+    /* Try non-blocking first so we can tell the user why we're
+       waiting before blocking for real. */
+    if (!lockFile(fdGCLock.get(), lockType, false)) {
+        printError(format("waiting for the big garbage collector lock..."));
+        lockFile(fdGCLock.get(), lockType, true);
+    }
+
+    /* !!! Restrict read permission on the GC root.  Otherwise any
+       process that can open the file for reading can DoS the
+       collector. */
+
+    return fdGCLock.release();
+}
+
+
+/* Atomically create (or replace) the symlink ‘link’ pointing to
+   ‘target’: create a uniquely-named temporary link first, then
+   rename() it over the destination. */
+static void makeSymlink(const Path & link, const Path & target)
+{
+    /* Create directories up to `gcRoot'. */
+    createDirs(dirOf(link));
+
+    /* Create the new symlink under a temporary name; pid + rand()
+       makes collisions with concurrent callers unlikely. */
+    Path tempLink = (format("%1%.tmp-%2%-%3%")
+        % link % getpid() % rand()).str();
+    createSymlink(target, tempLink);
+
+    /* Atomically replace the old one. */
+    if (rename(tempLink.c_str(), link.c_str()) == -1)
+        throw SysError(format("cannot rename ‘%1%’ to ‘%2%’")
+            % tempLink % link);
+}
+
+
+/* Block while a garbage collection is in progress: taking the GC
+   lock in read mode waits for any writer (the collector); the lock
+   is released again when the fd goes out of scope. */
+void LocalStore::syncWithGC()
+{
+    AutoCloseFD fdGCLock = openGCLock(ltRead);
+}
+
+
+/* Register ‘path’ (a symlink owned by the user) as an indirect GC
+   root by creating a symlink to it in <state>/gcroots/auto, named
+   after the SHA-1 hash of ‘path’. */
+void LocalStore::addIndirectRoot(const Path & path)
+{
+    string hash = printHash32(hashString(htSHA1, path));
+    Path realRoot = canonPath((format("%1%/%2%/auto/%3%")
+        % stateDir % gcRootsDir % hash).str());
+    makeSymlink(realRoot, path);
+}
+
+
+/* Register ‘_gcRoot’ as a permanent GC root for the store path
+   ‘_storePath’.  If ‘indirect’, the root is a user-owned symlink
+   registered via addIndirectRoot(); otherwise it must live inside
+   the gcroots directory unless ‘allowOutsideRootsDir’ is set.
+   Returns the canonicalised root path. */
+Path LocalFSStore::addPermRoot(const Path & _storePath,
+    const Path & _gcRoot, bool indirect, bool allowOutsideRootsDir)
+{
+    Path storePath(canonPath(_storePath));
+    Path gcRoot(canonPath(_gcRoot));
+    assertStorePath(storePath);
+
+    if (isInStore(gcRoot))
+        throw Error(format(
+                "creating a garbage collector root (%1%) in the Nix store is forbidden "
+                "(are you running nix-build inside the store?)") % gcRoot);
+
+    if (indirect) {
+        /* Don't clobber the link if it already exists and doesn't
+           point to the Nix store. */
+        if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
+            throw Error(format("cannot create symlink ‘%1%’; already exists") % gcRoot);
+        makeSymlink(gcRoot, storePath);
+        addIndirectRoot(gcRoot);
+    }
+
+    else {
+        if (!allowOutsideRootsDir) {
+            Path rootsDir = canonPath((format("%1%/%2%") % stateDir % gcRootsDir).str());
+
+            /* The root must be inside the gcroots directory. */
+            if (string(gcRoot, 0, rootsDir.size() + 1) != rootsDir + "/")
+                throw Error(format(
+                    "path ‘%1%’ is not a valid garbage collector root; "
+                    "it's not in the directory ‘%2%’")
+                    % gcRoot % rootsDir);
+        }
+
+        /* If the root's base name equals the store path's base name,
+           just create a placeholder file (see findRoots's DT_REG
+           case); otherwise create a symlink to the store path. */
+        if (baseNameOf(gcRoot) == baseNameOf(storePath))
+            writeFile(gcRoot, "");
+        else
+            makeSymlink(gcRoot, storePath);
+    }
+
+    /* Check that the root can be found by the garbage collector.
+       !!! This can be very slow on machines that have many roots.
+       Instead of reading all the roots, it would be more efficient to
+       check if the root is in a directory in or linked from the
+       gcroots directory. */
+    if (settings.checkRootReachability) {
+        Roots roots = findRoots();
+        if (roots.find(gcRoot) == roots.end())
+            printError(
+                format(
+                    "warning: ‘%1%’ is not in a directory where the garbage collector looks for roots; "
+                    "therefore, ‘%2%’ might be removed by the garbage collector")
+                % gcRoot % storePath);
+    }
+
+    /* Grab the global GC root, causing us to block while a GC is in
+       progress.  This prevents the set of permanent roots from
+       increasing while a GC is in progress. */
+    syncWithGC();
+
+    return gcRoot;
+}
+
+
+/* Register ‘path’ as a temporary GC root for the lifetime of this
+   process by appending it to our per-process temproots file.  The
+   locking dance below coordinates with the collector's
+   readTempRoots(). */
+void LocalStore::addTempRoot(const Path & path)
+{
+    auto state(_state.lock());
+
+    /* Create the temporary roots file for this process. */
+    if (!state->fdTempRoots) {
+
+        while (1) {
+            Path dir = (format("%1%/%2%") % stateDir % tempRootsDir).str();
+            createDirs(dir);
+
+            state->fnTempRoots = (format("%1%/%2%") % dir % getpid()).str();
+
+            /* Hold the GC lock while creating/opening the file so the
+               collector can't race us here. */
+            AutoCloseFD fdGCLock = openGCLock(ltRead);
+
+            if (pathExists(state->fnTempRoots))
+                /* It *must* be stale, since there can be no two
+                   processes with the same pid. */
+                unlink(state->fnTempRoots.c_str());
+
+            state->fdTempRoots = openLockFile(state->fnTempRoots, true);
+
+            /* Release the GC lock. */
+            fdGCLock = -1;
+
+            debug(format("acquiring read lock on ‘%1%’") % state->fnTempRoots);
+            lockFile(state->fdTempRoots.get(), ltRead, true);
+
+            /* Check whether the garbage collector didn't get in our
+               way. */
+            struct stat st;
+            if (fstat(state->fdTempRoots.get(), &st) == -1)
+                throw SysError(format("statting ‘%1%’") % state->fnTempRoots);
+            if (st.st_size == 0) break;
+
+            /* The garbage collector deleted this file before we could
+               get a lock.  (It won't delete the file after we get a
+               lock.)  Try again. */
+        }
+
+    }
+
+    /* Upgrade the lock to a write lock.  This will cause us to block
+       if the garbage collector is holding our lock. */
+    debug(format("acquiring write lock on ‘%1%’") % state->fnTempRoots);
+    lockFile(state->fdTempRoots.get(), ltWrite, true);
+
+    /* Roots are stored as a NUL-separated list. */
+    string s = path + '\0';
+    writeFull(state->fdTempRoots.get(), s);
+
+    /* Downgrade to a read lock. */
+    debug(format("downgrading to read lock on ‘%1%’") % state->fnTempRoots);
+    lockFile(state->fdTempRoots.get(), ltRead, true);
+}
+
+
+/* Read all per-process temporary root files in <state>/temproots,
+   adding their contents to ‘tempRoots’.  The read-locked file
+   descriptors are returned in ‘fds’ and must be kept open for as
+   long as the collected roots need to stay valid (the read lock
+   blocks the owners in addTempRoot()). */
+void LocalStore::readTempRoots(PathSet & tempRoots, FDs & fds)
+{
+    /* Read the `temproots' directory for per-process temporary root
+       files. */
+    DirEntries tempRootFiles = readDirectory(
+        (format("%1%/%2%") % stateDir % tempRootsDir).str());
+
+    for (auto & i : tempRootFiles) {
+        Path path = (format("%1%/%2%/%3%") % stateDir % tempRootsDir % i.name).str();
+
+        debug(format("reading temporary root file ‘%1%’") % path);
+        FDPtr fd(new AutoCloseFD(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666)));
+        if (!*fd) {
+            /* It's okay if the file has disappeared. */
+            if (errno == ENOENT) continue;
+            throw SysError(format("opening temporary roots file ‘%1%’") % path);
+        }
+
+        /* This should work, but doesn't, for some reason. */
+        //FDPtr fd(new AutoCloseFD(openLockFile(path, false)));
+        //if (*fd == -1) continue;
+
+        /* Try to acquire a write lock without blocking.  This can
+           only succeed if the owning process has died.  In that case
+           we don't care about its temporary roots. */
+        if (lockFile(fd->get(), ltWrite, false)) {
+            printError(format("removing stale temporary roots file ‘%1%’") % path);
+            unlink(path.c_str());
+            /* Presumably this marker tells a process that still has
+               the (now unlinked) file open that it is stale — confirm
+               against addTempRoot's st_size == 0 check. */
+            writeFull(fd->get(), "d");
+            continue;
+        }
+
+        /* Acquire a read lock.  This will prevent the owning process
+           from upgrading to a write lock, therefore it will block in
+           addTempRoot(). */
+        debug(format("waiting for read lock on ‘%1%’") % path);
+        lockFile(fd->get(), ltRead, true);
+
+        /* Read the entire file. */
+        string contents = readFile(fd->get());
+
+        /* Extract the NUL-separated roots. */
+        string::size_type pos = 0, end;
+
+        while ((end = contents.find((char) 0, pos)) != string::npos) {
+            Path root(contents, pos, end - pos);
+            debug(format("got temporary root ‘%1%’") % root);
+            assertStorePath(root);
+            tempRoots.insert(root);
+            pos = end + 1;
+        }
+
+        fds.push_back(fd); /* keep open */
+    }
+}
+
+
+/* Recursively scan ‘path’ (of dirent type ‘type’, or DT_UNKNOWN)
+   for GC roots: symlinks into the store, indirect symlinks to such
+   symlinks, and regular files whose name is a store path base name.
+   Found roots are added to ‘roots’ (root path → store path). */
+void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
+{
+    /* Record ‘path’ as a root for ‘target’ if the target is a valid
+       store path. */
+    auto foundRoot = [&](const Path & path, const Path & target) {
+        Path storePath = toStorePath(target);
+        if (isStorePath(storePath) && isValidPath(storePath))
+            roots[path] = storePath;
+        else
+            printInfo(format("skipping invalid root from ‘%1%’ to ‘%2%’") % path % storePath);
+    };
+
+    try {
+
+        if (type == DT_UNKNOWN)
+            type = getFileType(path);
+
+        if (type == DT_DIR) {
+            for (auto & i : readDirectory(path))
+                findRoots(path + "/" + i.name, i.type, roots);
+        }
+
+        else if (type == DT_LNK) {
+            Path target = readLink(path);
+            if (isInStore(target))
+                foundRoot(path, target);
+
+            /* Handle indirect roots. */
+            else {
+                target = absPath(target, dirOf(path));
+                if (!pathExists(target)) {
+                    /* Auto-created indirect roots whose target has
+                       disappeared can be cleaned up here. */
+                    if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) {
+                        printInfo(format("removing stale link from ‘%1%’ to ‘%2%’") % path % target);
+                        unlink(path.c_str());
+                    }
+                } else {
+                    struct stat st2 = lstat(target);
+                    if (!S_ISLNK(st2.st_mode)) return;
+                    Path target2 = readLink(target);
+                    if (isInStore(target2)) foundRoot(target, target2);
+                }
+            }
+        }
+
+        else if (type == DT_REG) {
+            /* E.g. a placeholder file named after a store path (see
+               addPermRoot). */
+            Path storePath = storeDir + "/" + baseNameOf(path);
+            if (isStorePath(storePath) && isValidPath(storePath))
+                roots[path] = storePath;
+        }
+
+    }
+
+    catch (SysError & e) {
+        /* We only ignore permanent failures. */
+        if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR)
+            printInfo(format("cannot read potential root ‘%1%’") % path);
+        else
+            throw;
+    }
+}
+
+
+/* Find all permanent GC roots by scanning the gcroots, manifests
+   (if present) and profiles directories under the state dir. */
+Roots LocalStore::findRoots()
+{
+    Roots roots;
+
+    /* Process direct roots in {gcroots,manifests,profiles}. */
+    findRoots(stateDir + "/" + gcRootsDir, DT_UNKNOWN, roots);
+    if (pathExists(stateDir + "/manifests"))
+        findRoots(stateDir + "/manifests", DT_UNKNOWN, roots);
+    findRoots(stateDir + "/profiles", DT_UNKNOWN, roots);
+
+    return roots;
+}
+
+
+/* Read the target of the symlink ‘file’ (e.g. /proc/PID/exe) and,
+   if it is an absolute path, add it to ‘paths’.  A vanished or
+   unreadable link is silently ignored.  Uses a growable buffer
+   instead of the previous non-standard VLA + goto: readlink()
+   truncates silently, so we retry with a doubled buffer until the
+   result fits. */
+static void readProcLink(const string & file, StringSet & paths)
+{
+    /* 64 is the starting buffer size gnu readlink uses... */
+    std::vector<char> buf(64);
+    while (true) {
+        auto res = readlink(file.c_str(), buf.data(), buf.size());
+        if (res == -1) {
+            /* The process or file may have disappeared, or we may
+               not be allowed to look. */
+            if (errno == ENOENT || errno == EACCES)
+                return;
+            throw SysError("reading symlink");
+        }
+        if ((size_t) res < buf.size()) {
+            /* Only absolute targets can be store paths. */
+            if (res > 0 && buf[0] == '/')
+                paths.emplace(buf.data(), res);
+            return;
+        }
+        /* Result may have been truncated: double the buffer and
+           retry. */
+        if (SSIZE_MAX / 2 < (ssize_t) buf.size())
+            throw Error("stupidly long symlink");
+        buf.resize(buf.size() * 2);
+    }
+}
+
+/* Backslash-escape every regex metacharacter in ‘raw’ so that it
+   can be embedded literally in a regular expression (used below for
+   the store directory). */
+static string quoteRegexChars(const string & raw)
+{
+    static auto specialRegex = std::regex(R"([.^$\\*+?()\[\]{}|])");
+    return std::regex_replace(raw, specialRegex, R"(\$&)");
+}
+
+/* Add the contents of the file ‘path’ to ‘paths’ as a potential
+   root, ignoring a missing or unreadable file (best effort). */
+static void readFileRoots(const char * path, StringSet & paths)
+{
+    try {
+        paths.emplace(readFile(path));
+    } catch (SysError & e) {
+        if (e.errNo != ENOENT && e.errNo != EACCES)
+            throw;
+    }
+}
+
+/* Find store paths kept alive by running processes: the executable,
+   working directory, open file descriptors, memory maps and
+   environment of every process in /proc (or, on non-Linux systems,
+   whatever ‘lsof’ reports), plus a few kernel helper paths.  Valid
+   store paths found are added to ‘roots’. */
+void LocalStore::findRuntimeRoots(PathSet & roots)
+{
+    StringSet paths;
+    auto procDir = AutoCloseDir{opendir("/proc")};
+    if (procDir) {
+        struct dirent * ent;
+        auto digitsRegex = std::regex(R"(^\d+$)");
+        auto mapRegex = std::regex(R"(^\s*\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(/\S+)\s*$)");
+        auto storePathRegex = std::regex(quoteRegexChars(storeDir) + R"(/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*)");
+        while (errno = 0, ent = readdir(procDir.get())) {
+            checkInterrupt();
+            /* Only numeric entries are process ids. */
+            if (std::regex_match(ent->d_name, digitsRegex)) {
+                readProcLink((format("/proc/%1%/exe") % ent->d_name).str(), paths);
+                readProcLink((format("/proc/%1%/cwd") % ent->d_name).str(), paths);
+
+                auto fdStr = (format("/proc/%1%/fd") % ent->d_name).str();
+                auto fdDir = AutoCloseDir(opendir(fdStr.c_str()));
+                if (!fdDir) {
+                    /* The process may have exited, or we may not be
+                       allowed to look at it. */
+                    if (errno == ENOENT || errno == EACCES)
+                        continue;
+                    throw SysError(format("opening %1%") % fdStr);
+                }
+                struct dirent * fd_ent;
+                while (errno = 0, fd_ent = readdir(fdDir.get())) {
+                    if (fd_ent->d_name[0] != '.') {
+                        readProcLink((format("%1%/%2%") % fdStr % fd_ent->d_name).str(), paths);
+                    }
+                }
+                if (errno)
+                    throw SysError(format("iterating /proc/%1%/fd") % ent->d_name);
+                fdDir.reset();
+
+                /* Scan the memory maps for mapped files (shared
+                   libraries etc.). */
+                auto mapLines =
+                    tokenizeString<std::vector<string>>(readFile((format("/proc/%1%/maps") % ent->d_name).str(), true), "\n");
+                for (const auto& line : mapLines) {
+                    auto match = std::smatch{};
+                    if (std::regex_match(line, match, mapRegex))
+                        paths.emplace(match[1]);
+                }
+
+                /* Scan the environment for store path references. */
+                try {
+                    auto envString = readFile((format("/proc/%1%/environ") % ent->d_name).str(), true);
+                    auto env_end = std::sregex_iterator{};
+                    for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
+                        paths.emplace(i->str());
+                } catch (SysError & e) {
+                    /* Check the errno captured in the exception, not
+                       the global errno, which may have been clobbered
+                       by calls made between the failure and here. */
+                    if (e.errNo == ENOENT || e.errNo == EACCES)
+                        continue;
+                    throw;
+                }
+            }
+        }
+        if (errno)
+            throw SysError("iterating /proc");
+    }
+
+#if !defined(__linux__)
+    /* On platforms without /proc, fall back to ‘lsof’ to list open
+       files. */
+    try {
+        std::regex lsofRegex(R"(^n(/.*)$)");
+        auto lsofLines =
+            tokenizeString<std::vector<string>>(runProgram(LSOF, true, { "-n", "-w", "-F", "n" }), "\n");
+        for (const auto & line : lsofLines) {
+            std::smatch match;
+            if (std::regex_match(line, match, lsofRegex))
+                paths.emplace(match[1]);
+        }
+    } catch (ExecError & e) {
+        /* lsof not installed, lsof failed */
+    }
+#endif
+
+#if defined(__linux__)
+    readFileRoots("/proc/sys/kernel/modprobe", paths);
+    readFileRoots("/proc/sys/kernel/fbsplash", paths);
+    readFileRoots("/proc/sys/kernel/poweroff_cmd", paths);
+#endif
+
+    /* Keep only names that are actual, valid store paths. */
+    for (auto & i : paths)
+        if (isInStore(i)) {
+            Path path = toStorePath(i);
+            if (roots.find(path) == roots.end() && isStorePath(path) && isValidPath(path)) {
+                debug(format("got additional root ‘%1%’") % path);
+                roots.insert(path);
+            }
+        }
+}
+
+
+/* Thrown to abort the deletion loop once ‘maxFreed’ bytes have been
+   freed or invalidated. */
+struct GCLimitReached { };
+
+
+/* Mutable state threaded through a single collectGarbage() run. */
+struct LocalStore::GCState
+{
+    GCOptions options;
+    GCResults & results;
+    PathSet roots;       // permanent + runtime + temporary roots
+    PathSet tempRoots;
+    PathSet dead;        // paths determined to be garbage
+    PathSet alive;       // paths determined to be reachable from a root
+    bool gcKeepOutputs;
+    bool gcKeepDerivations;
+    unsigned long long bytesInvalidated;
+    bool moveToTrash = true;
+    bool shouldDelete;
+    GCState(GCResults & results_) : results(results_), bytesInvalidated(0) { }
+};
+
+
+/* Return true if ‘path’ equals ‘<temp-root>SUFFIX’ for some current
+   temporary root, e.g. the ‘.lock’ file of a build in progress. */
+bool LocalStore::isActiveTempFile(const GCState & state,
+    const Path & path, const string & suffix)
+{
+    return hasSuffix(path, suffix)
+        && state.tempRoots.find(string(path, 0, path.size() - suffix.size())) != state.tempRoots.end();
+}
+
+
+/* Delete ‘path’ recursively and account the freed bytes in the GC
+   results. */
+void LocalStore::deleteGarbage(GCState & state, const Path & path)
+{
+    unsigned long long bytesFreed;
+    deletePath(path, bytesFreed);
+    state.results.bytesFreed += bytesFreed;
+}
+
+
+/* Invalidate and delete ‘path’, first recursing into all of its
+   referrers (a path can only go away once nothing refers to it).
+   Throws GCLimitReached once more than ‘maxFreed’ bytes have been
+   freed or invalidated. */
+void LocalStore::deletePathRecursive(GCState & state, const Path & path)
+{
+    checkInterrupt();
+
+    unsigned long long size = 0;
+
+    if (isStorePath(path) && isValidPath(path)) {
+        PathSet referrers;
+        /* Delete referrers before the path itself. */
+        queryReferrers(path, referrers);
+        for (auto & i : referrers)
+            if (i != path) deletePathRecursive(state, i);
+        size = queryPathInfo(path)->narSize;
+        invalidatePathChecked(path);
+    }
+
+    Path realPath = realStoreDir + "/" + baseNameOf(path);
+
+    struct stat st;
+    if (lstat(realPath.c_str(), &st)) {
+        if (errno == ENOENT) return;
+        throw SysError(format("getting status of %1%") % realPath);
+    }
+
+    printInfo(format("deleting ‘%1%’") % path);
+
+    state.results.paths.insert(path);
+
+    /* If the path is not a regular file or symlink, move it to the
+       trash directory.  The move is to ensure that later (when we're
+       not holding the global GC lock) we can delete the path without
+       being afraid that the path has become alive again.  Otherwise
+       delete it right away. */
+    if (state.moveToTrash && S_ISDIR(st.st_mode)) {
+        // Estimate the amount freed using the narSize field.  FIXME:
+        // if the path was not valid, need to determine the actual
+        // size.
+        try {
+            if (chmod(realPath.c_str(), st.st_mode | S_IWUSR) == -1)
+                throw SysError(format("making ‘%1%’ writable") % realPath);
+            Path tmp = trashDir + "/" + baseNameOf(path);
+            if (rename(realPath.c_str(), tmp.c_str()))
+                throw SysError(format("unable to rename ‘%1%’ to ‘%2%’") % realPath % tmp);
+            state.bytesInvalidated += size;
+        } catch (SysError & e) {
+            /* Out of space: fall back to deleting in place. */
+            if (e.errNo == ENOSPC) {
+                printInfo(format("note: can't create move ‘%1%’: %2%") % realPath % e.msg());
+                deleteGarbage(state, realPath);
+            }
+            /* NOTE(review): other SysErrors are silently swallowed
+               here, leaving the path neither trashed nor deleted —
+               confirm this best-effort behaviour is intentional. */
+        }
+    } else
+        deleteGarbage(state, realPath);
+
+    if (state.results.bytesFreed + state.bytesInvalidated > state.options.maxFreed) {
+        printInfo(format("deleted or invalidated more than %1% bytes; stopping") % state.options.maxFreed);
+        throw GCLimitReached();
+    }
+}
+
+
+/* Return true if ‘path’ is (transitively) reachable from some root
+   via referrers — and, depending on the gc-keep-outputs /
+   gc-keep-derivations settings, via derivation/output edges.
+   Memoises results in state.alive / state.dead; ‘visited’ guards
+   against reference cycles. */
+bool LocalStore::canReachRoot(GCState & state, PathSet & visited, const Path & path)
+{
+    if (visited.find(path) != visited.end()) return false;
+
+    if (state.alive.find(path) != state.alive.end()) {
+        return true;
+    }
+
+    if (state.dead.find(path) != state.dead.end()) {
+        return false;
+    }
+
+    if (state.roots.find(path) != state.roots.end()) {
+        debug(format("cannot delete ‘%1%’ because it's a root") % path);
+        state.alive.insert(path);
+        return true;
+    }
+
+    visited.insert(path);
+
+    /* Invalid paths have no recorded referrers, so nothing below can
+       keep them alive. */
+    if (!isStorePath(path) || !isValidPath(path)) return false;
+
+    PathSet incoming;
+
+    /* Don't delete this path if any of its referrers are alive. */
+    queryReferrers(path, incoming);
+
+    /* If gc-keep-derivations is set and this is a derivation, then
+       don't delete the derivation if any of the outputs are alive. */
+    if (state.gcKeepDerivations && isDerivation(path)) {
+        PathSet outputs = queryDerivationOutputs(path);
+        for (auto & i : outputs)
+            if (isValidPath(i) && queryPathInfo(i)->deriver == path)
+                incoming.insert(i);
+    }
+
+    /* If gc-keep-outputs is set, then don't delete this path if there
+       are derivers of this path that are not garbage. */
+    if (state.gcKeepOutputs) {
+        PathSet derivers = queryValidDerivers(path);
+        for (auto & i : derivers)
+            incoming.insert(i);
+    }
+
+    for (auto & i : incoming)
+        if (i != path)
+            if (canReachRoot(state, visited, i)) {
+                state.alive.insert(path);
+                return true;
+            }
+
+    return false;
+}
+
+
+/* Delete ‘path’ if it is garbage, i.e. cannot reach any root.
+   When no root is reachable, the entire visited set is marked dead;
+   actual deletion only happens when state.shouldDelete is set. */
+void LocalStore::tryToDelete(GCState & state, const Path & path)
+{
+    checkInterrupt();
+
+    auto realPath = realStoreDir + "/" + baseNameOf(path);
+    /* Never consider the .links or trash directories themselves. */
+    if (realPath == linksDir || realPath == trashDir) return;
+
+    Activity act(*logger, lvlDebug, format("considering whether to delete ‘%1%’") % path);
+
+    if (!isStorePath(path) || !isValidPath(path)) {
+        /* A lock file belonging to a path that we're building right
+           now isn't garbage. */
+        if (isActiveTempFile(state, path, ".lock")) return;
+
+        /* Don't delete .chroot directories for derivations that are
+           currently being built. */
+        if (isActiveTempFile(state, path, ".chroot")) return;
+
+        /* Don't delete .check directories for derivations that are
+           currently being built, because we may need to run
+           diff-hook. */
+        if (isActiveTempFile(state, path, ".check")) return;
+    }
+
+    PathSet visited;
+
+    if (canReachRoot(state, visited, path)) {
+        debug(format("cannot delete ‘%1%’ because it's still reachable") % path);
+    } else {
+        /* No path we visited was a root, so everything is garbage.
+           But we only delete ‘path’ and its referrers here so that
+           ‘nix-store --delete’ doesn't have the unexpected effect of
+           recursing into derivations and outputs. */
+        state.dead.insert(visited.begin(), visited.end());
+        if (state.shouldDelete)
+            deletePathRecursive(state, path);
+    }
+}
+
+
+/* Unlink all files in /nix/store/.links that have a link count of 1,
+   which indicates that there are no other links and so they can be
+   safely deleted.  FIXME: race condition with optimisePath(): we
+   might see a link count of 1 just before optimisePath() increases
+   the link count. */
+void LocalStore::removeUnusedLinks(const GCState & state)
+{
+    AutoCloseDir dir(opendir(linksDir.c_str()));
+    if (!dir) throw SysError(format("opening directory ‘%1%’") % linksDir);
+
+    /* Bytes actually used vs. bytes that would be used without
+       hard-link sharing, for the statistic printed at the end. */
+    long long actualSize = 0, unsharedSize = 0;
+
+    struct dirent * dirent;
+    while (errno = 0, dirent = readdir(dir.get())) {
+        checkInterrupt();
+        string name = dirent->d_name;
+        if (name == "." || name == "..") continue;
+        Path path = linksDir + "/" + name;
+
+        struct stat st;
+        if (lstat(path.c_str(), &st) == -1)
+            throw SysError(format("statting ‘%1%’") % path);
+
+        if (st.st_nlink != 1) {
+            /* Still shared; just account for it. */
+            unsigned long long size = st.st_blocks * 512ULL;
+            actualSize += size;
+            unsharedSize += (st.st_nlink - 1) * size;
+            continue;
+        }
+
+        printMsg(lvlTalkative, format("deleting unused link ‘%1%’") % path);
+
+        if (unlink(path.c_str()) == -1)
+            throw SysError(format("deleting ‘%1%’") % path);
+
+        state.results.bytesFreed += st.st_blocks * 512ULL;
+    }
+
+    struct stat st;
+    if (stat(linksDir.c_str(), &st) == -1)
+        throw SysError(format("statting ‘%1%’") % linksDir);
+    long long overhead = st.st_blocks * 512ULL;
+
+    printInfo(format("note: currently hard linking saves %.2f MiB")
+        % ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
+}
+
+
+/* The garbage collector entry point.  Depending on ‘options.action’
+   this either returns the live/dead path sets, deletes all dead
+   paths, or deletes just ‘options.pathsToDelete’, subject to the
+   ‘options.maxFreed’ byte limit. */
+void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
+{
+    GCState state(results);
+    state.options = options;
+    state.gcKeepOutputs = settings.gcKeepOutputs;
+    state.gcKeepDerivations = settings.gcKeepDerivations;
+
+    /* Using `--ignore-liveness' with `--delete' can have unintended
+       consequences if `gc-keep-outputs' or `gc-keep-derivations' are
+       true (the garbage collector will recurse into deleting the
+       outputs or derivers, respectively).  So disable them. */
+    if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
+        state.gcKeepOutputs = false;
+        state.gcKeepDerivations = false;
+    }
+
+    state.shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
+
+    /* Remove the reserved file — presumably to make its space
+       available while collecting. */
+    if (state.shouldDelete)
+        deletePath(reservedPath);
+
+    /* Acquire the global GC root.  This prevents
+       a) New roots from being added.
+       b) Processes from creating new temporary root files. */
+    AutoCloseFD fdGCLock = openGCLock(ltWrite);
+
+    /* Find the roots.  Since we've grabbed the GC lock, the set of
+       permanent roots cannot increase now. */
+    printError(format("finding garbage collector roots..."));
+    Roots rootMap = options.ignoreLiveness ? Roots() : findRoots();
+
+    for (auto & i : rootMap) state.roots.insert(i.second);
+
+    /* Add additional roots returned by the program specified by the
+       NIX_ROOT_FINDER environment variable.  This is typically used
+       to add running programs to the set of roots (to prevent them
+       from being garbage collected). */
+    if (!options.ignoreLiveness)
+        findRuntimeRoots(state.roots);
+
+    /* Read the temporary roots.  This acquires read locks on all
+       per-process temporary root files.  So after this point no paths
+       can be added to the set of temporary roots. */
+    FDs fds;
+    readTempRoots(state.tempRoots, fds);
+    state.roots.insert(state.tempRoots.begin(), state.tempRoots.end());
+
+    /* After this point the set of roots or temporary roots cannot
+       increase, since we hold locks on everything.  So everything
+       that is not reachable from `roots' is garbage. */
+
+    if (state.shouldDelete) {
+        if (pathExists(trashDir)) deleteGarbage(state, trashDir);
+        try {
+            createDirs(trashDir);
+        } catch (SysError & e) {
+            /* Can't make a trash dir on a full disk: delete paths in
+               place instead of moving them first. */
+            if (e.errNo == ENOSPC) {
+                printInfo(format("note: can't create trash directory: %1%") % e.msg());
+                state.moveToTrash = false;
+            }
+        }
+    }
+
+    /* Now either delete all garbage paths, or just the specified
+       paths (for gcDeleteSpecific). */
+
+    if (options.action == GCOptions::gcDeleteSpecific) {
+
+        for (auto & i : options.pathsToDelete) {
+            assertStorePath(i);
+            tryToDelete(state, i);
+            if (state.dead.find(i) == state.dead.end())
+                throw Error(format("cannot delete path ‘%1%’ since it is still alive") % i);
+        }
+
+    } else if (options.maxFreed > 0) {
+
+        if (state.shouldDelete)
+            printError(format("deleting garbage..."));
+        else
+            printError(format("determining live/dead paths..."));
+
+        try {
+
+            AutoCloseDir dir(opendir(realStoreDir.c_str()));
+            if (!dir) throw SysError(format("opening directory ‘%1%’") % realStoreDir);
+
+            /* Read the store and immediately delete all paths that
+               aren't valid.  When using --max-freed etc., deleting
+               invalid paths is preferred over deleting unreachable
+               paths, since unreachable paths could become reachable
+               again.  We don't use readDirectory() here so that GCing
+               can start faster. */
+            Paths entries;
+            struct dirent * dirent;
+            while (errno = 0, dirent = readdir(dir.get())) {
+                checkInterrupt();
+                string name = dirent->d_name;
+                if (name == "." || name == "..") continue;
+                Path path = storeDir + "/" + name;
+                if (isStorePath(path) && isValidPath(path))
+                    entries.push_back(path);
+                else
+                    tryToDelete(state, path);
+            }
+
+            dir.reset();
+
+            /* Now delete the unreachable valid paths.  Randomise the
+               order in which we delete entries to make the collector
+               less biased towards deleting paths that come
+               alphabetically first (e.g. /nix/store/000...).  This
+               matters when using --max-freed etc.  (std::shuffle is
+               used because std::random_shuffle was deprecated in
+               C++14 and removed in C++17.) */
+            vector<Path> entries_(entries.begin(), entries.end());
+            std::shuffle(entries_.begin(), entries_.end(), std::mt19937(std::random_device{}()));
+
+            for (auto & i : entries_)
+                tryToDelete(state, i);
+
+        } catch (GCLimitReached & e) {
+            /* Not an error: we've freed enough. */
+        }
+    }
+
+    if (state.options.action == GCOptions::gcReturnLive) {
+        state.results.paths = state.alive;
+        return;
+    }
+
+    if (state.options.action == GCOptions::gcReturnDead) {
+        state.results.paths = state.dead;
+        return;
+    }
+
+    /* Allow other processes to add to the store from here on. */
+    fdGCLock = -1;
+    fds.clear();
+
+    /* Delete the trash directory. */
+    printInfo(format("deleting ‘%1%’") % trashDir);
+    deleteGarbage(state, trashDir);
+
+    /* Clean up the links directory. */
+    if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
+        printError(format("deleting unused links..."));
+        removeUnusedLinks(state);
+    }
+
+    /* While we're at it, vacuum the database. */
+    //if (options.action == GCOptions::gcDeleteDead) vacuumDB();
+}
+
+
+}
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
new file mode 100644
index 000000000..953bf6aaa
--- /dev/null
+++ b/src/libstore/globals.cc
@@ -0,0 +1,104 @@
+#include "globals.hh"
+#include "util.hh"
+#include "archive.hh"
+#include "args.hh"
+
+#include <algorithm>
+#include <map>
+#include <thread>
+
+
+namespace nix {
+
+
+/* The default location of the daemon socket, relative to nixStateDir.
+ The socket is in a directory to allow you to control access to the
+ Nix daemon by setting the mode/ownership of the directory
+ appropriately. (This wouldn't work on the socket itself since it
+ must be deleted and recreated on startup.) */
+#define DEFAULT_SOCKET_PATH "/daemon-socket/socket"
+
+/* chroot-like behavior from Apple's sandbox */
+#if __APPLE__
+ #define DEFAULT_ALLOWED_IMPURE_PREFIXES "/System/Library /usr/lib /dev /bin/sh"
+#else
+ #define DEFAULT_ALLOWED_IMPURE_PREFIXES ""
+#endif
+
+Settings settings;
+
+/* Initialise the global settings.  All standard directories can be
+   overridden through environment variables, falling back to the
+   compile-time defaults. */
+Settings::Settings()
+    : Config({})
+    , nixPrefix(NIX_PREFIX)
+    , nixStore(canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))))
+    , nixDataDir(canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)))
+    , nixLogDir(canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)))
+    , nixStateDir(canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR)))
+    , nixConfDir(canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR)))
+    , nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)))
+    , nixBinDir(canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)))
+    , nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH))
+{
+    /* Use the build users group only when running as root. */
+    buildUsersGroup = getuid() == 0 ? "nixbld" : "";
+    lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";
+    caFile = getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt"));
+
+#if __linux__
+    /* The build sandbox gets a /bin/sh by default. */
+    sandboxPaths = tokenizeString<StringSet>("/bin/sh=" BASH_PATH);
+#endif
+
+    allowedImpureHostPrefixes = tokenizeString<StringSet>(DEFAULT_ALLOWED_IMPURE_PREFIXES);
+}
+
+/* Load the system-wide nix.conf, then — after clearing the
+   ‘overridden’ flags — the user's nix.conf, so that only
+   user/command-line settings count as overrides. */
+void Settings::loadConfFile()
+{
+    applyConfigFile(nixConfDir + "/nix.conf");
+
+    /* We only want to send overrides to the daemon, i.e. stuff from
+       ~/.nix/nix.conf or the command line. */
+    resetOverriden();
+
+    applyConfigFile(getConfigDir() + "/nix/nix.conf");
+}
+
+/* Set a configuration option by name; forwards to Config::set. */
+void Settings::set(const string & name, const string & value)
+{
+    Config::set(name, value);
+}
+
+/* Default value for the ‘cores’ setting: the number of hardware
+   threads, but never less than one (hardware_concurrency() may
+   return 0 when it cannot be determined). */
+unsigned int Settings::getDefaultCores()
+{
+    unsigned int concurrency = std::thread::hardware_concurrency();
+    return concurrency == 0 ? 1 : concurrency;
+}
+
+/* The Nix version string (e.g. as reported by ‘--version’). */
+const string nixVersion = PACKAGE_VERSION;
+
+/* Parse the sandbox setting; accepted values are "true", "relaxed"
+   and "false". */
+template<> void BaseSetting<SandboxMode>::set(const std::string & str)
+{
+    if (str == "true") value = smEnabled;
+    else if (str == "relaxed") value = smRelaxed;
+    else if (str == "false") value = smDisabled;
+    else throw UsageError("option '%s' has invalid value '%s'", name, str);
+}
+
+/* Render a SandboxMode back to its configuration-file string form
+   (the inverse of set() above). */
+template<> std::string BaseSetting<SandboxMode>::to_string()
+{
+    if (value == smEnabled) return "true";
+    else if (value == smRelaxed) return "relaxed";
+    else if (value == smDisabled) return "false";
+    else abort();
+}
+
+/* Emit this setting as JSON; delegates to the generic
+   AbstractSetting implementation (i.e. the string form). */
+template<> void BaseSetting<SandboxMode>::toJSON(JSONPlaceholder & out)
+{
+    AbstractSetting::toJSON(out);
+}
+
+/* Parse ‘max-jobs’: either the literal "auto" (number of hardware
+   threads, at least 1) or an integer. */
+void MaxBuildJobsSetting::set(const std::string & str)
+{
+    if (str == "auto") value = std::max(1U, std::thread::hardware_concurrency());
+    else if (!string2Int(str, value))
+        throw UsageError("configuration setting ‘%s’ should be ‘auto’ or an integer", name);
+}
+
+}
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
new file mode 100644
index 000000000..b4f44de2e
--- /dev/null
+++ b/src/libstore/globals.hh
@@ -0,0 +1,318 @@
+#pragma once
+
+#include "types.hh"
+#include "config.hh"
+
+#include <map>
+#include <sys/types.h>
+
+
+namespace nix {
+
+typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode;
+
+extern bool useCaseHack; // FIXME
+
+struct CaseHackSetting : public BaseSetting<bool>
+{
+ CaseHackSetting(Config * options,
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases = {})
+ : BaseSetting<bool>(useCaseHack, name, description, aliases)
+ {
+ options->addSetting(this);
+ }
+
+ void set(const std::string & str) override
+ {
+ BaseSetting<bool>::set(str);
+ nix::useCaseHack = true;
+ }
+};
+
/* Setting for build-max-jobs that, besides plain integers, accepts
   the special value "auto" (see MaxBuildJobsSetting::set in
   globals.cc). */
struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
{
    MaxBuildJobsSetting(Config * options,
        unsigned int def,
        const std::string & name,
        const std::string & description,
        const std::set<std::string> & aliases = {})
        : BaseSetting<unsigned int>(def, name, description, aliases)
    {
        options->addSetting(this);
    }

    void set(const std::string & str) override;
};
+
/* Global run-time settings for libstore, read from nix.conf and the
   command line.  Inherits the generic option machinery from Config;
   every Setting<> member registers itself with it via the `this`
   constructor argument. */
class Settings : public Config {

    /* Number of hardware threads, but at least 1; default for the
       build-cores setting. */
    unsigned int getDefaultCores();

public:

    Settings();

    /* Read the system-wide nix.conf, then the per-user one. */
    void loadConfFile();

    /* Set an option by name (forwards to Config::set). */
    void set(const string & name, const string & value);

    Path nixPrefix;

    /* The directory where we store sources and derived files. */
    Path nixStore;

    Path nixDataDir; /* !!! fix */

    /* The directory where we log various operations. */
    Path nixLogDir;

    /* The directory where state is stored. */
    Path nixStateDir;

    /* The directory where configuration files are stored. */
    Path nixConfDir;

    /* The directory where internal helper programs are stored. */
    Path nixLibexecDir;

    /* The directory where the main programs are stored. */
    Path nixBinDir;

    /* File name of the socket the daemon listens to. */
    Path nixDaemonSocketFile;

    Setting<bool> keepFailed{this, false, "keep-failed",
        "Whether to keep temporary directories of failed builds."};

    Setting<bool> keepGoing{this, false, "keep-going",
        "Whether to keep building derivations when another build fails."};

    Setting<bool> tryFallback{this, false, "build-fallback",
        "Whether to fall back to building when substitution fails."};

    /* Whether to show build log output in real time. */
    bool verboseBuild = true;

    /* If verboseBuild is false, the number of lines of the tail of
       the log to show if a build fails. */
    size_t logLines = 10;

    MaxBuildJobsSetting maxBuildJobs{this, 1, "build-max-jobs",
        "Maximum number of parallel build jobs. \"auto\" means use number of cores."};

    Setting<unsigned int> buildCores{this, getDefaultCores(), "build-cores",
        "Number of CPU cores to utilize in parallel within a build, "
        "i.e. by passing this number to Make via '-j'. 0 means that the "
        "number of actual CPU cores on the local host ought to be "
        "auto-detected."};

    /* Read-only mode. Don't copy stuff to the store, don't change
       the database. */
    bool readOnlyMode = false;

    Setting<std::string> thisSystem{this, SYSTEM, "system",
        "The canonical Nix system name."};

    /* NOTE(review): "builer" below is a typo for "builder"; left as-is
       here because the description text is user-visible output. */
    Setting<time_t> maxSilentTime{this, 0, "build-max-silent-time",
        "The maximum time in seconds that a builer can go without "
        "producing any output on stdout/stderr before it is killed. "
        "0 means infinity."};

    Setting<time_t> buildTimeout{this, 0, "build-timeout",
        "The maximum duration in seconds that a builder can run. "
        "0 means infinity."};

    Setting<bool> useBuildHook{this, true, "remote-builds",
        "Whether to use build hooks (for distributed builds)."};

    Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space",
        "Amount of reserved disk space for the garbage collector."};

    Setting<bool> fsyncMetadata{this, true, "fsync-metadata",
        "Whether SQLite should use fsync()."};

    Setting<bool> useSQLiteWAL{this, true, "use-sqlite-wal",
        "Whether SQLite should use WAL mode."};

    Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering",
        "Whether to call sync() before registering a path as valid."};

    Setting<bool> useSubstitutes{this, true, "build-use-substitutes",
        "Whether to use substitutes."};

    Setting<std::string> buildUsersGroup{this, "", "build-users-group",
        "The Unix group that contains the build users."};

    Setting<bool> impersonateLinux26{this, false, "build-impersonate-linux-26",
        "Whether to impersonate a Linux 2.6 machine on newer kernels."};

    Setting<bool> keepLog{this, true, "build-keep-log",
        "Whether to store build logs."};

    Setting<bool> compressLog{this, true, "build-compress-log",
        "Whether to compress logs."};

    Setting<unsigned long> maxLogSize{this, 0, "build-max-log-size",
        "Maximum number of bytes a builder can write to stdout/stderr "
        "before being killed (0 means no limit)."};

    /* When build-repeat > 0 and verboseBuild == true, whether to
       print repeated builds (i.e. builds other than the first one) to
       stderr. Hack to prevent Hydra logs from being polluted. */
    bool printRepeatedBuilds = true;

    Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
        "How often (in seconds) to poll for locks."};

    Setting<bool> checkRootReachability{this, false, "gc-check-reachability",
        "Whether to check if new GC roots can in fact be found by the "
        "garbage collector."};

    Setting<bool> gcKeepOutputs{this, false, "gc-keep-outputs",
        "Whether the garbage collector should keep outputs of live derivations."};

    Setting<bool> gcKeepDerivations{this, true, "gc-keep-derivations",
        "Whether the garbage collector should keep derivers of live paths."};

    Setting<bool> autoOptimiseStore{this, false, "auto-optimise-store",
        "Whether to automatically replace files with identical contents with hard links."};

    Setting<bool> envKeepDerivations{this, false, "env-keep-derivations",
        "Whether to add derivations as a dependency of user environments "
        "(to prevent them from being GCed)."};

    /* Whether to lock the Nix client and worker to the same CPU. */
    bool lockCPU;

    /* Whether to show a stack trace if Nix evaluation fails. */
    bool showTrace = false;

    Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
        "Whether builtin functions that allow executing native code should be enabled."};

    Setting<SandboxMode> sandboxMode{this, smDisabled, "build-use-sandbox",
        "Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".",
        {"build-use-chroot"}};

    Setting<PathSet> sandboxPaths{this, {}, "build-sandbox-paths",
        "The paths to make available inside the build sandbox.",
        {"build-chroot-dirs"}};

    Setting<PathSet> extraSandboxPaths{this, {}, "build-extra-sandbox-paths",
        "Additional paths to make available inside the build sandbox.",
        {"build-extra-chroot-dirs"}};

    Setting<bool> restrictEval{this, false, "restrict-eval",
        "Whether to restrict file system access to paths in $NIX_PATH, "
        "and to disallow fetching files from the network."};

    Setting<size_t> buildRepeat{this, 0, "build-repeat",
        "The number of times to repeat a build in order to verify determinism."};

#if __linux__
    Setting<std::string> sandboxShmSize{this, "50%", "sandbox-dev-shm-size",
        "The size of /dev/shm in the build sandbox."};
#endif

    Setting<PathSet> allowedImpureHostPrefixes{this, {}, "allowed-impure-host-deps",
        "Which prefixes to allow derivations to ask for access to (primarily for Darwin)."};

#if __APPLE__
    Setting<bool> darwinLogSandboxViolations{this, false, "darwin-log-sandbox-violations",
        "Whether to log Darwin sandbox access violations to the system log."};
#endif

    Setting<bool> runDiffHook{this, false, "run-diff-hook",
        "Whether to run the program specified by the diff-hook setting "
        "repeated builds produce a different result. Typically used to "
        "plug in diffoscope."};

    /* NOTE(review): second constructor argument is presumably
       PathSetting's allowEmpty flag — confirm against config.hh. */
    PathSetting diffHook{this, true, "", "diff-hook",
        "A program that prints out the differences between the two paths "
        "specified on its command line."};

    Setting<bool> enforceDeterminism{this, true, "enforce-determinism",
        "Whether to fail if repeated builds produce different output."};

    Setting<Strings> binaryCachePublicKeys{this,
        {"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
        "binary-cache-public-keys",
        "Trusted public keys for secure substitution."};

    Setting<Strings> secretKeyFiles{this, {}, "secret-key-files",
        "Secret keys with which to sign local builds."};

    Setting<size_t> binaryCachesParallelConnections{this, 25, "http-connections",
        "Number of parallel HTTP connections.",
        {"binary-caches-parallel-connections"}};

    Setting<bool> enableHttp2{this, true, "enable-http2",
        "Whether to enable HTTP/2 support."};

    Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl",
        "How soon to expire files fetched by builtins.fetchTarball and builtins.fetchurl."};

    Setting<std::string> signedBinaryCaches{this, "*", "signed-binary-caches",
        "Obsolete."};

    Setting<Strings> substituters{this,
        nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(),
        "substituters",
        "The URIs of substituters (such as https://cache.nixos.org/).",
        {"binary-caches"}};

    // FIXME: provide a way to add to option values.
    Setting<Strings> extraSubstituters{this, {}, "extra-substituters",
        "Additional URIs of substituters.",
        {"extra-binary-caches"}};

    Setting<StringSet> trustedSubstituters{this, {}, "trusted-substituters",
        "Disabled substituters that may be enabled via the substituters option by untrusted users.",
        {"trusted-binary-caches"}};

    Setting<Strings> trustedUsers{this, {"root"}, "trusted-users",
        "Which users or groups are trusted to ask the daemon to do unsafe things."};

    /* Users/groups allowed to connect to the daemon at all; contrast
       with trustedUsers above, which may additionally do unsafe
       things. */
    Setting<Strings> allowedUsers{this, {"*"}, "allowed-users",
        "Which users or groups are allowed to connect to the daemon."};

    Setting<bool> printMissing{this, true, "print-missing",
        "Whether to print what paths need to be built or downloaded."};

    Setting<std::string> preBuildHook{this,
#if __APPLE__
        nixLibexecDir + "/nix/resolve-system-dependencies",
#else
        "",
#endif
        "pre-build-hook",
        "A program to run just before a build to set derivation-specific build settings."};

    Setting<std::string> netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
        "Path to the netrc file used to obtain usernames/passwords for downloads."};

    /* Path to the SSL CA file used */
    Path caFile;

    Setting<bool> enableImportFromDerivation{this, true, "allow-import-from-derivation",
        "Whether the evaluator allows importing the result of a derivation."};

    CaseHackSetting useCaseHack{this, "use-case-hack",
        "Whether to enable a Darwin-specific hack for dealing with file name collisions."};

    Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
        "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
};
+
+
+// FIXME: don't use a global variable.
+extern Settings settings;
+
+
+extern const string nixVersion;
+
+
+}
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
new file mode 100644
index 000000000..37a7d6ace
--- /dev/null
+++ b/src/libstore/http-binary-cache-store.cc
@@ -0,0 +1,115 @@
+#include "binary-cache-store.hh"
+#include "download.hh"
+#include "globals.hh"
+#include "nar-info-disk-cache.hh"
+
+namespace nix {
+
+MakeError(UploadToHTTP, Error);
+
/* Read-only binary cache accessed over HTTP(S).  All reads go through
   the shared Downloader; uploads are rejected with UploadToHTTP. */
class HttpBinaryCacheStore : public BinaryCacheStore
{
private:

    /* Cache URL, normalised to have no trailing slash. */
    Path cacheUri;

public:

    HttpBinaryCacheStore(
        const Params & params, const Path & _cacheUri)
        : BinaryCacheStore(params)
        , cacheUri(_cacheUri)
    {
        /* Strip a trailing slash so the "/"-joins below produce clean
           URLs. */
        if (cacheUri.back() == '/')
            cacheUri.pop_back();

        diskCache = getNarInfoDiskCache();
    }

    std::string getUri() override
    {
        return cacheUri;
    }

    void init() override
    {
        // FIXME: do this lazily?
        /* Probe the cache only when we have no cached metadata for it.
           NOTE(review): this relies on BinaryCacheStore::init()
           signalling a missing/invalid cache via UploadToHTTP —
           confirm in binary-cache-store.cc. */
        if (!diskCache->cacheExists(cacheUri, wantMassQuery_, priority)) {
            try {
                BinaryCacheStore::init();
            } catch (UploadToHTTP &) {
                throw Error(format("‘%s’ does not appear to be a binary cache") % cacheUri);
            }
            diskCache->createCache(cacheUri, storeDir, wantMassQuery_, priority);
        }
    }

protected:

    /* Check existence with a HEAD request. */
    bool fileExists(const std::string & path) override
    {
        try {
            DownloadRequest request(cacheUri + "/" + path);
            request.showProgress = DownloadRequest::no;
            request.head = true;
            request.tries = 5;
            getDownloader()->download(request);
            return true;
        } catch (DownloadError & e) {
            /* S3 buckets return 403 if a file doesn't exist and the
               bucket is unlistable, so treat 403 as 404. */
            if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
                return false;
            throw;
        }
    }

    void upsertFile(const std::string & path,
        const std::string & data,
        const std::string & mimeType) override
    {
        throw UploadToHTTP("uploading to an HTTP binary cache is not supported");
    }

    /* Asynchronously fetch a file.  A missing file (404, or 403 — see
       fileExists) is reported as success with a null pointer, not as a
       failure. */
    void getFile(const std::string & path,
        std::function<void(std::shared_ptr<std::string>)> success,
        std::function<void(std::exception_ptr exc)> failure) override
    {
        DownloadRequest request(cacheUri + "/" + path);
        request.showProgress = DownloadRequest::no;
        request.tries = 8;

        getDownloader()->enqueueDownload(request,
            [success](const DownloadResult & result) {
                success(result.data);
            },
            [success, failure](std::exception_ptr exc) {
                try {
                    std::rethrow_exception(exc);
                } catch (DownloadError & e) {
                    if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
                        return success(0);
                    failure(exc);
                } catch (...) {
                    failure(exc);
                }
            });
    }

};
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ if (std::string(uri, 0, 7) != "http://" &&
+ std::string(uri, 0, 8) != "https://" &&
+ (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") != "1" || std::string(uri, 0, 7) != "file://")
+ ) return 0;
+ auto store = std::make_shared<HttpBinaryCacheStore>(params, uri);
+ store->init();
+ return store;
+});
+
+}
+
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
new file mode 100644
index 000000000..befc560bf
--- /dev/null
+++ b/src/libstore/legacy-ssh-store.cc
@@ -0,0 +1,256 @@
+#include "archive.hh"
+#include "pool.hh"
+#include "remote-store.hh"
+#include "serve-protocol.hh"
+#include "store-api.hh"
+#include "worker-protocol.hh"
+#include "ssh.hh"
+
+namespace nix {
+
+static std::string uriScheme = "ssh://";
+
+struct LegacySSHStore : public Store
+{
+ const Setting<int> maxConnections{this, 1, "max-connections", "maximum number of concurrent SSH connections"};
+ const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"};
+ const Setting<bool> compress{this, false, "compress", "whether to compress the connection"};
+
+ struct Connection
+ {
+ std::unique_ptr<SSHMaster::Connection> sshConn;
+ FdSink to;
+ FdSource from;
+ };
+
+ std::string host;
+
+ ref<Pool<Connection>> connections;
+
+ SSHMaster master;
+
+ LegacySSHStore(const string & host, const Params & params)
+ : Store(params)
+ , host(host)
+ , connections(make_ref<Pool<Connection>>(
+ std::max(1, (int) maxConnections),
+ [this]() { return openConnection(); },
+ [](const ref<Connection> & r) { return true; }
+ ))
+ , master(
+ host,
+ sshKey,
+ // Use SSH master only if using more than 1 connection.
+ connections->capacity() > 1,
+ compress)
+ {
+ }
+
+ ref<Connection> openConnection()
+ {
+ auto conn = make_ref<Connection>();
+ conn->sshConn = master.startCommand("nix-store --serve --write");
+ conn->to = FdSink(conn->sshConn->in.get());
+ conn->from = FdSource(conn->sshConn->out.get());
+
+ int remoteVersion;
+
+ try {
+ conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION;
+ conn->to.flush();
+
+ unsigned int magic = readInt(conn->from);
+ if (magic != SERVE_MAGIC_2)
+ throw Error("protocol mismatch with ‘nix-store --serve’ on ‘%s’", host);
+ remoteVersion = readInt(conn->from);
+ if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200)
+ throw Error("unsupported ‘nix-store --serve’ protocol version on ‘%s’", host);
+
+ } catch (EndOfFile & e) {
+ throw Error("cannot connect to ‘%1%’", host);
+ }
+
+ return conn;
+ };
+
+ string getUri() override
+ {
+ return uriScheme + host;
+ }
+
+ void queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override
+ {
+ sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() -> std::shared_ptr<ValidPathInfo> {
+ auto conn(connections->get());
+
+ debug("querying remote host ‘%s’ for info on ‘%s’", host, path);
+
+ conn->to << cmdQueryPathInfos << PathSet{path};
+ conn->to.flush();
+
+ auto info = std::make_shared<ValidPathInfo>();
+ conn->from >> info->path;
+ if (info->path.empty()) return nullptr;
+ assert(path == info->path);
+
+ PathSet references;
+ conn->from >> info->deriver;
+ info->references = readStorePaths<PathSet>(*this, conn->from);
+ readLongLong(conn->from); // download size
+ info->narSize = readLongLong(conn->from);
+
+ auto s = readString(conn->from);
+ assert(s == "");
+
+ return info;
+ });
+ }
+
+ void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs,
+ std::shared_ptr<FSAccessor> accessor) override
+ {
+ debug("adding path ‘%s’ to remote host ‘%s’", info.path, host);
+
+ auto conn(connections->get());
+
+ conn->to
+ << cmdImportPaths
+ << 1;
+ conn->to(*nar);
+ conn->to
+ << exportMagic
+ << info.path
+ << info.references
+ << info.deriver
+ << 0
+ << 0;
+ conn->to.flush();
+
+ if (readInt(conn->from) != 1)
+ throw Error("failed to add path ‘%s’ to remote host ‘%s’, info.path, host");
+
+ }
+
+ void narFromPath(const Path & path, Sink & sink) override
+ {
+ auto conn(connections->get());
+
+ conn->to << cmdDumpStorePath << path;
+ conn->to.flush();
+
+ /* FIXME: inefficient. */
+ ParseSink parseSink; /* null sink; just parse the NAR */
+ TeeSource savedNAR(conn->from);
+ parseDump(parseSink, savedNAR);
+ sink(*savedNAR.data);
+ }
+
+ /* Unsupported methods. */
+ [[noreturn]] void unsupported()
+ {
+ throw Error("operation not supported on SSH stores");
+ }
+
+ PathSet queryAllValidPaths() override { unsupported(); }
+
+ void queryReferrers(const Path & path, PathSet & referrers) override
+ { unsupported(); }
+
+ PathSet queryDerivationOutputs(const Path & path) override
+ { unsupported(); }
+
+ StringSet queryDerivationOutputNames(const Path & path) override
+ { unsupported(); }
+
+ Path queryPathFromHashPart(const string & hashPart) override
+ { unsupported(); }
+
+ Path addToStore(const string & name, const Path & srcPath,
+ bool recursive, HashType hashAlgo,
+ PathFilter & filter, bool repair) override
+ { unsupported(); }
+
+ Path addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair) override
+ { unsupported(); }
+
+ void buildPaths(const PathSet & paths, BuildMode buildMode) override
+ { unsupported(); }
+
+ BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode) override
+ { unsupported(); }
+
+ void ensurePath(const Path & path) override
+ { unsupported(); }
+
+ void addTempRoot(const Path & path) override
+ { unsupported(); }
+
+ void addIndirectRoot(const Path & path) override
+ { unsupported(); }
+
+ Roots findRoots() override
+ { unsupported(); }
+
+ void collectGarbage(const GCOptions & options, GCResults & results) override
+ { unsupported(); }
+
+ ref<FSAccessor> getFSAccessor() override
+ { unsupported(); }
+
+ void addSignatures(const Path & storePath, const StringSet & sigs) override
+ { unsupported(); }
+
+ bool isTrusted() override
+ { return true; }
+
+ void computeFSClosure(const PathSet & paths,
+ PathSet & out, bool flipDirection = false,
+ bool includeOutputs = false, bool includeDerivers = false) override
+ {
+ if (flipDirection || includeDerivers) {
+ Store::computeFSClosure(paths, out, flipDirection, includeOutputs, includeDerivers);
+ return;
+ }
+
+ auto conn(connections->get());
+
+ conn->to
+ << cmdQueryClosure
+ << includeOutputs
+ << paths;
+ conn->to.flush();
+
+ auto res = readStorePaths<PathSet>(*this, conn->from);
+
+ out.insert(res.begin(), res.end());
+ }
+
+ PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override
+ {
+ auto conn(connections->get());
+
+ conn->to
+ << cmdQueryValidPaths
+ << false // lock
+ << maybeSubstitute
+ << paths;
+ conn->to.flush();
+
+ return readStorePaths<PathSet>(*this, conn->from);
+ }
+};
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
+ return std::make_shared<LegacySSHStore>(std::string(uri, uriScheme.size()), params);
+});
+
+}
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
new file mode 100644
index 000000000..aff22f9fc
--- /dev/null
+++ b/src/libstore/local-binary-cache-store.cc
@@ -0,0 +1,107 @@
+#include "binary-cache-store.hh"
+#include "globals.hh"
+#include "nar-info-disk-cache.hh"
+
+namespace nix {
+
/* Binary cache stored in a local directory (file:// URIs). */
class LocalBinaryCacheStore : public BinaryCacheStore
{
private:

    /* Root directory of the cache (holds *.narinfo files and a nar/
       subdirectory). */
    Path binaryCacheDir;

public:

    LocalBinaryCacheStore(
        const Params & params, const Path & binaryCacheDir)
        : BinaryCacheStore(params)
        , binaryCacheDir(binaryCacheDir)
    {
    }

    void init() override;

    std::string getUri() override
    {
        return "file://" + binaryCacheDir;
    }

protected:

    bool fileExists(const std::string & path) override;

    void upsertFile(const std::string & path,
        const std::string & data,
        const std::string & mimeType) override;

    /* Read a file, reporting a missing file (ENOENT) as success with a
       null pointer rather than as a failure. */
    void getFile(const std::string & path,
        std::function<void(std::shared_ptr<std::string>)> success,
        std::function<void(std::exception_ptr exc)> failure) override
    {
        sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
            try {
                return std::make_shared<std::string>(readFile(binaryCacheDir + "/" + path));
            } catch (SysError & e) {
                if (e.errNo == ENOENT) return std::shared_ptr<std::string>();
                throw;
            }
        });
    }

    /* Enumerate store paths by scanning for *.narinfo files: a name of
       length 40 is a 32-character hash part plus the 8-character
       ".narinfo" suffix, which is stripped off again below. */
    PathSet queryAllValidPaths() override
    {
        PathSet paths;

        for (auto & entry : readDirectory(binaryCacheDir)) {
            if (entry.name.size() != 40 ||
                !hasSuffix(entry.name, ".narinfo"))
                continue;
            paths.insert(storeDir + "/" + entry.name.substr(0, entry.name.size() - 8));
        }

        return paths;
    }

};
+
+void LocalBinaryCacheStore::init()
+{
+ createDirs(binaryCacheDir + "/nar");
+ BinaryCacheStore::init();
+}
+
/* Write ‘s’ to ‘path’ atomically: write a pid-unique temporary file
   first, then rename() it into place, so readers never observe a
   partially written file.  The AutoDelete guard removes the temporary
   file if writeFile or rename fails, and is cancelled on success. */
static void atomicWrite(const Path & path, const std::string & s)
{
    Path tmp = path + ".tmp." + std::to_string(getpid());
    AutoDelete del(tmp, false);
    writeFile(tmp, s);
    if (rename(tmp.c_str(), path.c_str()))
        throw SysError(format("renaming ‘%1%’ to ‘%2%’") % tmp % path);
    del.cancel();
}
+
/* A cache file exists iff the corresponding file exists on disk. */
bool LocalBinaryCacheStore::fileExists(const std::string & path)
{
    return pathExists(binaryCacheDir + "/" + path);
}
+
/* Create or replace a cache file atomically; mimeType is irrelevant
   for a local directory and is ignored. */
void LocalBinaryCacheStore::upsertFile(const std::string & path,
    const std::string & data,
    const std::string & mimeType)
{
    atomicWrite(binaryCacheDir + "/" + path, data);
}
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1" ||
+ std::string(uri, 0, 7) != "file://")
+ return 0;
+ auto store = std::make_shared<LocalBinaryCacheStore>(params, std::string(uri, 7));
+ store->init();
+ return store;
+});
+
+}
diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc
new file mode 100644
index 000000000..bf247903c
--- /dev/null
+++ b/src/libstore/local-fs-store.cc
@@ -0,0 +1,130 @@
+#include "archive.hh"
+#include "fs-accessor.hh"
+#include "store-api.hh"
+#include "globals.hh"
+#include "compression.hh"
+#include "derivations.hh"
+
+namespace nix {
+
/* Base class for stores backed by the local file system; no state of
   its own beyond what Store holds. */
LocalFSStore::LocalFSStore(const Params & params)
    : Store(params)
{
}
+
+struct LocalStoreAccessor : public FSAccessor
+{
+ ref<LocalFSStore> store;
+
+ LocalStoreAccessor(ref<LocalFSStore> store) : store(store) { }
+
+ Path toRealPath(const Path & path)
+ {
+ Path storePath = store->toStorePath(path);
+ if (!store->isValidPath(storePath))
+ throw InvalidPath(format("path ‘%1%’ is not a valid store path") % storePath);
+ return store->getRealStoreDir() + std::string(path, store->storeDir.size());
+ }
+
+ FSAccessor::Stat stat(const Path & path) override
+ {
+ auto realPath = toRealPath(path);
+
+ struct stat st;
+ if (lstat(path.c_str(), &st)) {
+ if (errno == ENOENT || errno == ENOTDIR) return {Type::tMissing, 0, false};
+ throw SysError(format("getting status of ‘%1%’") % path);
+ }
+
+ if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode))
+ throw Error(format("file ‘%1%’ has unsupported type") % path);
+
+ return {
+ S_ISREG(st.st_mode) ? Type::tRegular :
+ S_ISLNK(st.st_mode) ? Type::tSymlink :
+ Type::tDirectory,
+ S_ISREG(st.st_mode) ? (uint64_t) st.st_size : 0,
+ S_ISREG(st.st_mode) && st.st_mode & S_IXUSR};
+ }
+
+ StringSet readDirectory(const Path & path) override
+ {
+ auto realPath = toRealPath(path);
+
+ auto entries = nix::readDirectory(path);
+
+ StringSet res;
+ for (auto & entry : entries)
+ res.insert(entry.name);
+
+ return res;
+ }
+
+ std::string readFile(const Path & path) override
+ {
+ return nix::readFile(toRealPath(path));
+ }
+
+ std::string readLink(const Path & path) override
+ {
+ return nix::readLink(toRealPath(path));
+ }
+};
+
/* Return an accessor for this store's file system contents.  The
   dynamic_pointer_cast re-derives a LocalFSStore pointer from the
   shared this-pointer; the ref<> constructor rejects a null result. */
ref<FSAccessor> LocalFSStore::getFSAccessor()
{
    return make_ref<LocalStoreAccessor>(ref<LocalFSStore>(std::dynamic_pointer_cast<LocalFSStore>(shared_from_this())));
}
+
+void LocalFSStore::narFromPath(const Path & path, Sink & sink)
+{
+ if (!isValidPath(path))
+ throw Error(format("path ‘%s’ is not valid") % path);
+ dumpPath(getRealStoreDir() + std::string(path, storeDir.size()), sink);
+}
+
/* Subdirectory of logDir where per-derivation build logs live. */
const string LocalFSStore::drvsLogDir = "drvs";
+
+
+
/* Return the build log of a derivation (or, for an output path, of
   its deriver), or nullptr if no log can be found. */
std::shared_ptr<std::string> LocalFSStore::getBuildLog(const Path & path_)
{
    auto path(path_);

    assertStorePath(path);


    /* For a non-derivation path, look up the derivation that built it. */
    if (!isDerivation(path)) {
        try {
            path = queryPathInfo(path)->deriver;
        } catch (InvalidPath &) {
            return nullptr;
        }
        if (path == "") return nullptr;
    }

    string baseName = baseNameOf(path);

    /* Try both log layouts: sharded (first two characters of the name
       as a subdirectory), then flat; each in plain and bzip2 form. */
    for (int j = 0; j < 2; j++) {

        Path logPath =
            j == 0
            ? fmt("%s/%s/%s/%s", logDir, drvsLogDir, string(baseName, 0, 2), string(baseName, 2))
            : fmt("%s/%s/%s", logDir, drvsLogDir, baseName);
        Path logBz2Path = logPath + ".bz2";

        if (pathExists(logPath))
            return std::make_shared<std::string>(readFile(logPath));

        else if (pathExists(logBz2Path)) {
            try {
                return decompress("bzip2", readFile(logBz2Path));
            } catch (Error &) { } /* corrupt compressed log: keep looking */
        }

    }

    return nullptr;
}
+
+}
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
new file mode 100644
index 000000000..5a98454ab
--- /dev/null
+++ b/src/libstore/local-store.cc
@@ -0,0 +1,1345 @@
+#include "local-store.hh"
+#include "globals.hh"
+#include "archive.hh"
+#include "pathlocks.hh"
+#include "worker-protocol.hh"
+#include "derivations.hh"
+#include "nar-info.hh"
+
+#include <iostream>
+#include <algorithm>
+#include <cstring>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/select.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <utime.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+#include <grp.h>
+
+#if __linux__
+#include <sched.h>
+#include <sys/statvfs.h>
+#include <sys/mount.h>
+#include <sys/ioctl.h>
+#endif
+
+#include <sqlite3.h>
+
+
+namespace nix {
+
+
/* Open the local store.  This creates any missing state directories,
   sets up multi-user permissions when running as root, refuses a
   symlinked store, reserves some emergency disk space for the garbage
   collector, acquires the global store lock, performs any pending
   database schema migrations and finally prepares the SQLite
   statements used by the other methods. */
LocalStore::LocalStore(const Params & params)
    : Store(params)
    , LocalFSStore(params)
    , realStoreDir_{this, false, rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
        "physical path to the Nix store"}
    , realStoreDir(realStoreDir_)
    , dbDir(stateDir + "/db")
    , linksDir(realStoreDir + "/.links")
    , reservedPath(dbDir + "/reserved")
    , schemaPath(dbDir + "/schema")
    , trashDir(realStoreDir + "/trash")
    , publicKeys(getDefaultPublicKeys())
{
    auto state(_state.lock());

    /* Create missing state directories if they don't already exist. */
    createDirs(realStoreDir);
    makeStoreWritable();
    createDirs(linksDir);
    Path profilesDir = stateDir + "/profiles";
    createDirs(profilesDir);
    createDirs(stateDir + "/temproots");
    createDirs(dbDir);
    Path gcRootsDir = stateDir + "/gcroots";
    if (!pathExists(gcRootsDir)) {
        createDirs(gcRootsDir);
        createSymlink(profilesDir, gcRootsDir + "/profiles");
    }

    /* Optionally, create directories and set permissions for a
       multi-user install. */
    if (getuid() == 0 && settings.buildUsersGroup != "") {

        Path perUserDir = profilesDir + "/per-user";
        createDirs(perUserDir);
        if (chmod(perUserDir.c_str(), 01777) == -1)
            throw SysError(format("could not set permissions on ‘%1%’ to 1777") % perUserDir);

        /* 1775: sticky bit set, group-writable store directory. */
        mode_t perm = 01775;

        struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
        if (!gr)
            printError(format("warning: the group ‘%1%’ specified in ‘build-users-group’ does not exist")
                % settings.buildUsersGroup);
        else {
            struct stat st;
            if (stat(realStoreDir.c_str(), &st))
                throw SysError(format("getting attributes of path ‘%1%’") % realStoreDir);

            if (st.st_uid != 0 || st.st_gid != gr->gr_gid || (st.st_mode & ~S_IFMT) != perm) {
                if (chown(realStoreDir.c_str(), 0, gr->gr_gid) == -1)
                    throw SysError(format("changing ownership of path ‘%1%’") % realStoreDir);
                if (chmod(realStoreDir.c_str(), perm) == -1)
                    throw SysError(format("changing permissions on path ‘%1%’") % realStoreDir);
            }
        }
    }

    /* Ensure that the store and its parents are not symlinks. */
    if (getEnv("NIX_IGNORE_SYMLINK_STORE") != "1") {
        Path path = realStoreDir;
        struct stat st;
        while (path != "/") {
            if (lstat(path.c_str(), &st))
                throw SysError(format("getting status of ‘%1%’") % path);
            if (S_ISLNK(st.st_mode))
                throw Error(format(
                    "the path ‘%1%’ is a symlink; "
                    "this is not allowed for the Nix store and its parent directories")
                    % path);
            path = dirOf(path);
        }
    }

    /* We can't open a SQLite database if the disk is full. Since
       this prevents the garbage collector from running when it's most
       needed, we reserve some dummy space that we can free just
       before doing a garbage collection. */
    try {
        struct stat st;
        if (stat(reservedPath.c_str(), &st) == -1 ||
            st.st_size != settings.reservedSize)
        {
            AutoCloseFD fd = open(reservedPath.c_str(), O_WRONLY | O_CREAT | O_CLOEXEC, 0600);
            int res = -1;
#if HAVE_POSIX_FALLOCATE
            res = posix_fallocate(fd.get(), 0, settings.reservedSize);
#endif
            if (res == -1) {
                /* Fallback when posix_fallocate() is unavailable or
                   failed: write dummy bytes, then truncate to the
                   exact reserved size. */
                writeFull(fd.get(), string(settings.reservedSize, 'X'));
                [[gnu::unused]] auto res2 = ftruncate(fd.get(), settings.reservedSize);
            }
        }
    } catch (SysError & e) { /* don't care about errors */
    }

    /* Acquire the big fat lock in shared mode to make sure that no
       schema upgrade is in progress. */
    Path globalLockPath = dbDir + "/big-lock";
    globalLock = openLockFile(globalLockPath.c_str(), true);

    if (!lockFile(globalLock.get(), ltRead, false)) {
        printError("waiting for the big Nix store lock...");
        lockFile(globalLock.get(), ltRead, true);
    }

    /* Check the current database schema and if necessary do an
       upgrade. */
    int curSchema = getSchema();
    if (curSchema > nixSchemaVersion)
        throw Error(format("current Nix store schema is version %1%, but I only support %2%")
            % curSchema % nixSchemaVersion);

    else if (curSchema == 0) { /* new store */
        curSchema = nixSchemaVersion;
        openDB(*state, true);
        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
    }

    else if (curSchema < nixSchemaVersion) {
        if (curSchema < 5)
            throw Error(
                "Your Nix store has a database in Berkeley DB format,\n"
                "which is no longer supported. To convert to the new format,\n"
                "please upgrade Nix to version 0.12 first.");

        if (curSchema < 6)
            throw Error(
                "Your Nix store has a database in flat file format,\n"
                "which is no longer supported. To convert to the new format,\n"
                "please upgrade Nix to version 1.11 first.");

        /* Migrations require the exclusive (write) lock. */
        if (!lockFile(globalLock.get(), ltWrite, false)) {
            printError("waiting for exclusive access to the Nix store...");
            lockFile(globalLock.get(), ltWrite, true);
        }

        /* Get the schema version again, because another process may
           have performed the upgrade already. */
        curSchema = getSchema();

        if (curSchema < 7) { upgradeStore7(); }

        openDB(*state, false);

        /* Schema 8: add columns for locally-built ("ultimate") paths
           and signatures. */
        if (curSchema < 8) {
            SQLiteTxn txn(state->db);
            state->db.exec("alter table ValidPaths add column ultimate integer");
            state->db.exec("alter table ValidPaths add column sigs text");
            txn.commit();
        }

        /* Schema 9: the FailedPaths table is no longer used; drop it. */
        if (curSchema < 9) {
            SQLiteTxn txn(state->db);
            state->db.exec("drop table FailedPaths");
            txn.commit();
        }

        /* Schema 10: add a column recording content-addressability. */
        if (curSchema < 10) {
            SQLiteTxn txn(state->db);
            state->db.exec("alter table ValidPaths add column ca text");
            txn.commit();
        }

        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());

        /* Drop back to the shared lock. */
        lockFile(globalLock.get(), ltRead, true);
    }

    else openDB(*state, false);

    /* Prepare SQL statements. */
    state->stmtRegisterValidPath.create(state->db,
        "insert into ValidPaths (path, hash, registrationTime, deriver, narSize, ultimate, sigs, ca) values (?, ?, ?, ?, ?, ?, ?, ?);");
    state->stmtUpdatePathInfo.create(state->db,
        "update ValidPaths set narSize = ?, hash = ?, ultimate = ?, sigs = ?, ca = ? where path = ?;");
    state->stmtAddReference.create(state->db,
        "insert or replace into Refs (referrer, reference) values (?, ?);");
    state->stmtQueryPathInfo.create(state->db,
        "select id, hash, registrationTime, deriver, narSize, ultimate, sigs, ca from ValidPaths where path = ?;");
    state->stmtQueryReferences.create(state->db,
        "select path from Refs join ValidPaths on reference = id where referrer = ?;");
    state->stmtQueryReferrers.create(state->db,
        "select path from Refs join ValidPaths on referrer = id where reference = (select id from ValidPaths where path = ?);");
    state->stmtInvalidatePath.create(state->db,
        "delete from ValidPaths where path = ?;");
    state->stmtAddDerivationOutput.create(state->db,
        "insert or replace into DerivationOutputs (drv, id, path) values (?, ?, ?);");
    state->stmtQueryValidDerivers.create(state->db,
        "select v.id, v.path from DerivationOutputs d join ValidPaths v on d.drv = v.id where d.path = ?;");
    state->stmtQueryDerivationOutputs.create(state->db,
        "select id, path from DerivationOutputs where drv = ?;");
    // Use "path >= ?" with limit 1 rather than "path like '?%'" to
    // ensure efficient lookup.
    state->stmtQueryPathFromHashPart.create(state->db,
        "select path from ValidPaths where path >= ? limit 1;");
    state->stmtQueryValidPaths.create(state->db, "select path from ValidPaths");
}
+
+
/* On shutdown, close and remove this process's temporary-roots file so
   it no longer pins paths against garbage collection. */
LocalStore::~LocalStore()
{
    auto state(_state.lock());

    try {
        if (state->fdTempRoots) {
            /* Assigning -1 releases the descriptor (AutoCloseFD);
               close before unlinking the file. */
            state->fdTempRoots = -1;
            unlink(state->fnTempRoots.c_str());
        }
    } catch (...) {
        /* Destructors must not throw. */
        ignoreException();
    }
}
+
+
+std::string LocalStore::getUri()
+{
+ return "local";
+}
+
+
+int LocalStore::getSchema()
+{
+ int curSchema = 0;
+ if (pathExists(schemaPath)) {
+ string s = readFile(schemaPath);
+ if (!string2Int(s, curSchema))
+ throw Error(format("‘%1%’ is corrupt") % schemaPath);
+ }
+ return curSchema;
+}
+
+
/* Open the SQLite database (creating it and installing the schema when
   `create') and configure it: one-hour busy timeout, foreign keys
   enabled, fsync policy per `fsyncMetadata' and WAL or truncate
   journal mode per `useSQLiteWAL'. */
void LocalStore::openDB(State & state, bool create)
{
    if (access(dbDir.c_str(), R_OK | W_OK))
        throw SysError(format("Nix database directory ‘%1%’ is not writable") % dbDir);

    /* Open the Nix database. */
    string dbPath = dbDir + "/db.sqlite";
    auto & db(state.db);
    if (sqlite3_open_v2(dbPath.c_str(), &db.db,
        SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
        throw Error(format("cannot open Nix database ‘%1%’") % dbPath);

    /* Retry for up to an hour when another process holds the
       database lock. */
    if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
        throwSQLiteError(db, "setting timeout");

    db.exec("pragma foreign_keys = 1");

    /* !!! check whether sqlite has been built with foreign key
       support */

    /* Whether SQLite should fsync(). "Normal" synchronous mode
       should be safe enough. If the user asks for it, don't sync at
       all. This can cause database corruption if the system
       crashes. */
    string syncMode = settings.fsyncMetadata ? "normal" : "off";
    db.exec("pragma synchronous = " + syncMode);

    /* Set the SQLite journal mode. WAL mode is fastest, so it's the
       default. */
    string mode = settings.useSQLiteWAL ? "wal" : "truncate";
    string prevMode;
    {
        SQLiteStmt stmt;
        stmt.create(db, "pragma main.journal_mode;");
        if (sqlite3_step(stmt) != SQLITE_ROW)
            throwSQLiteError(db, "querying journal mode");
        prevMode = string((const char *) sqlite3_column_text(stmt, 0));
    }
    /* Only switch the journal mode when needed; switching requires an
       exclusive lock on the database. */
    if (prevMode != mode &&
        sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting journal mode");

    /* Increase the auto-checkpoint interval to 40000 pages. This
       seems enough to ensure that instantiating the NixOS system
       derivation is done in a single fsync(). */
    if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 40000;", 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "setting autocheckpoint interval");

    /* Initialise the database schema, if necessary. */
    if (create) {
        const char * schema =
#include "schema.sql.hh"
            ;
        db.exec(schema);
    }
}
+
+
/* To improve purity, users may want to make the Nix store a read-only
   bind mount. So make the Nix store writable for this process. */
void LocalStore::makeStoreWritable()
{
#if __linux__
    /* Only root can create a mount namespace / remount. */
    if (getuid() != 0) return;
    /* Check if /nix/store is on a read-only mount. */
    struct statvfs stat;
    if (statvfs(realStoreDir.c_str(), &stat) != 0)
        throw SysError("getting info about the Nix store mount point");

    if (stat.f_flag & ST_RDONLY) {
        /* Remount in a private mount namespace so only this process
           sees the store as writable. */
        if (unshare(CLONE_NEWNS) == -1)
            throw SysError("setting up a private mount namespace");

        if (mount(0, realStoreDir.c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
            throw SysError(format("remounting %1% writable") % realStoreDir);
    }
#endif
}
+
+
/* Canonical modification time given to everything in the store. */
const time_t mtimeStore = 1; /* 1 second into the epoch */
+
+
/* Canonicalise the permissions and timestamp of `path', whose lstat()
   result is `st': strip all write/special permission bits (keeping
   0111 when the owner had the executable bit) and set the mtime to
   mtimeStore.  Symlink modes are left alone; their timestamps are
   still canonicalised where lutimes() exists. */
static void canonicaliseTimestampAndPermissions(const Path & path, const struct stat & st)
{
    if (!S_ISLNK(st.st_mode)) {

        /* Mask out all type related bits. */
        mode_t mode = st.st_mode & ~S_IFMT;

        if (mode != 0444 && mode != 0555) {
            mode = (st.st_mode & S_IFMT)
                | 0444
                | (st.st_mode & S_IXUSR ? 0111 : 0);
            if (chmod(path.c_str(), mode) == -1)
                throw SysError(format("changing mode of ‘%1%’ to %2$o") % path % mode);
        }

    }

    if (st.st_mtime != mtimeStore) {
        struct timeval times[2];
        times[0].tv_sec = st.st_atime;
        times[0].tv_usec = 0;
        times[1].tv_sec = mtimeStore;
        times[1].tv_usec = 0;
#if HAVE_LUTIMES
        /* lutimes() works on symlinks too; if the kernel lacks it
           (ENOSYS), fall back to utimes() for non-symlinks. */
        if (lutimes(path.c_str(), times) == -1)
            if (errno != ENOSYS ||
                (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1))
#else
        if (!S_ISLNK(st.st_mode) && utimes(path.c_str(), times) == -1)
#endif
            throw SysError(format("changing modification time of ‘%1%’") % path);
    }
}
+
+
+void canonicaliseTimestampAndPermissions(const Path & path)
+{
+ struct stat st;
+ if (lstat(path.c_str(), &st))
+ throw SysError(format("getting attributes of path ‘%1%’") % path);
+ canonicaliseTimestampAndPermissions(path, st);
+}
+
+
/* Recursive worker for canonicalisePathMetaData(): fix ownership,
   permissions and timestamps of `path' and everything below it.
   `fromUid' (unless -1) is the only foreign owner accepted, and
   `inodesSeen' records inodes already handled so that hard links
   within the same build are not rejected. */
static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
{
    checkInterrupt();

    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path ‘%1%’") % path);

    /* Really make sure that the path is of a supported type. */
    if (!(S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode)))
        throw Error(format("file ‘%1%’ has an unsupported type") % path);

    /* Fail if the file is not owned by the build user. This prevents
       us from messing up the ownership/permissions of files
       hard-linked into the output (e.g. "ln /etc/shadow $out/foo").
       However, ignore files that we chown'ed ourselves previously to
       ensure that we don't fail on hard links within the same build
       (i.e. "touch $out/foo; ln $out/foo $out/bar"). */
    if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
        assert(!S_ISDIR(st.st_mode));
        if (inodesSeen.find(Inode(st.st_dev, st.st_ino)) == inodesSeen.end())
            throw BuildError(format("invalid ownership on file ‘%1%’") % path);
        /* Already-seen inode: it must look fully canonicalised. */
        mode_t mode = st.st_mode & ~S_IFMT;
        assert(S_ISLNK(st.st_mode) || (st.st_uid == geteuid() && (mode == 0444 || mode == 0555) && st.st_mtime == mtimeStore));
        return;
    }

    inodesSeen.insert(Inode(st.st_dev, st.st_ino));

    canonicaliseTimestampAndPermissions(path, st);

    /* Change ownership to the current uid. If it's a symlink, use
       lchown if available, otherwise don't bother. Wrong ownership
       of a symlink doesn't matter, since the owning user can't change
       the symlink and can't delete it because the directory is not
       writable. The only exception is top-level paths in the Nix
       store (since that directory is group-writable for the Nix build
       users group); we check for this case below. */
    if (st.st_uid != geteuid()) {
#if HAVE_LCHOWN
        if (lchown(path.c_str(), geteuid(), getegid()) == -1)
#else
        if (!S_ISLNK(st.st_mode) &&
            chown(path.c_str(), geteuid(), getegid()) == -1)
#endif
            throw SysError(format("changing owner of ‘%1%’ to %2%")
                % path % geteuid());
    }

    if (S_ISDIR(st.st_mode)) {
        /* Recurse into directory entries. */
        DirEntries entries = readDirectory(path);
        for (auto & i : entries)
            canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen);
    }
}
+
+
/* Canonicalise metadata under `path', then verify that the top-level
   path itself ended up owned by us. */
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
{
    canonicalisePathMetaData_(path, fromUid, inodesSeen);

    /* On platforms that don't have lchown(), the top-level path can't
       be a symlink, since we can't change its ownership. */
    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path ‘%1%’") % path);

    if (st.st_uid != geteuid()) {
        /* Only a symlink can legitimately still have a foreign owner
           at this point (see canonicalisePathMetaData_). */
        assert(S_ISLNK(st.st_mode));
        throw Error(format("wrong ownership of top-level store path ‘%1%’") % path);
    }
}
+
+
+void canonicalisePathMetaData(const Path & path, uid_t fromUid)
+{
+ InodesSeen inodesSeen;
+ canonicalisePathMetaData(path, fromUid, inodesSeen);
+}
+
+
/* Verify that the output paths recorded in the derivation `drv' (both
   in its outputs and mirrored in its environment) match the paths that
   follow from hashing the derivation. */
void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation & drv)
{
    string drvName = storePathToName(drvPath);
    assert(isDerivation(drvName));
    drvName = string(drvName, 0, drvName.size() - drvExtension.size());

    if (drv.isFixedOutput()) {
        /* Fixed-output derivations have a single output, "out", whose
           path is determined by its declared hash. */
        DerivationOutputs::const_iterator out = drv.outputs.find("out");
        if (out == drv.outputs.end())
            throw Error(format("derivation ‘%1%’ does not have an output named ‘out’") % drvPath);

        bool recursive; Hash h;
        out->second.parseHashInfo(recursive, h);
        Path outPath = makeFixedOutputPath(recursive, h, drvName);

        StringPairs::const_iterator j = drv.env.find("out");
        if (out->second.path != outPath || j == drv.env.end() || j->second != outPath)
            throw Error(format("derivation ‘%1%’ has incorrect output ‘%2%’, should be ‘%3%’")
                % drvPath % out->second.path % outPath);
    }

    else {
        /* For regular derivations, hash the derivation with the output
           paths (and their env entries) blanked out, then recompute
           each expected output path from that hash. */
        Derivation drvCopy(drv);
        for (auto & i : drvCopy.outputs) {
            i.second.path = "";
            drvCopy.env[i.first] = "";
        }

        Hash h = hashDerivationModulo(*this, drvCopy);

        for (auto & i : drv.outputs) {
            Path outPath = makeOutputPath(i.first, h, drvName);
            StringPairs::const_iterator j = drv.env.find(i.first);
            if (i.second.path != outPath || j == drv.env.end() || j->second != outPath)
                throw Error(format("derivation ‘%1%’ has incorrect output ‘%2%’, should be ‘%3%’")
                    % drvPath % i.second.path % outPath);
        }
    }
}
+
+
/* Insert `info' as a new row in ValidPaths and return its row id.  For
   derivations, also record the outputs in DerivationOutputs (verifying
   them first when `checkOutputs').  Finally refresh the in-memory
   path-info cache.  The caller is expected to provide transactional
   context (see registerValidPaths). */
uint64_t LocalStore::addValidPath(State & state,
    const ValidPathInfo & info, bool checkOutputs)
{
    assert(info.ca == "" || info.isContentAddressed(*this));

    /* A false second argument to a bind below stores NULL instead of
       the value. */
    state.stmtRegisterValidPath.use()
        (info.path)
        ("sha256:" + printHash(info.narHash))
        (info.registrationTime == 0 ? time(0) : info.registrationTime)
        (info.deriver, info.deriver != "")
        (info.narSize, info.narSize != 0)
        (info.ultimate ? 1 : 0, info.ultimate)
        (concatStringsSep(" ", info.sigs), !info.sigs.empty())
        (info.ca, !info.ca.empty())
        .exec();
    uint64_t id = sqlite3_last_insert_rowid(state.db);

    /* If this is a derivation, then store the derivation outputs in
       the database. This is useful for the garbage collector: it can
       efficiently query whether a path is an output of some
       derivation. */
    if (isDerivation(info.path)) {
        Derivation drv = readDerivation(realStoreDir + "/" + baseNameOf(info.path));

        /* Verify that the output paths in the derivation are correct
           (i.e., follow the scheme for computing output paths from
           derivations). Note that if this throws an error, then the
           DB transaction is rolled back, so the path validity
           registration above is undone. */
        if (checkOutputs) checkDerivationOutputs(info.path, drv);

        for (auto & i : drv.outputs) {
            state.stmtAddDerivationOutput.use()
                (id)
                (i.first)
                (i.second.path)
                .exec();
        }
    }

    {
        /* Keep the Store-level cache in sync with the database. */
        auto state_(Store::state.lock());
        state_->pathInfoCache.upsert(storePathToHash(info.path), std::make_shared<ValidPathInfo>(info));
    }

    return id;
}
+
+
+Hash parseHashField(const Path & path, const string & s)
+{
+ string::size_type colon = s.find(':');
+ if (colon == string::npos)
+ throw Error(format("corrupt hash ‘%1%’ in valid-path entry for ‘%2%’")
+ % s % path);
+ HashType ht = parseHashType(string(s, 0, colon));
+ if (ht == htUnknown)
+ throw Error(format("unknown hash type ‘%1%’ in valid-path entry for ‘%2%’")
+ % string(s, 0, colon) % path);
+ return parseHash(ht, string(s, colon + 1));
+}
+
+
/* Look up the ValidPathInfo for `path' directly in the database (the
   Store-level cache sits above this).  `success' receives the info, or
   null if the path is not registered; errors are delivered to
   `failure' through sync2async. */
void LocalStore::queryPathInfoUncached(const Path & path,
    std::function<void(std::shared_ptr<ValidPathInfo>)> success,
    std::function<void(std::exception_ptr exc)> failure)
{
    sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {

        auto info = std::make_shared<ValidPathInfo>();
        info->path = path;

        assertStorePath(path);

        return retrySQLite<std::shared_ptr<ValidPathInfo>>([&]() {
            auto state(_state.lock());

            /* Get the path info. */
            auto useQueryPathInfo(state->stmtQueryPathInfo.use()(path));

            if (!useQueryPathInfo.next())
                return std::shared_ptr<ValidPathInfo>();

            info->id = useQueryPathInfo.getInt(0);

            info->narHash = parseHashField(path, useQueryPathInfo.getStr(1));

            info->registrationTime = useQueryPathInfo.getInt(2);

            /* Columns 3 (deriver), 6 (sigs) and 7 (ca) may be NULL;
               read them with sqlite3_column_text() so NULL can be
               distinguished from the empty string. */
            auto s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 3);
            if (s) info->deriver = s;

            /* Note that narSize = NULL yields 0. */
            info->narSize = useQueryPathInfo.getInt(4);

            info->ultimate = useQueryPathInfo.getInt(5) == 1;

            s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 6);
            if (s) info->sigs = tokenizeString<StringSet>(s, " ");

            s = (const char *) sqlite3_column_text(state->stmtQueryPathInfo, 7);
            if (s) info->ca = s;

            /* Get the references. */
            auto useQueryReferences(state->stmtQueryReferences.use()(info->id));

            while (useQueryReferences.next())
                info->references.insert(useQueryReferences.getStr(0));

            return info;
        });
    });
}
+
+
/* Update path info in the database. */
void LocalStore::updatePathInfo(State & state, const ValidPathInfo & info)
{
    /* Bind order follows stmtUpdatePathInfo: narSize, hash, ultimate,
       sigs, ca, then the path in the where-clause.  A false second
       argument binds NULL instead of the value. */
    state.stmtUpdatePathInfo.use()
        (info.narSize, info.narSize != 0)
        ("sha256:" + printHash(info.narHash))
        (info.ultimate ? 1 : 0, info.ultimate)
        (concatStringsSep(" ", info.sigs), !info.sigs.empty())
        (info.ca, !info.ca.empty())
        (info.path)
        .exec();
}
+
+
+uint64_t LocalStore::queryValidPathId(State & state, const Path & path)
+{
+ auto use(state.stmtQueryPathInfo.use()(path));
+ if (!use.next())
+ throw Error(format("path ‘%1%’ is not valid") % path);
+ return use.getInt(0);
+}
+
+
+bool LocalStore::isValidPath_(State & state, const Path & path)
+{
+ return state.stmtQueryPathInfo.use()(path).next();
+}
+
+
+bool LocalStore::isValidPathUncached(const Path & path)
+{
+ return retrySQLite<bool>([&]() {
+ auto state(_state.lock());
+ return isValidPath_(*state, path);
+ });
+}
+
+
+PathSet LocalStore::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
+{
+ PathSet res;
+ for (auto & i : paths)
+ if (isValidPath(i)) res.insert(i);
+ return res;
+}
+
+
+PathSet LocalStore::queryAllValidPaths()
+{
+ return retrySQLite<PathSet>([&]() {
+ auto state(_state.lock());
+ auto use(state->stmtQueryValidPaths.use());
+ PathSet res;
+ while (use.next()) res.insert(use.getStr(0));
+ return res;
+ });
+}
+
+
+void LocalStore::queryReferrers(State & state, const Path & path, PathSet & referrers)
+{
+ auto useQueryReferrers(state.stmtQueryReferrers.use()(path));
+
+ while (useQueryReferrers.next())
+ referrers.insert(useQueryReferrers.getStr(0));
+}
+
+
+void LocalStore::queryReferrers(const Path & path, PathSet & referrers)
+{
+ assertStorePath(path);
+ return retrySQLite<void>([&]() {
+ auto state(_state.lock());
+ queryReferrers(*state, path, referrers);
+ });
+}
+
+
+PathSet LocalStore::queryValidDerivers(const Path & path)
+{
+ assertStorePath(path);
+
+ return retrySQLite<PathSet>([&]() {
+ auto state(_state.lock());
+
+ auto useQueryValidDerivers(state->stmtQueryValidDerivers.use()(path));
+
+ PathSet derivers;
+ while (useQueryValidDerivers.next())
+ derivers.insert(useQueryValidDerivers.getStr(1));
+
+ return derivers;
+ });
+}
+
+
+PathSet LocalStore::queryDerivationOutputs(const Path & path)
+{
+ return retrySQLite<PathSet>([&]() {
+ auto state(_state.lock());
+
+ auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()
+ (queryValidPathId(*state, path)));
+
+ PathSet outputs;
+ while (useQueryDerivationOutputs.next())
+ outputs.insert(useQueryDerivationOutputs.getStr(1));
+
+ return outputs;
+ });
+}
+
+
+StringSet LocalStore::queryDerivationOutputNames(const Path & path)
+{
+ return retrySQLite<StringSet>([&]() {
+ auto state(_state.lock());
+
+ auto useQueryDerivationOutputs(state->stmtQueryDerivationOutputs.use()
+ (queryValidPathId(*state, path)));
+
+ StringSet outputNames;
+ while (useQueryDerivationOutputs.next())
+ outputNames.insert(useQueryDerivationOutputs.getStr(0));
+
+ return outputNames;
+ });
+}
+
+
/* Return the full store path whose hash part is `hashPart', or "" if
   none exists. */
Path LocalStore::queryPathFromHashPart(const string & hashPart)
{
    if (hashPart.size() != storePathHashLen) throw Error("invalid hash part");

    Path prefix = storeDir + "/" + hashPart;

    return retrySQLite<Path>([&]() -> std::string {
        auto state(_state.lock());

        /* The statement returns the first path >= prefix (cheap index
           scan instead of a LIKE query, see the comment where the
           statement is prepared). */
        auto useQueryPathFromHashPart(state->stmtQueryPathFromHashPart.use()(prefix));

        if (!useQueryPathFromHashPart.next()) return "";

        /* Verify that the returned path actually starts with the
           prefix; ">=" alone would also match later paths. */
        const char * s = (const char *) sqlite3_column_text(state->stmtQueryPathFromHashPart, 0);
        return s && prefix.compare(0, prefix.size(), s, prefix.size()) == 0 ? s : "";
    });
}
+
+
+PathSet LocalStore::querySubstitutablePaths(const PathSet & paths)
+{
+ if (!settings.useSubstitutes) return PathSet();
+
+ auto remaining = paths;
+ PathSet res;
+
+ for (auto & sub : getDefaultSubstituters()) {
+ if (remaining.empty()) break;
+ if (sub->storeDir != storeDir) continue;
+ if (!sub->wantMassQuery()) continue;
+
+ auto valid = sub->queryValidPaths(remaining);
+
+ PathSet remaining2;
+ for (auto & path : remaining)
+ if (valid.count(path))
+ res.insert(path);
+ else
+ remaining2.insert(path);
+
+ std::swap(remaining, remaining2);
+ }
+
+ return res;
+}
+
+
+void LocalStore::querySubstitutablePathInfos(const PathSet & paths,
+ SubstitutablePathInfos & infos)
+{
+ if (!settings.useSubstitutes) return;
+ for (auto & sub : getDefaultSubstituters()) {
+ if (sub->storeDir != storeDir) continue;
+ for (auto & path : paths) {
+ if (infos.count(path)) continue;
+ debug(format("checking substituter ‘%s’ for path ‘%s’")
+ % sub->getUri() % path);
+ try {
+ auto info = sub->queryPathInfo(path);
+ auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
+ std::shared_ptr<const ValidPathInfo>(info));
+ infos[path] = SubstitutablePathInfo{
+ info->deriver,
+ info->references,
+ narInfo ? narInfo->fileSize : 0,
+ info->narSize};
+ } catch (InvalidPath) {
+ }
+ }
+ }
+}
+
+
+void LocalStore::registerValidPath(const ValidPathInfo & info)
+{
+ ValidPathInfos infos;
+ infos.push_back(info);
+ registerValidPaths(infos);
+}
+
+
/* Register a batch of paths as valid inside a single SQLite
   transaction: insert or update each path, record its references,
   re-check derivation outputs, and topologically sort the set to
   reject reference cycles. */
void LocalStore::registerValidPaths(const ValidPathInfos & infos)
{
    /* SQLite will fsync by default, but the new valid paths may not
       be fsync-ed. So some may want to fsync them before registering
       the validity, at the expense of some speed of the path
       registering operation. */
    if (settings.syncBeforeRegistering) sync();

    return retrySQLite<void>([&]() {
        auto state(_state.lock());

        SQLiteTxn txn(state->db);
        PathSet paths;

        for (auto & i : infos) {
            assert(i.narHash.type == htSHA256);
            if (isValidPath_(*state, i.path))
                updatePathInfo(*state, i);
            else
                addValidPath(*state, i, false);
            paths.insert(i.path);
        }

        /* Second pass: all paths now have row ids, so references can
           be recorded. */
        for (auto & i : infos) {
            auto referrer = queryValidPathId(*state, i.path);
            for (auto & j : i.references)
                state->stmtAddReference.use()(referrer)(queryValidPathId(*state, j)).exec();
        }

        /* Check that the derivation outputs are correct. We can't do
           this in addValidPath() above, because the references might
           not be valid yet. */
        for (auto & i : infos)
            if (isDerivation(i.path)) {
                // FIXME: inefficient; we already loaded the
                // derivation in addValidPath().
                Derivation drv = readDerivation(realStoreDir + "/" + baseNameOf(i.path));
                checkDerivationOutputs(i.path, drv);
            }

        /* Do a topological sort of the paths. This will throw an
           error if a cycle is detected and roll back the
           transaction. Cycles can only occur when a derivation
           has multiple outputs. */
        topoSortPaths(paths);

        txn.commit();
    });
}
+
+
/* Invalidate a path. The caller is responsible for checking that
   there are no referrers. */
void LocalStore::invalidatePath(State & state, const Path & path)
{
    debug(format("invalidating path ‘%1%’") % path);

    state.stmtInvalidatePath.use()(path).exec();

    /* Note that the foreign key constraints on the Refs table take
       care of deleting the references entries for `path'. */

    {
        /* Also drop any cached info for the path. */
        auto state_(Store::state.lock());
        state_->pathInfoCache.erase(storePathToHash(path));
    }
}
+
+
/* Add a path to the store given its NAR serialisation plus metadata:
   verify the NAR hash and (unless `dontCheckSigs') the signatures,
   then unpack the NAR under the physical store directory, canonicalise
   its metadata, deduplicate via optimisePath() and register validity. */
void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
    bool repair, bool dontCheckSigs, std::shared_ptr<FSAccessor> accessor)
{
    Hash h = hashString(htSHA256, *nar);
    if (h != info.narHash)
        throw Error(format("hash mismatch importing path ‘%s’; expected hash ‘%s’, got ‘%s’") %
            info.path % info.narHash.to_string() % h.to_string());

    if (requireSigs && !dontCheckSigs && !info.checkSignatures(*this, publicKeys))
        throw Error("cannot add path ‘%s’ because it lacks a valid signature", info.path);

    /* Protect the path from the garbage collector while we work. */
    addTempRoot(info.path);

    if (repair || !isValidPath(info.path)) {

        PathLocks outputLock;

        Path realPath = realStoreDir + "/" + baseNameOf(info.path);

        /* Lock the output path. But don't lock if we're being called
           from a build hook (whose parent process already acquired a
           lock on this path). */
        Strings locksHeld = tokenizeString<Strings>(getEnv("NIX_HELD_LOCKS"));
        if (find(locksHeld.begin(), locksHeld.end(), info.path) == locksHeld.end())
            outputLock.lockPaths({realPath});

        /* Re-check under the lock: another process may have added the
           path in the meantime. */
        if (repair || !isValidPath(info.path)) {

            deletePath(realPath);

            StringSource source(*nar);
            restorePath(realPath, source);

            canonicalisePathMetaData(realPath, -1);

            optimisePath(realPath); // FIXME: combine with hashPath()

            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }
}
+
+
/* Add a path to the store from its in-memory serialisation `dump': a
   NAR when `recursive', otherwise flat file contents.  The store path
   is derived from the `hashAlgo' hash of the dump.  Returns the store
   path, registering it as valid if it wasn't already (or if `repair'). */
Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
    bool recursive, HashType hashAlgo, bool repair)
{
    Hash h = hashString(hashAlgo, dump);

    Path dstPath = makeFixedOutputPath(recursive, h, name);

    /* Protect the path from the garbage collector while we work. */
    addTempRoot(dstPath);

    if (repair || !isValidPath(dstPath)) {

        /* The first check above is an optimisation to prevent
           unnecessary lock acquisition. */

        Path realPath = realStoreDir + "/" + baseNameOf(dstPath);

        PathLocks outputLock({realPath});

        /* Re-check under the lock. */
        if (repair || !isValidPath(dstPath)) {

            deletePath(realPath);

            if (recursive) {
                StringSource source(dump);
                restorePath(realPath, source);
            } else
                writeFile(realPath, dump);

            canonicalisePathMetaData(realPath, -1);

            /* Register the SHA-256 hash of the NAR serialisation of
               the path in the database. We may just have computed it
               above (if called with recursive == true and hashAlgo ==
               sha256); otherwise, compute it here. */
            HashResult hash;
            if (recursive) {
                hash.first = hashAlgo == htSHA256 ? h : hashString(htSHA256, dump);
                hash.second = dump.size();
            } else
                hash = hashPath(htSHA256, realPath);

            optimisePath(realPath); // FIXME: combine with hashPath()

            ValidPathInfo info;
            info.path = dstPath;
            info.narHash = hash.first;
            info.narSize = hash.second;
            info.ultimate = true;
            info.ca = makeFixedOutputCA(recursive, h);
            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
+
+
+Path LocalStore::addToStore(const string & name, const Path & _srcPath,
+ bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
+{
+ Path srcPath(absPath(_srcPath));
+
+ /* Read the whole path into memory. This is not a very scalable
+ method for very large paths, but `copyPath' is mainly used for
+ small files. */
+ StringSink sink;
+ if (recursive)
+ dumpPath(srcPath, sink, filter);
+ else
+ sink.s = make_ref<std::string>(readFile(srcPath));
+
+ return addToStoreFromDump(*sink.s, name, recursive, hashAlgo, repair);
+}
+
+
/* Store the string `s' as a text file with the given references.  The
   store path is derived from the SHA-256 of the contents ("text"
   content-address), so equal text always maps to the same path. */
Path LocalStore::addTextToStore(const string & name, const string & s,
    const PathSet & references, bool repair)
{
    auto hash = hashString(htSHA256, s);
    auto dstPath = makeTextPath(name, hash, references);

    /* Protect the path from the garbage collector while we work. */
    addTempRoot(dstPath);

    if (repair || !isValidPath(dstPath)) {

        Path realPath = realStoreDir + "/" + baseNameOf(dstPath);

        PathLocks outputLock({realPath});

        /* Re-check under the lock. */
        if (repair || !isValidPath(dstPath)) {

            deletePath(realPath);

            writeFile(realPath, s);

            canonicalisePathMetaData(realPath, -1);

            /* Compute the NAR hash/size for the validity registration. */
            StringSink sink;
            dumpString(s, sink);
            auto narHash = hashString(htSHA256, *sink.s);

            optimisePath(realPath);

            ValidPathInfo info;
            info.path = dstPath;
            info.narHash = narHash;
            info.narSize = sink.s->size();
            info.references = references;
            info.ultimate = true;
            info.ca = "text:" + hash.to_string();
            registerValidPath(info);
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
+
+
+/* Create a temporary directory in the store that won't be
+ garbage-collected. */
+/* Create a temporary directory in the store that won't be
+   garbage-collected.  The temp root registered via addTempRoot()
+   is what protects it from the GC. */
+Path LocalStore::createTempDirInStore()
+{
+    Path tmpDir;
+    do {
+        /* There is a slight possibility that `tmpDir' gets deleted by
+           the GC between createTempDir() and addTempRoot(), so repeat
+           until `tmpDir' exists. */
+        tmpDir = createTempDir(realStoreDir);
+        addTempRoot(tmpDir);
+    } while (!pathExists(tmpDir));
+    return tmpDir;
+}
+
+
+/* Delete `path' from the database, but refuse (by throwing
+   PathInUse) if it still has referrers other than itself.  The whole
+   operation runs in one SQLite transaction and is retried on
+   SQLITE_BUSY via retrySQLite. */
+void LocalStore::invalidatePathChecked(const Path & path)
+{
+    assertStorePath(path);
+
+    retrySQLite<void>([&]() {
+        auto state(_state.lock());
+
+        SQLiteTxn txn(state->db);
+
+        if (isValidPath_(*state, path)) {
+            PathSet referrers; queryReferrers(*state, path, referrers);
+            referrers.erase(path); /* ignore self-references */
+            if (!referrers.empty())
+                throw PathInUse(format("cannot delete path ‘%1%’ because it is in use by %2%")
+                    % path % showPaths(referrers));
+            invalidatePath(*state, path);
+        }
+
+        txn.commit();
+    });
+}
+
+
+/* Check the consistency of the store: every registered valid path
+   must exist on disk, and (if `checkContents') its contents must
+   match the registered NAR hash.  When `repair' is set, problems are
+   repaired where possible.  Returns true iff unrepaired errors were
+   found. */
+bool LocalStore::verifyStore(bool checkContents, bool repair)
+{
+    printError(format("reading the Nix store..."));
+
+    bool errors = false;
+
+    /* Acquire the global GC lock to prevent a garbage collection. */
+    AutoCloseFD fdGCLock = openGCLock(ltWrite);
+
+    /* Basenames of everything physically present in the store. */
+    PathSet store;
+    for (auto & i : readDirectory(realStoreDir)) store.insert(i.name);
+
+    /* Check whether all valid paths actually exist. */
+    printInfo("checking path existence...");
+
+    PathSet validPaths2 = queryAllValidPaths(), validPaths, done;
+
+    for (auto & i : validPaths2)
+        verifyPath(i, store, done, validPaths, repair, errors);
+
+    /* Release the GC lock so that checking content hashes (which can
+       take ages) doesn't block the GC or builds. */
+    fdGCLock = -1;
+
+    /* Optionally, check the content hashes (slow). */
+    if (checkContents) {
+        printInfo("checking hashes...");
+
+        Hash nullHash(htSHA256);
+
+        for (auto & i : validPaths) {
+            try {
+                auto info = std::const_pointer_cast<ValidPathInfo>(std::shared_ptr<const ValidPathInfo>(queryPathInfo(i)));
+
+                /* Check the content hash (optionally - slow). */
+                printMsg(lvlTalkative, format("checking contents of ‘%1%’") % i);
+                HashResult current = hashPath(info->narHash.type, i);
+
+                if (info->narHash != nullHash && info->narHash != current.first) {
+                    printError(format("path ‘%1%’ was modified! "
+                            "expected hash ‘%2%’, got ‘%3%’")
+                        % i % printHash(info->narHash) % printHash(current.first));
+                    if (repair) repairPath(i); else errors = true;
+                } else {
+
+                    bool update = false;
+
+                    /* Fill in missing hashes. */
+                    if (info->narHash == nullHash) {
+                        printError(format("fixing missing hash on ‘%1%’") % i);
+                        info->narHash = current.first;
+                        update = true;
+                    }
+
+                    /* Fill in missing narSize fields (from old stores). */
+                    if (info->narSize == 0) {
+                        printError(format("updating size field on ‘%1%’ to %2%") % i % current.second);
+                        info->narSize = current.second;
+                        update = true;
+                    }
+
+                    if (update) {
+                        auto state(_state.lock());
+                        updatePathInfo(*state, *info);
+                    }
+
+                }
+
+            } catch (Error & e) {
+                /* It's possible that the path got GC'ed, so ignore
+                   errors on invalid paths. */
+                if (isValidPath(i))
+                    printError(format("error: %1%") % e.msg());
+                else
+                    printError(format("warning: %1%") % e.msg());
+                /* NOTE(review): `errors' is set even in the warning
+                   (no-longer-valid path) branch above — possibly
+                   over-strict; verify intent. */
+                errors = true;
+            }
+        }
+    }
+
+    return errors;
+}
+
+
+/* Helper for verifyStore(): check that the registered valid path
+   `path' is actually present in `store' (the set of basenames found
+   on disk).  A path that has disappeared is removed from the
+   database if no remaining valid referrer depends on it; otherwise
+   it is repaired (if `repair') or counted as an error.  `done'
+   guards against revisiting paths; surviving paths are accumulated
+   in `validPaths'. */
+void LocalStore::verifyPath(const Path & path, const PathSet & store,
+    PathSet & done, PathSet & validPaths, bool repair, bool & errors)
+{
+    checkInterrupt();
+
+    if (done.find(path) != done.end()) return;
+    done.insert(path);
+
+    /* Entries that aren't syntactically store paths are simply
+       removed from the database. */
+    if (!isStorePath(path)) {
+        printError(format("path ‘%1%’ is not in the Nix store") % path);
+        auto state(_state.lock());
+        invalidatePath(*state, path);
+        return;
+    }
+
+    if (store.find(baseNameOf(path)) == store.end()) {
+        /* Check any referrers first. If we can invalidate them
+           first, then we can invalidate this path as well. */
+        bool canInvalidate = true;
+        PathSet referrers; queryReferrers(path, referrers);
+        for (auto & i : referrers)
+            if (i != path) {
+                verifyPath(i, store, done, validPaths, repair, errors);
+                if (validPaths.find(i) != validPaths.end())
+                    canInvalidate = false;
+            }
+
+        if (canInvalidate) {
+            printError(format("path ‘%1%’ disappeared, removing from database...") % path);
+            auto state(_state.lock());
+            invalidatePath(*state, path);
+        } else {
+            printError(format("path ‘%1%’ disappeared, but it still has valid referrers!") % path);
+            if (repair)
+                try {
+                    repairPath(path);
+                } catch (Error & e) {
+                    printError(format("warning: %1%") % e.msg());
+                    errors = true;
+                }
+            else errors = true;
+        }
+
+        return;
+    }
+
+    validPaths.insert(path);
+}
+
+
+/* The immutable-bit machinery below is only compiled on systems that
+   expose the ext2-style FS_IOC_GETFLAGS/SETFLAGS ioctls; elsewhere
+   upgradeStore7() is a no-op. */
+#if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL)
+
+/* Recursively clear the FS_IMMUTABLE_FL flag on `path' and (for
+   directories) everything beneath it.  Only regular files and
+   directories are touched. */
+static void makeMutable(const Path & path)
+{
+    checkInterrupt();
+
+    struct stat st = lstat(path);
+
+    if (!S_ISDIR(st.st_mode) && !S_ISREG(st.st_mode)) return;
+
+    if (S_ISDIR(st.st_mode)) {
+        for (auto & i : readDirectory(path))
+            makeMutable(path + "/" + i.name);
+    }
+
+    /* The O_NOFOLLOW is important to prevent us from changing the
+       mutable bit on the target of a symlink (which would be a
+       security hole). */
+    AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
+    if (fd == -1) {
+        if (errno == ELOOP) return; // it's a symlink
+        throw SysError(format("opening file ‘%1%’") % path);
+    }
+
+    unsigned int flags = 0, old;
+
+    /* Silently ignore errors getting/setting the immutable flag so
+       that we work correctly on filesystems that don't support it. */
+    if (ioctl(fd, FS_IOC_GETFLAGS, &flags)) return;
+    old = flags;
+    flags &= ~FS_IMMUTABLE_FL;
+    if (old == flags) return;   /* bit wasn't set; nothing to do */
+    if (ioctl(fd, FS_IOC_SETFLAGS, &flags)) return;
+}
+
+/* Upgrade from schema 6 (Nix 0.15) to schema 7 (Nix >= 1.3). */
+void LocalStore::upgradeStore7()
+{
+    /* Only root can clear immutable bits. */
+    if (getuid() != 0) return;
+    printError("removing immutable bits from the Nix store (this may take a while)...");
+    makeMutable(realStoreDir);
+}
+
+#else
+
+/* No immutable-flag support on this platform; nothing to upgrade. */
+void LocalStore::upgradeStore7()
+{
+}
+
+#endif
+
+
+/* Run SQLite VACUUM to compact the database file. */
+void LocalStore::vacuumDB()
+{
+    auto state(_state.lock());
+    state->db.exec("vacuum");
+}
+
+
+/* Add the given signatures to the set already registered for
+   `storePath' and persist the merged set, all inside one SQLite
+   transaction (retried on SQLITE_BUSY). */
+void LocalStore::addSignatures(const Path & storePath, const StringSet & sigs)
+{
+    retrySQLite<void>([&]() {
+        auto state(_state.lock());
+
+        SQLiteTxn txn(state->db);
+
+        /* queryPathInfo() returns a const info; cast away constness
+           so the merged sigs can be written back. */
+        auto info = std::const_pointer_cast<ValidPathInfo>(std::shared_ptr<const ValidPathInfo>(queryPathInfo(storePath)));
+
+        info->sigs.insert(sigs.begin(), sigs.end());
+
+        updatePathInfo(*state, *info);
+
+        txn.commit();
+    });
+}
+
+
+/* Sign `info' with every secret key listed in the
+   ‘secret-key-files’ setting.  The key files are re-read from disk
+   on every call. */
+void LocalStore::signPathInfo(ValidPathInfo & info)
+{
+    // FIXME: keep secret keys in memory.
+
+    auto secretKeyFiles = settings.secretKeyFiles;
+
+    for (auto & secretKeyFile : secretKeyFiles.get()) {
+        SecretKey secretKey(readFile(secretKeyFile));
+        info.sign(secretKey);
+    }
+}
+
+
+}
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
new file mode 100644
index 000000000..f2c40e964
--- /dev/null
+++ b/src/libstore/local-store.hh
@@ -0,0 +1,289 @@
+#pragma once
+
+#include "sqlite.hh"
+
+#include "pathlocks.hh"
+#include "store-api.hh"
+#include "sync.hh"
+#include "util.hh"
+
+#include <string>
+#include <unordered_set>
+
+
+namespace nix {
+
+
+/* Nix store and database schema version. Version 1 (or 0) was Nix <=
+   0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
+   Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
+   Nix 1.0. Version 7 is Nix 1.3. Version 10 is 1.12. */
+const int nixSchemaVersion = 10;
+
+
+/* Forward declaration; full definition lives in derivations.hh. */
+struct Derivation;
+
+
+/* Counters filled in by optimiseStore() to report how much was
+   deduplicated. */
+struct OptimiseStats
+{
+    unsigned long filesLinked = 0;         /* number of files replaced by hard links */
+    unsigned long long bytesFreed = 0;     /* apparent bytes saved */
+    unsigned long long blocksFreed = 0;    /* disk blocks saved */
+};
+
+
+/* Store implementation that operates directly on the local
+   filesystem and keeps path metadata in a SQLite database. */
+class LocalStore : public LocalFSStore
+{
+private:
+
+    /* Lock file used for upgrading. */
+    AutoCloseFD globalLock;
+
+    /* All mutable per-store state, protected by the `_state' lock. */
+    struct State
+    {
+        /* The SQLite database object. */
+        SQLite db;
+
+        /* Some precompiled SQLite statements. */
+        SQLiteStmt stmtRegisterValidPath;
+        SQLiteStmt stmtUpdatePathInfo;
+        SQLiteStmt stmtAddReference;
+        SQLiteStmt stmtQueryPathInfo;
+        SQLiteStmt stmtQueryReferences;
+        SQLiteStmt stmtQueryReferrers;
+        SQLiteStmt stmtInvalidatePath;
+        SQLiteStmt stmtAddDerivationOutput;
+        SQLiteStmt stmtQueryValidDerivers;
+        SQLiteStmt stmtQueryDerivationOutputs;
+        SQLiteStmt stmtQueryPathFromHashPart;
+        SQLiteStmt stmtQueryValidPaths;
+
+        /* The file to which we write our temporary roots. */
+        Path fnTempRoots;
+        AutoCloseFD fdTempRoots;
+    };
+
+    /* NOTE(review): a recursive mutex is used here, presumably
+       because some code paths re-acquire the state lock while
+       already holding it — confirm against local-store.cc. */
+    Sync<State, std::recursive_mutex> _state;
+
+public:
+
+    /* Location of the physical store; may differ from the logical
+       store directory (see getRealStoreDir() below). */
+    PathSetting realStoreDir_;
+
+    const Path realStoreDir;
+    const Path dbDir;        /* directory holding the SQLite database */
+    const Path linksDir;     /* presumably the hard-link dedup directory used by optimiseStore() — confirm */
+    const Path reservedPath;
+    const Path schemaPath;   /* file recording the on-disk schema version */
+    const Path trashDir;
+
+private:
+
+    /* Whether imported paths must carry a trusted signature. */
+    Setting<bool> requireSigs{(Store*) this,
+        settings.signedBinaryCaches != "", // FIXME
+        "require-sigs", "whether store paths should have a trusted signature on import"};
+
+    PublicKeys publicKeys;
+
+public:
+
+    /* Initialise the local store, upgrading the schema if
+       necessary. */
+    LocalStore(const Params & params);
+
+    ~LocalStore();
+
+    /* Implementations of abstract store API methods. */
+
+    std::string getUri() override;
+
+    bool isValidPathUncached(const Path & path) override;
+
+    PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override;
+
+    PathSet queryAllValidPaths() override;
+
+    void queryPathInfoUncached(const Path & path,
+        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+        std::function<void(std::exception_ptr exc)> failure) override;
+
+    void queryReferrers(const Path & path, PathSet & referrers) override;
+
+    PathSet queryValidDerivers(const Path & path) override;
+
+    PathSet queryDerivationOutputs(const Path & path) override;
+
+    StringSet queryDerivationOutputNames(const Path & path) override;
+
+    Path queryPathFromHashPart(const string & hashPart) override;
+
+    PathSet querySubstitutablePaths(const PathSet & paths) override;
+
+    void querySubstitutablePathInfos(const PathSet & paths,
+        SubstitutablePathInfos & infos) override;
+
+    void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+        bool repair, bool dontCheckSigs,
+        std::shared_ptr<FSAccessor> accessor) override;
+
+    Path addToStore(const string & name, const Path & srcPath,
+        bool recursive, HashType hashAlgo,
+        PathFilter & filter, bool repair) override;
+
+    /* Like addToStore(), but the contents of the path are contained
+       in `dump', which is either a NAR serialisation (if recursive ==
+       true) or simply the contents of a regular file (if recursive ==
+       false). */
+    Path addToStoreFromDump(const string & dump, const string & name,
+        bool recursive = true, HashType hashAlgo = htSHA256, bool repair = false);
+
+    Path addTextToStore(const string & name, const string & s,
+        const PathSet & references, bool repair) override;
+
+    void buildPaths(const PathSet & paths, BuildMode buildMode) override;
+
+    BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+        BuildMode buildMode) override;
+
+    void ensurePath(const Path & path) override;
+
+    void addTempRoot(const Path & path) override;
+
+    void addIndirectRoot(const Path & path) override;
+
+    void syncWithGC() override;
+
+private:
+
+    typedef std::shared_ptr<AutoCloseFD> FDPtr;
+    typedef list<FDPtr> FDs;
+
+    /* Collect the temporary GC roots of all running processes,
+       keeping their root files open (in `fds') so they stay pinned. */
+    void readTempRoots(PathSet & tempRoots, FDs & fds);
+
+public:
+
+    Roots findRoots() override;
+
+    void collectGarbage(const GCOptions & options, GCResults & results) override;
+
+    /* Optimise the disk space usage of the Nix store by hard-linking
+       files with the same contents. */
+    void optimiseStore(OptimiseStats & stats);
+
+    void optimiseStore() override;
+
+    /* Optimise a single store path. */
+    void optimisePath(const Path & path);
+
+    bool verifyStore(bool checkContents, bool repair) override;
+
+    /* Register the validity of a path, i.e., that `path' exists, that
+       the paths referenced by it exists, and in the case of an output
+       path of a derivation, that it has been produced by a successful
+       execution of the derivation (or something equivalent).  Also
+       register the hash of the file system contents of the path.  The
+       hash must be a SHA-256 hash. */
+    void registerValidPath(const ValidPathInfo & info);
+
+    void registerValidPaths(const ValidPathInfos & infos);
+
+    /* Compact the SQLite database file. */
+    void vacuumDB();
+
+    /* Repair the contents of the given path by redownloading it using
+       a substituter (if available). */
+    void repairPath(const Path & path);
+
+    void addSignatures(const Path & storePath, const StringSet & sigs) override;
+
+private:
+
+    int getSchema();
+
+    void openDB(State & state, bool create);
+
+    void makeStoreWritable();
+
+    uint64_t queryValidPathId(State & state, const Path & path);
+
+    uint64_t addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs = true);
+
+    void invalidatePath(State & state, const Path & path);
+
+    /* Delete a path from the Nix store. */
+    void invalidatePathChecked(const Path & path);
+
+    void verifyPath(const Path & path, const PathSet & store,
+        PathSet & done, PathSet & validPaths, bool repair, bool & errors);
+
+    void updatePathInfo(State & state, const ValidPathInfo & info);
+
+    /* Schema migration helpers (see nixSchemaVersion above). */
+    void upgradeStore6();
+    void upgradeStore7();
+    PathSet queryValidPathsOld();
+    ValidPathInfo queryPathInfoOld(const Path & path);
+
+    struct GCState;
+
+    void deleteGarbage(GCState & state, const Path & path);
+
+    void tryToDelete(GCState & state, const Path & path);
+
+    bool canReachRoot(GCState & state, PathSet & visited, const Path & path);
+
+    void deletePathRecursive(GCState & state, const Path & path);
+
+    bool isActiveTempFile(const GCState & state,
+        const Path & path, const string & suffix);
+
+    int openGCLock(LockType lockType);
+
+    void findRoots(const Path & path, unsigned char type, Roots & roots);
+
+    void findRuntimeRoots(PathSet & roots);
+
+    void removeUnusedLinks(const GCState & state);
+
+    Path createTempDirInStore();
+
+    void checkDerivationOutputs(const Path & drvPath, const Derivation & drv);
+
+    typedef std::unordered_set<ino_t> InodeHash;
+
+    /* Helpers for optimiseStore(): track inodes already hard-linked. */
+    InodeHash loadInodeHash();
+    Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
+    void optimisePath_(OptimiseStats & stats, const Path & path, InodeHash & inodeHash);
+
+    // Internal versions that are not wrapped in retry_sqlite.
+    bool isValidPath_(State & state, const Path & path);
+    void queryReferrers(State & state, const Path & path, PathSet & referrers);
+
+    /* Add signatures to a ValidPathInfo using the secret keys
+       specified by the ‘secret-key-files’ option. */
+    void signPathInfo(ValidPathInfo & info);
+
+    Path getRealStoreDir() override { return realStoreDir; }
+
+    /* The build/substitution goals (build.cc) poke at our
+       internals directly. */
+    friend class DerivationGoal;
+    friend class SubstitutionGoal;
+};
+
+
+/* (device, inode) pairs; `InodesSeen' lets canonicalisePathMetaData
+   avoid processing the same hard-linked file twice. */
+typedef std::pair<dev_t, ino_t> Inode;
+typedef set<Inode> InodesSeen;
+
+
+/* "Fix", or canonicalise, the meta-data of the files in a store path
+   after it has been built.  In particular:
+   - the last modification date on each file is set to 1 (i.e.,
+     00:00:01 1/1/1970 UTC)
+   - the permissions are set to 444 or 555 (i.e., read-only with or
+     without execute permission; setuid bits etc. are cleared)
+   - the owner and group are set to the Nix user and group, if we're
+     running as root. */
+void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
+void canonicalisePathMetaData(const Path & path, uid_t fromUid);
+
+/* Like the above, but only fixes timestamps and permissions of a
+   single path, without recursing or changing ownership. */
+void canonicaliseTimestampAndPermissions(const Path & path);
+
+/* Thrown by invalidatePathChecked() when a path still has
+   referrers. */
+MakeError(PathInUse, Error);
+
+}
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
new file mode 100644
index 000000000..4da20330c
--- /dev/null
+++ b/src/libstore/local.mk
@@ -0,0 +1,41 @@
+# Build definition for libnixstore (consumed by the top-level mk/ framework).
+libraries += libstore
+
+libstore_NAME = libnixstore
+
+libstore_DIR := $(d)
+
+# All .cc files in this directory are part of the library.
+libstore_SOURCES := $(wildcard $(d)/*.cc)
+
+libstore_LIBS = libutil libformat
+
+libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
+
+# Link the AWS SDK only when S3 binary-cache support was configured in.
+ifeq ($(ENABLE_S3), 1)
+	libstore_LDFLAGS += -laws-cpp-sdk-s3 -laws-cpp-sdk-core
+endif
+
+# Solaris needs an explicit socket library.
+ifeq ($(OS), SunOS)
+	libstore_LDFLAGS += -lsocket
+endif
+
+# Bake the configured installation paths into the library.
+libstore_CXXFLAGS = \
+ -DNIX_PREFIX=\"$(prefix)\" \
+ -DNIX_STORE_DIR=\"$(storedir)\" \
+ -DNIX_DATA_DIR=\"$(datadir)\" \
+ -DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
+ -DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
+ -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
+ -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
+ -DNIX_BIN_DIR=\"$(bindir)\" \
+ -DBASH_PATH="\"$(bash)\"" \
+ -DLSOF=\"$(lsof)\"
+
+# local-store.cc embeds the SQL schema as a generated header.
+$(d)/local-store.cc: $(d)/schema.sql.hh
+
+# Turn a .sql file into a C string literal header; remove the output
+# on failure so a broken header is never left behind.
+%.sql.hh: %.sql
+	$(trace-gen) sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $< > $@ || (rm $@ && exit 1)
+
+clean-files += $(d)/schema.sql.hh
+
+$(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644))
+$(eval $(call install-file-in, $(d)/sandbox-defaults.sb, $(datadir)/nix, 0644))
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
new file mode 100644
index 000000000..9a88cdc31
--- /dev/null
+++ b/src/libstore/misc.cc
@@ -0,0 +1,277 @@
+#include "derivations.hh"
+#include "globals.hh"
+#include "local-store.hh"
+#include "store-api.hh"
+#include "thread-pool.hh"
+
+
+namespace nix {
+
+
+/* Compute the closure of `startPaths' under the references relation
+   (or the referrers relation if `flipDirection') and accumulate it
+   in `paths_'.  Path-info lookups are issued asynchronously via the
+   callback form of queryPathInfo(); a pending counter plus a
+   condition variable tracks completion, and the first exception
+   encountered is recorded and rethrown at the end. */
+void Store::computeFSClosure(const PathSet & startPaths,
+    PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
+{
+    struct State
+    {
+        size_t pending;            /* number of in-flight queryPathInfo calls */
+        PathSet & paths;           /* result set (aliases paths_) */
+        std::exception_ptr exc;    /* first failure, rethrown at the end */
+    };
+
+    Sync<State> state_(State{0, paths_, 0});
+
+    std::function<void(const Path &)> enqueue;
+
+    std::condition_variable done;
+
+    enqueue = [&](const Path & path) -> void {
+        {
+            auto state(state_.lock());
+            if (state->exc) return;                 /* already failing; stop expanding */
+            if (state->paths.count(path)) return;   /* already visited */
+            state->paths.insert(path);
+            state->pending++;
+        }
+
+        queryPathInfo(path,
+            [&, path](ref<ValidPathInfo> info) {
+                // FIXME: calls to isValidPath() should be async
+
+                if (flipDirection) {
+
+                    /* Referrers closure: follow paths that refer to us. */
+                    PathSet referrers;
+                    queryReferrers(path, referrers);
+                    for (auto & ref : referrers)
+                        if (ref != path)
+                            enqueue(ref);
+
+                    if (includeOutputs)
+                        for (auto & i : queryValidDerivers(path))
+                            enqueue(i);
+
+                    if (includeDerivers && isDerivation(path))
+                        for (auto & i : queryDerivationOutputs(path))
+                            if (isValidPath(i) && queryPathInfo(i)->deriver == path)
+                                enqueue(i);
+
+                } else {
+
+                    /* Normal closure: follow this path's references. */
+                    for (auto & ref : info->references)
+                        if (ref != path)
+                            enqueue(ref);
+
+                    if (includeOutputs && isDerivation(path))
+                        for (auto & i : queryDerivationOutputs(path))
+                            if (isValidPath(i)) enqueue(i);
+
+                    if (includeDerivers && isValidPath(info->deriver))
+                        enqueue(info->deriver);
+
+                }
+
+                {
+                    auto state(state_.lock());
+                    assert(state->pending);
+                    if (!--state->pending) done.notify_one();
+                }
+
+            },
+
+            [&, path](std::exception_ptr exc) {
+                /* Record only the first failure; still decrement the
+                   pending count so the waiter can finish. */
+                auto state(state_.lock());
+                if (!state->exc) state->exc = exc;
+                assert(state->pending);
+                if (!--state->pending) done.notify_one();
+            });
+    };
+
+    for (auto & startPath : startPaths)
+        enqueue(startPath);
+
+    {
+        auto state(state_.lock());
+        while (state->pending) state.wait(done);
+        if (state->exc) std::rethrow_exception(state->exc);
+    }
+}
+
+
+/* Convenience overload for a single start path. */
+void Store::computeFSClosure(const Path & startPath,
+    PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
+{
+    computeFSClosure(PathSet{startPath}, paths_, flipDirection, includeOutputs, includeDerivers);
+}
+
+
+/* Given a set of targets (plain store paths or derivation paths with
+   optional output specifiers, e.g. "...drv!out"), partition them
+   into: derivations that must be built (willBuild_), paths that can
+   be fetched from substituters (willSubstitute_), and paths whose
+   status cannot be determined (unknown_).  Also accumulates the
+   expected total download size and unpacked NAR size of the
+   substitutable paths.  The traversal is parallelised with a thread
+   pool. */
+void Store::queryMissing(const PathSet & targets,
+    PathSet & willBuild_, PathSet & willSubstitute_, PathSet & unknown_,
+    unsigned long long & downloadSize_, unsigned long long & narSize_)
+{
+    downloadSize_ = narSize_ = 0;
+
+    ThreadPool pool;
+
+    /* Shared mutable state; the references alias the caller's
+       output parameters. */
+    struct State
+    {
+        PathSet done;
+        PathSet & unknown, & willSubstitute, & willBuild;
+        unsigned long long & downloadSize;
+        unsigned long long & narSize;
+    };
+
+    /* Per-derivation countdown used to decide, once all wanted
+       outputs have been checked, whether the whole derivation is
+       substitutable. */
+    struct DrvState
+    {
+        size_t left;         /* outputs still awaiting a substituter answer */
+        bool done = false;   /* set when the decision (build) has been made */
+        PathSet outPaths;
+        DrvState(size_t left) : left(left) { }
+    };
+
+    Sync<State> state_(State{PathSet(), unknown_, willSubstitute_, willBuild_, downloadSize_, narSize_});
+
+    std::function<void(Path)> doPath;
+
+    /* Mark a derivation as needing a build and recurse into its
+       input derivations. */
+    auto mustBuildDrv = [&](const Path & drvPath, const Derivation & drv) {
+        {
+            auto state(state_.lock());
+            state->willBuild.insert(drvPath);
+        }
+
+        for (auto & i : drv.inputDrvs)
+            pool.enqueue(std::bind(doPath, makeDrvPathWithOutputs(i.first, i.second)));
+    };
+
+    /* Check whether one output of a derivation is substitutable; if
+       any output is not, the derivation must be built. */
+    auto checkOutput = [&](
+        const Path & drvPath, ref<Derivation> drv, const Path & outPath, ref<Sync<DrvState>> drvState_)
+    {
+        if (drvState_->lock()->done) return;
+
+        SubstitutablePathInfos infos;
+        querySubstitutablePathInfos({outPath}, infos);
+
+        if (infos.empty()) {
+            /* This output cannot be substituted, so the derivation
+               has to be built. */
+            drvState_->lock()->done = true;
+            mustBuildDrv(drvPath, *drv);
+        } else {
+            {
+                auto drvState(drvState_->lock());
+                if (drvState->done) return;
+                assert(drvState->left);
+                drvState->left--;
+                drvState->outPaths.insert(outPath);
+                if (!drvState->left) {
+                    /* All outputs are substitutable; process them as
+                       ordinary substitutable paths. */
+                    for (auto & path : drvState->outPaths)
+                        pool.enqueue(std::bind(doPath, path));
+                }
+            }
+        }
+    };
+
+    doPath = [&](const Path & path) {
+
+        {
+            auto state(state_.lock());
+            if (state->done.count(path)) return;
+            state->done.insert(path);
+        }
+
+        /* Split "drvPath!outputs" into its components. */
+        DrvPathWithOutputs i2 = parseDrvPathWithOutputs(path);
+
+        if (isDerivation(i2.first)) {
+            if (!isValidPath(i2.first)) {
+                // FIXME: we could try to substitute the derivation.
+                auto state(state_.lock());
+                state->unknown.insert(path);
+                return;
+            }
+
+            Derivation drv = derivationFromPath(i2.first);
+
+            /* Which wanted outputs are not already valid locally? */
+            PathSet invalid;
+            for (auto & j : drv.outputs)
+                if (wantOutput(j.first, i2.second)
+                    && !isValidPath(j.second.path))
+                    invalid.insert(j.second.path);
+            if (invalid.empty()) return;
+
+            if (settings.useSubstitutes && drv.substitutesAllowed()) {
+                auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
+                for (auto & output : invalid)
+                    pool.enqueue(std::bind(checkOutput, i2.first, make_ref<Derivation>(drv), output, drvState));
+            } else
+                mustBuildDrv(i2.first, drv);
+
+        } else {
+
+            if (isValidPath(path)) return;
+
+            SubstitutablePathInfos infos;
+            querySubstitutablePathInfos({path}, infos);
+
+            if (infos.empty()) {
+                /* Not valid, not substitutable: status unknown. */
+                auto state(state_.lock());
+                state->unknown.insert(path);
+                return;
+            }
+
+            auto info = infos.find(path);
+            assert(info != infos.end());
+
+            {
+                auto state(state_.lock());
+                state->willSubstitute.insert(path);
+                state->downloadSize += info->second.downloadSize;
+                state->narSize += info->second.narSize;
+            }
+
+            /* The substituted path's references must be present too. */
+            for (auto & ref : info->second.references)
+                pool.enqueue(std::bind(doPath, ref));
+        }
+    };
+
+    for (auto & path : targets)
+        pool.enqueue(std::bind(doPath, path));
+
+    pool.process();
+}
+
+
+/* Topologically sort `paths' under the references relation using a
+   depth-first search.  In the returned list each path appears before
+   the paths it references.  Throws BuildError if a reference cycle
+   is detected. */
+Paths Store::topoSortPaths(const PathSet & paths)
+{
+    Paths sorted;
+    PathSet visited, parents;   /* `parents' is the current DFS stack, for cycle detection */
+
+    std::function<void(const Path & path, const Path * parent)> dfsVisit;
+
+    dfsVisit = [&](const Path & path, const Path * parent) {
+        if (parents.find(path) != parents.end())
+            throw BuildError(format("cycle detected in the references of ‘%1%’ from ‘%2%’") % path % *parent);
+
+        if (visited.find(path) != visited.end()) return;
+        visited.insert(path);
+        parents.insert(path);
+
+        PathSet references;
+        try {
+            references = queryPathInfo(path)->references;
+        } catch (InvalidPath &) {
+            /* Invalid paths simply have no outgoing edges. */
+        }
+
+        for (auto & i : references)
+            /* Don't traverse into paths that don't exist. That can
+               happen due to substitutes for non-existent paths. */
+            if (i != path && paths.find(i) != paths.end())
+                dfsVisit(i, &path);
+
+        sorted.push_front(path);
+        parents.erase(path);
+    };
+
+    for (auto & i : paths)
+        dfsVisit(i, nullptr);
+
+    return sorted;
+}
+
+
+}
diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc
new file mode 100644
index 000000000..4cb5de744
--- /dev/null
+++ b/src/libstore/nar-accessor.cc
@@ -0,0 +1,142 @@
+#include "nar-accessor.hh"
+#include "archive.hh"
+
+#include <map>
+
+namespace nix {
+
+/* Metadata for one filesystem object inside a NAR. */
+struct NarMember
+{
+    FSAccessor::Type type;
+
+    bool isExecutable;
+
+    /* If this is a regular file, position of the contents of this
+       file in the NAR. */
+    size_t start, size;
+
+    /* If this is a symlink, its target. */
+    std::string target;
+};
+
+/* Parse a NAR (acting as both the ParseSink and the data Source) and
+   build a path -> NarMember index of its contents.  Regular-file
+   contents are not copied, only their offset/length within the NAR
+   string are recorded. */
+struct NarIndexer : ParseSink, StringSource
+{
+    // FIXME: should store this as a tree. Now we're vulnerable to
+    // O(nm) memory consumption (e.g. for x_0/.../x_n/{y_0..y_m}).
+    typedef std::map<Path, NarMember> Members;
+    Members members;
+
+    Path currentPath;           /* path of the regular file being parsed */
+    std::string currentStart;   /* first bytes of its contents, for the sanity check below */
+    bool isExec = false;
+    /* NOTE(review): `isExec' is never reset to false after a file is
+       recorded, so a non-executable file following an executable one
+       may be mis-marked as executable — verify against the NAR
+       parser's call sequence. */
+
+    NarIndexer(const std::string & nar) : StringSource(nar)
+    {
+    }
+
+    void createDirectory(const Path & path) override
+    {
+        members.emplace(path,
+            NarMember{FSAccessor::Type::tDirectory, false, 0, 0});
+    }
+
+    void createRegularFile(const Path & path) override
+    {
+        currentPath = path;
+    }
+
+    void isExecutable() override
+    {
+        isExec = true;
+    }
+
+    void preallocateContents(unsigned long long size) override
+    {
+        /* `pos' (from StringSource) is the offset of the file's
+           contents within the NAR; remember the first 16 bytes so
+           receiveContents() can verify it. */
+        currentStart = string(s, pos, 16);
+        assert(size <= std::numeric_limits<size_t>::max());
+        members.emplace(currentPath,
+            NarMember{FSAccessor::Type::tRegular, isExec, pos, (size_t) size});
+    }
+
+    void receiveContents(unsigned char * data, unsigned int len) override
+    {
+        // Sanity check
+        if (!currentStart.empty()) {
+            assert(len < 16 || currentStart == string((char *) data, 16));
+            currentStart.clear();
+        }
+    }
+
+    void createSymlink(const Path & path, const string & target) override
+    {
+        members.emplace(path,
+            NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target});
+    }
+
+    /* Look up `path' in the index, throwing if it's absent. */
+    Members::iterator find(const Path & path)
+    {
+        auto i = members.find(path);
+        if (i == members.end())
+            throw Error(format("NAR file does not contain path ‘%1%’") % path);
+        return i;
+    }
+};
+
+/* FSAccessor implementation that serves reads out of an in-memory
+   NAR, using the index built by NarIndexer. */
+struct NarAccessor : public FSAccessor
+{
+    ref<const std::string> nar;   /* keep the NAR alive; file reads slice it */
+    NarIndexer indexer;
+
+    NarAccessor(ref<const std::string> nar) : nar(nar), indexer(*nar)
+    {
+        /* The indexer is both the sink and the source. */
+        parseDump(indexer, indexer);
+    }
+
+    Stat stat(const Path & path) override
+    {
+        auto i = indexer.members.find(path);
+        if (i == indexer.members.end())
+            return {FSAccessor::Type::tMissing, 0, false};
+        return {i->second.type, i->second.size, i->second.isExecutable};
+    }
+
+    StringSet readDirectory(const Path & path) override
+    {
+        auto i = indexer.find(path);
+
+        if (i->second.type != FSAccessor::Type::tDirectory)
+            throw Error(format("path ‘%1%’ inside NAR file is not a directory") % path);
+
+        /* std::map is lexicographically ordered, so all entries below
+           `path' follow it directly; direct children are those with no
+           further '/' beyond the directory prefix. */
+        ++i;
+        StringSet res;
+        while (i != indexer.members.end() && isInDir(i->first, path)) {
+            // FIXME: really bad performance.
+            if (i->first.find('/', path.size() + 1) == std::string::npos)
+                res.insert(std::string(i->first, path.size() + 1));
+            ++i;
+        }
+        return res;
+    }
+
+    std::string readFile(const Path & path) override
+    {
+        auto i = indexer.find(path);
+        if (i->second.type != FSAccessor::Type::tRegular)
+            throw Error(format("path ‘%1%’ inside NAR file is not a regular file") % path);
+        /* Slice the contents straight out of the NAR string. */
+        return std::string(*nar, i->second.start, i->second.size);
+    }
+
+    std::string readLink(const Path & path) override
+    {
+        auto i = indexer.find(path);
+        if (i->second.type != FSAccessor::Type::tSymlink)
+            throw Error(format("path ‘%1%’ inside NAR file is not a symlink") % path);
+        return i->second.target;
+    }
+};
+
+/* Factory: wrap an in-memory NAR in an FSAccessor. */
+ref<FSAccessor> makeNarAccessor(ref<const std::string> nar)
+{
+    return make_ref<NarAccessor>(nar);
+}
+
+}
diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh
new file mode 100644
index 000000000..83c570be4
--- /dev/null
+++ b/src/libstore/nar-accessor.hh
@@ -0,0 +1,11 @@
+#pragma once
+
+#include "fs-accessor.hh"
+
+namespace nix {
+
+/* Return an object that provides access to the contents of a NAR
+ file. */
+ref<FSAccessor> makeNarAccessor(ref<const std::string> nar);
+
+}
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
new file mode 100644
index 000000000..180a936ed
--- /dev/null
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -0,0 +1,270 @@
+#include "nar-info-disk-cache.hh"
+#include "sync.hh"
+#include "sqlite.hh"
+
+#include <sqlite3.h>
+
+namespace nix {
+
+/* SQL schema of the on-disk binary-cache metadata cache.  NARs rows
+   with present = 0 are negative lookups (the cache knows the path is
+   NOT in that binary cache). */
+static const char * schema = R"sql(
+
+create table if not exists BinaryCaches (
+    id        integer primary key autoincrement not null,
+    url       text unique not null,
+    timestamp integer not null,
+    storeDir  text not null,
+    wantMassQuery integer not null,
+    priority  integer not null
+);
+
+create table if not exists NARs (
+    cache            integer not null,
+    hashPart         text not null,
+    namePart         text,
+    url              text,
+    compression      text,
+    fileHash         text,
+    fileSize         integer,
+    narHash          text,
+    narSize          integer,
+    refs             text,
+    deriver          text,
+    sigs             text,
+    timestamp        integer not null,
+    present          integer not null,
+    primary key (cache, hashPart),
+    foreign key (cache) references BinaryCaches(id) on delete cascade
+);
+
+create table if not exists LastPurge (
+    dummy            text primary key,
+    value            integer
+);
+
+)sql";
+
+/* SQLite-backed implementation of NarInfoDiskCache: caches, per
+   binary cache URI, which store paths are (or are not) present and
+   their NAR metadata, so repeated .narinfo lookups don't hit the
+   network. */
+class NarInfoDiskCacheImpl : public NarInfoDiskCache
+{
+public:
+
+    /* How long negative and positive lookups are valid. */
+    const int ttlNegative = 3600;
+    const int ttlPositive = 30 * 24 * 3600;
+
+    /* How often to purge expired entries from the cache. */
+    const int purgeInterval = 24 * 3600;
+
+    /* In-memory record of a registered binary cache (one row of the
+       BinaryCaches table). */
+    struct Cache
+    {
+        int id;
+        Path storeDir;
+        bool wantMassQuery;
+        int priority;
+    };
+
+    struct State
+    {
+        SQLite db;
+        SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR, purgeCache;
+        std::map<std::string, Cache> caches;   /* keyed by binary cache URI */
+    };
+
+    Sync<State> _state;
+
+    /* Open (creating if necessary) the cache database, prepare the
+       statements, and purge expired rows if the purge interval has
+       elapsed. */
+    NarInfoDiskCacheImpl()
+    {
+        auto state(_state.lock());
+
+        Path dbPath = getCacheDir() + "/nix/binary-cache-v5.sqlite";
+        createDirs(dirOf(dbPath));
+
+        state->db = SQLite(dbPath);
+
+        if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK)
+            throwSQLiteError(state->db, "setting timeout");
+
+        // We can always reproduce the cache.
+        state->db.exec("pragma synchronous = off");
+        state->db.exec("pragma main.journal_mode = truncate");
+
+        state->db.exec(schema);
+
+        state->insertCache.create(state->db,
+            "insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
+
+        state->queryCache.create(state->db,
+            "select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?");
+
+        state->insertNAR.create(state->db,
+            "insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, "
+            "narSize, refs, deriver, sigs, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)");
+
+        state->insertMissingNAR.create(state->db,
+            "insert or replace into NARs(cache, hashPart, timestamp, present) values (?, ?, ?, 0)");
+
+        state->queryNAR.create(state->db,
+            "select * from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))");
+
+        /* Periodically purge expired entries from the database. */
+        retrySQLite<void>([&]() {
+            auto now = time(0);
+
+            SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
+            auto queryLastPurge_(queryLastPurge.use());
+
+            if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) {
+                SQLiteStmt(state->db,
+                    "delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))")
+                    .use()
+                    (now - ttlNegative)
+                    (now - ttlPositive)
+                    .exec();
+
+                debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db));
+
+                SQLiteStmt(state->db,
+                    "insert or replace into LastPurge(dummy, value) values ('', ?)")
+                    .use()(now).exec();
+            }
+        });
+    }
+
+    /* Look up a registered cache; the caller must have registered it
+       via createCache()/cacheExists() first, hence the abort(). */
+    Cache & getCache(State & state, const std::string & uri)
+    {
+        auto i = state.caches.find(uri);
+        if (i == state.caches.end()) abort();
+        return i->second;
+    }
+
+    void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
+    {
+        retrySQLite<void>([&]() {
+            auto state(_state.lock());
+
+            // FIXME: race
+
+            state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
+            assert(sqlite3_changes(state->db) == 1);
+            state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+        });
+    }
+
+    bool cacheExists(const std::string & uri,
+        bool & wantMassQuery, int & priority) override
+    {
+        return retrySQLite<bool>([&]() {
+            auto state(_state.lock());
+
+            /* Populate the in-memory map from the DB on first use. */
+            auto i = state->caches.find(uri);
+            if (i == state->caches.end()) {
+                auto queryCache(state->queryCache.use()(uri));
+                if (!queryCache.next()) return false;
+                state->caches.emplace(uri,
+                    Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
+            }
+
+            auto & cache(getCache(*state, uri));
+
+            wantMassQuery = cache.wantMassQuery;
+            priority = cache.priority;
+
+            return true;
+        });
+    }
+
+    /* Look up the cached NAR info for `hashPart' in the given cache.
+       Returns oUnknown if there is no (unexpired) row, oInvalid if a
+       negative entry is cached, or oValid plus the reconstructed
+       NarInfo.  Column indices follow the NARs table: 2 = namePart,
+       3 = url, ..., 13 = present. */
+    std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
+        const std::string & uri, const std::string & hashPart) override
+    {
+        return retrySQLite<std::pair<Outcome, std::shared_ptr<NarInfo>>>(
+            [&]() -> std::pair<Outcome, std::shared_ptr<NarInfo>> {
+            auto state(_state.lock());
+
+            auto & cache(getCache(*state, uri));
+
+            auto now = time(0);
+
+            auto queryNAR(state->queryNAR.use()
+                (cache.id)
+                (hashPart)
+                (now - ttlNegative)
+                (now - ttlPositive));
+
+            if (!queryNAR.next())
+                return {oUnknown, 0};
+
+            if (!queryNAR.getInt(13))
+                return {oInvalid, 0};
+
+            auto narInfo = make_ref<NarInfo>();
+
+            auto namePart = queryNAR.getStr(2);
+            narInfo->path = cache.storeDir + "/" +
+                hashPart + (namePart.empty() ? "" : "-" + namePart);
+            narInfo->url = queryNAR.getStr(3);
+            narInfo->compression = queryNAR.getStr(4);
+            if (!queryNAR.isNull(5))
+                narInfo->fileHash = parseHash(queryNAR.getStr(5));
+            narInfo->fileSize = queryNAR.getInt(6);
+            narInfo->narHash = parseHash(queryNAR.getStr(7));
+            narInfo->narSize = queryNAR.getInt(8);
+            /* refs and deriver are stored as basenames relative to
+               the cache's store dir. */
+            for (auto & r : tokenizeString<Strings>(queryNAR.getStr(9), " "))
+                narInfo->references.insert(cache.storeDir + "/" + r);
+            if (!queryNAR.isNull(10))
+                narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(10);
+            for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(11), " "))
+                narInfo->sigs.insert(sig);
+
+            return {oValid, narInfo};
+        });
+    }
+
+    /* Insert or refresh an entry.  A null `info' records a negative
+       (path-not-present) entry. */
+    void upsertNarInfo(
+        const std::string & uri, const std::string & hashPart,
+        std::shared_ptr<ValidPathInfo> info) override
+    {
+        retrySQLite<void>([&]() {
+            auto state(_state.lock());
+
+            auto & cache(getCache(*state, uri));
+
+            if (info) {
+
+                /* `info' may be a plain ValidPathInfo (no NAR file
+                   metadata); the narInfo-only columns are then NULL. */
+                auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);
+
+                assert(hashPart == storePathToHash(info->path));
+
+                state->insertNAR.use()
+                    (cache.id)
+                    (hashPart)
+                    (storePathToName(info->path))
+                    (narInfo ? narInfo->url : "", narInfo != 0)
+                    (narInfo ? narInfo->compression : "", narInfo != 0)
+                    (narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash)
+                    (narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
+                    (info->narHash.to_string())
+                    (info->narSize)
+                    (concatStringsSep(" ", info->shortRefs()))
+                    (info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "")
+                    (concatStringsSep(" ", info->sigs))
+                    (time(0)).exec();
+
+            } else {
+                state->insertMissingNAR.use()
+                    (cache.id)
+                    (hashPart)
+                    (time(0)).exec();
+            }
+        });
+    }
+};
+
+/* Return the process-wide singleton cache, created lazily and
+   guarded by a Sync lock so concurrent first calls are safe. */
+ref<NarInfoDiskCache> getNarInfoDiskCache()
+{
+    static Sync<std::shared_ptr<NarInfoDiskCache>> cache;
+
+    auto cache_(cache.lock());
+    if (!*cache_) *cache_ = std::make_shared<NarInfoDiskCacheImpl>();
+    return ref<NarInfoDiskCache>(*cache_);
+}
+
+}
diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh
new file mode 100644
index 000000000..88d909732
--- /dev/null
+++ b/src/libstore/nar-info-disk-cache.hh
@@ -0,0 +1,31 @@
+#pragma once
+
+#include "ref.hh"
+#include "nar-info.hh"
+
+namespace nix {
+
+/* Interface to a local cache of .narinfo lookups for remote binary
+   caches, so that repeated queries (hits *and* misses) don't require
+   network round-trips. */
+class NarInfoDiskCache
+{
+public:
+    /* Outcome of a cached lookup: known-present, known-absent, or not
+       in the cache at all. */
+    typedef enum { oValid, oInvalid, oUnknown } Outcome;
+
+    /* Register a binary cache with the given store directory,
+       mass-query capability and priority. */
+    virtual void createCache(const std::string & uri, const Path & storeDir,
+        bool wantMassQuery, int priority) = 0;
+
+    /* Return whether ‘uri’ is a known cache; on success, fill in its
+       recorded properties. */
+    virtual bool cacheExists(const std::string & uri,
+        bool & wantMassQuery, int & priority) = 0;
+
+    /* Look up the NAR info for the store path with the given hash
+       part; the NarInfo pointer is non-null only for oValid. */
+    virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
+        const std::string & uri, const std::string & hashPart) = 0;
+
+    /* Record a lookup result; a null ‘info’ records a miss. */
+    virtual void upsertNarInfo(
+        const std::string & uri, const std::string & hashPart,
+        std::shared_ptr<ValidPathInfo> info) = 0;
+};
+
+/* Return a singleton cache object that can be used concurrently by
+ multiple threads. */
+ref<NarInfoDiskCache> getNarInfoDiskCache();
+
+}
diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc
new file mode 100644
index 000000000..d1042c6de
--- /dev/null
+++ b/src/libstore/nar-info.cc
@@ -0,0 +1,116 @@
+#include "globals.hh"
+#include "nar-info.hh"
+
+namespace nix {
+
+/* Parse the contents ‘s’ of a .narinfo file (a sequence of
+   "Key: value\n" lines).  ‘whence’ is used only for error messages.
+   Throws on any malformed line or on missing mandatory fields. */
+NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence)
+{
+    auto corrupt = [&]() {
+        throw Error(format("NAR info file ‘%1%’ is corrupt") % whence);
+    };
+
+    auto parseHashField = [&](const string & s) {
+        try {
+            return parseHash(s);
+        } catch (BadHash &) {
+            corrupt();
+            return Hash(); // never reached
+        }
+    };
+
+    size_t pos = 0;
+    while (pos < s.size()) {
+
+        size_t colon = s.find(':', pos);
+        if (colon == std::string::npos) corrupt();
+
+        std::string name(s, pos, colon - pos);
+
+        /* ‘colon + 2’ assumes the separator is exactly ": "; a line
+           without the space after the colon will mis-parse. */
+        size_t eol = s.find('\n', colon + 2);
+        if (eol == std::string::npos) corrupt();
+
+        std::string value(s, colon + 2, eol - colon - 2);
+
+        if (name == "StorePath") {
+            if (!store.isStorePath(value)) corrupt();
+            path = value;
+        }
+        else if (name == "URL")
+            url = value;
+        else if (name == "Compression")
+            compression = value;
+        else if (name == "FileHash")
+            fileHash = parseHashField(value);
+        else if (name == "FileSize") {
+            if (!string2Int(value, fileSize)) corrupt();
+        }
+        else if (name == "NarHash")
+            narHash = parseHashField(value);
+        else if (name == "NarSize") {
+            if (!string2Int(value, narSize)) corrupt();
+        }
+        else if (name == "References") {
+            auto refs = tokenizeString<Strings>(value, " ");
+            /* Reject duplicate References lines. */
+            if (!references.empty()) corrupt();
+            for (auto & r : refs) {
+                /* References are stored as base names; resolve them
+                   against the cache's store directory. */
+                auto r2 = store.storeDir + "/" + r;
+                if (!store.isStorePath(r2)) corrupt();
+                references.insert(r2);
+            }
+        }
+        else if (name == "Deriver") {
+            if (value != "unknown-deriver") {
+                auto p = store.storeDir + "/" + value;
+                if (!store.isStorePath(p)) corrupt();
+                deriver = p;
+            }
+        }
+        else if (name == "System")
+            system = value;
+        else if (name == "Sig")
+            sigs.insert(value);
+        else if (name == "CA") {
+            if (!ca.empty()) corrupt();
+            ca = value;
+        }
+        /* Unrecognised fields are silently ignored for forward
+           compatibility. */
+
+        pos = eol + 1;
+    }
+
+    /* Historical default: old caches didn't emit a Compression field. */
+    if (compression == "") compression = "bzip2";
+
+    /* StorePath, URL, NarHash and NarSize are mandatory. */
+    if (path.empty() || url.empty() || narSize == 0 || !narHash) corrupt();
+}
+
+/* Serialise this NarInfo back into .narinfo format (the inverse of
+   the parsing constructor).  Asserts that the hashes are SHA-256,
+   the only type this format is written with. */
+std::string NarInfo::to_string() const
+{
+    std::string res;
+    res += "StorePath: " + path + "\n";
+    res += "URL: " + url + "\n";
+    assert(compression != "");
+    res += "Compression: " + compression + "\n";
+    assert(fileHash.type == htSHA256);
+    res += "FileHash: sha256:" + printHash32(fileHash) + "\n";
+    res += "FileSize: " + std::to_string(fileSize) + "\n";
+    assert(narHash.type == htSHA256);
+    res += "NarHash: sha256:" + printHash32(narHash) + "\n";
+    res += "NarSize: " + std::to_string(narSize) + "\n";
+
+    /* References are written as base names, one space-separated line. */
+    res += "References: " + concatStringsSep(" ", shortRefs()) + "\n";
+
+    if (!deriver.empty())
+        res += "Deriver: " + baseNameOf(deriver) + "\n";
+
+    if (!system.empty())
+        res += "System: " + system + "\n";
+
+    for (auto sig : sigs)
+        res += "Sig: " + sig + "\n";
+
+    if (!ca.empty())
+        res += "CA: " + ca + "\n";
+
+    return res;
+}
+
+}
diff --git a/src/libstore/nar-info.hh b/src/libstore/nar-info.hh
new file mode 100644
index 000000000..4995061fb
--- /dev/null
+++ b/src/libstore/nar-info.hh
@@ -0,0 +1,24 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+#include "store-api.hh"
+
+namespace nix {
+
+/* ValidPathInfo extended with the fields describing the NAR file
+   that a binary cache serves for the path (its URL, compression
+   method, and compressed hash/size). */
+struct NarInfo : ValidPathInfo
+{
+    std::string url;          // location of the (compressed) NAR, relative to the cache
+    std::string compression;  // e.g. "xz" or "bzip2"
+    Hash fileHash;            // hash of the compressed NAR file
+    uint64_t fileSize = 0;    // size of the compressed NAR file; 0 = unknown
+    std::string system;       // platform the path was built for (optional)
+
+    NarInfo() { }
+    NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { }
+    /* Parse the contents of a .narinfo file; see nar-info.cc. */
+    NarInfo(const Store & store, const std::string & s, const std::string & whence);
+
+    /* Serialise back to .narinfo format. */
+    std::string to_string() const;
+};
+
+}
diff --git a/src/libstore/nix-store.pc.in b/src/libstore/nix-store.pc.in
new file mode 100644
index 000000000..3f1a2d83d
--- /dev/null
+++ b/src/libstore/nix-store.pc.in
@@ -0,0 +1,9 @@
+prefix=@prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: Nix
+Description: Nix Package Manager
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -lnixstore -lnixutil -lnixformat
+Cflags: -I${includedir}/nix
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
new file mode 100644
index 000000000..cf234e35d
--- /dev/null
+++ b/src/libstore/optimise-store.cc
@@ -0,0 +1,275 @@
+#include "util.hh"
+#include "local-store.hh"
+#include "globals.hh"
+
+#include <cstdlib>
+#include <cstring>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+
+
+namespace nix {
+
+
+/* Add the owner-write bit to ‘path’, preserving all other mode bits. */
+static void makeWritable(const Path & path)
+{
+    struct stat st;
+    if (lstat(path.c_str(), &st))
+        throw SysError(format("getting attributes of path ‘%1%’") % path);
+    if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
+        throw SysError(format("changing writability of ‘%1%’") % path);
+}
+
+
+/* Scope guard: on destruction, restore ‘path’ to the canonical
+   read-only permissions and timestamp.  An empty path disables the
+   guard (used when the directory was never made writable). */
+struct MakeReadOnly
+{
+    Path path;
+    MakeReadOnly(const Path & path) : path(path) { }
+    ~MakeReadOnly()
+    {
+        try {
+            /* This will make the path read-only. */
+            if (path != "") canonicaliseTimestampAndPermissions(path);
+        } catch (...) {
+            /* Destructors must not throw. */
+            ignoreException();
+        }
+    }
+};
+
+
+/* Read the inode numbers of all entries in the links directory into
+   memory, so optimisePath_() can cheaply detect files that are
+   already deduplicated. */
+LocalStore::InodeHash LocalStore::loadInodeHash()
+{
+    debug("loading hash inodes in memory");
+    InodeHash inodeHash;
+
+    AutoCloseDir dir(opendir(linksDir.c_str()));
+    if (!dir) throw SysError(format("opening directory ‘%1%’") % linksDir);
+
+    struct dirent * dirent;
+    /* readdir() only signals errors via errno, so clear it before
+       every call to distinguish "end of directory" from failure. */
+    while (errno = 0, dirent = readdir(dir.get())) { /* sic */
+        checkInterrupt();
+        // We don't care if we hit non-hash files, anything goes
+        inodeHash.insert(dirent->d_ino);
+    }
+    if (errno) throw SysError(format("reading directory ‘%1%’") % linksDir);
+
+    printMsg(lvlTalkative, format("loaded %1% hash inodes") % inodeHash.size());
+
+    return inodeHash;
+}
+
+
+/* Return the names of the entries of ‘path’, skipping "." and ".."
+   and any entry whose inode is already in ‘inodeHash’ (i.e. already
+   hard-linked into the links directory). */
+Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash)
+{
+    Strings names;
+
+    AutoCloseDir dir(opendir(path.c_str()));
+    if (!dir) throw SysError(format("opening directory ‘%1%’") % path);
+
+    struct dirent * dirent;
+    /* Clear errno per call to tell EOF apart from a read error. */
+    while (errno = 0, dirent = readdir(dir.get())) { /* sic */
+        checkInterrupt();
+
+        if (inodeHash.count(dirent->d_ino)) {
+            debug(format("‘%1%’ is already linked") % dirent->d_name);
+            continue;
+        }
+
+        string name = dirent->d_name;
+        if (name == "." || name == "..") continue;
+        names.push_back(name);
+    }
+    if (errno) throw SysError(format("reading directory ‘%1%’") % path);
+
+    return names;
+}
+
+
+/* Deduplicate ‘path’: recurse into directories and replace each
+   eligible regular file (and, where the filesystem supports it,
+   symlink) whose contents match an entry in the links directory with
+   a hard link to that entry.  ‘inodeHash’ is the set of inodes known
+   to be linked already; ‘stats’ accumulates the savings. */
+void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHash & inodeHash)
+{
+    checkInterrupt();
+
+    struct stat st;
+    if (lstat(path.c_str(), &st))
+        throw SysError(format("getting attributes of path ‘%1%’") % path);
+
+    if (S_ISDIR(st.st_mode)) {
+        Strings names = readDirectoryIgnoringInodes(path, inodeHash);
+        for (auto & i : names)
+            optimisePath_(stats, path + "/" + i, inodeHash);
+        return;
+    }
+
+    /* We can hard link regular files and maybe symlinks. */
+    if (!S_ISREG(st.st_mode)
+#if CAN_LINK_SYMLINK
+        && !S_ISLNK(st.st_mode)
+#endif
+        ) return;
+
+    /* Sometimes SNAFUs can cause files in the Nix store to be
+       modified, in particular when running programs as root under
+       NixOS (example: $fontconfig/var/cache being modified).  Skip
+       those files.  FIXME: check the modification time. */
+    if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
+        printError(format("skipping suspicious writable file ‘%1%’") % path);
+        return;
+    }
+
+    /* This can still happen on top-level files. */
+    if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
+        debug(format("‘%1%’ is already linked, with %2% other file(s)") % path % (st.st_nlink - 2));
+        return;
+    }
+
+    /* Hash the file.  Note that hashPath() returns the hash over the
+       NAR serialisation, which includes the execute bit on the file.
+       Thus, executable and non-executable files with the same
+       contents *won't* be linked (which is good because otherwise the
+       permissions would be screwed up).
+
+       Also note that if `path' is a symlink, then we're hashing the
+       contents of the symlink (i.e. the result of readlink()), not
+       the contents of the target (which may not even exist). */
+    Hash hash = hashPath(htSHA256, path).first;
+    debug(format("‘%1%’ has hash ‘%2%’") % path % printHash(hash));
+
+    /* Check if this is a known hash. */
+    Path linkPath = linksDir + "/" + printHash32(hash);
+
+ /* Jumped back to below after deleting a corrupted link. */
+ retry:
+    if (!pathExists(linkPath)) {
+        /* Nope, create a hard link in the links directory. */
+        if (link(path.c_str(), linkPath.c_str()) == 0) {
+            inodeHash.insert(st.st_ino);
+            return;
+        }
+
+        switch (errno) {
+        case EEXIST:
+            /* Fall through if another process created ‘linkPath’ before
+               we did. */
+            break;
+
+        case ENOSPC:
+            /* On ext4, that probably means the directory index is
+               full.  When that happens, it's fine to ignore it: we
+               just effectively disable deduplication of this
+               file.  */
+            printInfo("cannot link ‘%s’ to ‘%s’: %s", linkPath, path, strerror(errno));
+            return;
+
+        default:
+            throw SysError("cannot link ‘%1%’ to ‘%2%’", linkPath, path);
+        }
+    }
+
+    /* Yes!  We've seen a file with the same contents.  Replace the
+       current file with a hard link to that file. */
+    struct stat stLink;
+    if (lstat(linkPath.c_str(), &stLink))
+        throw SysError(format("getting attributes of path ‘%1%’") % linkPath);
+
+    if (st.st_ino == stLink.st_ino) {
+        debug(format("‘%1%’ is already linked to ‘%2%’") % path % linkPath);
+        return;
+    }
+
+    /* Same hash but different size means the link entry is corrupt;
+       remove it and create a fresh one from our copy. */
+    if (st.st_size != stLink.st_size) {
+        printError(format("removing corrupted link ‘%1%’") % linkPath);
+        unlink(linkPath.c_str());
+        goto retry;
+    }
+
+    printMsg(lvlTalkative, format("linking ‘%1%’ to ‘%2%’") % path % linkPath);
+
+    /* Make the containing directory writable, but only if it's not
+       the store itself (we don't want or need to mess with its
+       permissions). */
+    bool mustToggle = dirOf(path) != realStoreDir;
+    if (mustToggle) makeWritable(dirOf(path));
+
+    /* When we're done, make the directory read-only again and reset
+       its timestamp back to 0. */
+    MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : "");
+
+    Path tempLink = (format("%1%/.tmp-link-%2%-%3%")
+        % realStoreDir % getpid() % rand()).str();
+
+    if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
+        if (errno == EMLINK) {
+            /* Too many links to the same file (>= 32000 on most file
+               systems).  This is likely to happen with empty files.
+               Just shrug and ignore. */
+            if (st.st_size)
+                printInfo(format("‘%1%’ has maximum number of links") % linkPath);
+            return;
+        }
+        throw SysError("cannot link ‘%1%’ to ‘%2%’", tempLink, linkPath);
+    }
+
+    /* Atomically replace the old file with the new hard link. */
+    if (rename(tempLink.c_str(), path.c_str()) == -1) {
+        if (unlink(tempLink.c_str()) == -1)
+            printError(format("unable to unlink ‘%1%’") % tempLink);
+        if (errno == EMLINK) {
+            /* Some filesystems generate too many links on the rename,
+               rather than on the original link.  (Probably it
+               temporarily increases the st_nlink field before
+               decreasing it again.) */
+            if (st.st_size)
+                printInfo(format("‘%1%’ has maximum number of links") % linkPath);
+            return;
+        }
+        throw SysError(format("cannot rename ‘%1%’ to ‘%2%’") % tempLink % path);
+    }
+
+    stats.filesLinked++;
+    stats.bytesFreed += st.st_size;
+    stats.blocksFreed += st.st_blocks;
+}
+
+
+/* Deduplicate every valid path in the store, accumulating savings in
+   ‘stats’. */
+void LocalStore::optimiseStore(OptimiseStats & stats)
+{
+    PathSet paths = queryAllValidPaths();
+    InodeHash inodeHash = loadInodeHash();
+
+    for (auto & i : paths) {
+        /* Register a temporary GC root so the path can't be collected
+           while we're hashing/linking it. */
+        addTempRoot(i);
+        if (!isValidPath(i)) continue; /* path was GC'ed, probably */
+        Activity act(*logger, lvlChatty, format("hashing files in ‘%1%’") % i);
+        optimisePath_(stats, realStoreDir + "/" + baseNameOf(i), inodeHash);
+    }
+}
+
+/* Render a byte count as mebibytes with two decimal places,
+   e.g. 1572864 -> "1.50 MiB". */
+static string showBytes(unsigned long long bytes)
+{
+    const double mib = bytes / (1024.0 * 1024.0);
+    format f("%.2f MiB");
+    f % mib;
+    return f.str();
+}
+
+/* Convenience wrapper: optimise the whole store and print a summary
+   of the space freed. */
+void LocalStore::optimiseStore()
+{
+    OptimiseStats stats;
+
+    optimiseStore(stats);
+
+    /* NOTE(review): this is a status message, not an error; it uses
+       printError presumably so it always reaches stderr — confirm
+       against the logger conventions. */
+    printError(
+        format("%1% freed by hard-linking %2% files")
+        % showBytes(stats.bytesFreed)
+        % stats.filesLinked);
+}
+
+/* Deduplicate a single path, but only when auto-optimisation is
+   enabled in the settings; stats are computed and discarded. */
+void LocalStore::optimisePath(const Path & path)
+{
+    OptimiseStats stats;
+    InodeHash inodeHash;
+
+    if (settings.autoOptimiseStore) optimisePath_(stats, path, inodeHash);
+}
+
+
+}
diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc
new file mode 100644
index 000000000..bf7ad3d21
--- /dev/null
+++ b/src/libstore/pathlocks.cc
@@ -0,0 +1,216 @@
+#include "pathlocks.hh"
+#include "util.hh"
+#include "sync.hh"
+
+#include <cerrno>
+#include <cstdlib>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+
+namespace nix {
+
+
+/* Open (and, if ‘create’ is set, create) the lock file ‘path’.
+   Returns a closed (invalid) fd when the file doesn't exist and
+   ‘create’ is false; throws on any other error.  O_CLOEXEC keeps the
+   lock fd from leaking into child processes. */
+AutoCloseFD openLockFile(const Path & path, bool create)
+{
+    AutoCloseFD fd;
+
+    fd = open(path.c_str(), O_CLOEXEC | O_RDWR | (create ? O_CREAT : 0), 0600);
+    if (!fd && (create || errno != ENOENT))
+        throw SysError(format("opening lock file ‘%1%’") % path);
+
+    return fd;
+}
+
+
+/* Unlink the lock file ‘path’ and mark the still-open descriptor
+   ‘fd’ as stale for any process that holds it open. */
+void deleteLockFile(const Path & path, int fd)
+{
+    /* Get rid of the lock file.  Have to be careful not to introduce
+       races.  Write a (meaningless) token to the file to indicate to
+       other processes waiting on this lock that the lock is stale
+       (deleted). */
+    unlink(path.c_str());
+    writeFull(fd, "d");
+    /* Note that the result of unlink() is ignored; removing the lock
+       file is an optimisation, not a necessity. */
+}
+
+
+/* Acquire (ltRead/ltWrite) or release (ltNone) a POSIX advisory lock
+   on the whole of ‘fd’.  With ‘wait’ set, block until the lock is
+   available (returning false only if interrupted by a signal);
+   without it, return false immediately if another process holds a
+   conflicting lock. */
+bool lockFile(int fd, LockType lockType, bool wait)
+{
+    struct flock lock;
+    if (lockType == ltRead) lock.l_type = F_RDLCK;
+    else if (lockType == ltWrite) lock.l_type = F_WRLCK;
+    else if (lockType == ltNone) lock.l_type = F_UNLCK;
+    else abort();
+    lock.l_whence = SEEK_SET;
+    lock.l_start = 0;
+    lock.l_len = 0; /* entire file */
+
+    if (wait) {
+        while (fcntl(fd, F_SETLKW, &lock) != 0) {
+            /* checkInterrupt() throws if the signal was a user
+               interrupt; otherwise we report the EINTR as ‘not
+               acquired’. */
+            checkInterrupt();
+            if (errno != EINTR)
+                throw SysError(format("acquiring/releasing lock"));
+            else
+                return false;
+        }
+    } else {
+        while (fcntl(fd, F_SETLK, &lock) != 0) {
+            checkInterrupt();
+            /* EACCES/EAGAIN: lock held by another process. */
+            if (errno == EACCES || errno == EAGAIN) return false;
+            if (errno != EINTR)
+                throw SysError(format("acquiring/releasing lock"));
+        }
+    }
+
+    return true;
+}
+
+
+/* This enables us to check whether or not we are already holding a
+   lock on a file ourselves.  POSIX locks (fcntl) suck in this
+   respect: if we close a descriptor, the previous lock will be
+   closed as well.  And there is no way to query whether we already
+   have a lock (F_GETLK only works on locks held by other
+   processes). */
+static Sync<StringSet> lockedPaths_;
+
+
+PathLocks::PathLocks()
+ : deletePaths(false)
+{
+}
+
+
+PathLocks::PathLocks(const PathSet & paths, const string & waitMsg)
+ : deletePaths(false)
+{
+ lockPaths(paths, waitMsg);
+}
+
+
+/* Acquire exclusive locks on ‘<path>.lock’ for every path in
+   ‘_paths’.  Returns false (releasing anything acquired so far) if
+   ‘wait’ is false and some lock is held by another process; throws
+   if this process already holds one of the locks. */
+bool PathLocks::lockPaths(const PathSet & _paths,
+    const string & waitMsg, bool wait)
+{
+    assert(fds.empty());
+
+    /* Note that `fds' is built incrementally so that the destructor
+       will only release those locks that we have already acquired. */
+
+    /* Sort the paths.  This assures that locks are always acquired in
+       the same order, thus preventing deadlocks. */
+    Paths paths(_paths.begin(), _paths.end());
+    paths.sort();
+
+    /* Acquire the lock for each path. */
+    for (auto & path : paths) {
+        checkInterrupt();
+        Path lockPath = path + ".lock";
+
+        debug(format("locking path ‘%1%’") % path);
+
+        /* Record the path in the process-wide set first, so a
+           re-acquire from another thread is detected as a deadlock
+           rather than silently clobbering the fcntl lock. */
+        {
+            auto lockedPaths(lockedPaths_.lock());
+            if (lockedPaths->count(lockPath))
+                throw Error("deadlock: trying to re-acquire self-held lock ‘%s’", lockPath);
+            lockedPaths->insert(lockPath);
+        }
+
+        try {
+
+            AutoCloseFD fd;
+
+            while (1) {
+
+                /* Open/create the lock file. */
+                fd = openLockFile(lockPath, true);
+
+                /* Acquire an exclusive lock. */
+                if (!lockFile(fd.get(), ltWrite, false)) {
+                    if (wait) {
+                        if (waitMsg != "") printError(waitMsg);
+                        lockFile(fd.get(), ltWrite, true);
+                    } else {
+                        /* Failed to lock this path; release all other
+                           locks. */
+                        unlock();
+                        lockedPaths_.lock()->erase(lockPath);
+                        return false;
+                    }
+                }
+
+                debug(format("lock acquired on ‘%1%’") % lockPath);
+
+                /* Check that the lock file hasn't become stale (i.e.,
+                   hasn't been unlinked). */
+                struct stat st;
+                if (fstat(fd.get(), &st) == -1)
+                    throw SysError(format("statting lock file ‘%1%’") % lockPath);
+                if (st.st_size != 0)
+                    /* This lock file has been unlinked, so we're holding
+                       a lock on a deleted file.  This means that other
+                       processes may create and acquire a lock on
+                       `lockPath', and proceed.  So we must retry. */
+                    debug(format("open lock file ‘%1%’ has become stale") % lockPath);
+                else
+                    break;
+            }
+
+            /* Use borrow so that the descriptor isn't closed. */
+            fds.push_back(FDPair(fd.release(), lockPath));
+
+        } catch (...) {
+            /* Keep the process-wide set consistent on failure. */
+            lockedPaths_.lock()->erase(lockPath);
+            throw;
+        }
+
+    }
+
+    return true;
+}
+
+
+PathLocks::~PathLocks()
+{
+ try {
+ unlock();
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+
+/* Release every held lock (closing the fd releases the fcntl lock),
+   optionally deleting the lock files first, and clear the
+   process-wide bookkeeping. */
+void PathLocks::unlock()
+{
+    for (auto & i : fds) {
+        if (deletePaths) deleteLockFile(i.second, i.first);
+
+        lockedPaths_.lock()->erase(i.second);
+
+        /* Close failures are non-fatal: the lock is gone either way. */
+        if (close(i.first) == -1)
+            printError(
+                format("error (ignored): cannot close lock file on ‘%1%’") % i.second);
+
+        debug(format("lock released on ‘%1%’") % i.second);
+    }
+
+    fds.clear();
+}
+
+
+void PathLocks::setDeletion(bool deletePaths)
+{
+ this->deletePaths = deletePaths;
+}
+
+
+bool pathIsLockedByMe(const Path & path)
+{
+ Path lockPath = path + ".lock";
+ return lockedPaths_.lock()->count(lockPath);
+}
+
+
+}
diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh
new file mode 100644
index 000000000..2a7de6114
--- /dev/null
+++ b/src/libstore/pathlocks.hh
@@ -0,0 +1,46 @@
+#pragma once
+
+#include "util.hh"
+
+
+namespace nix {
+
+
+/* Open (possibly create) a lock file and return the file descriptor.
+ -1 is returned if create is false and the lock could not be opened
+ because it doesn't exist. Any other error throws an exception. */
+AutoCloseFD openLockFile(const Path & path, bool create);
+
+/* Delete an open lock file. */
+void deleteLockFile(const Path & path, int fd);
+
+enum LockType { ltRead, ltWrite, ltNone };
+
+bool lockFile(int fd, LockType lockType, bool wait);
+
+
+class PathLocks
+{
+private:
+ typedef std::pair<int, Path> FDPair;
+ list<FDPair> fds;
+ bool deletePaths;
+
+public:
+ PathLocks();
+ PathLocks(const PathSet & paths,
+ const string & waitMsg = "");
+ bool lockPaths(const PathSet & _paths,
+ const string & waitMsg = "",
+ bool wait = true);
+ ~PathLocks();
+ void unlock();
+ void setDeletion(bool deletePaths);
+};
+
+
+// FIXME: not thread-safe!
+bool pathIsLockedByMe(const Path & path);
+
+
+}
diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc
new file mode 100644
index 000000000..f24daa886
--- /dev/null
+++ b/src/libstore/profiles.cc
@@ -0,0 +1,236 @@
+#include "profiles.hh"
+#include "store-api.hh"
+#include "util.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+
+
+namespace nix {
+
+
+static bool cmpGensByNumber(const Generation & a, const Generation & b)
+{
+ return a.number < b.number;
+}
+
+
+/* Parse a generation name of the format
+ `<profilename>-<number>-link'. */
+static int parseName(const string & profileName, const string & name)
+{
+ if (string(name, 0, profileName.size() + 1) != profileName + "-") return -1;
+ string s = string(name, profileName.size() + 1);
+ string::size_type p = s.find("-link");
+ if (p == string::npos) return -1;
+ int n;
+ if (string2Int(string(s, 0, p), n) && n >= 0)
+ return n;
+ else
+ return -1;
+}
+
+
+
+/* Return all generations of ‘profile’, sorted by generation number.
+   ‘curGen’ is set to the number of the currently active generation,
+   or -1 when the profile symlink doesn't exist (or doesn't point at
+   a generation). */
+Generations findGenerations(Path profile, int & curGen)
+{
+    Generations gens;
+
+    Path profileDir = dirOf(profile);
+    string profileName = baseNameOf(profile);
+
+    for (auto & i : readDirectory(profileDir)) {
+        int n;
+        if ((n = parseName(profileName, i.name)) != -1) {
+            Generation gen;
+            gen.path = profileDir + "/" + i.name;
+            gen.number = n;
+            struct stat st;
+            if (lstat(gen.path.c_str(), &st) != 0)
+                throw SysError(format("statting ‘%1%’") % gen.path);
+            /* The symlink's own mtime is the generation's creation
+               time. */
+            gen.creationTime = st.st_mtime;
+            gens.push_back(gen);
+        }
+    }
+
+    gens.sort(cmpGensByNumber);
+
+    curGen = pathExists(profile)
+        ? parseName(profileName, readLink(profile))
+        : -1;
+
+    return gens;
+}
+
+
+static void makeName(const Path & profile, unsigned int num,
+ Path & outLink)
+{
+ Path prefix = (format("%1%-%2%") % profile % num).str();
+ outLink = prefix + "-link";
+}
+
+
+/* Create a new generation symlink for ‘profile’ pointing at
+   ‘outPath’ and register it as a permanent GC root.  Returns the
+   path of the (possibly pre-existing) generation link. */
+Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
+{
+    /* The new generation number should be higher than the previous
+       ones. */
+    int dummy;
+    Generations gens = findGenerations(profile, dummy);
+
+    unsigned int num;
+    if (gens.size() > 0) {
+        Generation last = gens.back();
+
+        if (readLink(last.path) == outPath) {
+            /* We only create a new generation symlink if it differs
+               from the last one.
+
+               This helps keeping gratuitous installs/rebuilds from piling
+               up uncontrolled numbers of generations, cluttering up the
+               UI like grub. */
+            return last.path;
+        }
+
+        num = gens.back().number;
+    } else {
+        num = 0;
+    }
+
+    /* Create the new generation.  Note that addPermRoot() blocks if
+       the garbage collector is running to prevent the stuff we've
+       built from moving from the temporary roots (which the GC knows)
+       to the permanent roots (of which the GC would have a stale
+       view).  If we didn't do it this way, the GC might remove the
+       user environment etc. we've just built. */
+    Path generation;
+    makeName(profile, num + 1, generation);
+    store->addPermRoot(outPath, generation, false, true);
+
+    return generation;
+}
+
+
+static void removeFile(const Path & path)
+{
+ if (remove(path.c_str()) == -1)
+ throw SysError(format("cannot unlink ‘%1%’") % path);
+}
+
+
+void deleteGeneration(const Path & profile, unsigned int gen)
+{
+ Path generation;
+ makeName(profile, gen, generation);
+ removeFile(generation);
+}
+
+
+static void deleteGeneration2(const Path & profile, unsigned int gen, bool dryRun)
+{
+ if (dryRun)
+ printInfo(format("would remove generation %1%") % gen);
+ else {
+ printInfo(format("removing generation %1%") % gen);
+ deleteGeneration(profile, gen);
+ }
+}
+
+
+/* Delete the given set of generations of ‘profile’ (or just report
+   what would be deleted when ‘dryRun’ is set).  Refuses to delete
+   the currently active generation. */
+void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun)
+{
+    PathLocks lock;
+    lockProfile(lock, profile);
+
+    int curGen;
+    Generations gens = findGenerations(profile, curGen);
+
+    /* Fixed: the error message was missing the opening quote around
+       the profile name. */
+    if (gensToDelete.find(curGen) != gensToDelete.end())
+        throw Error(format("cannot delete current generation of profile ‘%1%’") % profile);
+
+    for (auto & i : gens) {
+        if (gensToDelete.find(i.number) == gensToDelete.end()) continue;
+        deleteGeneration2(profile, i.number, dryRun);
+    }
+}
+
+
+void deleteOldGenerations(const Path & profile, bool dryRun)
+{
+ PathLocks lock;
+ lockProfile(lock, profile);
+
+ int curGen;
+ Generations gens = findGenerations(profile, curGen);
+
+ for (auto & i : gens)
+ if (i.number != curGen)
+ deleteGeneration2(profile, i.number, dryRun);
+}
+
+
+/* Delete all generations of ‘profile’ older than time ‘t’, except
+   the one that was active at time ‘t’ (and the current one). */
+void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun)
+{
+    PathLocks lock;
+    lockProfile(lock, profile);
+
+    int curGen;
+    Generations gens = findGenerations(profile, curGen);
+
+    /* Walk from newest to oldest; deletion only starts *after* we
+       pass the generation that was active at time ‘t’. */
+    bool canDelete = false;
+    for (auto i = gens.rbegin(); i != gens.rend(); ++i)
+        if (canDelete) {
+            assert(i->creationTime < t);
+            if (i->number != curGen)
+                deleteGeneration2(profile, i->number, dryRun);
+        } else if (i->creationTime < t) {
+            /* We may now start deleting generations, but we don't
+               delete this generation yet, because this generation was
+               still the one that was active at the requested point in
+               time. */
+            canDelete = true;
+        }
+}
+
+
+/* Delete all generations older than a time spec of the form "<N>d"
+   (N days).  Throws on a malformed spec. */
+void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, bool dryRun)
+{
+    time_t curTime = time(0);
+    int days;
+
+    /* Require the trailing 'd' unit explicitly.  Previously the last
+       character was stripped unchecked, so e.g. "30" was silently
+       interpreted as 3 days. */
+    if (timeSpec.empty() || timeSpec.back() != 'd')
+        throw Error(format("invalid number of days specifier ‘%1%’") % timeSpec);
+
+    string strDays = string(timeSpec, 0, timeSpec.size() - 1);
+
+    if (!string2Int(strDays, days) || days < 1)
+        throw Error(format("invalid number of days specifier ‘%1%’") % timeSpec);
+
+    time_t oldTime = curTime - days * 24 * 3600;
+
+    deleteGenerationsOlderThan(profile, oldTime, dryRun);
+}
+
+
+void switchLink(Path link, Path target)
+{
+ /* Hacky. */
+ if (dirOf(target) == dirOf(link)) target = baseNameOf(target);
+
+ replaceSymlink(target, link);
+}
+
+
+void lockProfile(PathLocks & lock, const Path & profile)
+{
+ lock.lockPaths({profile}, (format("waiting for lock on profile ‘%1%’") % profile).str());
+ lock.setDeletion(true);
+}
+
+
+string optimisticLockProfile(const Path & profile)
+{
+ return pathExists(profile) ? readLink(profile) : "";
+}
+
+
+}
diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh
new file mode 100644
index 000000000..1d4e6d303
--- /dev/null
+++ b/src/libstore/profiles.hh
@@ -0,0 +1,65 @@
+#pragma once
+
+#include "types.hh"
+#include "pathlocks.hh"
+
+#include <time.h>
+
+
+namespace nix {
+
+
+struct Generation
+{
+ int number;
+ Path path;
+ time_t creationTime;
+ Generation()
+ {
+ number = -1;
+ }
+ operator bool() const
+ {
+ return number != -1;
+ }
+};
+
+typedef list<Generation> Generations;
+
+
+/* Returns the list of currently present generations for the specified
+ profile, sorted by generation number. */
+Generations findGenerations(Path profile, int & curGen);
+
+class LocalFSStore;
+
+Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath);
+
+void deleteGeneration(const Path & profile, unsigned int gen);
+
+void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun);
+
+void deleteOldGenerations(const Path & profile, bool dryRun);
+
+void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun);
+
+void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, bool dryRun);
+
+void switchLink(Path link, Path target);
+
+/* Ensure exclusive access to a profile. Any command that modifies
+ the profile first acquires this lock. */
+void lockProfile(PathLocks & lock, const Path & profile);
+
+/* Optimistic locking is used by long-running operations like `nix-env
+ -i'. Instead of acquiring the exclusive lock for the entire
+ duration of the operation, we just perform the operation
+ optimistically (without an exclusive lock), and check at the end
+ whether the profile changed while we were busy (i.e., the symlink
+ target changed). If so, the operation is restarted. Restarting is
+ generally cheap, since the build results are still in the Nix
+ store. Most of the time, only the user environment has to be
+ rebuilt. */
+string optimisticLockProfile(const Path & profile);
+
+}
diff --git a/src/libstore/references.cc b/src/libstore/references.cc
new file mode 100644
index 000000000..33eab5a24
--- /dev/null
+++ b/src/libstore/references.cc
@@ -0,0 +1,122 @@
+#include "references.hh"
+#include "hash.hh"
+#include "util.hh"
+#include "archive.hh"
+
+#include <map>
+#include <cstdlib>
+
+
+namespace nix {
+
+
+static unsigned int refLength = 32; /* characters */
+
+
+/* Scan the buffer ‘s’ of length ‘len’ for any of the base-32 strings
+   in ‘hashes’ (each exactly refLength characters).  Found hashes are
+   moved from ‘hashes’ to ‘seen’.  The backwards inner scan lets us
+   skip ahead past any non-base-32 character in one step. */
+static void search(const unsigned char * s, unsigned int len,
+    StringSet & hashes, StringSet & seen)
+{
+    /* Lazily build a lookup table of valid base-32 characters.
+       NOTE(review): the initialisation is not synchronised; it looks
+       benign (idempotent writes of identical values) but confirm if
+       this can run from multiple threads. */
+    static bool initialised = false;
+    static bool isBase32[256];
+    if (!initialised) {
+        for (unsigned int i = 0; i < 256; ++i) isBase32[i] = false;
+        for (unsigned int i = 0; i < base32Chars.size(); ++i)
+            isBase32[(unsigned char) base32Chars[i]] = true;
+        initialised = true;
+    }
+
+    for (unsigned int i = 0; i + refLength <= len; ) {
+        int j;
+        bool match = true;
+        for (j = refLength - 1; j >= 0; --j)
+            if (!isBase32[(unsigned char) s[i + j]]) {
+                /* s[i+j] can't be part of any hash; restart just past
+                   it. */
+                i += j + 1;
+                match = false;
+                break;
+            }
+        if (!match) continue;
+        string ref((const char *) s + i, refLength);
+        if (hashes.find(ref) != hashes.end()) {
+            debug(format("found reference to ‘%1%’ at offset ‘%2%’")
+                  % ref % i);
+            seen.insert(ref);
+            hashes.erase(ref);
+        }
+        ++i;
+    }
+}
+
+
+struct RefScanSink : Sink
+{
+ HashSink hashSink;
+ StringSet hashes;
+ StringSet seen;
+
+ string tail;
+
+ RefScanSink() : hashSink(htSHA256) { }
+
+ void operator () (const unsigned char * data, size_t len);
+};
+
+
+/* Feed a data fragment into the scanner: hash it, search it for
+   references, and handle references that straddle fragment
+   boundaries by keeping the last (up to) refLength bytes in ‘tail’. */
+void RefScanSink::operator () (const unsigned char * data, size_t len)
+{
+    hashSink(data, len);
+
+    /* It's possible that a reference spans the previous and current
+       fragment, so search in the concatenation of the tail of the
+       previous fragment and the start of the current fragment. */
+    string s = tail + string((const char *) data, len > refLength ? refLength : len);
+    search((const unsigned char *) s.data(), s.size(), hashes, seen);
+
+    search(data, len, hashes, seen);
+
+    /* Keep the last refLength bytes seen so far (fewer if the total
+       input is shorter) as the tail for the next call. */
+    unsigned int tailLen = len <= refLength ? len : refLength;
+    tail =
+        string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen)) +
+        string((const char *) data + len - tailLen, tailLen);
+}
+
+
+/* Scan the NAR serialisation of ‘path’ for occurrences of the hash
+   parts of the store paths in ‘refs’.  Returns the subset of ‘refs’
+   that actually occur, and sets ‘hash’ to the SHA-256 NAR hash of
+   ‘path’ (computed as a side effect of the same pass). */
+PathSet scanForReferences(const string & path,
+    const PathSet & refs, HashResult & hash)
+{
+    RefScanSink sink;
+    std::map<string, Path> backMap;
+
+    /* For efficiency (and a higher hit rate), just search for the
+       hash part of the file name.  (This assumes that all references
+       have the form `HASH-bla'). */
+    for (auto & i : refs) {
+        string baseName = baseNameOf(i);
+        string::size_type pos = baseName.find('-');
+        if (pos == string::npos)
+            throw Error(format("bad reference ‘%1%’") % i);
+        string s = string(baseName, 0, pos);
+        assert(s.size() == refLength);
+        assert(backMap.find(s) == backMap.end());
+        // parseHash(htSHA256, s);
+        sink.hashes.insert(s);
+        backMap[s] = i;
+    }
+
+    /* Look for the hashes in the NAR dump of the path. */
+    dumpPath(path, sink);
+
+    /* Map the hashes found back to their store paths. */
+    PathSet found;
+    for (auto & i : sink.seen) {
+        std::map<string, Path>::iterator j;
+        /* Every seen hash was inserted from backMap above, so a miss
+           here is a logic error. */
+        if ((j = backMap.find(i)) == backMap.end()) abort();
+        found.insert(j->second);
+    }
+
+    hash = sink.hashSink.finish();
+
+    return found;
+}
+
+
+}
diff --git a/src/libstore/references.hh b/src/libstore/references.hh
new file mode 100644
index 000000000..013809d12
--- /dev/null
+++ b/src/libstore/references.hh
@@ -0,0 +1,11 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+
+namespace nix {
+
+PathSet scanForReferences(const Path & path, const PathSet & refs,
+ HashResult & hash);
+
+}
diff --git a/src/libstore/remote-fs-accessor.cc b/src/libstore/remote-fs-accessor.cc
new file mode 100644
index 000000000..ca14057c2
--- /dev/null
+++ b/src/libstore/remote-fs-accessor.cc
@@ -0,0 +1,57 @@
+#include "remote-fs-accessor.hh"
+#include "nar-accessor.hh"
+
+namespace nix {
+
+
+RemoteFSAccessor::RemoteFSAccessor(ref<Store> store)
+ : store(store)
+{
+}
+
+/* Resolve ‘path_’ to (accessor over the containing store path's NAR,
+   path relative to that store path).  The full NAR is downloaded on
+   first access and memoised in ‘nars’.
+   NOTE(review): the memo cache grows without bound and keeps whole
+   NARs in memory — confirm that's acceptable for the callers. */
+std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_)
+{
+    auto path = canonPath(path_);
+
+    auto storePath = store->toStorePath(path);
+    std::string restPath = std::string(path, storePath.size());
+
+    if (!store->isValidPath(storePath))
+        throw InvalidPath(format("path ‘%1%’ is not a valid store path") % storePath);
+
+    /* Reuse a previously fetched NAR for this store path. */
+    auto i = nars.find(storePath);
+    if (i != nars.end()) return {i->second, restPath};
+
+    StringSink sink;
+    store->narFromPath(storePath, sink);
+
+    auto accessor = makeNarAccessor(sink.s);
+    nars.emplace(storePath, accessor);
+    return {accessor, restPath};
+}
+
+FSAccessor::Stat RemoteFSAccessor::stat(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->stat(res.second);
+}
+
+StringSet RemoteFSAccessor::readDirectory(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->readDirectory(res.second);
+}
+
+std::string RemoteFSAccessor::readFile(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->readFile(res.second);
+}
+
+std::string RemoteFSAccessor::readLink(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->readLink(res.second);
+}
+
+}
diff --git a/src/libstore/remote-fs-accessor.hh b/src/libstore/remote-fs-accessor.hh
new file mode 100644
index 000000000..28f36c829
--- /dev/null
+++ b/src/libstore/remote-fs-accessor.hh
@@ -0,0 +1,29 @@
+#pragma once
+
+#include "fs-accessor.hh"
+#include "ref.hh"
+#include "store-api.hh"
+
+namespace nix {
+
+class RemoteFSAccessor : public FSAccessor
+{
+ ref<Store> store;
+
+ std::map<Path, ref<FSAccessor>> nars;
+
+ std::pair<ref<FSAccessor>, Path> fetch(const Path & path_);
+public:
+
+ RemoteFSAccessor(ref<Store> store);
+
+ Stat stat(const Path & path) override;
+
+ StringSet readDirectory(const Path & path) override;
+
+ std::string readFile(const Path & path) override;
+
+ std::string readLink(const Path & path) override;
+};
+
+}
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
new file mode 100644
index 000000000..bc9ef3d47
--- /dev/null
+++ b/src/libstore/remote-store.cc
@@ -0,0 +1,655 @@
+#include "serialise.hh"
+#include "util.hh"
+#include "remote-store.hh"
+#include "worker-protocol.hh"
+#include "archive.hh"
+#include "affinity.hh"
+#include "globals.hh"
+#include "derivations.hh"
+#include "pool.hh"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <cstring>
+
+namespace nix {
+
+
+Path readStorePath(Store & store, Source & from)
+{
+ Path path = readString(from);
+ store.assertStorePath(path);
+ return path;
+}
+
+
+template<class T> T readStorePaths(Store & store, Source & from)
+{
+ T paths = readStrings<T>(from);
+ for (auto & i : paths) store.assertStorePath(i);
+ return paths;
+}
+
+template PathSet readStorePaths(Store & store, Source & from);
+template Paths readStorePaths(Store & store, Source & from);
+
+/* TODO: Separate these store impls into different files, give them better names */
+RemoteStore::RemoteStore(const Params & params)
+ : Store(params)
+ , connections(make_ref<Pool<Connection>>(
+ std::max(1, (int) maxConnections),
+ [this]() { return openConnectionWrapper(); },
+ [](const ref<Connection> & r) { return r->to.good() && r->from.good(); }
+ ))
+{
+}
+
+
+ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper()
+{
+ if (failed)
+ throw Error("opening a connection to remote store ‘%s’ previously failed", getUri());
+ try {
+ return openConnection();
+ } catch (...) {
+ failed = true;
+ throw;
+ }
+}
+
+
+UDSRemoteStore::UDSRemoteStore(const Params & params)
+ : Store(params)
+ , LocalFSStore(params)
+ , RemoteStore(params)
+{
+}
+
+
+std::string UDSRemoteStore::getUri()
+{
+ return "daemon";
+}
+
+
+ref<RemoteStore::Connection> UDSRemoteStore::openConnection()
+{
+ auto conn = make_ref<Connection>();
+
+ /* Connect to a daemon that does the privileged work for us. */
+ conn->fd = socket(PF_UNIX, SOCK_STREAM
+ #ifdef SOCK_CLOEXEC
+ | SOCK_CLOEXEC
+ #endif
+ , 0);
+ if (!conn->fd)
+ throw SysError("cannot create Unix domain socket");
+ closeOnExec(conn->fd.get());
+
+ string socketPath = settings.nixDaemonSocketFile;
+
+ struct sockaddr_un addr;
+ addr.sun_family = AF_UNIX;
+ if (socketPath.size() + 1 >= sizeof(addr.sun_path))
+ throw Error(format("socket path ‘%1%’ is too long") % socketPath);
+ strcpy(addr.sun_path, socketPath.c_str());
+
+ if (connect(conn->fd.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1)
+ throw SysError(format("cannot connect to daemon at ‘%1%’") % socketPath);
+
+ conn->from.fd = conn->fd.get();
+ conn->to.fd = conn->fd.get();
+
+ initConnection(*conn);
+
+ return conn;
+}
+
+
+void RemoteStore::initConnection(Connection & conn)
+{
+ /* Send the magic greeting, check for the reply. */
+ try {
+ conn.to << WORKER_MAGIC_1;
+ conn.to.flush();
+ unsigned int magic = readInt(conn.from);
+ if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
+
+ conn.from >> conn.daemonVersion;
+ if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
+ throw Error("Nix daemon protocol version not supported");
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10)
+ throw Error("the Nix daemon version is too old");
+ conn.to << PROTOCOL_VERSION;
+
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 14) {
+ int cpu = settings.lockCPU ? lockToCurrentCPU() : -1;
+ if (cpu != -1)
+ conn.to << 1 << cpu;
+ else
+ conn.to << 0;
+ }
+
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11)
+ conn.to << false;
+
+ conn.processStderr();
+ }
+ catch (Error & e) {
+ throw Error("cannot open connection to remote store ‘%s’: %s", getUri(), e.what());
+ }
+
+ setOptions(conn);
+}
+
+
+void RemoteStore::setOptions(Connection & conn)
+{
+ conn.to << wopSetOptions
+ << settings.keepFailed
+ << settings.keepGoing
+ << settings.tryFallback
+ << verbosity
+ << settings.maxBuildJobs
+ << settings.maxSilentTime
+ << settings.useBuildHook
+ << (settings.verboseBuild ? lvlError : lvlVomit)
+ << 0 // obsolete log type
+ << 0 /* obsolete print build trace */
+ << settings.buildCores
+ << settings.useSubstitutes;
+
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
+ auto overrides = settings.getSettings(true);
+ conn.to << overrides.size();
+ for (auto & i : overrides)
+ conn.to << i.first << i.second;
+ }
+
+ conn.processStderr();
+}
+
+
+bool RemoteStore::isValidPathUncached(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopIsValidPath << path;
+ conn->processStderr();
+ return readInt(conn->from);
+}
+
+
+PathSet RemoteStore::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
+{
+ auto conn(connections->get());
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
+ PathSet res;
+ for (auto & i : paths)
+ if (isValidPath(i)) res.insert(i);
+ return res;
+ } else {
+ conn->to << wopQueryValidPaths << paths;
+ conn->processStderr();
+ return readStorePaths<PathSet>(*this, conn->from);
+ }
+}
+
+
+PathSet RemoteStore::queryAllValidPaths()
+{
+ auto conn(connections->get());
+ conn->to << wopQueryAllValidPaths;
+ conn->processStderr();
+ return readStorePaths<PathSet>(*this, conn->from);
+}
+
+
+PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths)
+{
+ auto conn(connections->get());
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
+ PathSet res;
+ for (auto & i : paths) {
+ conn->to << wopHasSubstitutes << i;
+ conn->processStderr();
+ if (readInt(conn->from)) res.insert(i);
+ }
+ return res;
+ } else {
+ conn->to << wopQuerySubstitutablePaths << paths;
+ conn->processStderr();
+ return readStorePaths<PathSet>(*this, conn->from);
+ }
+}
+
+
+void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
+ SubstitutablePathInfos & infos)
+{
+ if (paths.empty()) return;
+
+ auto conn(connections->get());
+
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
+
+ for (auto & i : paths) {
+ SubstitutablePathInfo info;
+ conn->to << wopQuerySubstitutablePathInfo << i;
+ conn->processStderr();
+ unsigned int reply = readInt(conn->from);
+ if (reply == 0) continue;
+ info.deriver = readString(conn->from);
+ if (info.deriver != "") assertStorePath(info.deriver);
+ info.references = readStorePaths<PathSet>(*this, conn->from);
+ info.downloadSize = readLongLong(conn->from);
+ info.narSize = readLongLong(conn->from);
+ infos[i] = info;
+ }
+
+ } else {
+
+ conn->to << wopQuerySubstitutablePathInfos << paths;
+ conn->processStderr();
+ size_t count = readNum<size_t>(conn->from);
+ for (size_t n = 0; n < count; n++) {
+ Path path = readStorePath(*this, conn->from);
+ SubstitutablePathInfo & info(infos[path]);
+ info.deriver = readString(conn->from);
+ if (info.deriver != "") assertStorePath(info.deriver);
+ info.references = readStorePaths<PathSet>(*this, conn->from);
+ info.downloadSize = readLongLong(conn->from);
+ info.narSize = readLongLong(conn->from);
+ }
+
+ }
+}
+
+
+void RemoteStore::queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure)
+{
+ sync2async<std::shared_ptr<ValidPathInfo>>(success, failure, [&]() {
+ auto conn(connections->get());
+ conn->to << wopQueryPathInfo << path;
+ try {
+ conn->processStderr();
+ } catch (Error & e) {
+ // Ugly backwards compatibility hack.
+ if (e.msg().find("is not valid") != std::string::npos)
+ throw InvalidPath(e.what());
+ throw;
+ }
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
+ bool valid; conn->from >> valid;
+ if (!valid) throw InvalidPath(format("path ‘%s’ is not valid") % path);
+ }
+ auto info = std::make_shared<ValidPathInfo>();
+ info->path = path;
+ info->deriver = readString(conn->from);
+ if (info->deriver != "") assertStorePath(info->deriver);
+ info->narHash = parseHash(htSHA256, readString(conn->from));
+ info->references = readStorePaths<PathSet>(*this, conn->from);
+ conn->from >> info->registrationTime >> info->narSize;
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
+ conn->from >> info->ultimate;
+ info->sigs = readStrings<StringSet>(conn->from);
+ conn->from >> info->ca;
+ }
+ return info;
+ });
+}
+
+
+void RemoteStore::queryReferrers(const Path & path,
+ PathSet & referrers)
+{
+ auto conn(connections->get());
+ conn->to << wopQueryReferrers << path;
+ conn->processStderr();
+ PathSet referrers2 = readStorePaths<PathSet>(*this, conn->from);
+ referrers.insert(referrers2.begin(), referrers2.end());
+}
+
+
+PathSet RemoteStore::queryValidDerivers(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopQueryValidDerivers << path;
+ conn->processStderr();
+ return readStorePaths<PathSet>(*this, conn->from);
+}
+
+
+PathSet RemoteStore::queryDerivationOutputs(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopQueryDerivationOutputs << path;
+ conn->processStderr();
+ return readStorePaths<PathSet>(*this, conn->from);
+}
+
+
+PathSet RemoteStore::queryDerivationOutputNames(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopQueryDerivationOutputNames << path;
+ conn->processStderr();
+ return readStrings<PathSet>(conn->from);
+}
+
+
+Path RemoteStore::queryPathFromHashPart(const string & hashPart)
+{
+ auto conn(connections->get());
+ conn->to << wopQueryPathFromHashPart << hashPart;
+ conn->processStderr();
+ Path path = readString(conn->from);
+ if (!path.empty()) assertStorePath(path);
+ return path;
+}
+
+
+void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs, std::shared_ptr<FSAccessor> accessor)
+{
+ auto conn(connections->get());
+
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
+ conn->to << wopImportPaths;
+
+ StringSink sink;
+ sink << 1 // == path follows
+ ;
+ assert(nar->size() % 8 == 0);
+ sink((unsigned char *) nar->data(), nar->size());
+ sink
+ << exportMagic
+ << info.path
+ << info.references
+ << info.deriver
+ << 0 // == no legacy signature
+ << 0 // == no path follows
+ ;
+
+ StringSource source(*sink.s);
+ conn->processStderr(0, &source);
+
+ auto importedPaths = readStorePaths<PathSet>(*this, conn->from);
+ assert(importedPaths.size() <= 1);
+ }
+
+ else {
+ conn->to << wopAddToStoreNar
+ << info.path << info.deriver << printHash(info.narHash)
+ << info.references << info.registrationTime << info.narSize
+ << info.ultimate << info.sigs << info.ca
+ << repair << dontCheckSigs;
+ conn->to(*nar);
+ conn->processStderr();
+ }
+}
+
+
+Path RemoteStore::addToStore(const string & name, const Path & _srcPath,
+ bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
+{
+ if (repair) throw Error("repairing is not supported when building through the Nix daemon");
+
+ auto conn(connections->get());
+
+ Path srcPath(absPath(_srcPath));
+
+ conn->to << wopAddToStore << name
+ << ((hashAlgo == htSHA256 && recursive) ? 0 : 1) /* backwards compatibility hack */
+ << (recursive ? 1 : 0)
+ << printHashType(hashAlgo);
+
+ try {
+ conn->to.written = 0;
+ conn->to.warn = true;
+ dumpPath(srcPath, conn->to, filter);
+ conn->to.warn = false;
+ conn->processStderr();
+ } catch (SysError & e) {
+ /* Daemon closed while we were sending the path. Probably OOM
+ or I/O error. */
+ if (e.errNo == EPIPE)
+ try {
+ conn->processStderr();
+ } catch (EndOfFile & e) { }
+ throw;
+ }
+
+ return readStorePath(*this, conn->from);
+}
+
+
+Path RemoteStore::addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair)
+{
+ if (repair) throw Error("repairing is not supported when building through the Nix daemon");
+
+ auto conn(connections->get());
+ conn->to << wopAddTextToStore << name << s << references;
+
+ conn->processStderr();
+ return readStorePath(*this, conn->from);
+}
+
+
+void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
+{
+ auto conn(connections->get());
+ conn->to << wopBuildPaths;
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) {
+ conn->to << drvPaths;
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
+ conn->to << buildMode;
+ else
+ /* Old daemons did not take a 'buildMode' parameter, so we
+ need to validate it here on the client side. */
+ if (buildMode != bmNormal)
+ throw Error("repairing or checking is not supported when building through the Nix daemon");
+ } else {
+ /* For backwards compatibility with old daemons, strip output
+ identifiers. */
+ PathSet drvPaths2;
+ for (auto & i : drvPaths)
+ drvPaths2.insert(string(i, 0, i.find('!')));
+ conn->to << drvPaths2;
+ }
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode)
+{
+ auto conn(connections->get());
+ conn->to << wopBuildDerivation << drvPath << drv << buildMode;
+ conn->processStderr();
+ BuildResult res;
+ unsigned int status;
+ conn->from >> status >> res.errorMsg;
+ res.status = (BuildResult::Status) status;
+ return res;
+}
+
+
+void RemoteStore::ensurePath(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopEnsurePath << path;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+void RemoteStore::addTempRoot(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopAddTempRoot << path;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+void RemoteStore::addIndirectRoot(const Path & path)
+{
+ auto conn(connections->get());
+ conn->to << wopAddIndirectRoot << path;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+void RemoteStore::syncWithGC()
+{
+ auto conn(connections->get());
+ conn->to << wopSyncWithGC;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+Roots RemoteStore::findRoots()
+{
+ auto conn(connections->get());
+ conn->to << wopFindRoots;
+ conn->processStderr();
+ size_t count = readNum<size_t>(conn->from);
+ Roots result;
+ while (count--) {
+ Path link = readString(conn->from);
+ Path target = readStorePath(*this, conn->from);
+ result[link] = target;
+ }
+ return result;
+}
+
+
+void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
+{
+ auto conn(connections->get());
+
+ conn->to
+ << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness
+ << options.maxFreed
+ /* removed options */
+ << 0 << 0 << 0;
+
+ conn->processStderr();
+
+ results.paths = readStrings<PathSet>(conn->from);
+ results.bytesFreed = readLongLong(conn->from);
+ readLongLong(conn->from); // obsolete
+
+ {
+ auto state_(Store::state.lock());
+ state_->pathInfoCache.clear();
+ }
+}
+
+
+void RemoteStore::optimiseStore()
+{
+ auto conn(connections->get());
+ conn->to << wopOptimiseStore;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+bool RemoteStore::verifyStore(bool checkContents, bool repair)
+{
+ auto conn(connections->get());
+ conn->to << wopVerifyStore << checkContents << repair;
+ conn->processStderr();
+ return readInt(conn->from);
+}
+
+
+void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs)
+{
+ auto conn(connections->get());
+ conn->to << wopAddSignatures << storePath << sigs;
+ conn->processStderr();
+ readInt(conn->from);
+}
+
+
+void RemoteStore::queryMissing(const PathSet & targets,
+ PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+ unsigned long long & downloadSize, unsigned long long & narSize)
+{
+ {
+ auto conn(connections->get());
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19)
+ // Don't hold the connection handle in the fallback case
+ // to prevent a deadlock.
+ goto fallback;
+ conn->to << wopQueryMissing << targets;
+ conn->processStderr();
+ willBuild = readStorePaths<PathSet>(*this, conn->from);
+ willSubstitute = readStorePaths<PathSet>(*this, conn->from);
+ unknown = readStorePaths<PathSet>(*this, conn->from);
+ conn->from >> downloadSize >> narSize;
+ return;
+ }
+
+ fallback:
+ return Store::queryMissing(targets, willBuild, willSubstitute,
+ unknown, downloadSize, narSize);
+}
+
+
+RemoteStore::Connection::~Connection()
+{
+ try {
+ to.flush();
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+
+void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
+{
+ to.flush();
+ unsigned int msg;
+ while ((msg = readInt(from)) == STDERR_NEXT
+ || msg == STDERR_READ || msg == STDERR_WRITE) {
+ if (msg == STDERR_WRITE) {
+ string s = readString(from);
+ if (!sink) throw Error("no sink");
+ (*sink)(s);
+ }
+ else if (msg == STDERR_READ) {
+ if (!source) throw Error("no source");
+ size_t len = readNum<size_t>(from);
+ auto buf = std::make_unique<unsigned char[]>(len);
+ writeString(buf.get(), source->read(buf.get(), len), to);
+ to.flush();
+ }
+ else
+ printError(chomp(readString(from)));
+ }
+ if (msg == STDERR_ERROR) {
+ string error = readString(from);
+ unsigned int status = readInt(from);
+ throw Error(status, error);
+ }
+ else if (msg != STDERR_LAST)
+ throw Error("protocol error processing standard error");
+}
+
+
+}
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
new file mode 100644
index 000000000..479cf3a79
--- /dev/null
+++ b/src/libstore/remote-store.hh
@@ -0,0 +1,142 @@
+#pragma once
+
+#include <limits>
+#include <string>
+
+#include "store-api.hh"
+
+
+namespace nix {
+
+
+class Pipe;
+class Pid;
+struct FdSink;
+struct FdSource;
+template<typename T> class Pool;
+
+
+/* FIXME: RemoteStore is a misnomer - should be something like
+ DaemonStore. */
+class RemoteStore : public virtual Store
+{
+public:
+
+ const Setting<int> maxConnections{(Store*) this, 1,
+ "max-connections", "maximum number of concurrent connections to the Nix daemon"};
+
+ RemoteStore(const Params & params);
+
+ /* Implementations of abstract store API methods. */
+
+ bool isValidPathUncached(const Path & path) override;
+
+ PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override;
+
+ PathSet queryAllValidPaths() override;
+
+ void queryPathInfoUncached(const Path & path,
+ std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override;
+
+ void queryReferrers(const Path & path, PathSet & referrers) override;
+
+ PathSet queryValidDerivers(const Path & path) override;
+
+ PathSet queryDerivationOutputs(const Path & path) override;
+
+ StringSet queryDerivationOutputNames(const Path & path) override;
+
+ Path queryPathFromHashPart(const string & hashPart) override;
+
+ PathSet querySubstitutablePaths(const PathSet & paths) override;
+
+ void querySubstitutablePathInfos(const PathSet & paths,
+ SubstitutablePathInfos & infos) override;
+
+ void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+ bool repair, bool dontCheckSigs,
+ std::shared_ptr<FSAccessor> accessor) override;
+
+ Path addToStore(const string & name, const Path & srcPath,
+ bool recursive = true, HashType hashAlgo = htSHA256,
+ PathFilter & filter = defaultPathFilter, bool repair = false) override;
+
+ Path addTextToStore(const string & name, const string & s,
+ const PathSet & references, bool repair = false) override;
+
+ void buildPaths(const PathSet & paths, BuildMode buildMode) override;
+
+ BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+ BuildMode buildMode) override;
+
+ void ensurePath(const Path & path) override;
+
+ void addTempRoot(const Path & path) override;
+
+ void addIndirectRoot(const Path & path) override;
+
+ void syncWithGC() override;
+
+ Roots findRoots() override;
+
+ void collectGarbage(const GCOptions & options, GCResults & results) override;
+
+ void optimiseStore() override;
+
+ bool verifyStore(bool checkContents, bool repair) override;
+
+ void addSignatures(const Path & storePath, const StringSet & sigs) override;
+
+ void queryMissing(const PathSet & targets,
+ PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+ unsigned long long & downloadSize, unsigned long long & narSize) override;
+
+protected:
+
+ struct Connection
+ {
+ FdSink to;
+ FdSource from;
+ unsigned int daemonVersion;
+
+ virtual ~Connection();
+
+ void processStderr(Sink * sink = 0, Source * source = 0);
+ };
+
+ ref<Connection> openConnectionWrapper();
+
+ virtual ref<Connection> openConnection() = 0;
+
+ void initConnection(Connection & conn);
+
+ ref<Pool<Connection>> connections;
+
+private:
+
+ std::atomic_bool failed{false};
+
+ void setOptions(Connection & conn);
+};
+
+class UDSRemoteStore : public LocalFSStore, public RemoteStore
+{
+public:
+
+ UDSRemoteStore(const Params & params);
+
+ std::string getUri() override;
+
+private:
+
+ struct Connection : RemoteStore::Connection
+ {
+ AutoCloseFD fd;
+ };
+
+ ref<RemoteStore::Connection> openConnection() override;
+};
+
+
+}
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
new file mode 100644
index 000000000..245455296
--- /dev/null
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -0,0 +1,339 @@
+#if ENABLE_S3
+
+#include "s3.hh"
+#include "s3-binary-cache-store.hh"
+#include "nar-info.hh"
+#include "nar-info-disk-cache.hh"
+#include "globals.hh"
+#include "compression.hh"
+#include "download.hh"
+#include "istringstream_nocopy.hh"
+
+#include <aws/core/Aws.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/client/DefaultRetryStrategy.h>
+#include <aws/s3/S3Client.h>
+#include <aws/s3/model/CreateBucketRequest.h>
+#include <aws/s3/model/GetBucketLocationRequest.h>
+#include <aws/s3/model/GetObjectRequest.h>
+#include <aws/s3/model/HeadObjectRequest.h>
+#include <aws/s3/model/ListObjectsRequest.h>
+#include <aws/s3/model/PutObjectRequest.h>
+
+namespace nix {
+
+struct S3Error : public Error
+{
+ Aws::S3::S3Errors err;
+ S3Error(Aws::S3::S3Errors err, const FormatOrString & fs)
+ : Error(fs), err(err) { };
+};
+
+/* Helper: given an Outcome<R, E>, return R in case of success, or
+ throw an exception in case of an error. */
+template<typename R, typename E>
+R && checkAws(const FormatOrString & fs, Aws::Utils::Outcome<R, E> && outcome)
+{
+ if (!outcome.IsSuccess())
+ throw S3Error(
+ outcome.GetError().GetErrorType(),
+ fs.s + ": " + outcome.GetError().GetMessage());
+ return outcome.GetResultWithOwnership();
+}
+
+static void initAWS()
+{
+ static std::once_flag flag;
+ std::call_once(flag, []() {
+ Aws::SDKOptions options;
+
+ /* We install our own OpenSSL locking function (see
+ shared.cc), so don't let aws-sdk-cpp override it. */
+ options.cryptoOptions.initAndCleanupOpenSSL = false;
+
+ Aws::InitAPI(options);
+ });
+}
+
+S3Helper::S3Helper(const string & region)
+ : config(makeConfig(region))
+ , client(make_ref<Aws::S3::S3Client>(*config))
+{
+}
+
+/* Log AWS retries. */
+class RetryStrategy : public Aws::Client::DefaultRetryStrategy
+{
+ long CalculateDelayBeforeNextRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override
+ {
+ auto res = Aws::Client::DefaultRetryStrategy::CalculateDelayBeforeNextRetry(error, attemptedRetries);
+ printError("AWS error '%s' (%s), will retry in %d ms",
+ error.GetExceptionName(), error.GetMessage(), res);
+ return res;
+ }
+};
+
+ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region)
+{
+ initAWS();
+ auto res = make_ref<Aws::Client::ClientConfiguration>();
+ res->region = region;
+ res->requestTimeoutMs = 600 * 1000;
+ res->retryStrategy = std::make_shared<RetryStrategy>();
+ res->caFile = settings.caFile;
+ return res;
+}
+
+S3Helper::DownloadResult S3Helper::getObject(
+ const std::string & bucketName, const std::string & key)
+{
+ debug("fetching ‘s3://%s/%s’...", bucketName, key);
+
+ auto request =
+ Aws::S3::Model::GetObjectRequest()
+ .WithBucket(bucketName)
+ .WithKey(key);
+
+ request.SetResponseStreamFactory([&]() {
+ return Aws::New<std::stringstream>("STRINGSTREAM");
+ });
+
+ DownloadResult res;
+
+ auto now1 = std::chrono::steady_clock::now();
+
+ try {
+
+ auto result = checkAws(fmt("AWS error fetching ‘%s’", key),
+ client->GetObject(request));
+
+ res.data = decodeContent(
+ result.GetContentEncoding(),
+ make_ref<std::string>(
+ dynamic_cast<std::stringstream &>(result.GetBody()).str()));
+
+ } catch (S3Error & e) {
+ if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
+ }
+
+ auto now2 = std::chrono::steady_clock::now();
+
+ res.durationMs = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+
+ return res;
+}
+
+struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
+{
+ const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
+ const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
+ const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
+ const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
+
+ std::string bucketName;
+
+ Stats stats;
+
+ S3Helper s3Helper;
+
+ S3BinaryCacheStoreImpl(
+ const Params & params, const std::string & bucketName)
+ : S3BinaryCacheStore(params)
+ , bucketName(bucketName)
+ , s3Helper(region)
+ {
+ diskCache = getNarInfoDiskCache();
+ }
+
+ std::string getUri() override
+ {
+ return "s3://" + bucketName;
+ }
+
+ void init() override
+ {
+ if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) {
+
+            /* Create the bucket if it doesn't already exist. */
+ // FIXME: HeadBucket would be more appropriate, but doesn't return
+ // an easily parsed 404 message.
+ auto res = s3Helper.client->GetBucketLocation(
+ Aws::S3::Model::GetBucketLocationRequest().WithBucket(bucketName));
+
+ if (!res.IsSuccess()) {
+ if (res.GetError().GetErrorType() != Aws::S3::S3Errors::NO_SUCH_BUCKET)
+ throw Error(format("AWS error checking bucket ‘%s’: %s") % bucketName % res.GetError().GetMessage());
+
+ checkAws(format("AWS error creating bucket ‘%s’") % bucketName,
+ s3Helper.client->CreateBucket(
+ Aws::S3::Model::CreateBucketRequest()
+ .WithBucket(bucketName)
+ .WithCreateBucketConfiguration(
+ Aws::S3::Model::CreateBucketConfiguration()
+ /* .WithLocationConstraint(
+ Aws::S3::Model::BucketLocationConstraint::US) */ )));
+ }
+
+ BinaryCacheStore::init();
+
+ diskCache->createCache(getUri(), storeDir, wantMassQuery_, priority);
+ }
+ }
+
+ const Stats & getS3Stats() override
+ {
+ return stats;
+ }
+
+ /* This is a specialisation of isValidPath() that optimistically
+ fetches the .narinfo file, rather than first checking for its
+ existence via a HEAD request. Since .narinfos are small, doing
+ a GET is unlikely to be slower than HEAD. */
+ bool isValidPathUncached(const Path & storePath) override
+ {
+ try {
+ queryPathInfo(storePath);
+ return true;
+ } catch (InvalidPath & e) {
+ return false;
+ }
+ }
+
+ bool fileExists(const std::string & path) override
+ {
+ stats.head++;
+
+ auto res = s3Helper.client->HeadObject(
+ Aws::S3::Model::HeadObjectRequest()
+ .WithBucket(bucketName)
+ .WithKey(path));
+
+ if (!res.IsSuccess()) {
+ auto & error = res.GetError();
+ if (error.GetErrorType() == Aws::S3::S3Errors::UNKNOWN // FIXME
+ && error.GetMessage().find("404") != std::string::npos)
+ return false;
+ throw Error(format("AWS error fetching ‘%s’: %s") % path % error.GetMessage());
+ }
+
+ return true;
+ }
+
+ void uploadFile(const std::string & path, const std::string & data,
+ const std::string & mimeType,
+ const std::string & contentEncoding)
+ {
+ auto request =
+ Aws::S3::Model::PutObjectRequest()
+ .WithBucket(bucketName)
+ .WithKey(path);
+
+ request.SetContentType(mimeType);
+
+ if (contentEncoding != "")
+ request.SetContentEncoding(contentEncoding);
+
+ auto stream = std::make_shared<istringstream_nocopy>(data);
+
+ request.SetBody(stream);
+
+ stats.put++;
+ stats.putBytes += data.size();
+
+ auto now1 = std::chrono::steady_clock::now();
+
+ auto result = checkAws(format("AWS error uploading ‘%s’") % path,
+ s3Helper.client->PutObject(request));
+
+ auto now2 = std::chrono::steady_clock::now();
+
+ auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
+
+ printInfo(format("uploaded ‘s3://%1%/%2%’ (%3% bytes) in %4% ms")
+ % bucketName % path % data.size() % duration);
+
+ stats.putTimeMs += duration;
+ }
+
+ void upsertFile(const std::string & path, const std::string & data,
+ const std::string & mimeType) override
+ {
+ if (narinfoCompression != "" && hasSuffix(path, ".narinfo"))
+ uploadFile(path, *compress(narinfoCompression, data), mimeType, narinfoCompression);
+ else if (lsCompression != "" && hasSuffix(path, ".ls"))
+ uploadFile(path, *compress(lsCompression, data), mimeType, lsCompression);
+ else if (logCompression != "" && hasPrefix(path, "log/"))
+ uploadFile(path, *compress(logCompression, data), mimeType, logCompression);
+ else
+ uploadFile(path, data, mimeType, "");
+ }
+
+ void getFile(const std::string & path,
+ std::function<void(std::shared_ptr<std::string>)> success,
+ std::function<void(std::exception_ptr exc)> failure) override
+ {
+ sync2async<std::shared_ptr<std::string>>(success, failure, [&]() {
+ debug(format("fetching ‘s3://%1%/%2%’...") % bucketName % path);
+
+ stats.get++;
+
+ auto res = s3Helper.getObject(bucketName, path);
+
+ stats.getBytes += res.data ? res.data->size() : 0;
+ stats.getTimeMs += res.durationMs;
+
+ if (res.data)
+ printTalkative("downloaded ‘s3://%s/%s’ (%d bytes) in %d ms",
+ bucketName, path, res.data->size(), res.durationMs);
+
+ return res.data;
+ });
+ }
+
+ PathSet queryAllValidPaths() override
+ {
+ PathSet paths;
+ std::string marker;
+
+ do {
+ debug(format("listing bucket ‘s3://%s’ from key ‘%s’...") % bucketName % marker);
+
+ auto res = checkAws(format("AWS error listing bucket ‘%s’") % bucketName,
+ s3Helper.client->ListObjects(
+ Aws::S3::Model::ListObjectsRequest()
+ .WithBucket(bucketName)
+ .WithDelimiter("/")
+ .WithMarker(marker)));
+
+ auto & contents = res.GetContents();
+
+ debug(format("got %d keys, next marker ‘%s’")
+ % contents.size() % res.GetNextMarker());
+
+ for (auto object : contents) {
+ auto & key = object.GetKey();
+ if (key.size() != 40 || !hasSuffix(key, ".narinfo")) continue;
+ paths.insert(storeDir + "/" + key.substr(0, key.size() - 8));
+ }
+
+ marker = res.GetNextMarker();
+ } while (!marker.empty());
+
+ return paths;
+ }
+
+};
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ if (std::string(uri, 0, 5) != "s3://") return 0;
+ auto store = std::make_shared<S3BinaryCacheStoreImpl>(params, std::string(uri, 5));
+ store->init();
+ return store;
+});
+
+}
+
+#endif
diff --git a/src/libstore/s3-binary-cache-store.hh b/src/libstore/s3-binary-cache-store.hh
new file mode 100644
index 000000000..4d43fe4d2
--- /dev/null
+++ b/src/libstore/s3-binary-cache-store.hh
@@ -0,0 +1,33 @@
+#pragma once
+
+#include "binary-cache-store.hh"
+
+#include <atomic>
+
+namespace nix {
+
+class S3BinaryCacheStore : public BinaryCacheStore
+{
+protected:
+
+ S3BinaryCacheStore(const Params & params)
+ : BinaryCacheStore(params)
+ { }
+
+public:
+
+ struct Stats
+ {
+ std::atomic<uint64_t> put{0};
+ std::atomic<uint64_t> putBytes{0};
+ std::atomic<uint64_t> putTimeMs{0};
+ std::atomic<uint64_t> get{0};
+ std::atomic<uint64_t> getBytes{0};
+ std::atomic<uint64_t> getTimeMs{0};
+ std::atomic<uint64_t> head{0};
+ };
+
+ virtual const Stats & getS3Stats() = 0;
+};
+
+}
diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh
new file mode 100644
index 000000000..08a7fbf96
--- /dev/null
+++ b/src/libstore/s3.hh
@@ -0,0 +1,33 @@
+#pragma once
+
+#if ENABLE_S3
+
+#include "ref.hh"
+
+namespace Aws { namespace Client { class ClientConfiguration; } }
+namespace Aws { namespace S3 { class S3Client; } }
+
+namespace nix {
+
+/* Helper owning an AWS client configuration and an S3 client, shared
+   by the S3 binary cache store and the s3:// downloader.  Only
+   forward declarations of the AWS types are visible here so that
+   this header does not drag in the AWS SDK. */
+struct S3Helper
+{
+    ref<Aws::Client::ClientConfiguration> config;
+    ref<Aws::S3::S3Client> client;
+
+    S3Helper(const std::string & region);
+
+    ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region);
+
+    /* Result of a GetObject request: the object body (null if it
+       could not be fetched) and how long the transfer took. */
+    struct DownloadResult
+    {
+        std::shared_ptr<std::string> data;
+        unsigned int durationMs;
+    };
+
+    DownloadResult getObject(
+        const std::string & bucketName, const std::string & key);
+};
+
+}
+
+#endif
diff --git a/src/libstore/sandbox-defaults.sb.in b/src/libstore/sandbox-defaults.sb.in
new file mode 100644
index 000000000..b5e80085f
--- /dev/null
+++ b/src/libstore/sandbox-defaults.sb.in
@@ -0,0 +1,63 @@
+; macOS (Darwin) sandbox profile applied to Nix builders.
+; "@sysconfdir@" is substituted at build time; _GLOBAL_TMP_DIR is a
+; runtime parameter.  The profile is deny-by-default: every (allow ...)
+; below opens one specific hole.
+(allow file-read* file-write-data (literal "/dev/null"))
+(allow ipc-posix*)
+(allow mach-lookup (global-name "com.apple.SecurityServer"))
+
+; Miscellaneous system files commonly touched by build tools.
+(allow file-read*
+    (literal "/dev/dtracehelper")
+    (literal "/dev/tty")
+    (literal "/dev/autofs_nowait")
+    (literal "/System/Library/CoreServices/SystemVersion.plist")
+    (literal "/private/var/run/systemkeychaincheck.done")
+    (literal "/private/etc/protocols")
+    (literal "/private/var/tmp")
+    (literal "/private/var/db")
+    (subpath "/private/var/db/mds"))
+
+; Locale / timezone / ICU data.
+(allow file-read*
+    (subpath "/usr/share/icu")
+    (subpath "/usr/share/locale")
+    (subpath "/usr/share/zoneinfo"))
+
+(allow file-write*
+    (literal "/dev/tty")
+    (literal "/dev/dtracehelper")
+    (literal "/mds"))
+
+(allow file-ioctl (literal "/dev/dtracehelper"))
+
+(allow file-read-metadata
+    (literal "/var")
+    (literal "/tmp")
+    ; symlinks
+    (literal "@sysconfdir@")
+    (literal "@sysconfdir@/nix")
+    (literal "@sysconfdir@/nix/nix.conf")
+    (literal "/etc/resolv.conf")
+    (literal "/private/etc/resolv.conf"))
+
+(allow file-read*
+    (literal "/private@sysconfdir@/nix/nix.conf")
+    (literal "/private/var/run/resolv.conf"))
+
+; some builders use filehandles other than stdin/stdout
+(allow file*
+    (subpath "/dev/fd")
+    (literal "/dev/ptmx")
+    (regex #"^/dev/[pt]ty.*$"))
+
+; allow everything inside TMP
+(allow file* process-exec
+    (subpath (param "_GLOBAL_TMP_DIR"))
+    (subpath "/private/tmp"))
+
+(allow process-fork)
+(allow sysctl-read)
+(allow signal (target same-sandbox))
+
+; allow getpwuid (for git and other packages)
+(allow mach-lookup
+    (global-name "com.apple.system.notification_center")
+    (global-name "com.apple.system.opendirectoryd.libinfo"))
+
+; allow local networking
+(allow network* (local ip) (remote unix-socket))
diff --git a/src/libstore/schema.sql b/src/libstore/schema.sql
new file mode 100644
index 000000000..09c71a2b8
--- /dev/null
+++ b/src/libstore/schema.sql
@@ -0,0 +1,42 @@
+-- One row per valid (registered) store path.
+create table if not exists ValidPaths (
+    id               integer primary key autoincrement not null,
+    path             text unique not null,
+    hash             text not null,      -- NAR hash of the path contents
+    registrationTime integer not null,
+    deriver          text,               -- store path of the deriving .drv, if known
+    narSize          integer,
+    ultimate         integer,            -- null implies "false"
+    sigs             text,               -- space-separated
+    ca               text                -- if not null, an assertion that the path is content-addressed; see ValidPathInfo
+);
+
+-- Edge list of the references graph between valid paths.  A referrer
+-- may be deleted (cascading away its edges), but a path that is still
+-- referenced by others may not ("on delete restrict").
+create table if not exists Refs (
+    referrer  integer not null,
+    reference integer not null,
+    primary key (referrer, reference),
+    foreign key (referrer) references ValidPaths(id) on delete cascade,
+    foreign key (reference) references ValidPaths(id) on delete restrict
+);
+
+create index if not exists IndexReferrer on Refs(referrer);
+create index if not exists IndexReference on Refs(reference);
+
+-- Paths can refer to themselves, causing a tuple (N, N) in the Refs
+-- table. This causes a deletion of the corresponding row in
+-- ValidPaths to cause a foreign key constraint violation (due to `on
+-- delete restrict' on the `reference' column). Therefore, explicitly
+-- get rid of self-references.
+create trigger if not exists DeleteSelfRefs before delete on ValidPaths
+  begin
+    delete from Refs where referrer = old.id and reference = old.id;
+  end;
+
+-- Maps a derivation (a ValidPaths row for a .drv) to its output paths.
+create table if not exists DerivationOutputs (
+    drv  integer not null,
+    id   text not null, -- symbolic output id, usually "out"
+    path text not null,
+    primary key (drv, id),
+    foreign key (drv) references ValidPaths(id) on delete cascade
+);
+
+create index if not exists IndexDerivationOutputs on DerivationOutputs(path);
diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh
new file mode 100644
index 000000000..f8cc9a4b6
--- /dev/null
+++ b/src/libstore/serve-protocol.hh
@@ -0,0 +1,23 @@
+#pragma once
+
+namespace nix {
+
+/* Handshake magic numbers exchanged by nix-store --serve client and
+   server. */
+#define SERVE_MAGIC_1 0x390c9deb
+#define SERVE_MAGIC_2 0x5452eecb
+
+/* Protocol version, encoded as major in the high byte and minor in
+   the low byte (0x203 = 2.3). */
+#define SERVE_PROTOCOL_VERSION 0x203
+#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
+#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
+
+/* Opcodes sent by the client over the serve protocol. */
+typedef enum {
+    cmdQueryValidPaths = 1,
+    cmdQueryPathInfos = 2,
+    cmdDumpStorePath = 3,
+    cmdImportPaths = 4,
+    cmdExportPaths = 5,
+    cmdBuildPaths = 6,
+    cmdQueryClosure = 7,
+    cmdBuildDerivation = 8,
+} ServeCommand;
+
+}
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
new file mode 100644
index 000000000..a81e62dbd
--- /dev/null
+++ b/src/libstore/sqlite.cc
@@ -0,0 +1,197 @@
+#include "sqlite.hh"
+#include "util.hh"
+
+#include <sqlite3.h>
+
+#include <atomic>
+
+namespace nix {
+
+/* Convert the current SQLite error state of `db' into an exception.
+   SQLITE_BUSY / SQLITE_PROTOCOL become the retryable SQLiteBusy;
+   everything else becomes SQLiteError. */
+[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f)
+{
+    int err = sqlite3_errcode(db);
+
+    auto path = sqlite3_db_filename(db, nullptr);
+    if (!path) path = "(in-memory)";
+
+    if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
+        throw SQLiteBusy(
+            err == SQLITE_PROTOCOL
+            ? fmt("SQLite database ‘%s’ is busy (SQLITE_PROTOCOL)", path)
+            : fmt("SQLite database ‘%s’ is busy", path));
+    }
+    else
+        throw SQLiteError("%s: %s (in ‘%s’)", f.str(), sqlite3_errstr(err), path);
+}
+
+/* Open (creating if necessary) the database at `path'. */
+SQLite::SQLite(const Path & path)
+{
+    if (sqlite3_open_v2(path.c_str(), &db,
+            SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0) != SQLITE_OK)
+        throw Error(format("cannot open SQLite database ‘%s’") % path);
+}
+
+SQLite::~SQLite()
+{
+    /* Destructors must not throw; log and swallow close failures. */
+    try {
+        if (db && sqlite3_close(db) != SQLITE_OK)
+            throwSQLiteError(db, "closing database");
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+/* Execute a statement directly, retrying while the database is busy. */
+void SQLite::exec(const std::string & stmt)
+{
+    retrySQLite<void>([&]() {
+        if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK)
+            throwSQLiteError(db, format("executing SQLite statement ‘%s’") % stmt);
+    });
+}
+
+/* Compile `sql' into a prepared statement against `db'.  May only be
+   called once per SQLiteStmt (asserted). */
+void SQLiteStmt::create(sqlite3 * db, const string & sql)
+{
+    checkInterrupt();
+    assert(!stmt);
+    if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, 0) != SQLITE_OK)
+        throwSQLiteError(db, fmt("creating statement ‘%s’", sql));
+    /* Keep db and sql around for error reporting and finalization. */
+    this->db = db;
+    this->sql = sql;
+}
+
+SQLiteStmt::~SQLiteStmt()
+{
+    /* Destructors must not throw; log and swallow finalize failures. */
+    try {
+        if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
+            throwSQLiteError(db, fmt("finalizing statement ‘%s’", sql));
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+SQLiteStmt::Use::Use(SQLiteStmt & stmt)
+    : stmt(stmt)
+{
+    assert(stmt.stmt);
+    /* Note: sqlite3_reset() returns the error code for the most
+       recent call to sqlite3_step(). So ignore it. */
+    sqlite3_reset(stmt);
+}
+
+SQLiteStmt::Use::~Use()
+{
+    /* Reset so the prepared statement can be re-used by the next Use. */
+    sqlite3_reset(stmt);
+}
+
+/* Bind the next positional parameter as text, or NULL if !notNull.
+   SQLITE_TRANSIENT makes SQLite copy the string, so `value' need not
+   outlive this call. */
+SQLiteStmt::Use & SQLiteStmt::Use::operator () (const std::string & value, bool notNull)
+{
+    if (notNull) {
+        if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
+            throwSQLiteError(stmt.db, "binding argument");
+    } else
+        bind();
+    return *this;
+}
+
+/* Bind the next positional parameter as a 64-bit integer, or NULL. */
+SQLiteStmt::Use & SQLiteStmt::Use::operator () (int64_t value, bool notNull)
+{
+    if (notNull) {
+        if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK)
+            throwSQLiteError(stmt.db, "binding argument");
+    } else
+        bind();
+    return *this;
+}
+
+/* Bind NULL to the next positional parameter. */
+SQLiteStmt::Use & SQLiteStmt::Use::bind()
+{
+    if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK)
+        throwSQLiteError(stmt.db, "binding argument");
+    return *this;
+}
+
+int SQLiteStmt::Use::step()
+{
+    return sqlite3_step(stmt);
+}
+
+/* Execute a statement that must not produce any rows. */
+void SQLiteStmt::Use::exec()
+{
+    int r = step();
+    assert(r != SQLITE_ROW);
+    if (r != SQLITE_DONE)
+        throwSQLiteError(stmt.db, fmt("executing SQLite statement ‘%s’", stmt.sql));
+}
+
+/* Advance to the next result row; false once the query is exhausted. */
+bool SQLiteStmt::Use::next()
+{
+    int r = step();
+    if (r != SQLITE_DONE && r != SQLITE_ROW)
+        throwSQLiteError(stmt.db, fmt("executing SQLite query ‘%s’", stmt.sql));
+    return r == SQLITE_ROW;
+}
+
+std::string SQLiteStmt::Use::getStr(int col)
+{
+    auto s = (const char *) sqlite3_column_text(stmt, col);
+    /* A NULL column yields a null pointer; callers are expected to
+       check isNull() first. */
+    assert(s);
+    return s;
+}
+
+int64_t SQLiteStmt::Use::getInt(int col)
+{
+    // FIXME: detect nulls?
+    return sqlite3_column_int64(stmt, col);
+}
+
+bool SQLiteStmt::Use::isNull(int col)
+{
+    return sqlite3_column_type(stmt, col) == SQLITE_NULL;
+}
+
+/* Begin a transaction; it stays "active" until commit() succeeds. */
+SQLiteTxn::SQLiteTxn(sqlite3 * db)
+{
+    this->db = db;
+    if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
+        throwSQLiteError(db, "starting transaction");
+    active = true;
+}
+
+void SQLiteTxn::commit()
+{
+    if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
+        throwSQLiteError(db, "committing transaction");
+    /* Only clear `active' after a successful commit so the destructor
+       rolls back on failure paths. */
+    active = false;
+}
+
+SQLiteTxn::~SQLiteTxn()
+{
+    /* Roll back if commit() was never (successfully) called. */
+    try {
+        if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
+            throwSQLiteError(db, "aborting transaction");
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+/* Called between retries of a busy transaction: warn (rate-limited to
+   once every 10 seconds) and sleep a random short interval so
+   contending processes don't retry in lock-step. */
+void handleSQLiteBusy(const SQLiteBusy & e)
+{
+    static std::atomic<time_t> lastWarned{0};
+
+    time_t now = time(0);
+
+    if (now > lastWarned + 10) {
+        lastWarned = now;
+        printError("warning: %s", e.what());
+    }
+
+    /* Sleep for a while since retrying the transaction right away
+       is likely to fail again. */
+    checkInterrupt();
+    struct timespec t;
+    t.tv_sec = 0;
+    t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */
+    nanosleep(&t, 0);
+}
+
+}
diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh
new file mode 100644
index 000000000..14a7a0dd8
--- /dev/null
+++ b/src/libstore/sqlite.hh
@@ -0,0 +1,114 @@
+#pragma once
+
+#include <functional>
+#include <string>
+
+#include "types.hh"
+
+class sqlite3;
+class sqlite3_stmt;
+
+namespace nix {
+
+/* RAII wrapper to close a SQLite database automatically. */
+struct SQLite
+{
+    sqlite3 * db = 0;
+    SQLite() { }
+    SQLite(const Path & path);
+    SQLite(const SQLite & from) = delete;
+    SQLite& operator = (const SQLite & from) = delete;
+    /* NOTE(review): move-assignment overwrites `db' without closing a
+       previously open handle; presumably only ever used to initialise
+       an empty SQLite — confirm at call sites. */
+    SQLite& operator = (SQLite && from) { db = from.db; from.db = 0; return *this; }
+    ~SQLite();
+    operator sqlite3 * () { return db; }
+
+    /* Execute `stmt' directly (see sqlite.cc; retries on busy). */
+    void exec(const std::string & stmt);
+};
+
+/* RAII wrapper to create and destroy SQLite prepared statements. */
+struct SQLiteStmt
+{
+    sqlite3 * db = 0;
+    sqlite3_stmt * stmt = 0;
+    std::string sql;  // kept for error messages
+    SQLiteStmt() { }
+    SQLiteStmt(sqlite3 * db, const std::string & sql) { create(db, sql); }
+    void create(sqlite3 * db, const std::string & s);
+    ~SQLiteStmt();
+    operator sqlite3_stmt * () { return stmt; }
+
+    /* Helper for binding / executing statements.  A Use resets the
+       statement on construction and destruction, binds parameters
+       positionally via operator() / bind(), and steps through results
+       with exec() / next(). */
+    class Use
+    {
+        friend struct SQLiteStmt;
+    private:
+        SQLiteStmt & stmt;
+        unsigned int curArg = 1;  // SQLite parameter indices start at 1
+        Use(SQLiteStmt & stmt);
+
+    public:
+
+        ~Use();
+
+        /* Bind the next parameter. */
+        Use & operator () (const std::string & value, bool notNull = true);
+        Use & operator () (int64_t value, bool notNull = true);
+        Use & bind(); // null
+
+        int step();
+
+        /* Execute a statement that does not return rows. */
+        void exec();
+
+        /* For statements that return 0 or more rows. Returns true iff
+           a row is available. */
+        bool next();
+
+        std::string getStr(int col);
+        int64_t getInt(int col);
+        bool isNull(int col);
+    };
+
+    Use use()
+    {
+        return Use(*this);
+    }
+};
+
+/* RAII helper that ensures transactions are aborted unless explicitly
+   committed.  The constructor issues BEGIN; the destructor issues
+   ROLLBACK unless commit() has succeeded. */
+struct SQLiteTxn
+{
+    bool active = false;
+    sqlite3 * db;
+
+    SQLiteTxn(sqlite3 * db);
+
+    void commit();
+
+    ~SQLiteTxn();
+};
+
+
+MakeError(SQLiteError, Error);
+/* Subclass thrown for SQLITE_BUSY / SQLITE_PROTOCOL; callers may
+   retry the operation. */
+MakeError(SQLiteBusy, SQLiteError);
+
+[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f);
+
+void handleSQLiteBusy(const SQLiteBusy & e);
+
+/* Convenience function for retrying a SQLite transaction when the
+   database is busy.  Loops forever until `fun' completes without
+   throwing SQLiteBusy; handleSQLiteBusy() warns and sleeps between
+   attempts. */
+template<typename T>
+T retrySQLite(std::function<T()> fun)
+{
+    while (true) {
+        try {
+            return fun();
+        } catch (SQLiteBusy & e) {
+            handleSQLiteBusy(e);
+        }
+    }
+}
+
+}
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
new file mode 100644
index 000000000..bb536fadf
--- /dev/null
+++ b/src/libstore/ssh-store.cc
@@ -0,0 +1,104 @@
+#include "store-api.hh"
+#include "remote-store.hh"
+#include "remote-fs-accessor.hh"
+#include "archive.hh"
+#include "worker-protocol.hh"
+#include "pool.hh"
+#include "ssh.hh"
+
+namespace nix {
+
+static std::string uriScheme = "ssh-ng://";
+
+/* A remote store accessed by running `nix-daemon --stdio' over SSH
+   (the "ssh-ng://" scheme).  Each connection in the pool is a
+   separate SSH process speaking the daemon worker protocol. */
+class SSHStore : public RemoteStore
+{
+public:
+
+    const Setting<Path> sshKey{(Store*) this, "", "ssh-key", "path to an SSH private key"};
+    const Setting<bool> compress{(Store*) this, false, "compress", "whether to compress the connection"};
+
+    SSHStore(const std::string & host, const Params & params)
+        : Store(params)
+        , RemoteStore(params)
+        , host(host)
+        , master(
+            host,
+            sshKey,
+            // Use SSH master only if using more than 1 connection.
+            connections->capacity() > 1,
+            compress)
+    {
+    }
+
+    std::string getUri() override
+    {
+        return uriScheme + host;
+    }
+
+    void narFromPath(const Path & path, Sink & sink) override;
+
+    ref<FSAccessor> getFSAccessor() override;
+
+private:
+
+    /* A pooled daemon connection plus the SSH process that carries it. */
+    struct Connection : RemoteStore::Connection
+    {
+        std::unique_ptr<SSHMaster::Connection> sshConn;
+    };
+
+    ref<RemoteStore::Connection> openConnection() override;
+
+    std::string host;
+
+    SSHMaster master;
+};
+
+
+/* A source that, on every read, forwards the bytes just read to a
+   sink.  Used to tee the NAR stream coming from the daemon into the
+   caller-provided sink while it is being parsed. */
+class ForwardSource : public Source
+{
+    Source & readSource;
+    Sink & writeSink;
+public:
+    ForwardSource(Source & readSource, Sink & writeSink) : readSource(readSource), writeSink(writeSink) {}
+    size_t read(unsigned char * data, size_t len) override
+    {
+        auto res = readSource.read(data, len);
+        /* Forward only the bytes actually read: `res' may be smaller
+           than `len', and forwarding `len' bytes would emit garbage
+           past the end of the data just read. */
+        writeSink(data, res);
+        return res;
+    }
+};
+
+/* Stream the NAR serialisation of `path' into `sink'.  The NAR from
+   the daemon is parsed with a dummy ParseSink (to find where it ends
+   on the connection) while ForwardSource tees the raw bytes to the
+   caller's sink. */
+void SSHStore::narFromPath(const Path & path, Sink & sink)
+{
+    auto conn(connections->get());
+    conn->to << wopNarFromPath << path;
+    conn->processStderr();
+    ParseSink ps;
+    auto fwd = ForwardSource(conn->from, sink);
+    parseDump(ps, fwd);
+}
+
+ref<FSAccessor> SSHStore::getFSAccessor()
+{
+    return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
+}
+
+/* Spawn an SSH channel to the remote nix-daemon and perform the
+   worker-protocol handshake on it. */
+ref<RemoteStore::Connection> SSHStore::openConnection()
+{
+    auto conn = make_ref<Connection>();
+    conn->sshConn = master.startCommand("nix-daemon --stdio");
+    conn->to = FdSink(conn->sshConn->in.get());
+    conn->from = FdSource(conn->sshConn->out.get());
+    initConnection(*conn);
+    return conn;
+}
+
+/* Register the ssh-ng:// store scheme. */
+static RegisterStoreImplementation regStore([](
+    const std::string & uri, const Store::Params & params)
+    -> std::shared_ptr<Store>
+{
+    if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
+    return std::make_shared<SSHStore>(std::string(uri, uriScheme.size()), params);
+});
+
+}
diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc
new file mode 100644
index 000000000..e54f3f4ba
--- /dev/null
+++ b/src/libstore/ssh.cc
@@ -0,0 +1,102 @@
+#include "ssh.hh"
+
+namespace nix {
+
+/* Append options common to every ssh invocation: user-supplied
+   NIX_SSHOPTS, the identity file (if configured), and compression. */
+void SSHMaster::addCommonSSHOpts(Strings & args)
+{
+    for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS")))
+        args.push_back(i);
+    if (!keyFile.empty())
+        args.insert(args.end(), {"-i", keyFile});
+    if (compress)
+        args.push_back("-C");
+}
+
+/* Run `command' on the remote host over SSH, returning a Connection
+   holding the child PID and pipes wired to its stdin/stdout.  Goes
+   through the master socket when one is in use. */
+std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string & command)
+{
+    Path socketPath = startMaster();
+
+    Pipe in, out;
+    in.create();
+    out.create();
+
+    auto conn = std::make_unique<Connection>();
+    conn->sshPid = startProcess([&]() {
+        restoreSignals();
+
+        /* Child: keep only our ends of the pipes and splice them over
+           stdin/stdout before exec'ing ssh. */
+        close(in.writeSide.get());
+        close(out.readSide.get());
+
+        if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
+            throw SysError("duping over stdin");
+        if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+            throw SysError("duping over stdout");
+
+        /* -x: no X forwarding; -a: no agent forwarding. */
+        Strings args = { "ssh", host.c_str(), "-x", "-a" };
+        addCommonSSHOpts(args);
+        if (socketPath != "")
+            args.insert(args.end(), {"-S", socketPath});
+        args.push_back(command);
+        execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
+
+        // Only reached if execvp failed.
+        throw SysError("executing ‘%s’ on ‘%s’", command, host);
+    });
+
+
+    /* Parent: close the child's ends. */
+    in.readSide = -1;
+    out.writeSide = -1;
+
+    conn->out = std::move(out.readSide);
+    conn->in = std::move(in.writeSide);
+
+    return conn;
+}
+
+/* Start (once) an SSH control-master process for this host and return
+   the path to its control socket; returns "" when the master feature
+   is disabled.  Readiness is detected by having the master echo
+   "started" via LocalCommand. */
+Path SSHMaster::startMaster()
+{
+    if (!useMaster) return "";
+
+    auto state(state_.lock());
+
+    /* Already running: reuse the existing control socket. */
+    if (state->sshMaster != -1) return state->socketPath;
+
+    state->tmpDir = std::make_unique<AutoDelete>(createTempDir("", "nix", true, true, 0700));
+
+    state->socketPath = (Path) *state->tmpDir + "/ssh.sock";
+
+    Pipe out;
+    out.create();
+
+    state->sshMaster = startProcess([&]() {
+        restoreSignals();
+
+        close(out.readSide.get());
+
+        if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+            throw SysError("duping over stdout");
+
+        /* -M: master mode; -N: no remote command; -S: control socket. */
+        Strings args =
+            { "ssh", host.c_str(), "-M", "-N", "-S", state->socketPath
+            , "-o", "LocalCommand=echo started"
+            , "-o", "PermitLocalCommand=yes"
+            };
+        addCommonSSHOpts(args);
+        execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
+
+        throw SysError("starting SSH master");
+    });
+
+    out.writeSide = -1;
+
+    /* Wait for the "started" line (EOF means the master died early). */
+    std::string reply;
+    try {
+        reply = readLine(out.readSide.get());
+    } catch (EndOfFile & e) { }
+
+    if (reply != "started")
+        throw Error("failed to start SSH master connection to ‘%s’", host);
+
+    return state->socketPath;
+}
+
+}
diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh
new file mode 100644
index 000000000..b4396467e
--- /dev/null
+++ b/src/libstore/ssh.hh
@@ -0,0 +1,49 @@
+#pragma once
+
+#include "util.hh"
+#include "sync.hh"
+
+namespace nix {
+
+/* Manages SSH connections to a single host, optionally multiplexed
+   through one SSH control-master process. */
+class SSHMaster
+{
+private:
+
+    const std::string host;
+    const std::string keyFile;
+    const bool useMaster;
+    const bool compress;
+
+    /* Master-process state, guarded by Sync so concurrent
+       startCommand() calls race safely on master startup. */
+    struct State
+    {
+        Pid sshMaster;
+        std::unique_ptr<AutoDelete> tmpDir;  // holds the control socket
+        Path socketPath;
+    };
+
+    Sync<State> state_;
+
+    void addCommonSSHOpts(Strings & args);
+
+public:
+
+    SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress)
+        : host(host)
+        , keyFile(keyFile)
+        , useMaster(useMaster)
+        , compress(compress)
+    {
+    }
+
+    /* One remote command: the ssh child process plus pipes to its
+       stdin (`in') and stdout (`out'). */
+    struct Connection
+    {
+        Pid sshPid;
+        AutoCloseFD out, in;
+    };
+
+    std::unique_ptr<Connection> startCommand(const std::string & command);
+
+    /* Start the control master if enabled; returns the socket path or
+       "" when masters are not used. */
+    Path startMaster();
+};
+
+}
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
new file mode 100644
index 000000000..835bbb90e
--- /dev/null
+++ b/src/libstore/store-api.cc
@@ -0,0 +1,836 @@
+#include "crypto.hh"
+#include "globals.hh"
+#include "store-api.hh"
+#include "util.hh"
+#include "nar-info-disk-cache.hh"
+#include "thread-pool.hh"
+#include "json.hh"
+#include "derivations.hh"
+
+#include <future>
+
+
+namespace nix {
+
+
+/* Whether `path' lies syntactically inside the store directory. */
+bool Store::isInStore(const Path & path) const
+{
+    return isInDir(path, storeDir);
+}
+
+
+/* Whether `path' is a top-level store path: a direct child of the
+   store directory whose basename is at least hash-sized. */
+bool Store::isStorePath(const Path & path) const
+{
+    return isInStore(path)
+        && path.size() >= storeDir.size() + 1 + storePathHashLen
+        && path.find('/', storeDir.size() + 1) == Path::npos;
+}
+
+
+void Store::assertStorePath(const Path & path) const
+{
+    if (!isStorePath(path))
+        throw Error(format("path ‘%1%’ is not in the Nix store") % path);
+}
+
+
+/* Strip a path inside the store down to the store path containing it
+   (e.g. <store>/<h>-foo/bin/sh -> <store>/<h>-foo). */
+Path Store::toStorePath(const Path & path) const
+{
+    if (!isInStore(path))
+        throw Error(format("path ‘%1%’ is not in the Nix store") % path);
+    Path::size_type slash = path.find('/', storeDir.size() + 1);
+    if (slash == Path::npos)
+        return path;
+    else
+        return Path(path, 0, slash);
+}
+
+
+/* Resolve symlinks until a path inside the store is reached; throws
+   if the chain never enters the store. */
+Path Store::followLinksToStore(const Path & _path) const
+{
+    Path path = absPath(_path);
+    while (!isInStore(path)) {
+        if (!isLink(path)) break;
+        string target = readLink(path);
+        path = absPath(target, dirOf(path));
+    }
+    if (!isInStore(path))
+        throw Error(format("path ‘%1%’ is not in the Nix store") % path);
+    return path;
+}
+
+
+Path Store::followLinksToStorePath(const Path & path) const
+{
+    return toStorePath(followLinksToStore(path));
+}
+
+
+/* Extract the name part from a store path basename "<hash>-<name>";
+   empty string if there is no name part. */
+string storePathToName(const Path & path)
+{
+    auto base = baseNameOf(path);
+    assert(base.size() == storePathHashLen || (base.size() > storePathHashLen && base[storePathHashLen] == '-'));
+    return base.size() == storePathHashLen ? "" : string(base, storePathHashLen + 1);
+}
+
+
+/* Extract the hash part (first storePathHashLen characters) from a
+   store path basename. */
+string storePathToHash(const Path & path)
+{
+    auto base = baseNameOf(path);
+    assert(base.size() >= storePathHashLen);
+    return string(base, 0, storePathHashLen);
+}
+
+
+/* Check that a store path name contains only [A-Za-z0-9+-._?=] and
+   does not start with a dot. */
+void checkStoreName(const string & name)
+{
+    string validChars = "+-._?=";
+    /* Disallow names starting with a dot for possible security
+       reasons (e.g., "." and ".."). */
+    if (string(name, 0, 1) == ".")
+        throw Error(format("illegal name: ‘%1%’") % name);
+    for (auto & i : name)
+        if (!((i >= 'A' && i <= 'Z') ||
+              (i >= 'a' && i <= 'z') ||
+              (i >= '0' && i <= '9') ||
+              validChars.find(i) != string::npos))
+        {
+            throw Error(format("invalid character ‘%1%’ in name ‘%2%’")
+                % i % name);
+        }
+}
+
+
+/* Store paths have the following form:
+
+ <store>/<h>-<name>
+
+ where
+
+ <store> = the location of the Nix store, usually /nix/store
+
+ <name> = a human readable name for the path, typically obtained
+ from the name attribute of the derivation, or the name of the
+ source file from which the store path is created. For derivation
+ outputs other than the default "out" output, the string "-<id>"
+ is suffixed to <name>.
+
+ <h> = base-32 representation of the first 160 bits of a SHA-256
+ hash of <s>; the hash part of the store name
+
+ <s> = the string "<type>:sha256:<h2>:<store>:<name>";
+ note that it includes the location of the store as well as the
+ name to make sure that changes to either of those are reflected
+ in the hash (e.g. you won't get /nix/store/<h>-name1 and
+ /nix/store/<h>-name2 with equal hash parts).
+
+ <type> = one of:
+ "text:<r1>:<r2>:...<rN>"
+ for plain text files written to the store using
+ addTextToStore(); <r1> ... <rN> are the references of the
+ path.
+ "source"
+ for paths copied to the store using addToStore() when recursive
+ = true and hashAlgo = "sha256"
+ "output:<id>"
+ for either the outputs created by derivations, OR paths copied
+ to the store using addToStore() with recursive != true or
+ hashAlgo != "sha256" (in that case "source" is used; it's
+ silly, but it's done that way for compatibility). <id> is the
+ name of the output (usually, "out").
+
+ <h2> = base-16 representation of a SHA-256 hash of:
+ if <type> = "text:...":
+ the string written to the resulting store path
+ if <type> = "source":
+ the serialisation of the path from which this store path is
+ copied, as returned by hashPath()
+ if <type> = "output:<id>":
+ for non-fixed derivation outputs:
+ the derivation (see hashDerivationModulo() in
+ primops.cc)
+ for paths copied by addToStore() or produced by fixed-output
+ derivations:
+ the string "fixed:out:<rec><algo>:<hash>:", where
+ <rec> = "r:" for recursive (path) hashes, or "" for flat
+ (file) hashes
+ <algo> = "md5", "sha1" or "sha256"
+ <hash> = base-16 representation of the path or flat hash of
+ the contents of the path (or expected contents of the
+ path for fixed-output derivations)
+
+ It would have been nicer to handle fixed-output derivations under
+ "source", e.g. have something like "source:<rec><algo>", but we're
+ stuck with this for now...
+
+ The main reason for this way of computing names is to prevent name
+ collisions (for security). For instance, it shouldn't be feasible
+ to come up with a derivation whose output path collides with the
+ path for a copied source. The former would have a <s> starting with
+   "output:out:", while the latter would have a <s> starting with
+ "source:".
+*/
+
+
+/* Construct a store path of the form <store>/<h>-<name>, where <h>
+   hashes `type', the given content hash, the store location and the
+   name (see the long comment above for the full scheme). */
+Path Store::makeStorePath(const string & type,
+    const Hash & hash, const string & name) const
+{
+    /* e.g., "source:sha256:1abc...:/nix/store:foo.tar.gz" */
+    string s = type + ":sha256:" + printHash(hash) + ":"
+        + storeDir + ":" + name;
+
+    checkStoreName(name);
+
+    /* The hash part is the first 160 bits of SHA-256(s), base-32. */
+    return storeDir + "/"
+        + printHash32(compressHash(hashString(htSHA256, s), 20))
+        + "-" + name;
+}
+
+
+/* Path for derivation output `id'; non-default outputs get "-<id>"
+   appended to the name. */
+Path Store::makeOutputPath(const string & id,
+    const Hash & hash, const string & name) const
+{
+    return makeStorePath("output:" + id, hash,
+        name + (id == "out" ? "" : "-" + id));
+}
+
+
+/* Path for fixed-output content.  Recursive SHA-256 is the "source"
+   case; anything else goes through the "output:out" + "fixed:out:..."
+   encoding (kept for compatibility). */
+Path Store::makeFixedOutputPath(bool recursive,
+    const Hash & hash, const string & name) const
+{
+    return hash.type == htSHA256 && recursive
+        ? makeStorePath("source", hash, name)
+        : makeStorePath("output:out", hashString(htSHA256,
+            "fixed:out:" + (recursive ? (string) "r:" : "") +
+            printHashType(hash.type) + ":" + printHash(hash) + ":"),
+            name);
+}
+
+
+/* Path for a text file added via addTextToStore(). */
+Path Store::makeTextPath(const string & name, const Hash & hash,
+    const PathSet & references) const
+{
+    assert(hash.type == htSHA256);
+    /* Stuff the references (if any) into the type.  This is a bit
+       hacky, but we can't put them in `s' since that would be
+       ambiguous. */
+    string type = "text";
+    for (auto & i : references) {
+        type += ":";
+        type += i;
+    }
+    return makeStorePath(type, hash, name);
+}
+
+
+/* Predict the store path a filesystem path would get from
+   addToStore(), without copying anything; also returns the hash. */
+std::pair<Path, Hash> Store::computeStorePathForPath(const Path & srcPath,
+    bool recursive, HashType hashAlgo, PathFilter & filter) const
+{
+    Hash h = recursive ? hashPath(hashAlgo, srcPath, filter).first : hashFile(hashAlgo, srcPath);
+    string name = baseNameOf(srcPath);
+    Path dstPath = makeFixedOutputPath(recursive, h, name);
+    return std::pair<Path, Hash>(dstPath, h);
+}
+
+
+/* Predict the store path addTextToStore() would produce for `s'. */
+Path Store::computeStorePathForText(const string & name, const string & s,
+    const PathSet & references) const
+{
+    return makeTextPath(name, hashString(htSHA256, s), references);
+}
+
+
+Store::Store(const Params & params)
+    : Config(params)
+    , state({(size_t) pathInfoCacheSize})  // bounded in-memory path-info cache
+{
+}
+
+
+std::string Store::getUri()
+{
+    return "";
+}
+
+
+/* Validity check with two cache layers: the in-memory path-info cache
+   and (if configured) the on-disk narinfo cache, before finally
+   asking the backend. */
+bool Store::isValidPath(const Path & storePath)
+{
+    auto hashPart = storePathToHash(storePath);
+
+    {
+        auto state_(state.lock());
+        auto res = state_->pathInfoCache.get(hashPart);
+        if (res) {
+            stats.narInfoReadAverted++;
+            /* A null cached entry means "known invalid". */
+            return *res != 0;
+        }
+    }
+
+    if (diskCache) {
+        auto res = diskCache->lookupNarInfo(getUri(), hashPart);
+        if (res.first != NarInfoDiskCache::oUnknown) {
+            stats.narInfoReadAverted++;
+            auto state_(state.lock());
+            state_->pathInfoCache.upsert(hashPart,
+                res.first == NarInfoDiskCache::oInvalid ? 0 : res.second);
+            return res.first == NarInfoDiskCache::oValid;
+        }
+    }
+
+    bool valid = isValidPathUncached(storePath);
+
+    /* Only negative results are written back to the disk cache here. */
+    if (diskCache && !valid)
+        // FIXME: handle valid = true case.
+        diskCache->upsertNarInfo(getUri(), hashPart, 0);
+
+    return valid;
+}
+
+
+/* Default implementation for stores that only implement
+   queryPathInfoUncached(). */
+bool Store::isValidPathUncached(const Path & path)
+{
+    try {
+        queryPathInfo(path);
+        return true;
+    } catch (InvalidPath &) {
+        return false;
+    }
+}
+
+
+/* Synchronous wrapper around the callback-based queryPathInfo():
+   blocks on a promise filled in by the success/failure callbacks. */
+ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
+{
+    std::promise<ref<ValidPathInfo>> promise;
+
+    queryPathInfo(storePath,
+        [&](ref<ValidPathInfo> info) {
+            promise.set_value(info);
+        },
+        [&](std::exception_ptr exc) {
+            promise.set_exception(exc);
+        });
+
+    return promise.get_future().get();
+}
+
+
+/* Asynchronous path-info lookup.  Consults the in-memory cache, then
+   the on-disk narinfo cache, then the backend; results (including
+   negative ones, stored as null) are written back to the caches.
+   Exactly one of `success' / `failure' is invoked. */
+void Store::queryPathInfo(const Path & storePath,
+    std::function<void(ref<ValidPathInfo>)> success,
+    std::function<void(std::exception_ptr exc)> failure)
+{
+    auto hashPart = storePathToHash(storePath);
+
+    try {
+
+        {
+            auto res = state.lock()->pathInfoCache.get(hashPart);
+            if (res) {
+                stats.narInfoReadAverted++;
+                if (!*res)
+                    throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
+                return success(ref<ValidPathInfo>(*res));
+            }
+        }
+
+        if (diskCache) {
+            auto res = diskCache->lookupNarInfo(getUri(), hashPart);
+            if (res.first != NarInfoDiskCache::oUnknown) {
+                stats.narInfoReadAverted++;
+                {
+                    auto state_(state.lock());
+                    state_->pathInfoCache.upsert(hashPart,
+                        res.first == NarInfoDiskCache::oInvalid ? 0 : res.second);
+                    /* Reject a cached entry whose full path doesn't
+                       match the requested one (same hash, different
+                       name). */
+                    if (res.first == NarInfoDiskCache::oInvalid ||
+                        (res.second->path != storePath && storePathToName(storePath) != ""))
+                        throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
+                }
+                return success(ref<ValidPathInfo>(res.second));
+            }
+        }
+
+    } catch (std::exception & e) {
+        /* Report cache-stage errors through the failure callback
+           rather than letting them propagate. */
+        return callFailure(failure);
+    }
+
+    queryPathInfoUncached(storePath,
+        [this, storePath, hashPart, success, failure](std::shared_ptr<ValidPathInfo> info) {
+
+            if (diskCache)
+                diskCache->upsertNarInfo(getUri(), hashPart, info);
+
+            {
+                auto state_(state.lock());
+                state_->pathInfoCache.upsert(hashPart, info);
+            }
+
+            if (!info
+                || (info->path != storePath && storePathToName(storePath) != ""))
+            {
+                stats.narInfoMissing++;
+                return failure(std::make_exception_ptr(InvalidPath(format("path ‘%s’ is not valid") % storePath)));
+            }
+
+            callSuccess(success, failure, ref<ValidPathInfo>(info));
+
+        }, failure);
+}
+
+
+/* Filter `paths' down to the subset that is valid, issuing all the
+   per-path queries concurrently and waiting for the last callback.
+   InvalidPath failures just mean "not valid"; any other exception is
+   remembered and rethrown once all queries have finished. */
+PathSet Store::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
+{
+    struct State
+    {
+        size_t left;           // queries still outstanding
+        PathSet valid;
+        std::exception_ptr exc;
+    };
+
+    Sync<State> state_(State{paths.size(), PathSet()});
+
+    std::condition_variable wakeup;
+
+    for (auto & path : paths)
+        queryPathInfo(path,
+            [path, &state_, &wakeup](ref<ValidPathInfo> info) {
+                auto state(state_.lock());
+                state->valid.insert(path);
+                assert(state->left);
+                if (!--state->left)
+                    wakeup.notify_one();
+            },
+            [path, &state_, &wakeup](std::exception_ptr exc) {
+                auto state(state_.lock());
+                try {
+                    std::rethrow_exception(exc);
+                } catch (InvalidPath &) {
+                } catch (...) {
+                    state->exc = exc;
+                }
+                assert(state->left);
+                if (!--state->left)
+                    wakeup.notify_one();
+            });
+
+    while (true) {
+        auto state(state_.lock());
+        if (!state->left) {
+            if (state->exc) std::rethrow_exception(state->exc);
+            return state->valid;
+        }
+        state.wait(wakeup);
+    }
+}
+
+
+/* Return a string accepted by decodeValidPathInfo() that
+   registers the specified paths as valid. Note: it's the
+   responsibility of the caller to provide a closure.
+
+   Per path the format is: path, [hash, narSize,] deriver,
+   reference count, then one reference per line. */
+string Store::makeValidityRegistration(const PathSet & paths,
+    bool showDerivers, bool showHash)
+{
+    string s = "";
+
+    for (auto & i : paths) {
+        s += i + "\n";
+
+        auto info = queryPathInfo(i);
+
+        if (showHash) {
+            s += printHash(info->narHash) + "\n";
+            s += (format("%1%\n") % info->narSize).str();
+        }
+
+        /* Deriver line is emitted (possibly empty) either way. */
+        Path deriver = showDerivers ? info->deriver : "";
+        s += deriver + "\n";
+
+        s += (format("%1%\n") % info->references.size()).str();
+
+        for (auto & j : info->references)
+            s += j + "\n";
+    }
+
+    return s;
+}
+
+
+/* Emit a JSON list describing `storePaths'.  Impure fields (deriver,
+   registration time, ultimate flag, signatures) are only included
+   when `includeImpureInfo' is set; `showClosureSize' adds the
+   (expensive to compute) closure size. */
+void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const PathSet & storePaths,
+    bool includeImpureInfo, bool showClosureSize)
+{
+    auto jsonList = jsonOut.list();
+
+    for (auto storePath : storePaths) {
+        auto info = queryPathInfo(storePath);
+        /* Use the canonical path from the info record. */
+        storePath = info->path;
+
+        auto jsonPath = jsonList.object();
+        jsonPath
+            .attr("path", storePath)
+            .attr("narHash", info->narHash.to_string())
+            .attr("narSize", info->narSize);
+
+        {
+            auto jsonRefs = jsonPath.list("references");
+            for (auto & ref : info->references)
+                jsonRefs.elem(ref);
+        }
+
+        if (info->ca != "")
+            jsonPath.attr("ca", info->ca);
+
+        if (showClosureSize)
+            jsonPath.attr("closureSize", getClosureSize(storePath));
+
+        if (!includeImpureInfo) continue;
+
+        if (info->deriver != "")
+            jsonPath.attr("deriver", info->deriver);
+
+        if (info->registrationTime)
+            jsonPath.attr("registrationTime", info->registrationTime);
+
+        if (info->ultimate)
+            jsonPath.attr("ultimate", info->ultimate);
+
+        if (!info->sigs.empty()) {
+            auto jsonSigs = jsonPath.list("signatures");
+            for (auto & sig : info->sigs)
+                jsonSigs.elem(sig);
+        }
+    }
+}
+
+
+/* Sum of the NAR sizes of every path in the closure of `storePath'. */
+unsigned long long Store::getClosureSize(const Path & storePath)
+{
+    unsigned long long totalSize = 0;
+    PathSet closure;
+    computeFSClosure(storePath, closure, false, false);
+    for (auto & p : closure)
+        totalSize += queryPathInfo(p)->narSize;
+    return totalSize;
+}
+
+
+const Store::Stats & Store::getStats()
+{
+    {
+        /* Refresh the cache-size gauge before handing out the stats. */
+        auto state_(state.lock());
+        stats.pathInfoCacheSize = state_->pathInfoCache.size();
+    }
+    return stats;
+}
+
+
+void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
+    const Path & storePath, bool repair, bool dontCheckSigs)
+{
+    auto info = srcStore->queryPathInfo(storePath);
+
+    /* Serialise the path to an in-memory NAR. */
+    StringSink sink;
+    srcStore->narFromPath({storePath}, sink);
+
+    /* Paths coming from a trusted store don't need their signatures
+       checked. */
+    if (srcStore->isTrusted())
+        dontCheckSigs = true;
+
+    /* If the source didn't provide a NAR hash (and we're not checking
+       signatures anyway), fill it in from the NAR we just read. */
+    if (!info->narHash && dontCheckSigs) {
+        auto completed = make_ref<ValidPathInfo>(*info);
+        completed->narHash = hashString(htSHA256, *sink.s);
+        info = completed;
+    }
+
+    dstStore->addToStore(*info, sink.s, repair, dontCheckSigs);
+}
+
+
+void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
+    const PathSet & storePaths, bool repair, bool dontCheckSigs)
+{
+    /* Compute the full closure of the requested paths in the source
+       store. */
+    PathSet closure;
+    for (auto & path : storePaths)
+        srcStore->computeFSClosure(path, closure);
+
+    // FIXME: use copyStorePaths()
+
+    /* Anything already valid in the destination can be skipped. */
+    auto valid = dstStore->queryValidPaths(closure);
+
+    if (valid.size() == closure.size()) return;
+
+    /* Copy in topological order, dependencies before dependants. */
+    auto sorted = srcStore->topoSortPaths(closure);
+
+    Paths missing;
+    for (auto i = sorted.rbegin(); i != sorted.rend(); ++i)
+        if (!valid.count(*i)) missing.push_back(*i);
+
+    printMsg(lvlDebug, format("copying %1% missing paths") % missing.size());
+
+    for (auto & path : missing)
+        copyStorePath(srcStore, dstStore, path, repair, dontCheckSigs);
+}
+
+
+/* Parse one record of the line-oriented registration format produced
+   by makeValidityRegistration(): path, optionally NAR hash and size,
+   deriver (possibly an empty line), reference count, then one
+   reference per line. Immediate EOF yields an info with an empty
+   ‘path’, which callers use as the end-of-input marker. */
+ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven)
+{
+    ValidPathInfo info;
+    getline(str, info.path);
+    /* End of input: signal it via an empty path rather than an error. */
+    if (str.eof()) { info.path = ""; return info; }
+    if (hashGiven) {
+        string s;
+        getline(str, s);
+        info.narHash = parseHash(htSHA256, s);
+        getline(str, s);
+        if (!string2Int(s, info.narSize)) throw Error("number expected");
+    }
+    /* The deriver line may be empty (deriver unknown). */
+    getline(str, info.deriver);
+    string s; int n;
+    getline(str, s);
+    if (!string2Int(s, n)) throw Error("number expected");
+    while (n--) {
+        getline(str, s);
+        info.references.insert(s);
+    }
+    /* A truncated record (stream failed or ended early) is an error. */
+    if (!str || str.eof()) throw Error("missing input");
+    return info;
+}
+
+
+string showPaths(const PathSet & paths)
+{
+    /* Render the paths between typographic quotes, separated by
+       commas, for use in human-readable messages. */
+    string result;
+    bool first = true;
+    for (auto & path : paths) {
+        if (!first) result += ", ";
+        first = false;
+        result += "‘" + path + "’";
+    }
+    return result;
+}
+
+
+std::string ValidPathInfo::fingerprint() const
+{
+    /* The fingerprint ("1;<path>;<hash>;<size>;<refs>") can only be
+       computed once both the NAR size and hash are known. */
+    if (narSize == 0 || !narHash)
+        throw Error(format("cannot calculate fingerprint of path ‘%s’ because its size/hash is not known")
+            % path);
+    std::string result = "1;" + path + ";";
+    result += printHashType(narHash.type) + ":" + printHash32(narHash) + ";";
+    result += std::to_string(narSize) + ";";
+    result += concatStringsSep(",", references);
+    return result;
+}
+
+
+/* Sign this path's fingerprint with ‘secretKey’ and add the detached
+   signature to ‘sigs’. Throws (via fingerprint()) if the NAR
+   size/hash are not yet known. */
+void ValidPathInfo::sign(const SecretKey & secretKey)
+{
+    sigs.insert(secretKey.signDetached(fingerprint()));
+}
+
+
+bool ValidPathInfo::isContentAddressed(const Store & store) const
+{
+    /* A ‘ca’ assertion only counts if recomputing the store path from
+       it actually yields this path; otherwise warn and reject. */
+    auto warn = [&]() {
+        printError(format("warning: path ‘%s’ claims to be content-addressed but isn't") % path);
+    };
+
+    if (hasPrefix(ca, "text:")) {
+        /* ‘text:<ht>:<h>’ — produced by makeTextPath()/addTextToStore(). */
+        auto hash = parseHash(std::string(ca, 5));
+        if (store.makeTextPath(storePathToName(path), hash, references) == path)
+            return true;
+        warn();
+        return false;
+    }
+
+    if (hasPrefix(ca, "fixed:")) {
+        /* ‘fixed:<r?>:<ht>:<h>’ — produced by makeFixedOutputPath(). */
+        bool recursive = ca.compare(6, 2, "r:") == 0;
+        auto hash = parseHash(std::string(ca, recursive ? 8 : 6));
+        if (store.makeFixedOutputPath(recursive, hash, storePathToName(path)) == path)
+            return true;
+        warn();
+        return false;
+    }
+
+    /* Empty or unrecognised ‘ca’: not content-addressed. */
+    return false;
+}
+
+
+size_t ValidPathInfo::checkSignatures(const Store & store, const PublicKeys & publicKeys) const
+{
+    /* Content-addressed paths don't need signatures, so treat them as
+       having the maximum number of valid ones. */
+    if (isContentAddressed(store)) return maxSigs;
+
+    size_t validCount = 0;
+    for (auto & sig : sigs)
+        validCount += checkSignature(publicKeys, sig) ? 1 : 0;
+    return validCount;
+}
+
+
+/* Return true iff ‘sig’ is a correct detached signature of this
+   path's fingerprint by any of the given public keys. */
+bool ValidPathInfo::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const
+{
+    return verifyDetached(fingerprint(), sig, publicKeys);
+}
+
+
+Strings ValidPathInfo::shortRefs() const
+{
+    /* Return the references with the store-directory prefix stripped
+       (i.e. just their base names). */
+    Strings result;
+    for (auto & p : references)
+        result.push_back(baseNameOf(p));
+    return result;
+}
+
+
+std::string makeFixedOutputCA(bool recursive, const Hash & hash)
+{
+    /* ‘fixed:r:<hash>’ for recursive (NAR) hashing, ‘fixed:<hash>’
+       for flat-file hashing. */
+    std::string result = "fixed:";
+    if (recursive) result += "r:";
+    result += hash.to_string();
+    return result;
+}
+
+
+}
+
+
+#include "local-store.hh"
+#include "remote-store.hh"
+
+
+namespace nix {
+
+
+RegisterStoreImplementation::Implementations * RegisterStoreImplementation::implementations = 0;
+
+
+ref<Store> openStore(const std::string & uri_)
+{
+    /* Split a trailing query string (‘?key=value&…’) off the URI and
+       turn it into store parameters. */
+    Store::Params params;
+    std::string uri = uri_;
+    auto q = uri.find('?');
+    if (q != std::string::npos) {
+        for (auto & kv : tokenizeString<Strings>(uri.substr(q + 1), "&")) {
+            auto eq = kv.find('=');
+            if (eq != std::string::npos)
+                params[kv.substr(0, eq)] = kv.substr(eq + 1);
+        }
+        uri.erase(q);
+    }
+    return openStore(uri, params);
+}
+
+ref<Store> openStore(const std::string & uri, const Store::Params & params)
+{
+    /* Try every registered store implementation until one recognises
+       the URI. */
+    for (auto & open : *RegisterStoreImplementation::implementations) {
+        auto store = open(uri, params);
+        if (!store) continue;
+        store->warnUnknownSettings();
+        return ref<Store>(store);
+    }
+
+    throw Error(format("don't know how to open Nix store ‘%s’") % uri);
+}
+
+
+StoreType getStoreType(const std::string & uri, const std::string & stateDir)
+{
+    if (uri == "daemon") return tDaemon;
+    if (uri == "local") return tLocal;
+
+    if (uri == "" || uri == "auto") {
+        /* Auto-detection: prefer direct access if we can write to the
+           state directory, otherwise use the daemon if its socket
+           exists, otherwise fall back to direct access anyway. */
+        if (access(stateDir.c_str(), R_OK | W_OK) == 0)
+            return tLocal;
+        if (pathExists(settings.nixDaemonSocketFile))
+            return tDaemon;
+        return tLocal;
+    }
+
+    return tOther;
+}
+
+
+/* Register the two built-in store implementations: ‘daemon’ (Unix
+   domain socket to nix-daemon) and ‘local’ (direct database access).
+   Returning null for other URIs lets the remaining registered
+   implementations have a go. */
+static RegisterStoreImplementation regStore([](
+    const std::string & uri, const Store::Params & params)
+    -> std::shared_ptr<Store>
+{
+    /* The "state" parameter overrides the state directory used for
+       local/daemon auto-detection. */
+    switch (getStoreType(uri, get(params, "state", settings.nixStateDir))) {
+        case tDaemon:
+            return std::shared_ptr<Store>(std::make_shared<UDSRemoteStore>(params));
+        case tLocal:
+            return std::shared_ptr<Store>(std::make_shared<LocalStore>(params));
+        default:
+            return nullptr;
+    }
+});
+
+
+/* Open the substituter stores named by the ‘substituters’ and
+   ‘extra-substituters’ settings. The result is computed once and
+   memoised for the lifetime of the process. */
+std::list<ref<Store>> getDefaultSubstituters()
+{
+    struct State {
+        bool done = false;
+        std::list<ref<Store>> stores;
+    };
+    /* Memoised result, protected by a lock so concurrent first calls
+       don't race. */
+    static Sync<State> state_;
+
+    auto state(state_.lock());
+
+    if (state->done) return state->stores;
+
+    /* De-duplicate URIs that appear in both settings. */
+    StringSet done;
+
+    auto addStore = [&](const std::string & uri) {
+        if (done.count(uri)) return;
+        done.insert(uri);
+        state->stores.push_back(openStore(uri));
+    };
+
+    for (auto uri : settings.substituters.get())
+        addStore(uri);
+
+    for (auto uri : settings.extraSubstituters.get())
+        addStore(uri);
+
+    state->done = true;
+
+    return state->stores;
+}
+
+
+/* Copy ‘storePaths’ (not their closures) from ‘from’ to ‘to’ in
+   parallel, optionally trying substitution into ‘to’ first. */
+void copyPaths(ref<Store> from, ref<Store> to, const PathSet & storePaths, bool substitute)
+{
+    /* Whatever is already (or, via substitution, becomes) valid in
+       the destination doesn't need copying. */
+    PathSet valid = to->queryValidPaths(storePaths, substitute);
+
+    PathSet missing;
+    for (auto & path : storePaths)
+        if (!valid.count(path)) missing.insert(path);
+
+    std::string copiedLabel = "copied";
+
+    logger->setExpected(copiedLabel, missing.size());
+
+    /* Process the missing paths as a dependency graph: a path is
+       copied only after its references have been handled. */
+    ThreadPool pool;
+
+    processGraph<Path>(pool,
+        PathSet(missing.begin(), missing.end()),
+
+        [&](const Path & storePath) {
+            /* Edge function: a path's dependencies are its references
+               in the source store (none if it's already valid). */
+            if (to->isValidPath(storePath)) return PathSet();
+            return from->queryPathInfo(storePath)->references;
+        },
+
+        [&](const Path & storePath) {
+            checkInterrupt();
+
+            if (!to->isValidPath(storePath)) {
+                Activity act(*logger, lvlInfo, format("copying ‘%s’...") % storePath);
+
+                copyStorePath(from, to, storePath);
+
+                logger->incProgress(copiedLabel);
+            } else
+                /* The path became valid in the meantime (e.g. copied
+                   by another worker); adjust the progress total. */
+                logger->incExpected(copiedLabel, -1);
+        });
+
+    pool.process();
+}
+
+
+}
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
new file mode 100644
index 000000000..067309c9e
--- /dev/null
+++ b/src/libstore/store-api.hh
@@ -0,0 +1,729 @@
+#pragma once
+
+#include "hash.hh"
+#include "serialise.hh"
+#include "crypto.hh"
+#include "lru-cache.hh"
+#include "sync.hh"
+#include "globals.hh"
+#include "config.hh"
+
+#include <atomic>
+#include <limits>
+#include <map>
+#include <memory>
+#include <string>
+
+
+namespace nix {
+
+
+struct BasicDerivation;
+struct Derivation;
+class FSAccessor;
+class NarInfoDiskCache;
+class Store;
+class JSONPlaceholder;
+
+
+/* Size of the hash part of store paths, in base-32 characters. */
+const size_t storePathHashLen = 32; // i.e. 160 bits
+
+/* Magic header of exportPath() output (obsolete). */
+const uint32_t exportMagic = 0x4558494e;
+
+
+typedef std::map<Path, Path> Roots;
+
+
+/* Options controlling Store::collectGarbage(). */
+struct GCOptions
+{
+    /* Garbage collector operation:
+
+       - `gcReturnLive': return the set of paths reachable from
+         (i.e. in the closure of) the roots.
+
+       - `gcReturnDead': return the set of paths not reachable from
+         the roots.
+
+       - `gcDeleteDead': actually delete the latter set.
+
+       - `gcDeleteSpecific': delete the paths listed in
+         `pathsToDelete', insofar as they are not reachable.
+    */
+    typedef enum {
+        gcReturnLive,
+        gcReturnDead,
+        gcDeleteDead,
+        gcDeleteSpecific,
+    } GCAction;
+
+    GCAction action{gcDeleteDead};
+
+    /* If `ignoreLiveness' is set, then reachability from the roots is
+       ignored (dangerous!). However, the paths must still be
+       unreferenced *within* the store (i.e., there can be no other
+       store paths that depend on them). */
+    bool ignoreLiveness{false};
+
+    /* For `gcDeleteSpecific', the paths to delete. */
+    PathSet pathsToDelete;
+
+    /* Stop after at least `maxFreed' bytes have been freed. The
+       default (max unsigned long long) means "no limit". */
+    unsigned long long maxFreed{std::numeric_limits<unsigned long long>::max()};
+};
+
+
+/* Result of Store::collectGarbage(). */
+struct GCResults
+{
+    /* Depending on the action, the GC roots, or the paths that would
+       be or have been deleted. */
+    PathSet paths;
+
+    /* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the
+       number of bytes that would be or was freed. */
+    unsigned long long bytesFreed = 0;
+};
+
+
+/* Metadata about a path as provided by a substituter (e.g. a binary
+   cache), used to decide whether/what to download. */
+struct SubstitutablePathInfo
+{
+    Path deriver;       /* deriver store path, or empty if unknown */
+    PathSet references;
+    /* Value-initialise the sizes so that an unfilled info reads as
+       "unknown" (0) rather than as an indeterminate value, matching
+       the documented convention below. */
+    unsigned long long downloadSize = 0; /* 0 = unknown or inapplicable */
+    unsigned long long narSize = 0; /* 0 = unknown */
+};
+
+typedef std::map<Path, SubstitutablePathInfo> SubstitutablePathInfos;
+
+
+/* Metadata about a valid (registered) store path: its NAR hash/size,
+   references, provenance and signatures. */
+struct ValidPathInfo
+{
+    Path path;
+    Path deriver;           /* derivation that produced the path, or empty */
+    Hash narHash;
+    PathSet references;
+    time_t registrationTime = 0;
+    uint64_t narSize = 0; // 0 = unknown
+    /* Initialised to 0 so that infos constructed outside the database
+       never carry an indeterminate value. */
+    uint64_t id = 0; // internal use only
+
+    /* Whether the path is ultimately trusted, that is, it was built
+       locally or is content-addressable (e.g. added via addToStore()
+       or the result of a fixed-output derivation). */
+    bool ultimate = false;
+
+    StringSet sigs; // note: not necessarily verified
+
+    /* If non-empty, an assertion that the path is content-addressed,
+       i.e., that the store path is computed from a cryptographic hash
+       of the contents of the path, plus some other bits of data like
+       the "name" part of the path. Such a path doesn't need
+       signatures, since we don't have to trust anybody's claim that
+       the path is the output of a particular derivation. (In the
+       extensional store model, we have to trust that the *contents*
+       of an output path of a derivation were actually produced by
+       that derivation. In the intensional model, we have to trust
+       that a particular output path was produced by a derivation; the
+       path then implies the contents.)
+
+       Ideally, the content-addressability assertion would just be a
+       Boolean, and the store path would be computed from
+       ‘storePathToName(path)’, ‘narHash’ and ‘references’. However,
+       1) we've accumulated several types of content-addressed paths
+       over the years; and 2) fixed-output derivations support
+       multiple hash algorithms and serialisation methods (flat file
+       vs NAR). Thus, ‘ca’ has one of the following forms:
+
+       * ‘text:sha256:<sha256 hash of file contents>’: For paths
+         computed by makeTextPath() / addTextToStore().
+
+       * ‘fixed:<r?>:<ht>:<h>’: For paths computed by
+         makeFixedOutputPath() / addToStore().
+    */
+    std::string ca;
+
+    /* Note: deliberately ignores narSize, registrationTime, sigs and
+       ca — equality is about path identity and contents. */
+    bool operator == (const ValidPathInfo & i) const
+    {
+        return
+            path == i.path
+            && narHash == i.narHash
+            && references == i.references;
+    }
+
+    /* Return a fingerprint of the store path to be used in binary
+       cache signatures. It contains the store path, the base-32
+       SHA-256 hash of the NAR serialisation of the path, the size of
+       the NAR, and the sorted references. The size field is strictly
+       speaking superfluous, but might prevent endless/excessive data
+       attacks. Throws if narSize/narHash are unknown. */
+    std::string fingerprint() const;
+
+    /* Sign the fingerprint and add the detached signature to ‘sigs’. */
+    void sign(const SecretKey & secretKey);
+
+    /* Return true iff the path is verifiably content-addressed. */
+    bool isContentAddressed(const Store & store) const;
+
+    static const size_t maxSigs = std::numeric_limits<size_t>::max();
+
+    /* Return the number of signatures on this .narinfo that were
+       produced by one of the specified keys, or maxSigs if the path
+       is content-addressed. */
+    size_t checkSignatures(const Store & store, const PublicKeys & publicKeys) const;
+
+    /* Verify a single signature. */
+    bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const;
+
+    /* The references with the store-directory prefix stripped. */
+    Strings shortRefs() const;
+
+    virtual ~ValidPathInfo() { }
+};
+
+typedef list<ValidPathInfo> ValidPathInfos;
+
+
+enum BuildMode { bmNormal, bmRepair, bmCheck, bmHash };
+
+
+/* Outcome of building a single derivation (or substituting its
+   outputs), as returned by Store::buildDerivation(). */
+struct BuildResult
+{
+    /* Note: don't remove status codes, and only add new status codes
+       at the end of the list, to prevent client/server
+       incompatibilities in the nix-store --serve protocol. */
+    enum Status {
+        Built = 0,
+        Substituted,
+        AlreadyValid,
+        PermanentFailure,
+        InputRejected,
+        OutputRejected,
+        TransientFailure, // possibly transient
+        CachedFailure, // no longer used
+        TimedOut,
+        MiscFailure,
+        DependencyFailed,
+        LogLimitExceeded,
+        NotDeterministic,
+    } status = MiscFailure;
+    std::string errorMsg;
+
+    /* How many times this build was performed. */
+    unsigned int timesBuilt = 0;
+
+    /* If timesBuilt > 1, whether some builds did not produce the same
+       result. (Note that 'isNonDeterministic = false' does not mean
+       the build is deterministic, just that we don't have evidence of
+       non-determinism.) */
+    bool isNonDeterministic = false;
+
+    /* The start/stop times of the build (or one of the rounds, if it
+       was repeated). */
+    time_t startTime = 0, stopTime = 0;
+
+    /* Whether the outputs ended up valid, by whatever means. Const:
+       it only inspects the status. */
+    bool success() const {
+        return status == Built || status == Substituted || status == AlreadyValid;
+    }
+};
+
+
+/* Abstract base class of all Nix store implementations (local store,
+   daemon connection, binary caches, ...). It provides an in-memory
+   path-info cache and generic operations built on top of the pure
+   virtual primitives that subclasses implement. */
+class Store : public std::enable_shared_from_this<Store>, public Config
+{
+public:
+
+    typedef std::map<std::string, std::string> Params;
+
+    const PathSetting storeDir_{this, false, settings.nixStore,
+        "store", "path to the Nix store"};
+    const Path storeDir = storeDir_;
+
+    const Setting<int> pathInfoCacheSize{this, 65536, "path-info-cache-size", "size of the in-memory store path information cache"};
+
+protected:
+
+    /* Mutable state, accessed under the lock of ‘state’.
+       NOTE(review): the cache holds shared_ptrs — presumably a null
+       entry records a known-invalid path (negative caching); confirm
+       against the queryPathInfo()/isValidPathUncached()
+       implementations. */
+    struct State
+    {
+        LRUCache<std::string, std::shared_ptr<ValidPathInfo>> pathInfoCache;
+    };
+
+    Sync<State> state;
+
+    std::shared_ptr<NarInfoDiskCache> diskCache;
+
+    Store(const Params & params);
+
+public:
+
+    virtual ~Store() { }
+
+    virtual std::string getUri() = 0;
+
+    /* Return true if ‘path’ is in the Nix store (but not the Nix
+       store itself). */
+    bool isInStore(const Path & path) const;
+
+    /* Return true if ‘path’ is a store path, i.e. a direct child of
+       the Nix store. */
+    bool isStorePath(const Path & path) const;
+
+    /* Throw an exception if ‘path’ is not a store path. */
+    void assertStorePath(const Path & path) const;
+
+    /* Chop off the parts after the top-level store name, e.g.,
+       /nix/store/abcd-foo/bar => /nix/store/abcd-foo. */
+    Path toStorePath(const Path & path) const;
+
+    /* Follow symlinks until we end up with a path in the Nix store. */
+    Path followLinksToStore(const Path & path) const;
+
+    /* Same as followLinksToStore(), but apply toStorePath() to the
+       result. */
+    Path followLinksToStorePath(const Path & path) const;
+
+    /* Constructs a unique store path name. */
+    Path makeStorePath(const string & type,
+        const Hash & hash, const string & name) const;
+
+    Path makeOutputPath(const string & id,
+        const Hash & hash, const string & name) const;
+
+    Path makeFixedOutputPath(bool recursive,
+        const Hash & hash, const string & name) const;
+
+    Path makeTextPath(const string & name, const Hash & hash,
+        const PathSet & references) const;
+
+    /* This is the preparatory part of addToStore(); it computes the
+       store path to which srcPath is to be copied. Returns the store
+       path and the cryptographic hash of the contents of srcPath. */
+    std::pair<Path, Hash> computeStorePathForPath(const Path & srcPath,
+        bool recursive = true, HashType hashAlgo = htSHA256,
+        PathFilter & filter = defaultPathFilter) const;
+
+    /* Preparatory part of addTextToStore().
+
+       !!! Computation of the path should take the references given to
+       addTextToStore() into account, otherwise we have a (relatively
+       minor) security hole: a caller can register a source file with
+       bogus references. If there are too many references, the path may
+       not be garbage collected when it has to be (not really a problem,
+       the caller could create a root anyway), or it may be garbage
+       collected when it shouldn't be (more serious).
+
+       Hashing the references would solve this (bogus references would
+       simply yield a different store path, so other users wouldn't be
+       affected), but it has some backwards compatibility issues (the
+       hashing scheme changes), so I'm not doing that for now. */
+    Path computeStorePathForText(const string & name, const string & s,
+        const PathSet & references) const;
+
+    /* Check whether a path is valid. */
+    bool isValidPath(const Path & path);
+
+protected:
+
+    virtual bool isValidPathUncached(const Path & path);
+
+public:
+
+    /* Query which of the given paths is valid. Optionally, try to
+       substitute missing paths. */
+    virtual PathSet queryValidPaths(const PathSet & paths,
+        bool maybeSubstitute = false);
+
+    /* Query the set of all valid paths. Note that for some store
+       backends, the name part of store paths may be omitted
+       (i.e. you'll get /nix/store/<hash> rather than
+       /nix/store/<hash>-<name>). Use queryPathInfo() to obtain the
+       full store path. */
+    virtual PathSet queryAllValidPaths() = 0;
+
+    /* Query information about a valid path. It is permitted to omit
+       the name part of the store path. */
+    ref<const ValidPathInfo> queryPathInfo(const Path & path);
+
+    /* Asynchronous version of queryPathInfo(). */
+    void queryPathInfo(const Path & path,
+        std::function<void(ref<ValidPathInfo>)> success,
+        std::function<void(std::exception_ptr exc)> failure);
+
+protected:
+
+    virtual void queryPathInfoUncached(const Path & path,
+        std::function<void(std::shared_ptr<ValidPathInfo>)> success,
+        std::function<void(std::exception_ptr exc)> failure) = 0;
+
+public:
+
+    /* Queries the set of incoming FS references for a store path.
+       The result is not cleared. */
+    virtual void queryReferrers(const Path & path,
+        PathSet & referrers) = 0;
+
+    /* Return all currently valid derivations that have `path' as an
+       output. (Note that the result of `queryDeriver()' is the
+       derivation that was actually used to produce `path', which may
+       not exist anymore.) */
+    virtual PathSet queryValidDerivers(const Path & path) { return {}; };
+
+    /* Query the outputs of the derivation denoted by `path'. */
+    virtual PathSet queryDerivationOutputs(const Path & path) = 0;
+
+    /* Query the output names of the derivation denoted by `path'. */
+    virtual StringSet queryDerivationOutputNames(const Path & path) = 0;
+
+    /* Query the full store path given the hash part of a valid store
+       path, or "" if the path doesn't exist. */
+    virtual Path queryPathFromHashPart(const string & hashPart) = 0;
+
+    /* Query which of the given paths have substitutes. */
+    virtual PathSet querySubstitutablePaths(const PathSet & paths) { return {}; };
+
+    /* Query substitute info (i.e. references, derivers and download
+       sizes) of a set of paths. If a path does not have substitute
+       info, it's omitted from the resulting ‘infos’ map. */
+    virtual void querySubstitutablePathInfos(const PathSet & paths,
+        SubstitutablePathInfos & infos) { return; };
+
+    virtual bool wantMassQuery() { return false; }
+
+    /* Import a path into the store. */
+    virtual void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
+        bool repair = false, bool dontCheckSigs = false,
+        std::shared_ptr<FSAccessor> accessor = 0) = 0;
+
+    /* Copy the contents of a path to the store and register the
+       validity the resulting path. The resulting path is returned.
+       The function object `filter' can be used to exclude files (see
+       libutil/archive.hh). */
+    virtual Path addToStore(const string & name, const Path & srcPath,
+        bool recursive = true, HashType hashAlgo = htSHA256,
+        PathFilter & filter = defaultPathFilter, bool repair = false) = 0;
+
+    /* Like addToStore, but the contents written to the output path is
+       a regular file containing the given string. */
+    virtual Path addTextToStore(const string & name, const string & s,
+        const PathSet & references, bool repair = false) = 0;
+
+    /* Write a NAR dump of a store path. */
+    virtual void narFromPath(const Path & path, Sink & sink) = 0;
+
+    /* For each path, if it's a derivation, build it. Building a
+       derivation means ensuring that the output paths are valid. If
+       they are already valid, this is a no-op. Otherwise, validity
+       can be reached in two ways. First, if the output paths is
+       substitutable, then build the path that way. Second, the
+       output paths can be created by running the builder, after
+       recursively building any sub-derivations. For inputs that are
+       not derivations, substitute them. */
+    virtual void buildPaths(const PathSet & paths, BuildMode buildMode = bmNormal) = 0;
+
+    /* Build a single non-materialized derivation (i.e. not from an
+       on-disk .drv file). Note that ‘drvPath’ is only used for
+       informational purposes. */
+    virtual BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
+        BuildMode buildMode = bmNormal) = 0;
+
+    /* Ensure that a path is valid. If it is not currently valid, it
+       may be made valid by running a substitute (if defined for the
+       path). */
+    virtual void ensurePath(const Path & path) = 0;
+
+    /* Add a store path as a temporary root of the garbage collector.
+       The root disappears as soon as we exit. */
+    virtual void addTempRoot(const Path & path) = 0;
+
+    /* Add an indirect root, which is merely a symlink to `path' from
+       /nix/var/nix/gcroots/auto/<hash of `path'>. `path' is supposed
+       to be a symlink to a store path. The garbage collector will
+       automatically remove the indirect root when it finds that
+       `path' has disappeared. */
+    virtual void addIndirectRoot(const Path & path) = 0;
+
+    /* Acquire the global GC lock, then immediately release it. This
+       function must be called after registering a new permanent root,
+       but before exiting. Otherwise, it is possible that a running
+       garbage collector doesn't see the new root and deletes the
+       stuff we've just built. By acquiring the lock briefly, we
+       ensure that either:
+
+       - The collector is already running, and so we block until the
+         collector is finished. The collector will know about our
+         *temporary* locks, which should include whatever it is we
+         want to register as a permanent lock.
+
+       - The collector isn't running, or it's just started but hasn't
+         acquired the GC lock yet. In that case we get and release
+         the lock right away, then exit. The collector scans the
+         permanent root and sees ours.
+
+       In either case the permanent root is seen by the collector. */
+    virtual void syncWithGC() { };
+
+    /* Find the roots of the garbage collector. Each root is a pair
+       (link, storepath) where `link' is the path of the symlink
+       outside of the Nix store that point to `storePath'. */
+    virtual Roots findRoots() = 0;
+
+    /* Perform a garbage collection. */
+    virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
+
+    /* Return a string representing information about the path that
+       can be loaded into the database using `nix-store --load-db' or
+       `nix-store --register-validity'. */
+    string makeValidityRegistration(const PathSet & paths,
+        bool showDerivers, bool showHash);
+
+    /* Write a JSON representation of store path metadata, such as the
+       hash and the references. If ‘includeImpureInfo’ is true,
+       variable elements such as the registration time are
+       included. If ‘showClosureSize’ is true, the closure size of
+       each path is included. */
+    void pathInfoToJSON(JSONPlaceholder & jsonOut, const PathSet & storePaths,
+        bool includeImpureInfo, bool showClosureSize);
+
+    /* Return the size of the closure of the specified path, that is,
+       the sum of the size of the NAR serialisation of each path in
+       the closure. */
+    unsigned long long getClosureSize(const Path & storePath);
+
+    /* Optimise the disk space usage of the Nix store by hard-linking files
+       with the same contents. */
+    virtual void optimiseStore() { };
+
+    /* Check the integrity of the Nix store. Returns true if errors
+       remain. */
+    virtual bool verifyStore(bool checkContents, bool repair) { return false; };
+
+    /* Return an object to access files in the Nix store. */
+    virtual ref<FSAccessor> getFSAccessor() = 0;
+
+    /* Add signatures to the specified store path. The signatures are
+       not verified. */
+    virtual void addSignatures(const Path & storePath, const StringSet & sigs) = 0;
+
+    /* Utility functions. */
+
+    /* Read a derivation, after ensuring its existence through
+       ensurePath(). */
+    Derivation derivationFromPath(const Path & drvPath);
+
+    /* Place in `out' the set of all store paths in the file system
+       closure of `storePath'; that is, all paths than can be directly
+       or indirectly reached from it. `out' is not cleared. If
+       `flipDirection' is true, the set of paths that can reach
+       `storePath' is returned; that is, the closures under the
+       `referrers' relation instead of the `references' relation is
+       returned. */
+    virtual void computeFSClosure(const PathSet & paths,
+        PathSet & out, bool flipDirection = false,
+        bool includeOutputs = false, bool includeDerivers = false);
+
+    void computeFSClosure(const Path & path,
+        PathSet & out, bool flipDirection = false,
+        bool includeOutputs = false, bool includeDerivers = false);
+
+    /* Given a set of paths that are to be built, return the set of
+       derivations that will be built, and the set of output paths
+       that will be substituted. */
+    virtual void queryMissing(const PathSet & targets,
+        PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+        unsigned long long & downloadSize, unsigned long long & narSize);
+
+    /* Sort a set of paths topologically under the references
+       relation. If p refers to q, then p precedes q in this list. */
+    Paths topoSortPaths(const PathSet & paths);
+
+    /* Export multiple paths in the format expected by ‘nix-store
+       --import’. */
+    void exportPaths(const Paths & paths, Sink & sink);
+
+    void exportPath(const Path & path, Sink & sink);
+
+    /* Import a sequence of NAR dumps created by exportPaths() into
+       the Nix store. Optionally, the contents of the NARs are
+       preloaded into the specified FS accessor to speed up subsequent
+       access. */
+    Paths importPaths(Source & source, std::shared_ptr<FSAccessor> accessor,
+        bool dontCheckSigs = false);
+
+    /* Per-store counters, updated atomically by the implementations. */
+    struct Stats
+    {
+        std::atomic<uint64_t> narInfoRead{0};
+        std::atomic<uint64_t> narInfoReadAverted{0};
+        std::atomic<uint64_t> narInfoMissing{0};
+        std::atomic<uint64_t> narInfoWrite{0};
+        std::atomic<uint64_t> pathInfoCacheSize{0};
+        std::atomic<uint64_t> narRead{0};
+        std::atomic<uint64_t> narReadBytes{0};
+        std::atomic<uint64_t> narReadCompressedBytes{0};
+        std::atomic<uint64_t> narWrite{0};
+        std::atomic<uint64_t> narWriteAverted{0};
+        std::atomic<uint64_t> narWriteBytes{0};
+        std::atomic<uint64_t> narWriteCompressedBytes{0};
+        std::atomic<uint64_t> narWriteCompressionTimeMs{0};
+    };
+
+    const Stats & getStats();
+
+    /* Whether paths from this store can be imported even if they lack
+       a signature. */
+    virtual bool isTrusted() { return false; }
+
+    /* Return the build log of the specified store path, if available,
+       or null otherwise. */
+    virtual std::shared_ptr<std::string> getBuildLog(const Path & path)
+    { return nullptr; }
+
+    /* Hack to allow long-running processes like hydra-queue-runner to
+       occasionally flush their path info cache. */
+    void clearPathInfoCache()
+    {
+        state.lock()->pathInfoCache.clear();
+    }
+
+protected:
+
+    Stats stats;
+
+};
+
+
+/* Intermediate base class for stores whose contents live on the local
+   file system (local store, local binary cache, ...), providing
+   direct-file-access implementations of narFromPath(),
+   getFSAccessor() and getBuildLog(). */
+class LocalFSStore : public virtual Store
+{
+public:
+
+    // FIXME: the (Store*) cast works around a bug in gcc that causes
+    // it to emit the call to the Option constructor. Clang works fine
+    // either way.
+    const PathSetting rootDir{(Store*) this, true, "",
+        "root", "directory prefixed to all other paths"};
+    const PathSetting stateDir{(Store*) this, false,
+        rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir,
+        "state", "directory where Nix will store state"};
+    /* Fixed: the description was a copy-paste of stateDir's ("store
+       state"); this setting is the log directory. */
+    const PathSetting logDir{(Store*) this, false,
+        rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
+        "log", "directory where Nix will store log files"};
+
+    const static string drvsLogDir;
+
+    LocalFSStore(const Params & params);
+
+    void narFromPath(const Path & path, Sink & sink) override;
+    ref<FSAccessor> getFSAccessor() override;
+
+    /* Register a permanent GC root. */
+    Path addPermRoot(const Path & storePath,
+        const Path & gcRoot, bool indirect, bool allowOutsideRootsDir = false);
+
+    /* The directory that physically holds the store contents; equal
+       to storeDir unless overridden by a subclass. */
+    virtual Path getRealStoreDir() { return storeDir; }
+
+    /* Map a logical store path to its physical location on disk. */
+    Path toRealPath(const Path & storePath)
+    {
+        return getRealStoreDir() + "/" + baseNameOf(storePath);
+    }
+
+    std::shared_ptr<std::string> getBuildLog(const Path & path) override;
+};
+
+
+/* Extract the name part of the given store path. */
+string storePathToName(const Path & path);
+
+/* Extract the hash part of the given store path. */
+string storePathToHash(const Path & path);
+
+/* Check whether ‘name’ is a valid store path name part, i.e. contains
+ only the characters [a-zA-Z0-9\+\-\.\_\?\=] and doesn't start with
+ a dot. */
+void checkStoreName(const string & name);
+
+
+/* Copy a path from one store to another. */
+void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
+ const Path & storePath, bool repair = false, bool dontCheckSigs = false);
+
+
+/* Copy the closure of the specified paths from one store to another. */
+void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
+ const PathSet & storePaths, bool repair = false, bool dontCheckSigs = false);
+
+
+/* Remove the temporary roots file for this process. Any temporary
+ root becomes garbage after this point unless it has been registered
+ as a (permanent) root. */
+void removeTempRoots();
+
+
+/* Return a Store object to access the Nix store denoted by
+ ‘uri’ (slight misnomer...). Supported values are:
+
+ * ‘direct’: The Nix store in /nix/store and database in
+ /nix/var/nix/db, accessed directly.
+
+ * ‘daemon’: The Nix store accessed via a Unix domain socket
+ connection to nix-daemon.
+
+ * ‘file://<path>’: A binary cache stored in <path>.
+
+   If ‘uri’ is empty, it defaults to ‘direct’ or ‘daemon’ depending on
+   whether the user has write access to the local Nix store/database. */
+ref<Store> openStore(const std::string & uri = getEnv("NIX_REMOTE"));
+
+ref<Store> openStore(const std::string & uri, const Store::Params & params);
+
+
+void copyPaths(ref<Store> from, ref<Store> to, const PathSet & storePaths, bool substitute = false);
+
+enum StoreType {
+ tDaemon,
+ tLocal,
+ tOther
+};
+
+
+StoreType getStoreType(const std::string & uri = getEnv("NIX_REMOTE"), const std::string & stateDir = settings.nixStateDir);
+
+/* Return the default substituter stores, defined by the
+ ‘substituters’ option and various legacy options like
+ ‘binary-caches’. */
+std::list<ref<Store>> getDefaultSubstituters();
+
+
+/* Store implementation registration. */
+typedef std::function<std::shared_ptr<Store>(
+ const std::string & uri, const Store::Params & params)> OpenStore;
+
+/* Store implementations register themselves by defining a global
+   RegisterStoreImplementation object; openStore() then tries each
+   registered function in turn until one accepts the URI. */
+struct RegisterStoreImplementation
+{
+    typedef std::vector<OpenStore> Implementations;
+    static Implementations * implementations;
+
+    RegisterStoreImplementation(OpenStore fun)
+    {
+        /* The registry is heap-allocated on first use so registration
+           works during static initialisation regardless of
+           translation-unit order. */
+        if (!implementations) implementations = new Implementations;
+        implementations->push_back(fun);
+    }
+};
+
+
+
+/* Display a set of paths in human-readable form (i.e., between quotes
+ and separated by commas). */
+string showPaths(const PathSet & paths);
+
+
+ValidPathInfo decodeValidPathInfo(std::istream & str,
+ bool hashGiven = false);
+
+
+/* Compute the content-addressability assertion (ValidPathInfo::ca)
+ for paths created by makeFixedOutputPath() / addToStore(). */
+std::string makeFixedOutputCA(bool recursive, const Hash & hash);
+
+
+MakeError(SubstError, Error)
+MakeError(BuildError, Error) /* denotes a permanent build failure */
+MakeError(InvalidPath, Error)
+
+
+}
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
new file mode 100644
index 000000000..6c6766b36
--- /dev/null
+++ b/src/libstore/worker-protocol.hh
@@ -0,0 +1,66 @@
+#pragma once
+
+namespace nix {
+
+
+#define WORKER_MAGIC_1 0x6e697863
+#define WORKER_MAGIC_2 0x6478696f
+
+#define PROTOCOL_VERSION 0x113
+#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
+#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
+
+
+/* Operation codes of the daemon worker protocol. The numeric values
+   are part of the wire format, so existing codes must never be
+   renumbered or removed; the gaps (2, 15, 17) are presumably codes of
+   long-retired operations. */
+typedef enum {
+    wopIsValidPath = 1,
+    wopHasSubstitutes = 3,
+    wopQueryPathHash = 4, // obsolete
+    wopQueryReferences = 5, // obsolete
+    wopQueryReferrers = 6,
+    wopAddToStore = 7,
+    wopAddTextToStore = 8,
+    wopBuildPaths = 9,
+    wopEnsurePath = 10,
+    wopAddTempRoot = 11,
+    wopAddIndirectRoot = 12,
+    wopSyncWithGC = 13,
+    wopFindRoots = 14,
+    wopExportPath = 16, // obsolete
+    wopQueryDeriver = 18, // obsolete
+    wopSetOptions = 19,
+    wopCollectGarbage = 20,
+    wopQuerySubstitutablePathInfo = 21,
+    wopQueryDerivationOutputs = 22,
+    wopQueryAllValidPaths = 23,
+    wopQueryFailedPaths = 24,
+    wopClearFailedPaths = 25,
+    wopQueryPathInfo = 26,
+    wopImportPaths = 27, // obsolete
+    wopQueryDerivationOutputNames = 28,
+    wopQueryPathFromHashPart = 29,
+    wopQuerySubstitutablePathInfos = 30,
+    wopQueryValidPaths = 31,
+    wopQuerySubstitutablePaths = 32,
+    wopQueryValidDerivers = 33,
+    wopOptimiseStore = 34,
+    wopVerifyStore = 35,
+    wopBuildDerivation = 36,
+    wopAddSignatures = 37,
+    wopNarFromPath = 38,
+    wopAddToStoreNar = 39,
+    wopQueryMissing = 40,
+} WorkerOp;
+
+
+#define STDERR_NEXT 0x6f6c6d67
+#define STDERR_READ 0x64617461 // data needed from source
+#define STDERR_WRITE 0x64617416 // data for sink
+#define STDERR_LAST 0x616c7473
+#define STDERR_ERROR 0x63787470
+
+
+Path readStorePath(Store & store, Source & from);
+template<class T> T readStorePaths(Store & store, Source & from);
+
+
+}