-rw-r--r-- | configure.ac | 21
-rw-r--r-- | externals/Makefile.am | 8
-rw-r--r-- | src/libstore/Makefile.am | 11
-rw-r--r-- | src/libstore/build.cc | 76
-rw-r--r-- | src/libstore/db.cc | 6
-rw-r--r-- | src/libstore/gc.cc | 2
-rw-r--r-- | src/libstore/local-store.cc | 978
-rw-r--r-- | src/libstore/local-store.hh | 101
-rw-r--r-- | src/libstore/misc.cc | 1
-rw-r--r-- | src/libstore/optimise-store.cc | 129
-rw-r--r-- | src/libstore/store-api.cc | 17
-rw-r--r-- | src/libstore/store-api.hh | 11
-rw-r--r-- | src/libstore/upgrade-schema.cc | 108
-rw-r--r-- | src/libutil/util.cc | 5
-rw-r--r-- | src/libutil/util.hh | 4
-rw-r--r-- | src/nix-env/nix-env.cc | 1
-rw-r--r-- | src/nix-store/dotgraph.cc | 1
-rw-r--r-- | src/nix-store/nix-store.cc | 24
-rw-r--r-- | src/nix-worker/nix-worker.cc | 2
-rw-r--r-- | tests/Makefile.am | 2
-rw-r--r-- | tests/dependencies.sh | 2
-rw-r--r-- | tests/export.sh | 31
-rw-r--r-- | tests/init.sh | 3
-rw-r--r-- | tests/referrers.sh | 48 |
24 files changed, 888 insertions, 704 deletions
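
The patch replaces the Berkeley DB tables (validpaths, references, referrers, derivers) with plain files: per-path metadata goes into /nix/var/nix/db/info/<path-name> and referrer lists into /nix/var/nix/db/referrer/<path-name>. As a reading aid, here is a minimal standalone sketch of how an info file in the format written by registerValidPath() below can be parsed. PathInfo and parseInfoFile() are illustrative names, not part of the patch, which uses ValidPathInfo and LocalStore::queryPathInfo().

    #include <cstdlib>
    #include <ctime>
    #include <fstream>
    #include <set>
    #include <sstream>
    #include <string>

    /* Illustrative stand-in for the patch's ValidPathInfo. */
    struct PathInfo {
        std::string hash;                  /* e.g. "sha256:<base16 digest>" */
        std::set<std::string> references;  /* store paths referenced by this path */
        std::string deriver;               /* may be empty */
        time_t registrationTime;
        PathInfo() : registrationTime(0) { }
    };

    /* Parse one /nix/var/nix/db/info/<name> file.  Lines that don't look
       like "Name: value" are skipped, matching the lenient parsing in
       queryPathInfo(). */
    PathInfo parseInfoFile(const std::string & infoFile)
    {
        PathInfo res;
        std::ifstream in(infoFile.c_str());
        std::string line;
        while (std::getline(in, line)) {
            std::string::size_type p = line.find(": ");
            if (p == std::string::npos) continue;      /* bad line */
            std::string name(line, 0, p);
            std::string value(line, p + 2);
            if (name == "Hash") res.hash = value;
            else if (name == "Deriver") res.deriver = value;
            else if (name == "Registered-At") res.registrationTime = std::atol(value.c_str());
            else if (name == "References") {
                std::istringstream ss(value);
                std::string ref;
                while (ss >> ref) res.references.insert(ref);
            }
        }
        return res;
    }
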
diff --git a/configure.ac b/configure.ac index fd96c65f2..ffeff3a5e 100644 --- a/configure.ac +++ b/configure.ac @@ -16,7 +16,7 @@ if test "$STABLE" != "1"; then fi fi -AC_DEFINE_UNQUOTED(NIX_VERSION, ["$VERSION"], [version]) +AC_DEFINE_UNQUOTED(NIX_VERSION, ["$VERSION"], [Nix version.]) AC_PREFIX_DEFAULT(/nix) @@ -54,7 +54,7 @@ case $sys_name in esac AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM], - [platform identifier (e.g., `i686-linux')]), + [Platform identifier (e.g., `i686-linux').]), system=$withval, system="${machine_name}-${sys_name}") AC_MSG_RESULT($system) AC_SUBST(system) @@ -94,7 +94,7 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <iostream> using namespace std; static char buf[1024];]], [[cerr.rdbuf()->pubsetbuf(buf, sizeof(buf));]])], - [AC_MSG_RESULT(yes) AC_DEFINE(HAVE_PUBSETBUF, 1, [whether pubsetbuf is available])], + [AC_MSG_RESULT(yes) AC_DEFINE(HAVE_PUBSETBUF, 1, [Whether pubsetbuf is available.])], AC_MSG_RESULT(no)) AC_LANG_POP(C++) @@ -177,8 +177,13 @@ AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH], storedir=$withval, storedir='${prefix}/store') AC_SUBST(storedir) +AC_ARG_ENABLE(old-db-compat, AC_HELP_STRING([--disable-old-db-compat], + [disable support for converting from old Berkeley DB-based Nix stores]), + old_db_compat=$enableval, old_db_compat=yes) +AM_CONDITIONAL(OLD_DB_COMPAT, test "$old_db_compat" = "yes") + AC_ARG_WITH(bdb, AC_HELP_STRING([--with-bdb=PATH], - [prefix of Berkeley DB]), + [prefix of Berkeley DB (for Nix <= 0.11 compatibility)]), bdb=$withval, bdb=) AM_CONDITIONAL(HAVE_BDB, test -n "$bdb") if test -z "$bdb"; then @@ -188,6 +193,12 @@ else bdb_lib="-L$bdb/lib -ldb_cxx" bdb_include="-I$bdb/include" fi +if test "$old_db_compat" = "no"; then + bdb_lib= + bdb_include= +else + AC_DEFINE(OLD_DB_COMPAT, 1, [Whether to support converting from old Berkeley DB-based Nix stores.]) +fi AC_SUBST(bdb_lib) AC_SUBST(bdb_include) @@ -216,7 +227,7 @@ if test -n "$openssl"; then LDFLAGS="-L$openssl/lib -lcrypto $LDFLAGS" CFLAGS="-I$openssl/include $CFLAGS" CXXFLAGS="-I$openssl/include $CXXFLAGS" - AC_DEFINE(HAVE_OPENSSL, 1, [whether to use OpenSSL]) + AC_DEFINE(HAVE_OPENSSL, 1, [Whether to use OpenSSL.]) fi AC_ARG_WITH(bzip2, AC_HELP_STRING([--with-bzip2=PATH], diff --git a/externals/Makefile.am b/externals/Makefile.am index 854a65268..981506ebb 100644 --- a/externals/Makefile.am +++ b/externals/Makefile.am @@ -2,6 +2,8 @@ DB = db-4.5.20 +if OLD_DB_COMPAT + $(DB).tar.gz: @echo "Nix requires Berkeley DB to build." 
@echo "Please download version 4.5.20 from" @@ -32,6 +34,12 @@ build-db: have-db touch build-db endif +else + +build-db: + +endif + # CWI ATerm diff --git a/src/libstore/Makefile.am b/src/libstore/Makefile.am index 21b1545f6..903778780 100644 --- a/src/libstore/Makefile.am +++ b/src/libstore/Makefile.am @@ -1,13 +1,14 @@ pkglib_LTLIBRARIES = libstore.la libstore_la_SOURCES = \ - store-api.cc local-store.cc remote-store.cc derivations.cc build.cc misc.cc \ - globals.cc db.cc references.cc pathlocks.cc gc.cc + store-api.cc local-store.cc remote-store.cc derivations.cc build.cc misc.cc \ + globals.cc db.cc references.cc pathlocks.cc gc.cc upgrade-schema.cc \ + optimise-store.cc pkginclude_HEADERS = \ - store-api.hh local-store.hh remote-store.hh derivations.hh misc.hh \ - globals.hh db.hh references.hh pathlocks.hh \ - worker-protocol.hh + store-api.hh local-store.hh remote-store.hh derivations.hh misc.hh \ + globals.hh db.hh references.hh pathlocks.hh \ + worker-protocol.hh libstore_la_LIBADD = ../libutil/libutil.la ../boost/format/libformat.la diff --git a/src/libstore/build.cc b/src/libstore/build.cc index e6ab6310c..23cda12c0 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -3,7 +3,6 @@ #include "misc.hh" #include "globals.hh" #include "local-store.hh" -#include "db.hh" #include "util.hh" #include <map> @@ -207,7 +206,9 @@ private: public: - Worker(); + LocalStore & store; + + Worker(LocalStore & store); ~Worker(); /* Make a goal (with caching). */ @@ -897,14 +898,14 @@ void DerivationGoal::haveDerivation() return; } - assert(store->isValidPath(drvPath)); + assert(worker.store.isValidPath(drvPath)); /* Get the derivation. */ drv = derivationFromPath(drvPath); for (DerivationOutputs::iterator i = drv.outputs.begin(); i != drv.outputs.end(); ++i) - store->addTempRoot(i->second.path); + worker.store.addTempRoot(i->second.path); /* Check what outputs paths are not already valid. */ PathSet invalidOutputs = checkPathValidity(false); @@ -938,7 +939,7 @@ void DerivationGoal::haveDerivation() i != invalidOutputs.end(); ++i) /* Don't bother creating a substitution goal if there are no substitutes. 
*/ - if (store->hasSubstitutes(*i)) + if (worker.store.hasSubstitutes(*i)) addWaitee(worker.makeSubstitutionGoal(*i)); if (waitees.empty()) /* to prevent hang (no wake-up event) */ @@ -993,7 +994,7 @@ void DerivationGoal::outputsSubstituted() throw BuildError(format("`exportBuildReferencesGraph' contains a non-store path `%1%'") % storePath); storePath = toStorePath(storePath); - if (!store->isValidPath(storePath)) + if (!worker.store.isValidPath(storePath)) throw BuildError(format("`exportBuildReferencesGraph' contains an invalid path `%1%'") % storePath); @@ -1250,19 +1251,6 @@ PathSet outputPaths(const DerivationOutputs & outputs) } -string showPaths(const PathSet & paths) -{ - string s; - for (PathSet::const_iterator i = paths.begin(); - i != paths.end(); ++i) - { - if (s.size() != 0) s += ", "; - s += "`" + *i + "'"; - } - return s; -} - - DerivationGoal::HookReply DerivationGoal::tryBuildHook() { if (!useBuildHook) return rpDecline; @@ -1474,7 +1462,7 @@ DerivationGoal::PrepareBuildReply DerivationGoal::prepareBuild() i != drv.outputs.end(); ++i) { Path path = i->second.path; - if (store->isValidPath(path)) + if (worker.store.isValidPath(path)) throw BuildError(format("obstructed build: path `%1%' exists") % path); if (pathExists(path)) { debug(format("removing unregistered path `%1%'") % path); @@ -1502,7 +1490,7 @@ DerivationGoal::PrepareBuildReply DerivationGoal::prepareBuild() /* Add the relevant output closures of the input derivation `*i' as input paths. Only add the closures of output paths that are specified as inputs. */ - assert(store->isValidPath(i->first)); + assert(worker.store.isValidPath(i->first)); Derivation inDrv = derivationFromPath(i->first); for (StringSet::iterator j = i->second.begin(); j != i->second.end(); ++j) @@ -1624,7 +1612,7 @@ void DerivationGoal::startBuilder() throw BuildError(format("`exportReferencesGraph' contains a non-store path `%1%'") % storePath); storePath = toStorePath(storePath); - if (!store->isValidPath(storePath)) + if (!worker.store.isValidPath(storePath)) throw BuildError(format("`exportReferencesGraph' contains an invalid path `%1%'") % storePath); @@ -1652,7 +1640,7 @@ void DerivationGoal::startBuilder() throw BuildError(format("`exportBuildReferencesGraph' contains a non-store path `%1%'") % storePath); storePath = toStorePath(storePath); - if (!store->isValidPath(storePath)) + if (!worker.store.isValidPath(storePath)) throw BuildError(format("`exportBuildReferencesGraph' contains an invalid path `%1%'") % storePath); @@ -1994,27 +1982,17 @@ void DerivationGoal::computeClosure() } /* Register each output path as valid, and register the sets of - paths referenced by each of them. This is wrapped in one - database transaction to ensure that if we crash, either - everything is registered or nothing is. This is for - recoverability: unregistered paths in the store can be deleted - arbitrarily, while registered paths can only be deleted by - running the garbage collector. - - The reason that we do the transaction here and not on the fly - while we are scanning (above) is so that we don't hold database - locks for too long. */ - Transaction txn; - createStoreTransaction(txn); + paths referenced by each of them. !!! this should be + atomic so that either all paths are registered as valid, or + none are. 
*/ for (DerivationOutputs::iterator i = drv.outputs.begin(); i != drv.outputs.end(); ++i) { - registerValidPath(txn, i->second.path, + worker.store.registerValidPath(i->second.path, contentHashes[i->second.path], allReferences[i->second.path], drvPath); } - txn.commit(); /* It is now safe to delete the lock files, since all future lockers will see that the output paths are valid; they will not @@ -2113,7 +2091,7 @@ PathSet DerivationGoal::checkPathValidity(bool returnValid) PathSet result; for (DerivationOutputs::iterator i = drv.outputs.begin(); i != drv.outputs.end(); ++i) - if (store->isValidPath(i->second.path)) { + if (worker.store.isValidPath(i->second.path)) { if (returnValid) result.insert(i->second.path); } else { if (!returnValid) result.insert(i->second.path); @@ -2219,10 +2197,10 @@ void SubstitutionGoal::init() { trace("init"); - store->addTempRoot(storePath); + worker.store.addTempRoot(storePath); /* If the path already exists we're done. */ - if (store->isValidPath(storePath)) { + if (worker.store.isValidPath(storePath)) { amDone(ecSuccess); return; } @@ -2293,7 +2271,7 @@ void SubstitutionGoal::referencesValid() for (PathSet::iterator i = references.begin(); i != references.end(); ++i) if (*i != storePath) /* ignore self-references */ - assert(store->isValidPath(*i)); + assert(worker.store.isValidPath(*i)); state = &SubstitutionGoal::tryToRun; worker.waitForBuildSlot(shared_from_this()); @@ -2327,7 +2305,7 @@ void SubstitutionGoal::tryToRun() (format("waiting for lock on `%1%'") % storePath).str()); /* Check again whether the path is invalid. */ - if (store->isValidPath(storePath)) { + if (worker.store.isValidPath(storePath)) { debug(format("store path `%1%' has become valid") % storePath); outputLock->setDeletion(true); amDone(ecSuccess); @@ -2434,11 +2412,8 @@ void SubstitutionGoal::finished() Hash contentHash = hashPath(htSHA256, storePath); - Transaction txn; - createStoreTransaction(txn); - registerValidPath(txn, storePath, contentHash, + worker.store.registerValidPath(storePath, contentHash, references, deriver); - txn.commit(); outputLock->setDeletion(true); @@ -2472,7 +2447,8 @@ void SubstitutionGoal::handleEOF(int fd) static bool working = false; -Worker::Worker() +Worker::Worker(LocalStore & store) + : store(store) { /* Debugging: prevent recursive workers. */ if (working) abort(); @@ -2870,7 +2846,7 @@ void LocalStore::buildDerivations(const PathSet & drvPaths) startNest(nest, lvlDebug, format("building %1%") % showPaths(drvPaths)); - Worker worker; + Worker worker(*this); Goals goals; for (PathSet::const_iterator i = drvPaths.begin(); @@ -2895,9 +2871,9 @@ void LocalStore::buildDerivations(const PathSet & drvPaths) void LocalStore::ensurePath(const Path & path) { /* If the path is already valid, we're done. 
*/ - if (store->isValidPath(path)) return; + if (isValidPath(path)) return; - Worker worker; + Worker worker(*this); GoalPtr goal = worker.makeSubstitutionGoal(path); Goals goals = singleton<Goals>(goal); diff --git a/src/libstore/db.cc b/src/libstore/db.cc index 59b9c0c8e..26e82d695 100644 --- a/src/libstore/db.cc +++ b/src/libstore/db.cc @@ -1,3 +1,7 @@ +#include "config.h" + +#ifdef OLD_DB_COMPAT + #include "db.hh" #include "util.hh" #include "pathlocks.hh" @@ -466,3 +470,5 @@ void Database::clearTable(const Transaction & txn, TableId table) } + +#endif diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index dab2b80aa..1d654cd67 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -2,8 +2,6 @@ #include "misc.hh" #include "pathlocks.hh" #include "local-store.hh" -#include "db.hh" -#include "util.hh" #include <boost/shared_ptr.hpp> diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 6a8de7bc4..2e53d0dc6 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1,8 +1,6 @@ #include "config.h" #include "local-store.hh" -#include "util.hh" #include "globals.hh" -#include "db.hh" #include "archive.hh" #include "pathlocks.hh" #include "aterm.hh" @@ -16,50 +14,12 @@ #include <sys/stat.h> #include <unistd.h> #include <utime.h> +#include <fcntl.h> namespace nix { -/* Nix database. */ -static Database nixDB; - - -/* Database tables. */ - -/* dbValidPaths :: Path -> () - - The existence of a key $p$ indicates that path $p$ is valid (that - is, produced by a succesful build). */ -static TableId dbValidPaths = 0; - -/* dbReferences :: Path -> [Path] - - This table lists the outgoing file system references for each - output path that has been built by a Nix derivation. These are - found by scanning the path for the hash components of input - paths. */ -static TableId dbReferences = 0; - -/* dbReferrers :: Path -> Path - - This table is just the reverse mapping of dbReferences. This table - can have duplicate keys, each corresponding value denoting a single - referrer. */ -static TableId dbReferrers = 0; - -/* dbDerivers :: Path -> [Path] - - This table lists the derivation used to build a path. There can - only be multiple such paths for fixed-output derivations (i.e., - derivations specifying an expected hash). 
*/ -static TableId dbDerivers = 0; - - -static void upgradeStore09(); -static void upgradeStore11(); - - void checkStoreNotSymlink() { if (getEnv("NIX_IGNORE_SYMLINK_STORE") == "1") return; @@ -78,81 +38,62 @@ void checkStoreNotSymlink() } -LocalStore::LocalStore(bool reserveSpace) +LocalStore::LocalStore() { substitutablePathsLoaded = false; + schemaPath = nixDBPath + "/schema"; + if (readOnlyMode) return; checkStoreNotSymlink(); - try { - Path reservedPath = nixDBPath + "/reserved"; - string s = querySetting("gc-reserved-space", ""); - int reservedSize; - if (!string2Int(s, reservedSize)) reservedSize = 1024 * 1024; - if (reserveSpace) { - struct stat st; - if (stat(reservedPath.c_str(), &st) == -1 || - st.st_size != reservedSize) - writeFile(reservedPath, string(reservedSize, 'X')); - } - else - deletePath(reservedPath); - } catch (SysError & e) { /* don't care about errors */ + Path globalLockPath = nixDBPath + "/big-lock"; + globalLock = openLockFile(globalLockPath.c_str(), true); + + if (!lockFile(globalLock, ltRead, false)) { + printMsg(lvlError, "waiting for the big Nix store lock..."); + lockFile(globalLock, ltRead, true); } - try { - nixDB.open(nixDBPath); - } catch (DbNoPermission & e) { - printMsg(lvlTalkative, "cannot access Nix database; continuing anyway"); - readOnlyMode = true; - return; - } - dbValidPaths = nixDB.openTable("validpaths"); - dbReferences = nixDB.openTable("references"); - dbReferrers = nixDB.openTable("referrers", true); /* must be sorted */ - dbDerivers = nixDB.openTable("derivers"); + createDirs(nixDBPath + "/info"); + createDirs(nixDBPath + "/referrer"); - int curSchema = 0; - Path schemaFN = nixDBPath + "/schema"; - if (pathExists(schemaFN)) { - string s = readFile(schemaFN); - if (!string2Int(s, curSchema)) - throw Error(format("`%1%' is corrupt") % schemaFN); - } + //printMsg(lvlTalkative, "cannot access Nix database; continuing anyway"); + //readOnlyMode = true; + int curSchema = getSchema(); if (curSchema > nixSchemaVersion) throw Error(format("current Nix store schema is version %1%, but I only support %2%") % curSchema % nixSchemaVersion); - - if (curSchema < nixSchemaVersion) { - if (curSchema == 0) /* new store */ - curSchema = nixSchemaVersion; - if (curSchema <= 1) - throw Error("your Nix store is no longer supported"); - if (curSchema <= 2) upgradeStore09(); - if (curSchema <= 3) upgradeStore11(); - writeFile(schemaFN, (format("%1%") % nixSchemaVersion).str()); + if (curSchema == 0) { /* new store */ + curSchema = nixSchemaVersion; + writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str()); } + if (curSchema == 1) throw Error("your Nix store is no longer supported"); + if (curSchema < nixSchemaVersion) upgradeStore12(); } LocalStore::~LocalStore() { - /* If the database isn't open, this is a NOP. */ try { - nixDB.close(); + flushDelayedUpdates(); } catch (...) 
{ ignoreException(); } } -void createStoreTransaction(Transaction & txn) +int LocalStore::getSchema() { - Transaction txn2(nixDB); - txn2.moveTo(txn); + int curSchema = 0; + if (pathExists(schemaPath)) { + string s = readFile(schemaPath); + if (!string2Int(s, curSchema)) + throw Error(format("`%1%' is corrupt") % schemaPath); + } + return curSchema; } @@ -173,7 +114,7 @@ void copyPath(const Path & src, const Path & dst, PathFilter & filter) } -static void _canonicalisePathMetaData(const Path & path, bool recurse) +void canonicalisePathMetaData(const Path & path, bool recurse) { checkInterrupt(); @@ -181,7 +122,7 @@ static void _canonicalisePathMetaData(const Path & path, bool recurse) if (lstat(path.c_str(), &st)) throw SysError(format("getting attributes of path `%1%'") % path); - /* Change ownership to the current uid. If its a symlink, use + /* Change ownership to the current uid. If it's a symlink, use lchown if available, otherwise don't bother. Wrong ownership of a symlink doesn't matter, since the owning user can't change the symlink and can't delete it because the directory is not @@ -225,14 +166,14 @@ static void _canonicalisePathMetaData(const Path & path, bool recurse) if (recurse && S_ISDIR(st.st_mode)) { Strings names = readDirectory(path); for (Strings::iterator i = names.begin(); i != names.end(); ++i) - _canonicalisePathMetaData(path + "/" + *i, true); + canonicalisePathMetaData(path + "/" + *i, true); } } void canonicalisePathMetaData(const Path & path) { - _canonicalisePathMetaData(path, true); + canonicalisePathMetaData(path, true); /* On platforms that don't have lchown(), the top-level path can't be a symlink, since we can't change its ownership. */ @@ -247,149 +188,278 @@ void canonicalisePathMetaData(const Path & path) } -bool isValidPathTxn(const Transaction & txn, const Path & path) +static Path infoFileFor(const Path & path) { - string s; - return nixDB.queryString(txn, dbValidPaths, path, s); + string baseName = baseNameOf(path); + return (format("%1%/info/%2%") % nixDBPath % baseName).str(); } -bool LocalStore::isValidPath(const Path & path) +static Path referrersFileFor(const Path & path) { - return isValidPathTxn(noTxn, path); + string baseName = baseNameOf(path); + return (format("%1%/referrer/%2%") % nixDBPath % baseName).str(); } -PathSet LocalStore::queryValidPaths() +static Path tmpFileForAtomicUpdate(const Path & path) { - Paths paths; - nixDB.enumTable(noTxn, dbValidPaths, paths); - return PathSet(paths.begin(), paths.end()); + return (format("%1%/.%2%.%3%") % dirOf(path) % getpid() % baseNameOf(path)).str(); } -static string addPrefix(const string & prefix, const string & s) +static void appendReferrer(const Path & from, const Path & to, bool lock) { - return prefix + string(1, (char) 0) + s; + Path referrersFile = referrersFileFor(from); + + PathLocks referrersLock; + if (lock) { + referrersLock.lockPaths(singleton<PathSet, Path>(referrersFile)); + referrersLock.setDeletion(true); + } + + AutoCloseFD fd = open(referrersFile.c_str(), O_WRONLY | O_APPEND | O_CREAT, 0666); + if (fd == -1) throw SysError(format("opening file `%1%'") % referrersFile); + + string s = " " + to; + writeFull(fd, (const unsigned char *) s.c_str(), s.size()); } -static string stripPrefix(const string & prefix, const string & s) +/* Atomically update the referrers file. If `purge' is true, the set + of referrers is set to `referrers'. Otherwise, the current set of + referrers is purged of invalid paths. 
*/ +void LocalStore::rewriteReferrers(const Path & path, bool purge, PathSet referrers) { - if (s.size() <= prefix.size() || - string(s, 0, prefix.size()) != prefix || - s[prefix.size()] != 0) - throw Error(format("string `%1%' is missing prefix `%2%'") - % s % prefix); - return string(s, prefix.size() + 1); + Path referrersFile = referrersFileFor(path); + + PathLocks referrersLock(singleton<PathSet, Path>(referrersFile)); + referrersLock.setDeletion(true); + + if (purge) + /* queryReferrers() purges invalid paths, so that's all we + need. */ + queryReferrers(path, referrers); + + Path tmpFile = tmpFileForAtomicUpdate(referrersFile); + + AutoCloseFD fd = open(tmpFile.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 0666); + if (fd == -1) throw SysError(format("opening file `%1%'") % referrersFile); + + string s; + foreach (PathSet::const_iterator, i, referrers) { + s += " "; s += *i; + } + + writeFull(fd, (const unsigned char *) s.c_str(), s.size()); + + fd.close(); /* for Windows; can't rename open file */ + + if (rename(tmpFile.c_str(), referrersFile.c_str()) == -1) + throw SysError(format("cannot rename `%1%' to `%2%'") % tmpFile % referrersFile); } -static PathSet getReferrers(const Transaction & txn, const Path & storePath) +void LocalStore::flushDelayedUpdates() { - PathSet referrers; - Strings keys; - nixDB.enumTable(txn, dbReferrers, keys, storePath + string(1, (char) 0)); - for (Strings::iterator i = keys.begin(); i != keys.end(); ++i) - referrers.insert(stripPrefix(storePath, *i)); - return referrers; + foreach (PathSet::iterator, i, delayedUpdates) { + rewriteReferrers(*i, true, PathSet()); + } + delayedUpdates.clear(); } -void setReferences(const Transaction & txn, const Path & storePath, - const PathSet & references) +void LocalStore::registerValidPath(const Path & path, + const Hash & hash, const PathSet & references, + const Path & deriver) +{ + ValidPathInfo info; + info.path = path; + info.hash = hash; + info.references = references; + info.deriver = deriver; + registerValidPath(info); +} + + +void LocalStore::registerValidPath(const ValidPathInfo & info, bool ignoreValidity) { - /* For invalid paths, we can only clear the references. */ - if (references.size() > 0 && !isValidPathTxn(txn, storePath)) - throw Error( - format("cannot set references for invalid path `%1%'") % storePath); + Path infoFile = infoFileFor(info.path); - Paths oldReferences; - nixDB.queryStrings(txn, dbReferences, storePath, oldReferences); + ValidPathInfo oldInfo; + if (pathExists(infoFile)) oldInfo = queryPathInfo(info.path); - PathSet oldReferences2(oldReferences.begin(), oldReferences.end()); - if (oldReferences2 == references) return; - - nixDB.setStrings(txn, dbReferences, storePath, - Paths(references.begin(), references.end())); - - /* Update the referrers mappings of all new referenced paths. */ - for (PathSet::const_iterator i = references.begin(); - i != references.end(); ++i) - if (oldReferences2.find(*i) == oldReferences2.end()) - nixDB.setString(txn, dbReferrers, addPrefix(*i, storePath), ""); - - /* Remove referrer mappings from paths that are no longer - references. */ - for (Paths::iterator i = oldReferences.begin(); - i != oldReferences.end(); ++i) - if (references.find(*i) == references.end()) - nixDB.delPair(txn, dbReferrers, addPrefix(*i, storePath)); + /* Note that it's possible for infoFile to already exist. */ + + /* Acquire a lock on each referrer file. This prevents those + paths from being invalidated. 
(It would be a violation of the + store invariants if we registered info.path as valid while some + of its references are invalid.) NB: there can be no deadlock + here since we're acquiring the locks in sorted order. */ + PathSet lockNames; + foreach (PathSet::const_iterator, i, info.references) + if (*i != info.path) lockNames.insert(referrersFileFor(*i)); + PathLocks referrerLocks(lockNames); + referrerLocks.setDeletion(true); + + string refs; + foreach (PathSet::const_iterator, i, info.references) { + if (!refs.empty()) refs += " "; + refs += *i; + + if (!ignoreValidity && *i != info.path && !isValidPath(*i)) + throw Error(format("cannot register `%1%' as valid, because its reference `%2%' isn't valid") + % info.path % *i); + + /* Update the referrer mapping for *i. This must be done + before the info file is written to maintain the invariant + that if `path' is a valid path, then all its references + have referrer mappings back to `path'. A " " is prefixed + to separate it from the previous entry. It's not suffixed + to deal with interrupted partial writes to this file. */ + if (oldInfo.references.find(*i) == oldInfo.references.end()) + appendReferrer(*i, info.path, false); + } + + string s = (format( + "Hash: sha256:%1%\n" + "References: %2%\n" + "Deriver: %3%\n" + "Registered-At: %4%\n") + % printHash(info.hash) % refs % info.deriver % + (oldInfo.registrationTime ? oldInfo.registrationTime : time(0))).str(); + + /* Atomically rewrite the info file. */ + Path tmpFile = tmpFileForAtomicUpdate(infoFile); + writeFile(tmpFile, s); + if (rename(tmpFile.c_str(), infoFile.c_str()) == -1) + throw SysError(format("cannot rename `%1%' to `%2%'") % tmpFile % infoFile); + + pathInfoCache[info.path] = info; } -void queryReferences(const Transaction & txn, - const Path & storePath, PathSet & references) +Hash parseHashField(const Path & path, const string & s) { - Paths references2; - if (!isValidPathTxn(txn, storePath)) - throw Error(format("path `%1%' is not valid") % storePath); - nixDB.queryStrings(txn, dbReferences, storePath, references2); - references.insert(references2.begin(), references2.end()); + string::size_type colon = s.find(':'); + if (colon == string::npos) + throw Error(format("corrupt hash `%1%' in valid-path entry for `%2%'") + % s % path); + HashType ht = parseHashType(string(s, 0, colon)); + if (ht == htUnknown) + throw Error(format("unknown hash type `%1%' in valid-path entry for `%2%'") + % string(s, 0, colon) % path); + return parseHash(ht, string(s, colon + 1)); } -void LocalStore::queryReferences(const Path & storePath, - PathSet & references) +ValidPathInfo LocalStore::queryPathInfo(const Path & path) { - nix::queryReferences(noTxn, storePath, references); + ValidPathInfo res; + res.path = path; + + assertStorePath(path); + + std::map<Path, ValidPathInfo>::iterator lookup = pathInfoCache.find(path); + if (lookup != pathInfoCache.end()) return lookup->second; + + //printMsg(lvlError, "queryPathInfo: " + path); + + /* Read the info file. */ + Path infoFile = infoFileFor(path); + if (!pathExists(infoFile)) + throw Error(format("path `%1%' is not valid") % path); + string info = readFile(infoFile); + + /* Parse it. 
*/ + Strings lines = tokenizeString(info, "\n"); + + for (Strings::iterator i = lines.begin(); i != lines.end(); ++i) { + unsigned int p = i->find(':'); + if (p == string::npos) continue; /* bad line */ + string name(*i, 0, p); + string value(*i, p + 2); + if (name == "References") { + Strings refs = tokenizeString(value, " "); + res.references = PathSet(refs.begin(), refs.end()); + } else if (name == "Deriver") { + res.deriver = value; + } else if (name == "Hash") { + res.hash = parseHashField(path, value); + } else if (name == "Registered-At") { + int n = 0; + string2Int(value, n); + res.registrationTime = n; + } + } + + return pathInfoCache[path] = res; } -void queryReferrers(const Transaction & txn, - const Path & storePath, PathSet & referrers) +bool LocalStore::isValidPath(const Path & path) { - if (!isValidPathTxn(txn, storePath)) - throw Error(format("path `%1%' is not valid") % storePath); - PathSet referrers2 = getReferrers(txn, storePath); - referrers.insert(referrers2.begin(), referrers2.end()); + return pathExists(infoFileFor(path)); } -void LocalStore::queryReferrers(const Path & storePath, - PathSet & referrers) +PathSet LocalStore::queryValidPaths() { - nix::queryReferrers(noTxn, storePath, referrers); + PathSet paths; + Strings entries = readDirectory(nixDBPath + "/info"); + for (Strings::iterator i = entries.begin(); i != entries.end(); ++i) + paths.insert(nixStore + "/" + *i); + return paths; } -void setDeriver(const Transaction & txn, const Path & storePath, - const Path & deriver) +void LocalStore::queryReferences(const Path & path, + PathSet & references) { - assertStorePath(storePath); - if (deriver == "") return; - assertStorePath(deriver); - if (!isValidPathTxn(txn, storePath)) - throw Error(format("path `%1%' is not valid") % storePath); - nixDB.setString(txn, dbDerivers, storePath, deriver); + ValidPathInfo info = queryPathInfo(path); + references.insert(info.references.begin(), info.references.end()); } -static Path queryDeriver(const Transaction & txn, const Path & storePath) +bool LocalStore::queryReferrersInternal(const Path & path, PathSet & referrers) { - if (!isValidPathTxn(txn, storePath)) - throw Error(format("path `%1%' is not valid") % storePath); - Path deriver; - if (nixDB.queryString(txn, dbDerivers, storePath, deriver)) - return deriver; - else - return ""; + bool allValid = true; + + if (!isValidPath(path)) + throw Error(format("path `%1%' is not valid") % path); + + /* No locking is necessary here: updates are only done by + appending or by atomically replacing the file. When appending, + there is a possibility that we see a partial entry, but it will + just be filtered out below (the partially written path will not + be valid, so it will be ignored). */ + + Path referrersFile = referrersFileFor(path); + if (!pathExists(referrersFile)) return true; + + AutoCloseFD fd = open(referrersFile.c_str(), O_RDONLY); + if (fd == -1) throw SysError(format("opening file `%1%'") % referrersFile); + + Paths refs = tokenizeString(readFile(fd), " "); + + for (Paths::iterator i = refs.begin(); i != refs.end(); ++i) + /* Referrers can be invalid (see registerValidPath() for the + invariant), so we only return one if it is valid. 
*/ + if (isStorePath(*i) && isValidPath(*i)) referrers.insert(*i); else allValid = false; + + return allValid; +} + + +void LocalStore::queryReferrers(const Path & path, PathSet & referrers) +{ + queryReferrersInternal(path, referrers); } Path LocalStore::queryDeriver(const Path & path) { - return nix::queryDeriver(noTxn, path); + return queryPathInfo(path).deriver; } @@ -423,95 +493,92 @@ bool LocalStore::hasSubstitutes(const Path & path) } -static void setHash(const Transaction & txn, const Path & storePath, - const Hash & hash) -{ - assert(hash.type == htSHA256); - nixDB.setString(txn, dbValidPaths, storePath, "sha256:" + printHash(hash)); -} - - -static Hash queryHash(const Transaction & txn, const Path & storePath) +Hash LocalStore::queryPathHash(const Path & path) { - string s; - nixDB.queryString(txn, dbValidPaths, storePath, s); - string::size_type colon = s.find(':'); - if (colon == string::npos) - throw Error(format("corrupt hash `%1%' in valid-path entry for `%2%'") - % s % storePath); - HashType ht = parseHashType(string(s, 0, colon)); - if (ht == htUnknown) - throw Error(format("unknown hash type `%1%' in valid-path entry for `%2%'") - % string(s, 0, colon) % storePath); - return parseHash(ht, string(s, colon + 1)); + return queryPathInfo(path).hash; } -Hash LocalStore::queryPathHash(const Path & path) +static void dfsVisit(std::map<Path, ValidPathInfo> & infos, + const Path & path, PathSet & visited, Paths & sorted) { - if (!isValidPath(path)) - throw Error(format("path `%1%' is not valid") % path); - return queryHash(noTxn, path); -} + if (visited.find(path) != visited.end()) return; + visited.insert(path); + ValidPathInfo & info(infos[path]); + + for (PathSet::iterator i = info.references.begin(); + i != info.references.end(); ++i) + if (infos.find(*i) != infos.end()) + dfsVisit(infos, *i, visited, sorted); -void registerValidPath(const Transaction & txn, - const Path & path, const Hash & hash, const PathSet & references, - const Path & deriver) -{ - ValidPathInfo info; - info.path = path; - info.hash = hash; - info.references = references; - info.deriver = deriver; - ValidPathInfos infos; - infos.push_back(info); - registerValidPaths(txn, infos); + sorted.push_back(path); } -void registerValidPaths(const Transaction & txn, - const ValidPathInfos & infos) +void LocalStore::registerValidPaths(const ValidPathInfos & infos) { - PathSet newPaths; - for (ValidPathInfos::const_iterator i = infos.begin(); - i != infos.end(); ++i) - newPaths.insert(i->path); - - for (ValidPathInfos::const_iterator i = infos.begin(); - i != infos.end(); ++i) - { - assertStorePath(i->path); + std::map<Path, ValidPathInfo> infosMap; + + /* Sort the paths topologically under the references relation, so + that if path A is referenced by B, then A is registered before + B. */ + for (ValidPathInfos::const_iterator i = infos.begin(); i != infos.end(); ++i) + infosMap[i->path] = *i; - debug(format("registering path `%1%'") % i->path); - setHash(txn, i->path, i->hash); + PathSet visited; + Paths sorted; + for (ValidPathInfos::const_iterator i = infos.begin(); i != infos.end(); ++i) + dfsVisit(infosMap, i->path, visited, sorted); - setReferences(txn, i->path, i->references); - - /* Check that all referenced paths are also valid (or about to - become valid). 
*/ - for (PathSet::iterator j = i->references.begin(); - j != i->references.end(); ++j) - if (!isValidPathTxn(txn, *j) && newPaths.find(*j) == newPaths.end()) - throw Error(format("cannot register path `%1%' as valid, since its reference `%2%' is invalid") - % i->path % *j); - - setDeriver(txn, i->path, i->deriver); - } + for (Paths::iterator i = sorted.begin(); i != sorted.end(); ++i) + registerValidPath(infosMap[*i]); } /* Invalidate a path. The caller is responsible for checking that there are no referrers. */ -static void invalidatePath(Transaction & txn, const Path & path) +void LocalStore::invalidatePath(const Path & path) { debug(format("invalidating path `%1%'") % path); - /* Clear the `references' entry for this path, as well as the - inverse `referrers' entries, and the `derivers' entry. */ - setReferences(txn, path, PathSet()); - nixDB.delPair(txn, dbDerivers, path); - nixDB.delPair(txn, dbValidPaths, path); + ValidPathInfo info; + + if (pathExists(infoFileFor(path))) { + info = queryPathInfo(path); + + /* Remove the info file. */ + Path p = infoFileFor(path); + if (unlink(p.c_str()) == -1) + throw SysError(format("unlinking `%1%'") % p); + } + + /* Remove the referrers file for `path'. */ + Path p = referrersFileFor(path); + if (pathExists(p) && unlink(p.c_str()) == -1) + throw SysError(format("unlinking `%1%'") % p); + + /* Clear `path' from the info cache. */ + pathInfoCache.erase(path); + delayedUpdates.erase(path); + + /* Cause the referrer files for each path referenced by this one + to be updated. This has to happen after removing the info file + to preserve the invariant (see registerValidPath()). + + The referrer files are updated lazily in flushDelayedUpdates() + to prevent quadratic performance in the garbage collector + (i.e., when N referrers to some path X are deleted, we have to + rewrite the referrers file for X N times, causing O(N^2) I/O). + + What happens if we die before the referrer file can be updated? + That's not a problem, because stale (invalid) entries in the + referrer file are ignored by queryReferrers(). Thus a referrer + file is allowed to have stale entries; removing them is just an + optimisation. verifyStore() gets rid of them eventually. + */ + foreach (PathSet::iterator, i, info.references) + if (*i != path) delayedUpdates.insert(*i); } @@ -548,9 +615,7 @@ Path LocalStore::addToStore(const Path & _srcPath, bool fixed, canonicalisePathMetaData(dstPath); - Transaction txn(nixDB); - registerValidPath(txn, dstPath, h, PathSet(), ""); - txn.commit(); + registerValidPath(dstPath, h, PathSet(), ""); } outputLock.setDeletion(true); @@ -579,10 +644,8 @@ Path LocalStore::addTextToStore(const string & suffix, const string & s, canonicalisePathMetaData(dstPath); - Transaction txn(nixDB); - registerValidPath(txn, dstPath, + registerValidPath(dstPath, hashPath(htSHA256, dstPath), references, ""); - txn.commit(); } outputLock.setDeletion(true); @@ -774,13 +837,11 @@ Path LocalStore::importPath(bool requireSignature, Source & source) canonicalisePathMetaData(dstPath); - Transaction txn(nixDB); /* !!! if we were clever, we could prevent the hashPath() here. 
*/ - if (!isValidPath(deriver)) deriver = ""; - registerValidPath(txn, dstPath, + if (deriver != "" && !isValidPath(deriver)) deriver = ""; + registerValidPath(dstPath, hashPath(htSHA256, dstPath), references, deriver); - txn.commit(); } outputLock.setDeletion(true); @@ -790,357 +851,142 @@ Path LocalStore::importPath(bool requireSignature, Source & source) } -void deleteFromStore(const Path & _path, unsigned long long & bytesFreed) +void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFreed) { bytesFreed = 0; - Path path(canonPath(_path)); assertStorePath(path); - Transaction txn(nixDB); - if (isValidPathTxn(txn, path)) { - PathSet referrers = getReferrers(txn, path); - for (PathSet::iterator i = referrers.begin(); - i != referrers.end(); ++i) - if (*i != path && isValidPathTxn(txn, *i)) - throw PathInUse(format("cannot delete path `%1%' because it is in use by path `%2%'") % path % *i); - invalidatePath(txn, path); + if (isValidPath(path)) { + /* Acquire a lock on the referrers file to prevent new + referrers to this path from appearing while we're deleting + it. */ + PathLocks referrersLock(singleton<PathSet, Path>(referrersFileFor(path))); + referrersLock.setDeletion(true); + PathSet referrers; queryReferrers(path, referrers); + referrers.erase(path); /* ignore self-references */ + if (!referrers.empty()) + throw PathInUse(format("cannot delete path `%1%' because it is in use by `%2%'") + % path % showPaths(referrers)); + invalidatePath(path); } - txn.commit(); deletePathWrapped(path, bytesFreed); } -void verifyStore(bool checkContents) +void LocalStore::verifyStore(bool checkContents) { - Transaction txn(nixDB); - - + /* Check whether all valid paths actually exist. */ printMsg(lvlInfo, "checking path existence"); - Paths paths; - PathSet validPaths; - nixDB.enumTable(txn, dbValidPaths, paths); - - for (Paths::iterator i = paths.begin(); i != paths.end(); ++i) { + PathSet validPaths2 = queryValidPaths(), validPaths; + + for (PathSet::iterator i = validPaths2.begin(); i != validPaths2.end(); ++i) { checkInterrupt(); - if (!pathExists(*i)) { - printMsg(lvlError, format("path `%1%' disappeared") % *i); - invalidatePath(txn, *i); - } else if (!isStorePath(*i)) { + if (!isStorePath(*i)) { printMsg(lvlError, format("path `%1%' is not in the Nix store") % *i); - invalidatePath(txn, *i); - } else { - if (checkContents) { - debug(format("checking contents of `%1%'") % *i); - Hash expected = queryHash(txn, *i); - Hash current = hashPath(expected.type, *i); - if (current != expected) { - printMsg(lvlError, format("path `%1%' was modified! " - "expected hash `%2%', got `%3%'") - % *i % printHash(expected) % printHash(current)); - } - } + invalidatePath(*i); + } else if (!pathExists(*i)) { + printMsg(lvlError, format("path `%1%' disappeared") % *i); + invalidatePath(*i); + } else validPaths.insert(*i); - } } - /* Check the cleanup invariant: only valid paths can have - `references', `referrers', or `derivers' entries. */ - + /* Check the store path meta-information. */ + printMsg(lvlInfo, "checking path meta-information"); - /* Check the `derivers' table. 
*/ - printMsg(lvlInfo, "checking the derivers table"); - Paths deriversKeys; - nixDB.enumTable(txn, dbDerivers, deriversKeys); - for (Paths::iterator i = deriversKeys.begin(); - i != deriversKeys.end(); ++i) - { - if (validPaths.find(*i) == validPaths.end()) { - printMsg(lvlError, format("removing deriver entry for invalid path `%1%'") - % *i); - nixDB.delPair(txn, dbDerivers, *i); - } - else { - Path deriver = queryDeriver(txn, *i); - if (!isStorePath(deriver)) { - printMsg(lvlError, format("removing corrupt deriver `%1%' for `%2%'") - % deriver % *i); - nixDB.delPair(txn, dbDerivers, *i); + std::map<Path, PathSet> referrersCache; + + for (PathSet::iterator i = validPaths.begin(); i != validPaths.end(); ++i) { + bool update = false; + ValidPathInfo info = queryPathInfo(*i); + + /* Check the references: each reference should be valid, and + it should have a matching referrer. */ + for (PathSet::iterator j = info.references.begin(); + j != info.references.end(); ++j) + { + if (referrersCache.find(*j) == referrersCache.end()) + queryReferrers(*j, referrersCache[*j]); + if (referrersCache[*j].find(*i) == referrersCache[*j].end()) { + printMsg(lvlError, format("adding missing referrer mapping from `%1%' to `%2%'") + % *j % *i); + appendReferrer(*j, *i, true); } - } - } - - - /* Check the `references' table. */ - printMsg(lvlInfo, "checking the references table"); - Paths referencesKeys; - nixDB.enumTable(txn, dbReferences, referencesKeys); - for (Paths::iterator i = referencesKeys.begin(); - i != referencesKeys.end(); ++i) - { - if (validPaths.find(*i) == validPaths.end()) { - printMsg(lvlError, format("removing references entry for invalid path `%1%'") - % *i); - setReferences(txn, *i, PathSet()); - } - else { - PathSet references; - queryReferences(txn, *i, references); - for (PathSet::iterator j = references.begin(); - j != references.end(); ++j) - { - string dummy; - if (!nixDB.queryString(txn, dbReferrers, addPrefix(*j, *i), dummy)) { - printMsg(lvlError, format("adding missing referrer mapping from `%1%' to `%2%'") - % *j % *i); - nixDB.setString(txn, dbReferrers, addPrefix(*j, *i), ""); - } - if (validPaths.find(*j) == validPaths.end()) { - printMsg(lvlError, format("incomplete closure: `%1%' needs missing `%2%'") - % *i % *j); - } + if (validPaths.find(*j) == validPaths.end()) { + printMsg(lvlError, format("incomplete closure: `%1%' needs missing `%2%'") + % *i % *j); + /* nothing we can do about it... */ } } - } - /* Check the `referrers' table. */ - printMsg(lvlInfo, "checking the referrers table"); - Strings referrers; - nixDB.enumTable(txn, dbReferrers, referrers); - for (Strings::iterator i = referrers.begin(); i != referrers.end(); ++i) { - - /* Decode the entry (it's a tuple of paths). */ - string::size_type nul = i->find((char) 0); - if (nul == string::npos) { - printMsg(lvlError, format("removing bad referrer table entry `%1%'") % *i); - nixDB.delPair(txn, dbReferrers, *i); - continue; - } - Path to(*i, 0, nul); - Path from(*i, nul + 1); - - if (validPaths.find(to) == validPaths.end()) { - printMsg(lvlError, format("removing referrer entry from `%1%' to invalid `%2%'") - % from % to); - nixDB.delPair(txn, dbReferrers, *i); + /* Check the deriver. (Note that the deriver doesn't have to + be a valid path.) 
*/ + if (!info.deriver.empty() && !isStorePath(info.deriver)) { + info.deriver = ""; + update = true; } - else if (validPaths.find(from) == validPaths.end()) { - printMsg(lvlError, format("removing referrer entry from invalid `%1%' to `%2%'") - % from % to); - nixDB.delPair(txn, dbReferrers, *i); - } - - else { - PathSet references; - queryReferences(txn, from, references); - if (find(references.begin(), references.end(), to) == references.end()) { - printMsg(lvlError, format("adding missing referrer mapping from `%1%' to `%2%'") - % from % to); - references.insert(to); - setReferences(txn, from, references); + /* Check the content hash (optionally - slow). */ + if (checkContents) { + debug(format("checking contents of `%1%'") % *i); + Hash current = hashPath(info.hash.type, *i); + if (current != info.hash) { + printMsg(lvlError, format("path `%1%' was modified! " + "expected hash `%2%', got `%3%'") + % *i % printHash(info.hash) % printHash(current)); } } - - } - - txn.commit(); -} - - -typedef std::map<Hash, std::pair<Path, ino_t> > HashToPath; - - -static void makeWritable(const Path & path) -{ - struct stat st; - if (lstat(path.c_str(), &st)) - throw SysError(format("getting attributes of path `%1%'") % path); - if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1) - throw SysError(format("changing writability of `%1%'") % path); -} - - -static void hashAndLink(bool dryRun, HashToPath & hashToPath, - OptimiseStats & stats, const Path & path) -{ - struct stat st; - if (lstat(path.c_str(), &st)) - throw SysError(format("getting attributes of path `%1%'") % path); - - /* Sometimes SNAFUs can cause files in the Nix store to be - modified, in particular when running programs as root under - NixOS (example: $fontconfig/var/cache being modified). Skip - those files. */ - if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) { - printMsg(lvlError, format("skipping suspicious writable file `%1%'") % path); - return; + if (update) registerValidPath(info); } - /* We can hard link regular files and symlinks. */ - if (S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)) { - - /* Hash the file. Note that hashPath() returns the hash over - the NAR serialisation, which includes the execute bit on - the file. Thus, executable and non-executable files with - the same contents *won't* be linked (which is good because - otherwise the permissions would be screwed up). - - Also note that if `path' is a symlink, then we're hashing - the contents of the symlink (i.e. the result of - readlink()), not the contents of the target (which may not - even exist). */ - Hash hash = hashPath(htSHA256, path); - stats.totalFiles++; - printMsg(lvlDebug, format("`%1%' has hash `%2%'") % path % printHash(hash)); - - std::pair<Path, ino_t> prevPath = hashToPath[hash]; - - if (prevPath.first == "") { - hashToPath[hash] = std::pair<Path, ino_t>(path, st.st_ino); - return; - } - - /* Yes! We've seen a file with the same contents. Replace - the current file with a hard link to that file. */ - stats.sameContents++; - if (prevPath.second == st.st_ino) { - printMsg(lvlDebug, format("`%1%' is already linked to `%2%'") % path % prevPath.first); - return; - } - - if (!dryRun) { - - printMsg(lvlTalkative, format("linking `%1%' to `%2%'") % path % prevPath.first); - - Path tempLink = (format("%1%.tmp-%2%-%3%") - % path % getpid() % rand()).str(); - - /* Make the containing directory writable, but only if - it's not the store itself (we don't want or need to - mess with its permissions). 
*/ - bool mustToggle = !isStorePath(path); - if (mustToggle) makeWritable(dirOf(path)); - - if (link(prevPath.first.c_str(), tempLink.c_str()) == -1) - throw SysError(format("cannot link `%1%' to `%2%'") - % tempLink % prevPath.first); - - /* Atomically replace the old file with the new hard link. */ - if (rename(tempLink.c_str(), path.c_str()) == -1) - throw SysError(format("cannot rename `%1%' to `%2%'") - % tempLink % path); - - /* Make the directory read-only again and reset its - timestamp back to 0. */ - if (mustToggle) _canonicalisePathMetaData(dirOf(path), false); - - } else - printMsg(lvlTalkative, format("would link `%1%' to `%2%'") % path % prevPath.first); - - stats.filesLinked++; - stats.bytesFreed += st.st_size; - } - - if (S_ISDIR(st.st_mode)) { - Strings names = readDirectory(path); - for (Strings::iterator i = names.begin(); i != names.end(); ++i) - hashAndLink(dryRun, hashToPath, stats, path + "/" + *i); - } -} - - -void LocalStore::optimiseStore(bool dryRun, OptimiseStats & stats) -{ - HashToPath hashToPath; + referrersCache.clear(); - Paths paths; - PathSet validPaths; - nixDB.enumTable(noTxn, dbValidPaths, paths); - - for (Paths::iterator i = paths.begin(); i != paths.end(); ++i) { - addTempRoot(*i); - if (!isValidPath(*i)) continue; /* path was GC'ed, probably */ - startNest(nest, lvlChatty, format("hashing files in `%1%'") % *i); - hashAndLink(dryRun, hashToPath, stats, *i); - } -} + /* Check the referrers. */ + printMsg(lvlInfo, "checking referrers"); -/* Upgrade from schema 2 (0.8 <= Nix <= 0.9) to schema 3 (Nix >= - 0.10). The only thing to do here is to upgrade the old `referer' - table (which causes quadratic complexity in some cases) to the new - (and properly spelled) `referrer' table. */ -static void upgradeStore09() -{ - /* !!! we should disallow concurrent upgrades */ + std::map<Path, PathSet> referencesCache; - if (!pathExists(nixDBPath + "/referers")) return; - - printMsg(lvlError, "upgrading Nix store to new schema (this may take a while)..."); - - Transaction txn(nixDB); - - std::cerr << "converting referers to referrers..."; - - TableId dbReferers = nixDB.openTable("referers"); /* sic! */ - - Paths referersKeys; - nixDB.enumTable(txn, dbReferers, referersKeys); - - int n = 0; - for (Paths::iterator i = referersKeys.begin(); - i != referersKeys.end(); ++i) - { - Paths referers; - nixDB.queryStrings(txn, dbReferers, *i, referers); - for (Paths::iterator j = referers.begin(); - j != referers.end(); ++j) - nixDB.setString(txn, dbReferrers, addPrefix(*i, *j), ""); - if (++n % 1000 == 0) { - txn.commit(); - txn.begin(nixDB); - std::cerr << "|"; + Strings entries = readDirectory(nixDBPath + "/referrer"); + for (Strings::iterator i = entries.begin(); i != entries.end(); ++i) { + Path from = nixStore + "/" + *i; + + if (validPaths.find(from) == validPaths.end()) { + printMsg(lvlError, format("removing referrers file for invalid `%1%'") % from); + Path p = referrersFileFor(from); + if (unlink(p.c_str()) == -1) + throw SysError(format("unlinking `%1%'") % p); + continue; } - std::cerr << "."; - } - - txn.commit(); - - std::cerr << std::endl; - nixDB.closeTable(dbReferers); + PathSet referrers; + bool allValid = queryReferrersInternal(from, referrers); + bool update = false; - nixDB.deleteTable("referers"); -} - - -/* Upgrade from schema 3 (Nix 0.10) to schema 4 (Nix >= 0.11). The - only thing to do here is to delete the substitutes table and get - rid of invalid but substitutable references/referrers. 
*/ -static void upgradeStore11() -{ - if (!pathExists(nixDBPath + "/substitutes")) return; - - printMsg(lvlError, "upgrading Nix store to new schema (this may take a while)..."); + if (!allValid) { + printMsg(lvlError, format("removing some stale referrers for `%1%'") % from); + update = true; + } - Transaction txn(nixDB); - TableId dbSubstitutes = nixDB.openTable("substitutes"); + /* Each referrer should have a matching reference. */ + PathSet referrersNew; + for (PathSet::iterator j = referrers.begin(); j != referrers.end(); ++j) { + if (referencesCache.find(*j) == referencesCache.end()) + queryReferences(*j, referencesCache[*j]); + if (referencesCache[*j].find(from) == referencesCache[*j].end()) { + printMsg(lvlError, format("removing unexpected referrer mapping from `%1%' to `%2%'") + % from % *j); + update = true; + } else referrersNew.insert(*j); + } - Paths subKeys; - nixDB.enumTable(txn, dbSubstitutes, subKeys); - for (Paths::iterator i = subKeys.begin(); i != subKeys.end(); ++i) { - if (!isValidPathTxn(txn, *i)) - invalidatePath(txn, *i); + if (update) rewriteReferrers(from, false, referrersNew); } - - txn.commit(); - nixDB.closeTable(dbSubstitutes); - nixDB.deleteTable("substitutes"); } diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index 9e6130013..57d9d765e 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -4,18 +4,16 @@ #include <string> #include "store-api.hh" +#include "util.hh" namespace nix { -class Transaction; - - /* Nix store and database schema version. Version 1 (or 0) was Nix <= 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10. - Version 4 is Nix 0.11. */ -const int nixSchemaVersion = 4; + Version 4 is Nix 0.11. Version 5 is Nix 0.12*/ +const int nixSchemaVersion = 5; extern string drvsLogDir; @@ -43,15 +41,9 @@ private: public: - /* Open the database environment. If `reserveSpace' is true, make - sure that a big empty file exists in /nix/var/nix/db/reserved. - If `reserveSpace' is false, delete this file if it exists. The - idea is that on normal operation, the file exists; but when we - run the garbage collector, it is deleted. This is to ensure - that the garbage collector has a small amount of disk space - available, which is required to open the Berkeley DB - environment. */ - LocalStore(bool reserveSpace); + /* Initialise the local store, upgrading the schema if + necessary. */ + LocalStore(); ~LocalStore(); @@ -100,32 +92,62 @@ public: void collectGarbage(GCAction action, const PathSet & pathsToDelete, bool ignoreLiveness, PathSet & result, unsigned long long & bytesFreed); + /* Delete a path from the Nix store. */ + void deleteFromStore(const Path & path, unsigned long long & bytesFreed); + /* Optimise the disk space usage of the Nix store by hard-linking files with the same contents. */ void optimiseStore(bool dryRun, OptimiseStats & stats); -}; + /* Check the integrity of the Nix store. */ + void verifyStore(bool checkContents); -/* Get a transaction object. */ -void createStoreTransaction(Transaction & txn); + /* Register the validity of a path, i.e., that `path' exists, that + the paths referenced by it exists, and in the case of an output + path of a derivation, that it has been produced by a succesful + execution of the derivation (or something equivalent). Also + register the hash of the file system contents of the path. The + hash must be a SHA-256 hash. 
*/ + void registerValidPath(const Path & path, + const Hash & hash, const PathSet & references, const Path & deriver); -/* Copy a path recursively. */ -void copyPath(const Path & src, const Path & dst); + void registerValidPaths(const ValidPathInfos & infos); + +private: + + Path schemaPath; + + /* Lock file used for upgrading. */ + AutoCloseFD globalLock; + + /* !!! The cache can grow very big. Maybe it should be pruned + every once in a while. */ + std::map<Path, ValidPathInfo> pathInfoCache; -/* Register the validity of a path, i.e., that `path' exists, that the - paths referenced by it exists, and in the case of an output path of - a derivation, that it has been produced by a succesful execution of - the derivation (or something equivalent). Also register the hash - of the file system contents of the path. The hash must be a - SHA-256 hash. */ -void registerValidPath(const Transaction & txn, - const Path & path, const Hash & hash, const PathSet & references, - const Path & deriver); + /* Store paths for which the referrers file must be purged. */ + PathSet delayedUpdates; -typedef list<ValidPathInfo> ValidPathInfos; + int getSchema(); -void registerValidPaths(const Transaction & txn, - const ValidPathInfos & infos); + void registerValidPath(const ValidPathInfo & info, bool ignoreValidity = false); + + ValidPathInfo queryPathInfo(const Path & path); + + void rewriteReferrers(const Path & path, bool purge, PathSet referrers); + + void flushDelayedUpdates(); + + bool queryReferrersInternal(const Path & path, PathSet & referrers); + + void invalidatePath(const Path & path); + + void upgradeStore12(); + +}; + + +/* Copy a path recursively. */ +void copyPath(const Path & src, const Path & dst); /* "Fix", or canonicalise, the meta-data of the files in a store path after it has been built. In particular: @@ -137,25 +159,10 @@ void registerValidPaths(const Transaction & txn, in a setuid Nix installation. */ void canonicalisePathMetaData(const Path & path); -/* Checks whether a path is valid. */ -bool isValidPathTxn(const Transaction & txn, const Path & path); - -/* Sets the set of outgoing FS references for a store path. Use with - care! */ -void setReferences(const Transaction & txn, const Path & path, - const PathSet & references); - -/* Sets the deriver of a store path. Use with care! */ -void setDeriver(const Transaction & txn, const Path & path, - const Path & deriver); - -/* Delete a value from the nixStore directory. */ -void deleteFromStore(const Path & path, unsigned long long & bytesFreed); +void canonicalisePathMetaData(const Path & path, bool recurse); MakeError(PathInUse, Error); -void verifyStore(bool checkContents); - /* Whether we are in build users mode. 
*/ bool haveBuildUsers(); diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index 25529d8ba..4b192ec9a 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -1,6 +1,5 @@ #include "misc.hh" #include "store-api.hh" -#include "db.hh" #include <aterm2.h> diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc new file mode 100644 index 000000000..c260f253e --- /dev/null +++ b/src/libstore/optimise-store.cc @@ -0,0 +1,129 @@ +#include "util.hh" +#include "local-store.hh" + +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> + + +namespace nix { + + +typedef std::map<Hash, std::pair<Path, ino_t> > HashToPath; + + +static void makeWritable(const Path & path) +{ + struct stat st; + if (lstat(path.c_str(), &st)) + throw SysError(format("getting attributes of path `%1%'") % path); + if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1) + throw SysError(format("changing writability of `%1%'") % path); +} + + +static void hashAndLink(bool dryRun, HashToPath & hashToPath, + OptimiseStats & stats, const Path & path) +{ + struct stat st; + if (lstat(path.c_str(), &st)) + throw SysError(format("getting attributes of path `%1%'") % path); + + /* Sometimes SNAFUs can cause files in the Nix store to be + modified, in particular when running programs as root under + NixOS (example: $fontconfig/var/cache being modified). Skip + those files. */ + if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) { + printMsg(lvlError, format("skipping suspicious writable file `%1%'") % path); + return; + } + + /* We can hard link regular files and symlinks. */ + if (S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)) { + + /* Hash the file. Note that hashPath() returns the hash over + the NAR serialisation, which includes the execute bit on + the file. Thus, executable and non-executable files with + the same contents *won't* be linked (which is good because + otherwise the permissions would be screwed up). + + Also note that if `path' is a symlink, then we're hashing + the contents of the symlink (i.e. the result of + readlink()), not the contents of the target (which may not + even exist). */ + Hash hash = hashPath(htSHA256, path); + stats.totalFiles++; + printMsg(lvlDebug, format("`%1%' has hash `%2%'") % path % printHash(hash)); + + std::pair<Path, ino_t> prevPath = hashToPath[hash]; + + if (prevPath.first == "") { + hashToPath[hash] = std::pair<Path, ino_t>(path, st.st_ino); + return; + } + + /* Yes! We've seen a file with the same contents. Replace + the current file with a hard link to that file. */ + stats.sameContents++; + if (prevPath.second == st.st_ino) { + printMsg(lvlDebug, format("`%1%' is already linked to `%2%'") % path % prevPath.first); + return; + } + + if (!dryRun) { + + printMsg(lvlTalkative, format("linking `%1%' to `%2%'") % path % prevPath.first); + + Path tempLink = (format("%1%.tmp-%2%-%3%") + % path % getpid() % rand()).str(); + + /* Make the containing directory writable, but only if + it's not the store itself (we don't want or need to + mess with its permissions). */ + bool mustToggle = !isStorePath(path); + if (mustToggle) makeWritable(dirOf(path)); + + if (link(prevPath.first.c_str(), tempLink.c_str()) == -1) + throw SysError(format("cannot link `%1%' to `%2%'") + % tempLink % prevPath.first); + + /* Atomically replace the old file with the new hard link. 
*/ + if (rename(tempLink.c_str(), path.c_str()) == -1) + throw SysError(format("cannot rename `%1%' to `%2%'") + % tempLink % path); + + /* Make the directory read-only again and reset its + timestamp back to 0. */ + if (mustToggle) canonicalisePathMetaData(dirOf(path), false); + + } else + printMsg(lvlTalkative, format("would link `%1%' to `%2%'") % path % prevPath.first); + + stats.filesLinked++; + stats.bytesFreed += st.st_size; + } + + if (S_ISDIR(st.st_mode)) { + Strings names = readDirectory(path); + for (Strings::iterator i = names.begin(); i != names.end(); ++i) + hashAndLink(dryRun, hashToPath, stats, path + "/" + *i); + } +} + + +void LocalStore::optimiseStore(bool dryRun, OptimiseStats & stats) +{ + HashToPath hashToPath; + + PathSet paths = queryValidPaths(); + + for (PathSet::iterator i = paths.begin(); i != paths.end(); ++i) { + addTempRoot(*i); + if (!isValidPath(*i)) continue; /* path was GC'ed, probably */ + startNest(nest, lvlChatty, format("hashing files in `%1%'") % *i); + hashAndLink(dryRun, hashToPath, stats, *i); + } +} + + +} diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index b5bc85e18..0d516c198 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -205,6 +205,19 @@ ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven) } +string showPaths(const PathSet & paths) +{ + string s; + for (PathSet::const_iterator i = paths.begin(); + i != paths.end(); ++i) + { + if (s.size() != 0) s += ", "; + s += "`" + *i + "'"; + } + return s; +} + + } @@ -219,10 +232,10 @@ namespace nix { boost::shared_ptr<StoreAPI> store; -boost::shared_ptr<StoreAPI> openStore(bool reserveSpace) +boost::shared_ptr<StoreAPI> openStore() { if (getEnv("NIX_REMOTE") == "") - return boost::shared_ptr<StoreAPI>(new LocalStore(reserveSpace)); + return boost::shared_ptr<StoreAPI>(new LocalStore()); else return boost::shared_ptr<StoreAPI>(new RemoteStore()); } diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 39a7c9b70..a50dcf645 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -249,7 +249,12 @@ extern boost::shared_ptr<StoreAPI> store; /* Factory method: open the Nix database, either through the local or remote implementation. */ -boost::shared_ptr<StoreAPI> openStore(bool reserveSpace = true); +boost::shared_ptr<StoreAPI> openStore(); + + +/* Display a set of paths in human-readable form (i.e., between quotes + and separated by commas). */ +string showPaths(const PathSet & paths); string makeValidityRegistration(const PathSet & paths, @@ -261,8 +266,12 @@ struct ValidPathInfo Path deriver; Hash hash; PathSet references; + time_t registrationTime; + ValidPathInfo() : registrationTime(0) { } }; +typedef list<ValidPathInfo> ValidPathInfos; + ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven = false); diff --git a/src/libstore/upgrade-schema.cc b/src/libstore/upgrade-schema.cc new file mode 100644 index 000000000..450a7c8d0 --- /dev/null +++ b/src/libstore/upgrade-schema.cc @@ -0,0 +1,108 @@ +#include "db.hh" +#include "hash.hh" +#include "util.hh" +#include "local-store.hh" +#include "globals.hh" +#include "pathlocks.hh" +#include "config.h" + +#include <iostream> + + +namespace nix { + + +Hash parseHashField(const Path & path, const string & s); + + +/* Upgrade from schema 4 (Nix 0.11) to schema 5 (Nix >= 0.12). The + old schema uses Berkeley DB, the new one stores store path + meta-information in files. 
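upgradeStore12() below finishes by writing nixSchemaVersion to schemaPath, so the schema version of the new store lives in a plain text file rather than in a database. The matching getSchema() is defined in local-store.cc and is not part of this hunk; the following is only a sketch of the file-based version check it stands for, using helpers from util.hh:

    #include "util.hh"      /* Path, pathExists(), readFile() */
    #include <sstream>

    using namespace nix;

    /* Sketch only; not the actual getSchema() from local-store.cc. */
    static int readSchemaVersion(const Path & schemaPath)
    {
        if (!pathExists(schemaPath)) return 0;          /* no schema file yet */
        std::istringstream str(readFile(schemaPath));   /* file contains e.g. "5" */
        int version = 0;
        str >> version;
        return version;
    }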
*/ +void LocalStore::upgradeStore12() +{ +#if OLD_DB_COMPAT + +#ifdef __CYGWIN__ + /* Cygwin can't upgrade a read lock to a write lock... */ + lockFile(globalLock, ltNone, true); +#endif + + if (!lockFile(globalLock, ltWrite, false)) { + printMsg(lvlError, "waiting for exclusive access to the Nix store..."); + lockFile(globalLock, ltWrite, true); + } + + printMsg(lvlError, "upgrading Nix store to new schema (this may take a while)..."); + + if (getSchema() >= nixSchemaVersion) return; /* somebody else beat us to it */ + + /* Open the old Nix database and tables. */ + Database nixDB; + nixDB.open(nixDBPath); + + /* dbValidPaths :: Path -> () + + The existence of a key $p$ indicates that path $p$ is valid + (that is, produced by a succesful build). */ + TableId dbValidPaths = nixDB.openTable("validpaths"); + + /* dbReferences :: Path -> [Path] + + This table lists the outgoing file system references for each + output path that has been built by a Nix derivation. These are + found by scanning the path for the hash components of input + paths. */ + TableId dbReferences = nixDB.openTable("references"); + + /* dbReferrers :: Path -> Path + + This table is just the reverse mapping of dbReferences. This + table can have duplicate keys, each corresponding value + denoting a single referrer. */ + // Not needed for conversion: it's just the inverse of + // references. + // TableId dbReferrers = nixDB.openTable("referrers"); + + /* dbDerivers :: Path -> [Path] + + This table lists the derivation used to build a path. There + can only be multiple such paths for fixed-output derivations + (i.e., derivations specifying an expected hash). */ + TableId dbDerivers = nixDB.openTable("derivers"); + + Paths paths; + nixDB.enumTable(noTxn, dbValidPaths, paths); + + for (Paths::iterator i = paths.begin(); i != paths.end(); ++i) { + ValidPathInfo info; + info.path = *i; + + Paths references; + nixDB.queryStrings(noTxn, dbReferences, *i, references); + info.references.insert(references.begin(), references.end()); + + string s; + nixDB.queryString(noTxn, dbValidPaths, *i, s); + info.hash = parseHashField(*i, s); + + nixDB.queryString(noTxn, dbDerivers, *i, info.deriver); + + registerValidPath(info, true); + std::cerr << "."; + } + + std::cerr << std::endl; + + writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str()); + + lockFile(globalLock, ltRead, true); + +#else + throw Error( + "Your Nix store has a database in Berkeley DB format. To convert\n" + "to the new format, please compile Nix with Berkeley DB support."); +#endif +} + + +} diff --git a/src/libutil/util.cc b/src/libutil/util.cc index f978856a9..e18f9841f 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -361,9 +361,10 @@ Path createTempDir(const Path & tmpRoot, const Path & prefix, Paths createDirs(const Path & path) { - if (path == "/") return Paths(); - Paths created = createDirs(dirOf(path)); + Paths created; + if (path == "/") return created; if (!pathExists(path)) { + created = createDirs(dirOf(path)); if (mkdir(path.c_str(), 0777) == -1) throw SysError(format("creating directory `%1%'") % path); created.push_back(path); diff --git a/src/libutil/util.hh b/src/libutil/util.hh index d75002b02..d52ab3e4d 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -12,6 +12,10 @@ namespace nix { +#define foreach(it_type, it, collection) \ + for (it_type it = collection.begin(); it != collection.end(); ++it) + + /* Return an environment variable. 
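The new foreach macro in util.hh merely abbreviates the C++03 iterator boilerplate used throughout the code base (compare the explicit loops in optimise-store.cc and upgrade-schema.cc above). A usage sketch; printPaths and its argument are hypothetical:

    #include "util.hh"      /* brings in PathSet and the foreach macro */
    #include <iostream>

    using namespace nix;

    static void printPaths(const PathSet & paths)
    {
        foreach (PathSet::const_iterator, i, paths)
            std::cerr << *i << std::endl;

        /* ...which expands to:
           for (PathSet::const_iterator i = paths.begin();
                i != paths.end(); ++i)
               std::cerr << *i << std::endl; */
    }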
*/ string getEnv(const string & key, const string & def = ""); diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 1af1a2f53..f561dc2e5 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -13,7 +13,6 @@ #include "common-opts.hh" #include "xml-writer.hh" #include "store-api.hh" -#include "db.hh" #include "util.hh" #include <cerrno> diff --git a/src/nix-store/dotgraph.cc b/src/nix-store/dotgraph.cc index 989945600..83df9e9cd 100644 --- a/src/nix-store/dotgraph.cc +++ b/src/nix-store/dotgraph.cc @@ -1,7 +1,6 @@ #include "dotgraph.hh" #include "util.hh" #include "store-api.hh" -#include "db.hh" #include <iostream> diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 17b3c18fa..df027fcc7 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -7,7 +7,6 @@ #include "shared.hh" #include "dotgraph.hh" #include "local-store.hh" -#include "db.hh" #include "util.hh" #include "help.txt.hh" @@ -31,6 +30,14 @@ static int rootNr = 0; static bool indirectRoot = false; +LocalStore & ensureLocalStore() +{ + LocalStore * store2(dynamic_cast<LocalStore *>(store.get())); + if (!store2) throw Error("you don't have sufficient rights to use --verify"); + return *store2; +} + + static Path useDeriver(Path path) { if (!isDerivation(path)) { @@ -430,10 +437,7 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise) } } - Transaction txn; - createStoreTransaction(txn); - registerValidPaths(txn, infos); - txn.commit(); + ensureLocalStore().registerValidPaths(infos); } @@ -641,11 +645,10 @@ static void opVerify(Strings opFlags, Strings opArgs) if (*i == "--check-contents") checkContents = true; else throw UsageError(format("unknown flag `%1%'") % *i); - verifyStore(checkContents); + ensureLocalStore().verifyStore(checkContents); } - static void showOptimiseStats(OptimiseStats & stats) { printMsg(lvlError, @@ -671,12 +674,9 @@ static void opOptimise(Strings opFlags, Strings opArgs) if (*i == "--dry-run") dryRun = true; else throw UsageError(format("unknown flag `%1%'") % *i); - LocalStore * store2(dynamic_cast<LocalStore *>(store.get())); - if (!store2) throw Error("you don't have sufficient rights to use --optimise"); - OptimiseStats stats; try { - store2->optimiseStore(dryRun, stats); + ensureLocalStore().optimiseStore(dryRun, stats); } catch (...) { showOptimiseStats(stats); throw; @@ -755,7 +755,7 @@ void run(Strings args) if (!op) throw UsageError("no operation specified"); if (op != opDump && op != opRestore) /* !!! hack */ - store = openStore(op != opGC); + store = openStore(); op(opFlags, opArgs); } diff --git a/src/nix-worker/nix-worker.cc b/src/nix-worker/nix-worker.cc index d9b75270f..134e1a693 100644 --- a/src/nix-worker/nix-worker.cc +++ b/src/nix-worker/nix-worker.cc @@ -474,7 +474,7 @@ static void processConnection() #endif /* Open the store. 
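ensureLocalStore() above centralises the downcast from the generic StoreAPI handle to LocalStore; --register-validity, --verify and --optimise all go through it now. For illustration only, a hypothetical extra operation written in the same style (opFullVerify is not part of this patch):

    /* Hypothetical: any operation needing a LocalStore-only member
       first narrows the store handle via ensureLocalStore(). */
    static void opFullVerify(Strings opFlags, Strings opArgs)
    {
        if (!opFlags.empty() || !opArgs.empty())
            throw UsageError("no flags or arguments expected");
        ensureLocalStore().verifyStore(true /* also check contents */);
    }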
*/ - store = boost::shared_ptr<StoreAPI>(new LocalStore(true)); + store = boost::shared_ptr<StoreAPI>(new LocalStore()); stopWork(); diff --git a/tests/Makefile.am b/tests/Makefile.am index bab9e8ee4..c65f68584 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -19,7 +19,7 @@ TESTS = init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \ fallback.sh nix-push.sh gc.sh gc-concurrent.sh verify.sh nix-pull.sh \ referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \ gc-runtime.sh install-package.sh check-refs.sh filter-source.sh \ - remote-store.sh + remote-store.sh export.sh XFAIL_TESTS = diff --git a/tests/dependencies.sh b/tests/dependencies.sh index 434027e46..440b6fae0 100644 --- a/tests/dependencies.sh +++ b/tests/dependencies.sh @@ -11,7 +11,7 @@ $nixstore -q --graph "$drvPath" > $TEST_ROOT/graph if test -n "$dot"; then # Does it parse? $dot < $TEST_ROOT/graph -fi +fi outPath=$($nixstore -rvv "$drvPath") diff --git a/tests/export.sh b/tests/export.sh new file mode 100644 index 000000000..53c9ec3c6 --- /dev/null +++ b/tests/export.sh @@ -0,0 +1,31 @@ +source common.sh + +clearStore + +outPath=$($nixstore -r $($nixinstantiate dependencies.nix)) + +$nixstore --export $outPath > $TEST_ROOT/exp + +$nixstore --export $($nixstore -qR $outPath) > $TEST_ROOT/exp_all + + +clearStore + +if $nixstore --import < $TEST_ROOT/exp; then + echo "importing a non-closure should fail" + exit 1 +fi + + +clearStore + +$nixstore --import < $TEST_ROOT/exp_all + +$nixstore --export $($nixstore -qR $outPath) > $TEST_ROOT/exp_all2 + + +clearStore + +# Regression test: the derivers in exp_all2 are empty, which shouldn't +# cause a failure. +$nixstore --import < $TEST_ROOT/exp_all2 diff --git a/tests/init.sh b/tests/init.sh index 2713aa590..7becc422e 100644 --- a/tests/init.sh +++ b/tests/init.sh @@ -95,4 +95,5 @@ chmod +x $NIX_BIN_DIR/nix/download-using-manifests.pl $nixstore --init # Did anything happen? -test -e "$NIX_DB_DIR"/validpaths +test -e "$NIX_DB_DIR"/info +test -e "$NIX_DB_DIR"/referrer diff --git a/tests/referrers.sh b/tests/referrers.sh index 4d2aaab5a..2fdf0cf7c 100644 --- a/tests/referrers.sh +++ b/tests/referrers.sh @@ -9,12 +9,50 @@ reference=$NIX_STORE_DIR/abcdef touch $reference (echo $reference && echo && echo 0) | $nixstore --register-validity -echo "registering..." -time for ((n = 0; n < $max; n++)); do +echo "making registration..." + +for ((n = 0; n < $max; n++)); do storePath=$NIX_STORE_DIR/$n touch $storePath - (echo $storePath && echo && echo 1 && echo $reference) -done | $nixstore --register-validity + ref2=$NIX_STORE_DIR/$((n+1)) + if test $((n+1)) = $max; then + ref2=$reference + fi + (echo $storePath && echo && echo 2 && echo $reference && echo $ref2) +done > $TEST_ROOT/reg_info + +echo "registering..." + +time $nixstore --register-validity < $TEST_ROOT/reg_info + +oldTime=$(cat test-tmp/db/info/1 | grep Registered-At) + +echo "sleeping..." + +sleep 2 + +echo "reregistering..." + +time $nixstore --register-validity --reregister < $TEST_ROOT/reg_info + +newTime=$(cat test-tmp/db/info/1 | grep Registered-At) + +if test "$newTime" != "$oldTime"; then + echo "reregistration changed original registration time" + exit 1 +fi + +if test "$(cat test-tmp/db/referrer/1 | wc -w)" != 1; then + echo "reregistration duplicated referrers" + exit 1 +fi echo "collecting garbage..." 
-time $nixstore --gc 2> /dev/null +ln -sfn $reference "$NIX_STATE_DIR"/gcroots/ref +time $nixstore --gc + +if test "$(cat test-tmp/db/referrer/abcdef | wc -w)" != 0; then + echo "referrers not cleaned up" + exit 1 +fi +