Diffstat (limited to 'src/libexpr')
-rw-r--r-- | src/libexpr/common-eval-args.cc  |  22
-rw-r--r-- | src/libexpr/eval.cc              |  60
-rw-r--r-- | src/libexpr/eval.hh              |  12
-rw-r--r-- | src/libexpr/flake/call-flake.nix |  30
-rw-r--r-- | src/libexpr/flake/eval-cache.cc  | 116
-rw-r--r-- | src/libexpr/flake/eval-cache.hh  |  40
-rw-r--r-- | src/libexpr/flake/flake.cc       | 672
-rw-r--r-- | src/libexpr/flake/flake.hh       | 110
-rw-r--r-- | src/libexpr/flake/flakeref.cc    | 188
-rw-r--r-- | src/libexpr/flake/flakeref.hh    |  55
-rw-r--r-- | src/libexpr/flake/lockfile.cc    | 284
-rw-r--r-- | src/libexpr/flake/lockfile.hh    |  77
-rw-r--r-- | src/libexpr/local.mk             |  12
-rw-r--r-- | src/libexpr/primops.cc           |  15
-rw-r--r-- | src/libexpr/primops/fetchTree.cc |  16
-rw-r--r-- | src/libexpr/value.hh             |   5
16 files changed, 1688 insertions, 26 deletions
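
Most of this diff introduces the new flake machinery under src/libexpr/flake/. For orientation, the following is a minimal sketch of the flake.nix schema that the parser added in flake.cc below accepts; it is not part of the commit, and the concrete input names and URLs are illustrative assumptions. Per getFlake, 'edition' (which must be 201909) and 'outputs' are required, while 'description' and 'inputs' are optional; per parseFlakeInput, an input may carry 'url', 'flake', 'follows', or nested 'inputs' overrides.

{
  # Required; getFlake in flake.cc rejects any edition other than 201909.
  edition = 201909;

  description = "example flake (illustrative)";

  inputs = {
    # 'url' is parsed by parseFlakeRef; these refs are placeholders and
    # assume the corresponding fetchers in libfetchers.
    nixpkgs.url = "github:NixOS/nixpkgs";

    # A non-flake input: fetched as a source tree only (see call-flake.nix).
    sourceOnly = { url = "github:example/data"; flake = false; };

    # A 'follows' override, handled by the 'follows' logic in lockFlake.
    # ('deployer' itself has no url, so it resolves via the flake registry.)
    deployer.inputs.nixpkgs.follows = "nixpkgs";
  };

  # Each formal other than 'self' is also registered as an input by getFlake.
  outputs = { self, nixpkgs, sourceOnly, deployer }: {
    # any attribute set; call-flake.nix merges it with 'inputs' and 'sourceInfo'
  };
}
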
diff --git a/src/libexpr/common-eval-args.cc b/src/libexpr/common-eval-args.cc index 82bfeac36..f965c194f 100644 --- a/src/libexpr/common-eval-args.cc +++ b/src/libexpr/common-eval-args.cc @@ -4,6 +4,8 @@ #include "util.hh" #include "eval.hh" #include "fetchers.hh" +#include "registry.hh" +#include "flake/flakeref.hh" #include "store-api.hh" namespace nix { @@ -28,6 +30,26 @@ MixEvalArgs::MixEvalArgs() .description("add a path to the list of locations used to look up <...> file names") .label("path") .handler([&](std::string s) { searchPath.push_back(s); }); + + mkFlag() + .longName("impure") + .description("allow access to mutable paths and repositories") + .handler([&](std::vector<std::string> ss) { + evalSettings.pureEval = false; + }); + + mkFlag() + .longName("override-flake") + .labels({"original-ref", "resolved-ref"}) + .description("override a flake registry value") + .arity(2) + .handler([&](std::vector<std::string> ss) { + auto from = parseFlakeRef(ss[0], absPath(".")); + auto to = parseFlakeRef(ss[1], absPath(".")); + fetchers::Attrs extraAttrs; + if (to.subdir != "") extraAttrs["dir"] = to.subdir; + fetchers::overrideRegistry(from.input, to.input, extraAttrs); + }); } Bindings * MixEvalArgs::getAutoArgs(EvalState & state) diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index dac32b6f5..6fcb2917c 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -8,6 +8,7 @@ #include "download.hh" #include "json.hh" #include "function-trace.hh" +#include "flake/flake.hh" #include <algorithm> #include <chrono> @@ -153,12 +154,12 @@ const Value *getPrimOp(const Value &v) { } -string showType(const Value & v) +string showType(ValueType type) { - switch (v.type) { + switch (type) { case tInt: return "an integer"; - case tBool: return "a boolean"; - case tString: return v.string.context ? "a string with context" : "a string"; + case tBool: return "a Boolean"; + case tString: return "a string"; case tPath: return "a path"; case tNull: return "null"; case tAttrs: return "a set"; @@ -167,14 +168,39 @@ string showType(const Value & v) case tApp: return "a function application"; case tLambda: return "a function"; case tBlackhole: return "a black hole"; + case tPrimOp: return "a built-in function"; + case tPrimOpApp: return "a partially applied built-in function"; + case tExternal: return "an external value"; + case tFloat: return "a float"; + } + abort(); +} + + +string showType(const Value & v) +{ + switch (v.type) { + case tString: return v.string.context ? 
"a string with context" : "a string"; case tPrimOp: return fmt("the built-in function '%s'", string(v.primOp->name)); case tPrimOpApp: return fmt("the partially applied built-in function '%s'", string(getPrimOp(v)->primOp->name)); case tExternal: return v.external->showType(); - case tFloat: return "a float"; + default: + return showType(v.type); } - abort(); +} + + +bool Value::isTrivial() const +{ + return + type != tApp + && type != tPrimOpApp + && (type != tThunk + || (dynamic_cast<ExprAttrs *>(thunk.expr) + && ((ExprAttrs *) thunk.expr)->dynamicAttrs.empty()) + || dynamic_cast<ExprLambda *>(thunk.expr)); } @@ -315,6 +341,8 @@ EvalState::EvalState(const Strings & _searchPath, ref<Store> store) , sOutputHash(symbols.create("outputHash")) , sOutputHashAlgo(symbols.create("outputHashAlgo")) , sOutputHashMode(symbols.create("outputHashMode")) + , sDescription(symbols.create("description")) + , sSelf(symbols.create("self")) , repair(NoRepair) , store(store) , baseEnv(allocEnv(128)) @@ -463,14 +491,21 @@ Value * EvalState::addConstant(const string & name, Value & v) Value * EvalState::addPrimOp(const string & name, size_t arity, PrimOpFun primOp) { + auto name2 = string(name, 0, 2) == "__" ? string(name, 2) : name; + Symbol sym = symbols.create(name2); + + /* Hack to make constants lazy: turn them into a application of + the primop to a dummy value. */ if (arity == 0) { + auto vPrimOp = allocValue(); + vPrimOp->type = tPrimOp; + vPrimOp->primOp = new PrimOp(primOp, 1, sym); Value v; - primOp(*this, noPos, nullptr, v); + mkApp(v, *vPrimOp, *vPrimOp); return addConstant(name, v); } + Value * v = allocValue(); - string name2 = string(name, 0, 2) == "__" ? string(name, 2) : name; - Symbol sym = symbols.create(name2); v->type = tPrimOp; v->primOp = new PrimOp(primOp, arity, sym); staticBaseEnv.vars[symbols.create(name)] = baseEnvDispl; @@ -736,7 +771,7 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) } -void EvalState::evalFile(const Path & path_, Value & v) +void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial) { auto path = checkSourcePath(path_); @@ -765,6 +800,11 @@ void EvalState::evalFile(const Path & path_, Value & v) fileParseCache[path2] = e; try { + // Enforce that 'flake.nix' is a direct attrset, not a + // computation. + if (mustBeTrivial && + !(dynamic_cast<ExprAttrs *>(e))) + throw Error("file '%s' must be an attribute set", path); eval(e, v); } catch (Error & e) { addErrorPrefix(e, "while evaluating the file '%1%':\n", path2); diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 1485dc7fe..ab4d2bc4b 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -4,13 +4,13 @@ #include "value.hh" #include "nixexpr.hh" #include "symbol-table.hh" -#include "hash.hh" #include "config.hh" #include <regex> #include <map> #include <optional> #include <unordered_map> +#include <mutex> namespace nix { @@ -74,7 +74,8 @@ public: sSystem, sOverrides, sOutputs, sOutputName, sIgnoreNulls, sFile, sLine, sColumn, sFunctor, sToString, sRight, sWrong, sStructuredAttrs, sBuilder, sArgs, - sOutputHash, sOutputHashAlgo, sOutputHashMode; + sOutputHash, sOutputHashAlgo, sOutputHashMode, + sDescription, sSelf; Symbol sDerivationNix; /* If set, force copying files to the Nix store even if they @@ -89,6 +90,7 @@ public: const ref<Store> store; + private: SrcToStore srcToStore; @@ -151,8 +153,9 @@ public: Expr * parseStdin(); /* Evaluate an expression read from the given file to normal - form. */ - void evalFile(const Path & path, Value & v); + form. 
Optionally enforce that the top-level expression is + trivial (i.e. doesn't require arbitrary computation). */ + void evalFile(const Path & path, Value & v, bool mustBeTrivial = false); void resetFileCache(); @@ -324,6 +327,7 @@ private: /* Return a string representing the type of the value `v'. */ +string showType(ValueType type); string showType(const Value & v); /* Decode a context string ‘!<name>!<path>’ into a pair <path, diff --git a/src/libexpr/flake/call-flake.nix b/src/libexpr/flake/call-flake.nix new file mode 100644 index 000000000..dc9ed357c --- /dev/null +++ b/src/libexpr/flake/call-flake.nix @@ -0,0 +1,30 @@ +lockFileStr: rootSrc: rootSubdir: + +let + + lockFile = builtins.fromJSON lockFileStr; + + allNodes = + builtins.mapAttrs + (key: node: + let + sourceInfo = + if key == lockFile.root + then rootSrc + else fetchTree ({ inherit (node.info) narHash; } // removeAttrs node.locked ["dir"]); + subdir = if key == lockFile.root then rootSubdir else node.locked.dir or ""; + flake = import (sourceInfo + (if subdir != "" then "/" else "") + subdir + "/flake.nix"); + inputs = builtins.mapAttrs (inputName: key: allNodes.${key}) (node.inputs or {}); + outputs = flake.outputs (inputs // { self = result; }); + result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; }; + in + if node.flake or true then + assert flake.edition or flake.epoch or 0 == 201909; + assert builtins.isFunction flake.outputs; + result + else + sourceInfo + ) + lockFile.nodes; + +in allNodes.${lockFile.root} diff --git a/src/libexpr/flake/eval-cache.cc b/src/libexpr/flake/eval-cache.cc new file mode 100644 index 000000000..8d01ef0fc --- /dev/null +++ b/src/libexpr/flake/eval-cache.cc @@ -0,0 +1,116 @@ +#include "eval-cache.hh" +#include "sqlite.hh" +#include "eval.hh" + +#include <set> + +namespace nix::flake { + +static const char * schema = R"sql( + +create table if not exists Fingerprints ( + fingerprint blob primary key not null, + timestamp integer not null +); + +create table if not exists Attributes ( + fingerprint blob not null, + attrPath text not null, + type integer, + value text, + primary key (fingerprint, attrPath), + foreign key (fingerprint) references Fingerprints(fingerprint) on delete cascade +); +)sql"; + +struct EvalCache::State +{ + SQLite db; + SQLiteStmt insertFingerprint; + SQLiteStmt insertAttribute; + SQLiteStmt queryAttribute; + std::set<Fingerprint> fingerprints; +}; + +EvalCache::EvalCache() + : _state(std::make_unique<Sync<State>>()) +{ + auto state(_state->lock()); + + Path dbPath = getCacheDir() + "/nix/eval-cache-v1.sqlite"; + createDirs(dirOf(dbPath)); + + state->db = SQLite(dbPath); + state->db.isCache(); + state->db.exec(schema); + + state->insertFingerprint.create(state->db, + "insert or ignore into Fingerprints(fingerprint, timestamp) values (?, ?)"); + + state->insertAttribute.create(state->db, + "insert or replace into Attributes(fingerprint, attrPath, type, value) values (?, ?, ?, ?)"); + + state->queryAttribute.create(state->db, + "select type, value from Attributes where fingerprint = ? 
and attrPath = ?"); +} + +enum ValueType { + Derivation = 1, +}; + +void EvalCache::addDerivation( + const Fingerprint & fingerprint, + const std::string & attrPath, + const Derivation & drv) +{ + if (!evalSettings.pureEval) return; + + auto state(_state->lock()); + + if (state->fingerprints.insert(fingerprint).second) + // FIXME: update timestamp + state->insertFingerprint.use() + (fingerprint.hash, fingerprint.hashSize) + (time(0)).exec(); + + state->insertAttribute.use() + (fingerprint.hash, fingerprint.hashSize) + (attrPath) + (ValueType::Derivation) + (std::string(drv.drvPath.to_string()) + " " + std::string(drv.outPath.to_string()) + " " + drv.outputName).exec(); +} + +std::optional<EvalCache::Derivation> EvalCache::getDerivation( + const Fingerprint & fingerprint, + const std::string & attrPath) +{ + if (!evalSettings.pureEval) return {}; + + auto state(_state->lock()); + + auto queryAttribute(state->queryAttribute.use() + (fingerprint.hash, fingerprint.hashSize) + (attrPath)); + if (!queryAttribute.next()) return {}; + + // FIXME: handle negative results + + auto type = (ValueType) queryAttribute.getInt(0); + auto s = queryAttribute.getStr(1); + + if (type != ValueType::Derivation) return {}; + + auto ss = tokenizeString<std::vector<std::string>>(s, " "); + + debug("evaluation cache hit for '%s'", attrPath); + + return Derivation { StorePath::fromBaseName(ss[0]), StorePath::fromBaseName(ss[1]), ss[2] }; +} + +EvalCache & EvalCache::singleton() +{ + static std::unique_ptr<EvalCache> evalCache(new EvalCache()); + return *evalCache; +} + +} diff --git a/src/libexpr/flake/eval-cache.hh b/src/libexpr/flake/eval-cache.hh new file mode 100644 index 000000000..f81d48ba5 --- /dev/null +++ b/src/libexpr/flake/eval-cache.hh @@ -0,0 +1,40 @@ +#pragma once + +#include "sync.hh" +#include "flake.hh" +#include "path.hh" + +namespace nix { struct SQLite; struct SQLiteStmt; } + +namespace nix::flake { + +class EvalCache +{ + struct State; + + std::unique_ptr<Sync<State>> _state; + + EvalCache(); + +public: + + struct Derivation + { + StorePath drvPath; + StorePath outPath; + std::string outputName; + }; + + void addDerivation( + const Fingerprint & fingerprint, + const std::string & attrPath, + const Derivation & drv); + + std::optional<Derivation> getDerivation( + const Fingerprint & fingerprint, + const std::string & attrPath); + + static EvalCache & singleton(); +}; + +} diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc new file mode 100644 index 000000000..f23c53a82 --- /dev/null +++ b/src/libexpr/flake/flake.cc @@ -0,0 +1,672 @@ +#include "flake.hh" +#include "lockfile.hh" +#include "primops.hh" +#include "eval-inline.hh" +#include "store-api.hh" +#include "fetchers.hh" +#include "finally.hh" + +namespace nix { + +using namespace flake; + +namespace flake { + +/* If 'allowLookup' is true, then resolve 'flakeRef' using the + registries. */ +static FlakeRef maybeLookupFlake( + ref<Store> store, + const FlakeRef & flakeRef, + bool allowLookup) +{ + if (!flakeRef.input->isDirect()) { + if (allowLookup) + return flakeRef.resolve(store); + else + throw Error("'%s' is an indirect flake reference, but registry lookups are not allowed", flakeRef); + } else + return flakeRef; +} + +typedef std::vector<std::pair<FlakeRef, FlakeRef>> FlakeCache; + +static FlakeRef lookupInFlakeCache( + const FlakeCache & flakeCache, + const FlakeRef & flakeRef) +{ + // FIXME: inefficient. 
+ for (auto & i : flakeCache) { + if (flakeRef == i.first) { + debug("mapping '%s' to previously seen input '%s' -> '%s", + flakeRef, i.first, i.second); + return i.second; + } + } + + return flakeRef; +} + +static std::pair<fetchers::Tree, FlakeRef> fetchOrSubstituteTree( + EvalState & state, + const FlakeRef & originalRef, + std::optional<TreeInfo> treeInfo, + bool allowLookup, + FlakeCache & flakeCache) +{ + /* The tree may already be in the Nix store, or it could be + substituted (which is often faster than fetching from the + original source). So check that. */ + if (treeInfo && originalRef.input->isDirect() && originalRef.input->isImmutable()) { + try { + auto storePath = treeInfo->computeStorePath(*state.store); + + state.store->ensurePath(storePath); + + debug("using substituted/cached input '%s' in '%s'", + originalRef, state.store->printStorePath(storePath)); + + auto actualPath = state.store->toRealPath(storePath); + + if (state.allowedPaths) + state.allowedPaths->insert(actualPath); + + return { + Tree { + .actualPath = actualPath, + .storePath = std::move(storePath), + .info = *treeInfo, + }, + originalRef + }; + } catch (Error & e) { + debug("substitution of input '%s' failed: %s", originalRef, e.what()); + } + } + + auto resolvedRef = lookupInFlakeCache(flakeCache, + maybeLookupFlake(state.store, + lookupInFlakeCache(flakeCache, originalRef), allowLookup)); + + auto [tree, lockedRef] = resolvedRef.fetchTree(state.store); + + debug("got tree '%s' from '%s'", + state.store->printStorePath(tree.storePath), lockedRef); + + flakeCache.push_back({originalRef, lockedRef}); + flakeCache.push_back({resolvedRef, lockedRef}); + + if (state.allowedPaths) + state.allowedPaths->insert(tree.actualPath); + + if (treeInfo) + assert(tree.storePath == treeInfo->computeStorePath(*state.store)); + + return {std::move(tree), lockedRef}; +} + +static void expectType(EvalState & state, ValueType type, + Value & value, const Pos & pos) +{ + if (value.type == tThunk && value.isTrivial()) + state.forceValue(value, pos); + if (value.type != type) + throw Error("expected %s but got %s at %s", + showType(type), showType(value.type), pos); +} + +static std::map<FlakeId, FlakeInput> parseFlakeInputs( + EvalState & state, Value * value, const Pos & pos); + +static FlakeInput parseFlakeInput(EvalState & state, + const std::string & inputName, Value * value, const Pos & pos) +{ + expectType(state, tAttrs, *value, pos); + + FlakeInput input { + .ref = FlakeRef::fromAttrs({{"type", "indirect"}, {"id", inputName}}) + }; + + auto sInputs = state.symbols.create("inputs"); + auto sUrl = state.symbols.create("url"); + auto sUri = state.symbols.create("uri"); // FIXME: remove soon + auto sFlake = state.symbols.create("flake"); + auto sFollows = state.symbols.create("follows"); + + fetchers::Attrs attrs; + std::optional<std::string> url; + + for (nix::Attr attr : *(value->attrs)) { + try { + if (attr.name == sUrl || attr.name == sUri) { + expectType(state, tString, *attr.value, *attr.pos); + url = attr.value->string.s; + attrs.emplace("url", *url); + } else if (attr.name == sFlake) { + expectType(state, tBool, *attr.value, *attr.pos); + input.isFlake = attr.value->boolean; + } else if (attr.name == sInputs) { + input.overrides = parseFlakeInputs(state, attr.value, *attr.pos); + } else if (attr.name == sFollows) { + expectType(state, tString, *attr.value, *attr.pos); + input.follows = parseInputPath(attr.value->string.s); + } else { + state.forceValue(*attr.value); + if (attr.value->type == tString) + 
attrs.emplace(attr.name, attr.value->string.s); + else + throw TypeError("flake input attribute '%s' is %s while a string is expected", + attr.name, showType(*attr.value)); + } + } catch (Error & e) { + e.addPrefix(fmt("in flake attribute '%s' at '%s':\n", attr.name, *attr.pos)); + throw; + } + } + + if (attrs.count("type")) + try { + input.ref = FlakeRef::fromAttrs(attrs); + } catch (Error & e) { + e.addPrefix(fmt("in flake input at '%s':\n", pos)); + throw; + } + else { + attrs.erase("url"); + if (!attrs.empty()) + throw Error("unexpected flake input attribute '%s', at %s", attrs.begin()->first, pos); + if (url) + input.ref = parseFlakeRef(*url); + } + + return input; +} + +static std::map<FlakeId, FlakeInput> parseFlakeInputs( + EvalState & state, Value * value, const Pos & pos) +{ + std::map<FlakeId, FlakeInput> inputs; + + expectType(state, tAttrs, *value, pos); + + for (nix::Attr & inputAttr : *(*value).attrs) { + inputs.emplace(inputAttr.name, + parseFlakeInput(state, + inputAttr.name, + inputAttr.value, + *inputAttr.pos)); + } + + return inputs; +} + +static Flake getFlake( + EvalState & state, + const FlakeRef & originalRef, + std::optional<TreeInfo> treeInfo, + bool allowLookup, + FlakeCache & flakeCache) +{ + auto [sourceInfo, lockedRef] = fetchOrSubstituteTree( + state, originalRef, treeInfo, allowLookup, flakeCache); + + // Guard against symlink attacks. + auto flakeFile = canonPath(sourceInfo.actualPath + "/" + lockedRef.subdir + "/flake.nix"); + if (!isInDir(flakeFile, sourceInfo.actualPath)) + throw Error("'flake.nix' file of flake '%s' escapes from '%s'", + lockedRef, state.store->printStorePath(sourceInfo.storePath)); + + Flake flake { + .originalRef = originalRef, + .lockedRef = lockedRef, + .sourceInfo = std::make_shared<fetchers::Tree>(std::move(sourceInfo)) + }; + + if (!pathExists(flakeFile)) + throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", lockedRef, lockedRef.subdir); + + Value vInfo; + state.evalFile(flakeFile, vInfo, true); // FIXME: symlink attack + + expectType(state, tAttrs, vInfo, Pos(state.symbols.create(flakeFile), 0, 0)); + + auto sEdition = state.symbols.create("edition"); + auto sEpoch = state.symbols.create("epoch"); // FIXME: remove soon + + auto edition = vInfo.attrs->get(sEdition); + if (!edition) + edition = vInfo.attrs->get(sEpoch); + + if (edition) { + expectType(state, tInt, *edition->value, *edition->pos); + flake.edition = edition->value->integer; + if (flake.edition > 201909) + throw Error("flake '%s' requires unsupported edition %d; please upgrade Nix", lockedRef, flake.edition); + if (flake.edition < 201909) + throw Error("flake '%s' has illegal edition %d", lockedRef, flake.edition); + } else + throw Error("flake '%s' lacks attribute 'edition'", lockedRef); + + if (auto description = vInfo.attrs->get(state.sDescription)) { + expectType(state, tString, *description->value, *description->pos); + flake.description = description->value->string.s; + } + + auto sInputs = state.symbols.create("inputs"); + + if (auto inputs = vInfo.attrs->get(sInputs)) + flake.inputs = parseFlakeInputs(state, inputs->value, *inputs->pos); + + auto sOutputs = state.symbols.create("outputs"); + + if (auto outputs = vInfo.attrs->get(sOutputs)) { + expectType(state, tLambda, *outputs->value, *outputs->pos); + flake.vOutputs = outputs->value; + + if (flake.vOutputs->lambda.fun->matchAttrs) { + for (auto & formal : flake.vOutputs->lambda.fun->formals->formals) { + if (formal.name != state.sSelf) + flake.inputs.emplace(formal.name, 
FlakeInput { + .ref = parseFlakeRef(formal.name) + }); + } + } + + } else + throw Error("flake '%s' lacks attribute 'outputs'", lockedRef); + + for (auto & attr : *vInfo.attrs) { + if (attr.name != sEdition && + attr.name != sEpoch && + attr.name != state.sDescription && + attr.name != sInputs && + attr.name != sOutputs) + throw Error("flake '%s' has an unsupported attribute '%s', at %s", + lockedRef, attr.name, *attr.pos); + } + + return flake; +} + +Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup) +{ + FlakeCache flakeCache; + return getFlake(state, originalRef, {}, allowLookup, flakeCache); +} + +/* Compute an in-memory lock file for the specified top-level flake, + and optionally write it to file, it the flake is writable. */ +LockedFlake lockFlake( + EvalState & state, + const FlakeRef & topRef, + const LockFlags & lockFlags) +{ + settings.requireExperimentalFeature("flakes"); + + FlakeCache flakeCache; + + auto flake = getFlake(state, topRef, {}, lockFlags.useRegistries, flakeCache); + + // FIXME: symlink attack + auto oldLockFile = LockFile::read( + flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir + "/flake.lock"); + + debug("old lock file: %s", oldLockFile); + + // FIXME: check whether all overrides are used. + std::map<InputPath, FlakeInput> overrides; + std::set<InputPath> overridesUsed, updatesUsed; + + for (auto & i : lockFlags.inputOverrides) + overrides.insert_or_assign(i.first, FlakeInput { .ref = i.second }); + + LockFile newLockFile; + + std::vector<FlakeRef> parents; + std::map<InputPath, InputPath> follows; + + std::function<void( + const FlakeInputs & flakeInputs, + std::shared_ptr<Node> node, + const InputPath & inputPathPrefix, + std::shared_ptr<const Node> oldNode)> + computeLocks; + + computeLocks = [&]( + const FlakeInputs & flakeInputs, + std::shared_ptr<Node> node, + const InputPath & inputPathPrefix, + std::shared_ptr<const Node> oldNode) + { + debug("computing lock file node '%s'", concatStringsSep("/", inputPathPrefix)); + + /* Get the overrides (i.e. attributes of the form + 'inputs.nixops.inputs.nixpkgs.url = ...'). */ + // FIXME: check this + for (auto & [id, input] : flake.inputs) { + for (auto & [idOverride, inputOverride] : input.overrides) { + auto inputPath(inputPathPrefix); + inputPath.push_back(id); + inputPath.push_back(idOverride); + overrides.insert_or_assign(inputPath, inputOverride); + } + } + + /* Go over the flake inputs, resolve/fetch them if + necessary (i.e. if they're new or the flakeref changed + from what's in the lock file). */ + for (auto & [id, input2] : flakeInputs) { + auto inputPath(inputPathPrefix); + inputPath.push_back(id); + auto inputPathS = concatStringsSep("/", inputPath); + debug("computing input '%s'", concatStringsSep("/", inputPath)); + + /* Do we have an override for this input from one of the + ancestors? */ + auto i = overrides.find(inputPath); + bool hasOverride = i != overrides.end(); + if (hasOverride) overridesUsed.insert(inputPath); + auto & input = hasOverride ? i->second : input2; + + /* Resolve 'follows' later (since it may refer to an input + path we haven't processed yet. */ + if (input.follows) { + if (hasOverride) + /* 'follows' from an override is relative to the + root of the graph. */ + follows.insert_or_assign(inputPath, *input.follows); + else { + /* Otherwise, it's relative to the current flake. 
*/ + InputPath path(inputPathPrefix); + for (auto & i : *input.follows) path.push_back(i); + follows.insert_or_assign(inputPath, path); + } + continue; + } + + /* Do we have an entry in the existing lock file? And we + don't have a --update-input flag for this input? */ + std::shared_ptr<const LockedNode> oldLock; + + updatesUsed.insert(inputPath); + + if (oldNode && !lockFlags.inputUpdates.count(inputPath)) { + auto oldLockIt = oldNode->inputs.find(id); + if (oldLockIt != oldNode->inputs.end()) + oldLock = std::dynamic_pointer_cast<const LockedNode>(oldLockIt->second); + } + + if (oldLock + && oldLock->originalRef == input.ref + && !hasOverride) + { + debug("keeping existing input '%s'", inputPathS); + + /* Copy the input from the old lock since its flakeref + didn't change and there is no override from a + higher level flake. */ + auto childNode = std::make_shared<LockedNode>( + oldLock->lockedRef, oldLock->originalRef, oldLock->info, oldLock->isFlake); + + node->inputs.insert_or_assign(id, childNode); + + /* If we have an --update-input flag for an input + of this input, then we must fetch the flake to + to update it. */ + auto lb = lockFlags.inputUpdates.lower_bound(inputPath); + + auto hasChildUpdate = + lb != lockFlags.inputUpdates.end() + && lb->size() > inputPath.size() + && std::equal(inputPath.begin(), inputPath.end(), lb->begin()); + + if (hasChildUpdate) { + auto inputFlake = getFlake( + state, oldLock->lockedRef, oldLock->info, false, flakeCache); + computeLocks(inputFlake.inputs, childNode, inputPath, oldLock); + } else { + /* No need to fetch this flake, we can be + lazy. However there may be new overrides on the + inputs of this flake, so we need to check + those. */ + FlakeInputs fakeInputs; + + for (auto & i : oldLock->inputs) { + auto lockedNode = std::dynamic_pointer_cast<LockedNode>(i.second); + // Note: this node is not locked in case + // of a circular reference back to the root. + if (lockedNode) + fakeInputs.emplace(i.first, FlakeInput { + .ref = lockedNode->originalRef + }); + else { + InputPath path(inputPath); + path.push_back(i.first); + follows.insert_or_assign(path, InputPath()); + } + } + + computeLocks(fakeInputs, childNode, inputPath, oldLock); + } + + } else { + /* We need to create a new lock file entry. So fetch + this input. */ + + if (!lockFlags.allowMutable && !input.ref.input->isImmutable()) + throw Error("cannot update flake input '%s' in pure mode", inputPathS); + + if (input.isFlake) { + auto inputFlake = getFlake(state, input.ref, {}, lockFlags.useRegistries, flakeCache); + + /* Note: in case of an --override-input, we use + the *original* ref (input2.ref) for the + "original" field, rather than the + override. This ensures that the override isn't + nuked the next time we update the lock + file. That is, overrides are sticky unless you + use --no-write-lock-file. */ + auto childNode = std::make_shared<LockedNode>( + inputFlake.lockedRef, input2.ref, inputFlake.sourceInfo->info); + + node->inputs.insert_or_assign(id, childNode); + + /* Guard against circular flake imports. */ + for (auto & parent : parents) + if (parent == input.ref) + throw Error("found circular import of flake '%s'", parent); + parents.push_back(input.ref); + Finally cleanup([&]() { parents.pop_back(); }); + + /* Recursively process the inputs of this + flake. Also, unless we already have this flake + in the top-level lock file, use this flake's + own lock file. */ + computeLocks( + inputFlake.inputs, childNode, inputPath, + oldLock + ? 
std::dynamic_pointer_cast<const Node>(oldLock) + : LockFile::read( + inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root); + } + + else { + auto [sourceInfo, lockedRef] = fetchOrSubstituteTree( + state, input.ref, {}, lockFlags.useRegistries, flakeCache); + node->inputs.insert_or_assign(id, + std::make_shared<LockedNode>(lockedRef, input.ref, sourceInfo.info, false)); + } + } + } + }; + + computeLocks( + flake.inputs, newLockFile.root, {}, + lockFlags.recreateLockFile ? nullptr : oldLockFile.root); + + /* Insert edges for 'follows' overrides. */ + for (auto & [from, to] : follows) { + debug("adding 'follows' node from '%s' to '%s'", + concatStringsSep("/", from), + concatStringsSep("/", to)); + + assert(!from.empty()); + + InputPath fromParent(from); + fromParent.pop_back(); + + auto fromParentNode = newLockFile.root->findInput(fromParent); + assert(fromParentNode); + + auto toNode = newLockFile.root->findInput(to); + if (!toNode) + throw Error("flake input '%s' follows non-existent flake input '%s'", + concatStringsSep("/", from), + concatStringsSep("/", to)); + + fromParentNode->inputs.insert_or_assign(from.back(), toNode); + } + + for (auto & i : lockFlags.inputOverrides) + if (!overridesUsed.count(i.first)) + warn("the flag '--override-input %s %s' does not match any input", + concatStringsSep("/", i.first), i.second); + + for (auto & i : lockFlags.inputUpdates) + if (!updatesUsed.count(i)) + warn("the flag '--update-input %s' does not match any input", concatStringsSep("/", i)); + + debug("new lock file: %s", newLockFile); + + /* Check whether we need to / can write the new lock file. */ + if (!(newLockFile == oldLockFile)) { + + auto diff = diffLockFiles(oldLockFile, newLockFile); + + if (!(oldLockFile == LockFile())) + printInfo("inputs of flake '%s' changed:\n%s", topRef, chomp(diff)); + + if (lockFlags.writeLockFile) { + if (auto sourcePath = topRef.input->getSourcePath()) { + if (!newLockFile.isImmutable()) { + if (settings.warnDirty) + warn("will not write lock file of flake '%s' because it has a mutable input", topRef); + } else { + if (!lockFlags.updateLockFile) + throw Error("flake '%s' requires lock file changes but they're not allowed due to '--no-update-lock-file'", topRef); + + auto relPath = (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock"; + + auto path = *sourcePath + "/" + relPath; + + bool lockFileExists = pathExists(path); + + if (lockFileExists) + warn("updating lock file '%s'", path); + else + warn("creating lock file '%s'", path); + + newLockFile.write(path); + + topRef.input->markChangedFile( + (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock", + lockFlags.commitLockFile + ? std::optional<std::string>(fmt("%s: %s\n\nFlake input changes:\n\n%s", + relPath, lockFileExists ? "Update" : "Add", diff)) + : std::nullopt); + + /* Rewriting the lockfile changed the top-level + repo, so we should re-read it. FIXME: we could + also just clear the 'rev' field... */ + auto prevLockedRef = flake.lockedRef; + FlakeCache dummyCache; + flake = getFlake(state, topRef, {}, lockFlags.useRegistries, dummyCache); + + if (lockFlags.commitLockFile && + flake.lockedRef.input->getRev() && + prevLockedRef.input->getRev() != flake.lockedRef.input->getRev()) + warn("committed new revision '%s'", flake.lockedRef.input->getRev()->gitRev()); + + /* Make sure that we picked up the change, + i.e. the tree should usually be dirty + now. Corner case: we could have reverted from a + dirty to a clean tree! 
*/ + if (flake.lockedRef.input == prevLockedRef.input + && !flake.lockedRef.input->isImmutable()) + throw Error("'%s' did not change after I updated its 'flake.lock' file; is 'flake.lock' under version control?", flake.originalRef); + } + } else + throw Error("cannot write modified lock file of flake '%s' (use '--no-write-lock-file' to ignore)", topRef); + } else + warn("not writing modified lock file of flake '%s'", topRef); + } + + return LockedFlake { .flake = std::move(flake), .lockFile = std::move(newLockFile) }; +} + +void callFlake(EvalState & state, + const LockedFlake & lockedFlake, + Value & vRes) +{ + auto vLocks = state.allocValue(); + auto vRootSrc = state.allocValue(); + auto vRootSubdir = state.allocValue(); + auto vTmp1 = state.allocValue(); + auto vTmp2 = state.allocValue(); + + mkString(*vLocks, lockedFlake.lockFile.to_string()); + + emitTreeAttrs(state, *lockedFlake.flake.sourceInfo, lockedFlake.flake.lockedRef.input, *vRootSrc); + + mkString(*vRootSubdir, lockedFlake.flake.lockedRef.subdir); + + static Value * vCallFlake = nullptr; + + if (!vCallFlake) { + vCallFlake = state.allocValue(); + state.eval(state.parseExprFromString( + #include "call-flake.nix.gen.hh" + , "/"), *vCallFlake); + } + + state.callFunction(*vCallFlake, *vLocks, *vTmp1, noPos); + state.callFunction(*vTmp1, *vRootSrc, *vTmp2, noPos); + state.callFunction(*vTmp2, *vRootSubdir, vRes, noPos); +} + +static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v) +{ + auto flakeRefS = state.forceStringNoCtx(*args[0], pos); + auto flakeRef = parseFlakeRef(flakeRefS); + if (evalSettings.pureEval && !flakeRef.input->isImmutable()) + throw Error("cannot call 'getFlake' on mutable flake reference '%s', at %s (use --impure to override)", flakeRefS, pos); + + callFlake(state, + lockFlake(state, flakeRef, + LockFlags { + .updateLockFile = false, + .useRegistries = !evalSettings.pureEval, + .allowMutable = !evalSettings.pureEval, + }), + v); +} + +static RegisterPrimOp r2("getFlake", 1, prim_getFlake); + +} + +Fingerprint LockedFlake::getFingerprint() const +{ + // FIXME: as an optimization, if the flake contains a lock file + // and we haven't changed it, then it's sufficient to use + // flake.sourceInfo.storePath for the fingerprint. + return hashString(htSHA256, + fmt("%s;%d;%d;%s", + flake.sourceInfo->storePath.to_string(), + flake.sourceInfo->info.revCount.value_or(0), + flake.sourceInfo->info.lastModified.value_or(0), + lockFile)); +} + +Flake::~Flake() { } + +} diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh new file mode 100644 index 000000000..88e386be0 --- /dev/null +++ b/src/libexpr/flake/flake.hh @@ -0,0 +1,110 @@ +#pragma once + +#include "types.hh" +#include "flakeref.hh" +#include "lockfile.hh" + +namespace nix { + +struct Value; +class EvalState; + +namespace fetchers { struct Tree; } + +namespace flake { + +struct FlakeInput; + +typedef std::map<FlakeId, FlakeInput> FlakeInputs; + +struct FlakeInput +{ + FlakeRef ref; + bool isFlake = true; + std::optional<InputPath> follows; + FlakeInputs overrides; +}; + +struct Flake +{ + FlakeRef originalRef; + FlakeRef lockedRef; + std::optional<std::string> description; + std::shared_ptr<const fetchers::Tree> sourceInfo; + FlakeInputs inputs; + Value * vOutputs; // FIXME: gc + unsigned int edition; + ~Flake(); +}; + +Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool allowLookup); + +/* Fingerprint of a locked flake; used as a cache key. 
*/ +typedef Hash Fingerprint; + +struct LockedFlake +{ + Flake flake; + LockFile lockFile; + + Fingerprint getFingerprint() const; +}; + +struct LockFlags +{ + /* Whether to ignore the existing lock file, creating a new one + from scratch. */ + bool recreateLockFile = false; + + /* Whether to update the lock file at all. If set to false, if any + change to the lock file is needed (e.g. when an input has been + added to flake.nix), you get a fatal error. */ + bool updateLockFile = true; + + /* Whether to write the lock file to disk. If set to true, if the + any changes to the lock file are needed and the flake is not + writable (i.e. is not a local Git working tree or similar), you + get a fatal error. If set to false, Nix will use the modified + lock file in memory only, without writing it to disk. */ + bool writeLockFile = true; + + /* Whether to use the registries to lookup indirect flake + references like 'nixpkgs'. */ + bool useRegistries = true; + + /* Whether mutable flake references (i.e. those without a Git + revision or similar) without a corresponding lock are + allowed. Mutable flake references with a lock are always + allowed. */ + bool allowMutable = true; + + /* Whether to commit changes to flake.lock. */ + bool commitLockFile = false; + + /* Flake inputs to be overriden. */ + std::map<InputPath, FlakeRef> inputOverrides; + + /* Flake inputs to be updated. This means that any existing lock + for those inputs will be ignored. */ + std::set<InputPath> inputUpdates; +}; + +LockedFlake lockFlake( + EvalState & state, + const FlakeRef & flakeRef, + const LockFlags & lockFlags); + +void callFlake( + EvalState & state, + const LockedFlake & lockedFlake, + Value & v); + +} + +void emitTreeAttrs( + EvalState & state, + const fetchers::Tree & tree, + std::shared_ptr<const fetchers::Input> input, + Value & v); + +} diff --git a/src/libexpr/flake/flakeref.cc b/src/libexpr/flake/flakeref.cc new file mode 100644 index 000000000..de91f2eed --- /dev/null +++ b/src/libexpr/flake/flakeref.cc @@ -0,0 +1,188 @@ +#include "flakeref.hh" +#include "store-api.hh" +#include "url.hh" +#include "fetchers.hh" +#include "registry.hh" + +namespace nix { + +#if 0 +// 'dir' path elements cannot start with a '.'. We also reject +// potentially dangerous characters like ';'. 
+const static std::string subDirElemRegex = "(?:[a-zA-Z0-9_-]+[a-zA-Z0-9._-]*)"; +const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRegex + ")*"; +#endif + +std::string FlakeRef::to_string() const +{ + auto url = input->toURL(); + if (subdir != "") + url.query.insert_or_assign("dir", subdir); + return url.to_string(); +} + +fetchers::Attrs FlakeRef::toAttrs() const +{ + auto attrs = input->toAttrs(); + if (subdir != "") + attrs.emplace("dir", subdir); + return attrs; +} + +std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef) +{ + str << flakeRef.to_string(); + return str; +} + +bool FlakeRef::operator ==(const FlakeRef & other) const +{ + return *input == *other.input && subdir == other.subdir; +} + +FlakeRef FlakeRef::resolve(ref<Store> store) const +{ + auto [input2, extraAttrs] = lookupInRegistries(store, input); + return FlakeRef(input2, fetchers::maybeGetStrAttr(extraAttrs, "dir").value_or(subdir)); +} + +FlakeRef parseFlakeRef( + const std::string & url, const std::optional<Path> & baseDir) +{ + auto [flakeRef, fragment] = parseFlakeRefWithFragment(url, baseDir); + if (fragment != "") + throw Error("unexpected fragment '%s' in flake reference '%s'", fragment, url); + return flakeRef; +} + +std::optional<FlakeRef> maybeParseFlakeRef( + const std::string & url, const std::optional<Path> & baseDir) +{ + try { + return parseFlakeRef(url, baseDir); + } catch (Error &) { + return {}; + } +} + +std::pair<FlakeRef, std::string> parseFlakeRefWithFragment( + const std::string & url, const std::optional<Path> & baseDir) +{ + using namespace fetchers; + + static std::string fnRegex = "[0-9a-zA-Z-._~!$&'\"()*+,;=]+"; + + static std::regex pathUrlRegex( + "(/?" + fnRegex + "(?:/" + fnRegex + ")*/?)" + + "(?:\\?(" + queryRegex + "))?" + + "(?:#(" + queryRegex + "))?", + std::regex::ECMAScript); + + static std::regex flakeRegex( + "((" + flakeIdRegexS + ")(?:/(?:" + refAndOrRevRegex + "))?)" + + "(?:#(" + queryRegex + "))?", + std::regex::ECMAScript); + + std::smatch match; + + /* Check if 'url' is a flake ID. This is an abbreviated syntax for + 'flake:<flake-id>?ref=<ref>&rev=<rev>'. */ + + if (std::regex_match(url, match, flakeRegex)) { + auto parsedURL = ParsedURL{ + .url = url, + .base = "flake:" + std::string(match[1]), + .scheme = "flake", + .authority = "", + .path = match[1], + }; + + return std::make_pair( + FlakeRef(inputFromURL(parsedURL), ""), + percentDecode(std::string(match[6]))); + } + + /* Check if 'url' is a path (either absolute or relative to + 'baseDir'). If so, search upward to the root of the repo + (i.e. the directory containing .git). */ + + else if (std::regex_match(url, match, pathUrlRegex)) { + std::string path = match[1]; + if (!baseDir && !hasPrefix(path, "/")) + throw BadURL("flake reference '%s' is not an absolute path", url); + path = absPath(path, baseDir, true); + + if (!S_ISDIR(lstat(path).st_mode)) + throw BadURL("path '%s' is not a flake (because it's not a directory)", path); + + auto flakeRoot = path; + std::string subdir; + + while (true) { + if (pathExists(flakeRoot + "/.git")) break; + subdir = std::string(baseNameOf(flakeRoot)) + (subdir.empty() ? 
"" : "/" + subdir); + flakeRoot = dirOf(flakeRoot); + if (flakeRoot == "/") + throw BadURL("path '%s' is not a flake (because it does not reference a Git repository)", path); + } + + auto base = std::string("git+file://") + flakeRoot; + + auto parsedURL = ParsedURL{ + .url = base, // FIXME + .base = base, + .scheme = "git+file", + .authority = "", + .path = flakeRoot, + .query = decodeQuery(match[2]), + }; + + auto fragment = percentDecode(std::string(match[3])); + + if (subdir != "") { + if (parsedURL.query.count("dir")) + throw Error("flake URL '%s' has an inconsistent 'dir' parameter", url); + parsedURL.query.insert_or_assign("dir", subdir); + } + + return std::make_pair( + FlakeRef(inputFromURL(parsedURL), get(parsedURL.query, "dir").value_or("")), + fragment); + } + + else { + auto parsedURL = parseURL(url); + std::string fragment; + std::swap(fragment, parsedURL.fragment); + return std::make_pair( + FlakeRef(inputFromURL(parsedURL), get(parsedURL.query, "dir").value_or("")), + fragment); + } +} + +std::optional<std::pair<FlakeRef, std::string>> maybeParseFlakeRefWithFragment( + const std::string & url, const std::optional<Path> & baseDir) +{ + try { + return parseFlakeRefWithFragment(url, baseDir); + } catch (Error & e) { + return {}; + } +} + +FlakeRef FlakeRef::fromAttrs(const fetchers::Attrs & attrs) +{ + auto attrs2(attrs); + attrs2.erase("dir"); + return FlakeRef( + fetchers::inputFromAttrs(attrs2), + fetchers::maybeGetStrAttr(attrs, "dir").value_or("")); +} + +std::pair<fetchers::Tree, FlakeRef> FlakeRef::fetchTree(ref<Store> store) const +{ + auto [tree, lockedInput] = input->fetchTree(store); + return {std::move(tree), FlakeRef(lockedInput, subdir)}; +} + +} diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh new file mode 100644 index 000000000..1dbb6e54d --- /dev/null +++ b/src/libexpr/flake/flakeref.hh @@ -0,0 +1,55 @@ +#pragma once + +#include "types.hh" +#include "hash.hh" +#include "fetchers.hh" + +#include <variant> + +namespace nix { + +class Store; + +typedef std::string FlakeId; + +struct FlakeRef +{ + std::shared_ptr<const fetchers::Input> input; + + Path subdir; + + bool operator==(const FlakeRef & other) const; + + FlakeRef(const std::shared_ptr<const fetchers::Input> & input, const Path & subdir) + : input(input), subdir(subdir) + { + assert(input); + } + + // FIXME: change to operator <<. 
+ std::string to_string() const; + + fetchers::Attrs toAttrs() const; + + FlakeRef resolve(ref<Store> store) const; + + static FlakeRef fromAttrs(const fetchers::Attrs & attrs); + + std::pair<fetchers::Tree, FlakeRef> fetchTree(ref<Store> store) const; +}; + +std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef); + +FlakeRef parseFlakeRef( + const std::string & url, const std::optional<Path> & baseDir = {}); + +std::optional<FlakeRef> maybeParseFlake( + const std::string & url, const std::optional<Path> & baseDir = {}); + +std::pair<FlakeRef, std::string> parseFlakeRefWithFragment( + const std::string & url, const std::optional<Path> & baseDir = {}); + +std::optional<std::pair<FlakeRef, std::string>> maybeParseFlakeRefWithFragment( + const std::string & url, const std::optional<Path> & baseDir = {}); + +} diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc new file mode 100644 index 000000000..a40637824 --- /dev/null +++ b/src/libexpr/flake/lockfile.cc @@ -0,0 +1,284 @@ +#include "lockfile.hh" +#include "store-api.hh" + +#include <nlohmann/json.hpp> + +namespace nix::flake { + +FlakeRef flakeRefFromJson(const nlohmann::json & json) +{ + return FlakeRef::fromAttrs(jsonToAttrs(json)); +} + +FlakeRef getFlakeRef( + const nlohmann::json & json, + const char * version3Attr1, + const char * version3Attr2, + const char * version4Attr) +{ + auto i = json.find(version4Attr); + if (i != json.end()) + return flakeRefFromJson(*i); + + // FIXME: remove these. + i = json.find(version3Attr1); + if (i != json.end()) + return parseFlakeRef(*i); + + i = json.find(version3Attr2); + if (i != json.end()) + return parseFlakeRef(*i); + + throw Error("attribute '%s' missing in lock file", version4Attr); +} + +LockedNode::LockedNode(const nlohmann::json & json) + : lockedRef(getFlakeRef(json, "url", "uri", "locked")) + , originalRef(getFlakeRef(json, "originalUrl", "originalUri", "original")) + , info(TreeInfo::fromJson(json)) + , isFlake(json.find("flake") != json.end() ? 
(bool) json["flake"] : true) +{ + if (!lockedRef.input->isImmutable()) + throw Error("lockfile contains mutable flakeref '%s'", lockedRef); +} + +StorePath LockedNode::computeStorePath(Store & store) const +{ + return info.computeStorePath(store); +} + +std::shared_ptr<Node> Node::findInput(const InputPath & path) +{ + auto pos = shared_from_this(); + + for (auto & elem : path) { + auto i = pos->inputs.find(elem); + if (i == pos->inputs.end()) + return {}; + pos = i->second; + } + + return pos; +} + +LockFile::LockFile(const nlohmann::json & json, const Path & path) +{ + auto version = json.value("version", 0); + if (version < 3 || version > 5) + throw Error("lock file '%s' has unsupported version %d", path, version); + + if (version < 5) { + std::function<void(Node & node, const nlohmann::json & json)> getInputs; + + getInputs = [&](Node & node, const nlohmann::json & json) + { + for (auto & i : json["inputs"].items()) { + auto input = std::make_shared<LockedNode>(i.value()); + getInputs(*input, i.value()); + node.inputs.insert_or_assign(i.key(), input); + } + }; + + getInputs(*root, json); + } + + else { + std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap; + + std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs; + + getInputs = [&](Node & node, const nlohmann::json & jsonNode) + { + if (jsonNode.find("inputs") == jsonNode.end()) return; + for (auto & i : jsonNode["inputs"].items()) { + std::string inputKey = i.value(); + auto k = nodeMap.find(inputKey); + if (k == nodeMap.end()) { + auto jsonNode2 = json["nodes"][inputKey]; + auto input = std::make_shared<LockedNode>(jsonNode2); + k = nodeMap.insert_or_assign(inputKey, input).first; + getInputs(*input, jsonNode2); + } + node.inputs.insert_or_assign(i.key(), k->second); + } + }; + + std::string rootKey = json["root"]; + nodeMap.insert_or_assign(rootKey, root); + getInputs(*root, json["nodes"][rootKey]); + } +} + +nlohmann::json LockFile::toJson() const +{ + nlohmann::json nodes; + std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys; + std::unordered_set<std::string> keys; + + std::function<std::string(const std::string & key, std::shared_ptr<const Node> node)> dumpNode; + + dumpNode = [&](std::string key, std::shared_ptr<const Node> node) -> std::string + { + auto k = nodeKeys.find(node); + if (k != nodeKeys.end()) + return k->second; + + if (!keys.insert(key).second) { + for (int n = 2; ; ++n) { + auto k = fmt("%s_%d", key, n); + if (keys.insert(k).second) { + key = k; + break; + } + } + } + + nodeKeys.insert_or_assign(node, key); + + auto n = nlohmann::json::object(); + + if (!node->inputs.empty()) { + auto inputs = nlohmann::json::object(); + for (auto & i : node->inputs) + inputs[i.first] = dumpNode(i.first, i.second); + n["inputs"] = std::move(inputs); + } + + if (auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(node)) { + n["original"] = fetchers::attrsToJson(lockedNode->originalRef.toAttrs()); + n["locked"] = fetchers::attrsToJson(lockedNode->lockedRef.toAttrs()); + n["info"] = lockedNode->info.toJson(); + if (!lockedNode->isFlake) n["flake"] = false; + } + + nodes[key] = std::move(n); + + return key; + }; + + nlohmann::json json; + json["version"] = 5; + json["root"] = dumpNode("root", root); + json["nodes"] = std::move(nodes); + + return json; +} + +std::string LockFile::to_string() const +{ + return toJson().dump(2); +} + +LockFile LockFile::read(const Path & path) +{ + if (!pathExists(path)) return LockFile(); + return 
LockFile(nlohmann::json::parse(readFile(path)), path); +} + +std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile) +{ + stream << lockFile.toJson().dump(2); + return stream; +} + +void LockFile::write(const Path & path) const +{ + createDirs(dirOf(path)); + writeFile(path, fmt("%s\n", *this)); +} + +bool LockFile::isImmutable() const +{ + std::unordered_set<std::shared_ptr<const Node>> nodes; + + std::function<void(std::shared_ptr<const Node> node)> visit; + + visit = [&](std::shared_ptr<const Node> node) + { + if (!nodes.insert(node).second) return; + for (auto & i : node->inputs) visit(i.second); + }; + + visit(root); + + for (auto & i : nodes) { + if (i == root) continue; + auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(i); + if (lockedNode && !lockedNode->lockedRef.input->isImmutable()) return false; + } + + return true; +} + +bool LockFile::operator ==(const LockFile & other) const +{ + // FIXME: slow + return toJson() == other.toJson(); +} + +InputPath parseInputPath(std::string_view s) +{ + InputPath path; + + for (auto & elem : tokenizeString<std::vector<std::string>>(s, "/")) { + if (!std::regex_match(elem, flakeIdRegex)) + throw Error("invalid flake input path element '%s'", elem); + path.push_back(elem); + } + + return path; +} + +static void flattenLockFile( + std::shared_ptr<const Node> node, + const InputPath & prefix, + std::unordered_set<std::shared_ptr<const Node>> & done, + std::map<InputPath, std::shared_ptr<const LockedNode>> & res) +{ + if (!done.insert(node).second) return; + + for (auto &[id, input] : node->inputs) { + auto inputPath(prefix); + inputPath.push_back(id); + if (auto lockedInput = std::dynamic_pointer_cast<const LockedNode>(input)) + res.emplace(inputPath, lockedInput); + flattenLockFile(input, inputPath, done, res); + } +} + +std::string diffLockFiles(const LockFile & oldLocks, const LockFile & newLocks) +{ + std::unordered_set<std::shared_ptr<const Node>> done; + std::map<InputPath, std::shared_ptr<const LockedNode>> oldFlat, newFlat; + flattenLockFile(oldLocks.root, {}, done, oldFlat); + done.clear(); + flattenLockFile(newLocks.root, {}, done, newFlat); + + auto i = oldFlat.begin(); + auto j = newFlat.begin(); + std::string res; + + while (i != oldFlat.end() || j != newFlat.end()) { + if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) { + res += fmt("* Added '%s': '%s'\n", concatStringsSep("/", j->first), j->second->lockedRef); + ++j; + } else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) { + res += fmt("* Removed '%s'\n", concatStringsSep("/", i->first)); + ++i; + } else { + if (!(i->second->lockedRef == j->second->lockedRef)) { + assert(i->second->lockedRef.to_string() != j->second->lockedRef.to_string()); + res += fmt("* Updated '%s': '%s' -> '%s'\n", + concatStringsSep("/", i->first), + i->second->lockedRef, + j->second->lockedRef); + } + ++i; + ++j; + } + } + + return res; +} + +} diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh new file mode 100644 index 000000000..c34939ebc --- /dev/null +++ b/src/libexpr/flake/lockfile.hh @@ -0,0 +1,77 @@ +#pragma once + +#include "flakeref.hh" + +#include <nlohmann/json_fwd.hpp> + +namespace nix { +class Store; +struct StorePath; +} + +namespace nix::flake { + +using namespace fetchers; + +typedef std::vector<FlakeId> InputPath; + +/* A node in the lock file. It has outgoing edges to other nodes (its + inputs). Only the root node has this type; all other nodes have + type LockedNode. 
*/ +struct Node : std::enable_shared_from_this<Node> +{ + std::map<FlakeId, std::shared_ptr<Node>> inputs; + + virtual ~Node() { } + + std::shared_ptr<Node> findInput(const InputPath & path); +}; + +/* A non-root node in the lock file. */ +struct LockedNode : Node +{ + FlakeRef lockedRef, originalRef; + TreeInfo info; + bool isFlake = true; + + LockedNode( + const FlakeRef & lockedRef, + const FlakeRef & originalRef, + const TreeInfo & info, + bool isFlake = true) + : lockedRef(lockedRef), originalRef(originalRef), info(info), isFlake(isFlake) + { } + + LockedNode(const nlohmann::json & json); + + StorePath computeStorePath(Store & store) const; +}; + +struct LockFile +{ + std::shared_ptr<Node> root = std::make_shared<Node>(); + + LockFile() {}; + LockFile(const nlohmann::json & json, const Path & path); + + nlohmann::json toJson() const; + + std::string to_string() const; + + static LockFile read(const Path & path); + + void write(const Path & path) const; + + bool isImmutable() const; + + bool operator ==(const LockFile & other) const; +}; + +std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile); + +InputPath parseInputPath(std::string_view s); + +std::string diffLockFiles(const LockFile & oldLocks, const LockFile & newLocks); + +} + diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk index 917e8a1c7..f9460e821 100644 --- a/src/libexpr/local.mk +++ b/src/libexpr/local.mk @@ -4,7 +4,12 @@ libexpr_NAME = libnixexpr libexpr_DIR := $(d) -libexpr_SOURCES := $(wildcard $(d)/*.cc) $(wildcard $(d)/primops/*.cc) $(d)/lexer-tab.cc $(d)/parser-tab.cc +libexpr_SOURCES := \ + $(wildcard $(d)/*.cc) \ + $(wildcard $(d)/primops/*.cc) \ + $(wildcard $(d)/flake/*.cc) \ + $(d)/lexer-tab.cc \ + $(d)/parser-tab.cc libexpr_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/libmain -I src/libexpr @@ -34,4 +39,9 @@ dist-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer $(eval $(call install-file-in, $(d)/nix-expr.pc, $(prefix)/lib/pkgconfig, 0644)) +$(foreach i, $(wildcard src/libexpr/flake/*.hh), \ + $(eval $(call install-file-in, $(i), $(includedir)/nix/flake, 0644))) + $(d)/primops.cc: $(d)/imported-drv-to-derivation.nix.gen.hh + +$(d)/flake/flake.cc: $(d)/flake/call-flake.nix.gen.hh diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 629f3da15..fb7d5497b 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -50,20 +50,20 @@ void EvalState::realiseContext(const PathSet & context) std::vector<StorePathWithOutputs> drvs; for (auto & i : context) { - std::pair<string, string> decoded = decodeContext(i); - auto ctx = store->parseStorePath(decoded.first); + auto [ctxS, outputName] = decodeContext(i); + auto ctx = store->parseStorePath(ctxS); if (!store->isValidPath(ctx)) throw InvalidPathError(store->printStorePath(ctx)); - if (!decoded.second.empty() && ctx.isDerivation()) { - drvs.push_back(StorePathWithOutputs{ctx.clone(), {decoded.second}}); + if (!outputName.empty() && ctx.isDerivation()) { + drvs.push_back(StorePathWithOutputs{ctx.clone(), {outputName}}); /* Add the output of this derivation to the allowed paths. 
*/ if (allowedPaths) { - auto drv = store->derivationFromPath(store->parseStorePath(decoded.first)); - DerivationOutputs::iterator i = drv.outputs.find(decoded.second); + auto drv = store->derivationFromPath(ctx); + DerivationOutputs::iterator i = drv.outputs.find(outputName); if (i == drv.outputs.end()) - throw Error("derivation '%s' does not have an output named '%s'", decoded.first, decoded.second); + throw Error("derivation '%s' does not have an output named '%s'", ctxS, outputName); allowedPaths->insert(store->printStorePath(i->second.path)); } } @@ -79,6 +79,7 @@ void EvalState::realiseContext(const PathSet & context) StorePathSet willBuild, willSubstitute, unknown; unsigned long long downloadSize, narSize; store->queryMissing(drvs, willBuild, willSubstitute, unknown, downloadSize, narSize); + store->buildPaths(drvs); } diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 9586f71ed..4f66fc6c1 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -3,6 +3,7 @@ #include "store-api.hh" #include "fetchers.hh" #include "download.hh" +#include "registry.hh" #include <ctime> #include <iomanip> @@ -33,9 +34,11 @@ void emitTreeAttrs( if (tree.info.revCount) mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *tree.info.revCount); - if (tree.info.lastModified) - mkString(*state.allocAttr(v, state.symbols.create("lastModified")), + if (tree.info.lastModified) { + mkInt(*state.allocAttr(v, state.symbols.create("lastModified")), *tree.info.lastModified); + mkString(*state.allocAttr(v, state.symbols.create("lastModifiedDate")), fmt("%s", std::put_time(std::gmtime(&*tree.info.lastModified), "%Y%m%d%H%M%S"))); + } v.attrs->sort(); } @@ -60,8 +63,10 @@ static void prim_fetchTree(EvalState & state, const Pos & pos, Value * * args, V attrs.emplace(attr.name, attr.value->string.s); else if (attr.value->type == tBool) attrs.emplace(attr.name, attr.value->boolean); + else if (attr.value->type == tInt) + attrs.emplace(attr.name, attr.value->integer); else - throw TypeError("fetchTree argument '%s' is %s while a string or Boolean is expected", + throw TypeError("fetchTree argument '%s' is %s while a string, Boolean or integer is expected", attr.name, showType(*attr.value)); } @@ -72,8 +77,11 @@ static void prim_fetchTree(EvalState & state, const Pos & pos, Value * * args, V } else input = fetchers::inputFromURL(state.coerceToString(pos, *args[0], context, false, false)); + if (!evalSettings.pureEval && !input->isDirect()) + input = lookupInRegistries(state.store, input).first; + if (evalSettings.pureEval && !input->isImmutable()) - throw Error("in pure evaluation mode, 'fetchTree' requires an immutable input"); + throw Error("in pure evaluation mode, 'fetchTree' requires an immutable input, at %s", pos); // FIXME: use fetchOrSubstituteTree auto [tree, input2] = input->fetchTree(state.store); diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh index 689373873..60de60c67 100644 --- a/src/libexpr/value.hh +++ b/src/libexpr/value.hh @@ -166,6 +166,11 @@ struct Value { return type == tList1 ? 1 : type == tList2 ? 2 : bigList.size; } + + /* Check whether forcing this value requires a trivial amount of + computation. In particular, function applications are + non-trivial. */ + bool isTrivial() const; }; |
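
With the fetchTree.cc and primops.cc changes above in place, the new primop registered in flake.cc can be driven from an ordinary expression. The following usage sketch is not part of the commit: it assumes the current directory is a Git working tree containing a flake.nix like the sketch earlier and an up-to-date flake.lock (prim_getFlake passes updateLockFile = false, so required lock changes are a fatal error). A mutable reference such as a local path additionally needs the '--impure' flag added in common-eval-args.cc; an immutable reference pinned to a revision also works in pure evaluation mode.

let
  # 'builtins.getFlake' is registered by prim_getFlake in flake.cc.
  flake = builtins.getFlake (toString ./.);
in {
  # Attributes assembled for the root node by call-flake.nix:
  inherit (flake) inputs outputs sourceInfo;

  # emitTreeAttrs now emits 'lastModified' as an integer and adds a
  # 'lastModifiedDate' string of the form "YYYYMMDDHHMMSS" when available.
  date = flake.sourceInfo.lastModifiedDate or null;
}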