Diffstat (limited to 'src/libexpr/flake')
 src/libexpr/flake/call-flake.nix |  29 ++
 src/libexpr/flake/flake.cc       | 666 ++
 src/libexpr/flake/flake.hh       | 110 ++
 src/libexpr/flake/flakeref.cc    | 196 ++
 src/libexpr/flake/flakeref.hh    |  55 ++
 src/libexpr/flake/lockfile.cc    | 284 ++
 src/libexpr/flake/lockfile.hh    |  77 ++
 7 files changed, 1417 insertions(+), 0 deletions(-)
diff --git a/src/libexpr/flake/call-flake.nix b/src/libexpr/flake/call-flake.nix
new file mode 100644
index 000000000..8ee17b8f4
--- /dev/null
+++ b/src/libexpr/flake/call-flake.nix
@@ -0,0 +1,29 @@
+lockFileStr: rootSrc: rootSubdir:
+
+let
+
+ lockFile = builtins.fromJSON lockFileStr;
+
+ allNodes =
+ builtins.mapAttrs
+ (key: node:
+ let
+ sourceInfo =
+ if key == lockFile.root
+ then rootSrc
+ else fetchTree ({ inherit (node.info) narHash; } // removeAttrs node.locked ["dir"]);
+ subdir = if key == lockFile.root then rootSubdir else node.locked.dir or "";
+ flake = import (sourceInfo + (if subdir != "" then "/" else "") + subdir + "/flake.nix");
+ inputs = builtins.mapAttrs (inputName: key: allNodes.${key}) (node.inputs or {});
+ outputs = flake.outputs (inputs // { self = result; });
+ result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; };
+ in
+ if node.flake or true then
+ assert builtins.isFunction flake.outputs;
+ result
+ else
+ sourceInfo
+ )
+ lockFile.nodes;
+
+in allNodes.${lockFile.root}
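For context: this expression is evaluated by callFlake() in flake.cc below, which curries in the lock-file JSON (as a string), the root source tree, and the root subdirectory. The JSON it reads is the version-5 schema emitted by LockFile::toJson() in lockfile.cc. The following is only an illustrative sketch of that schema, with placeholder hash/rev values, parsed with nlohmann::json to show the fields the expression touches (root, nodes, per-node inputs, locked, info.narHash, flake):

    // Illustrative only: a hand-written, minimal version-5 lock file.
    // The narHash/rev values are placeholders, not real hashes.
    #include <nlohmann/json.hpp>
    #include <cassert>
    #include <iostream>

    int main()
    {
        auto lock = nlohmann::json::parse(R"json({
          "version": 5,
          "root": "root",
          "nodes": {
            "root": {
              "inputs": { "nixpkgs": "nixpkgs" }
            },
            "nixpkgs": {
              "original": { "type": "indirect", "id": "nixpkgs" },
              "locked": { "type": "github", "owner": "NixOS", "repo": "nixpkgs",
                          "rev": "0000000000000000000000000000000000000000" },
              "info": { "narHash": "sha256-placeholder" }
            }
          }
        })json");

        // call-flake.nix starts at lock["root"] and maps over lock["nodes"];
        // non-flake nodes carry "flake": false and are returned as bare sources.
        assert(lock["nodes"].count(lock["root"].get<std::string>()));
        std::cout << lock.dump(2) << std::endl;
    }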
diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc
new file mode 100644
index 000000000..0f5770019
--- /dev/null
+++ b/src/libexpr/flake/flake.cc
@@ -0,0 +1,666 @@
+#include "flake.hh"
+#include "lockfile.hh"
+#include "primops.hh"
+#include "eval-inline.hh"
+#include "store-api.hh"
+#include "fetchers.hh"
+#include "finally.hh"
+
+namespace nix {
+
+using namespace flake;
+
+namespace flake {
+
+/* If 'allowLookup' is true, then resolve 'flakeRef' using the
+ registries. */
+static FlakeRef maybeLookupFlake(
+ ref<Store> store,
+ const FlakeRef & flakeRef,
+ bool allowLookup)
+{
+ if (!flakeRef.input->isDirect()) {
+ if (allowLookup)
+ return flakeRef.resolve(store);
+ else
+ throw Error("'%s' is an indirect flake reference, but registry lookups are not allowed", flakeRef);
+ } else
+ return flakeRef;
+}
+
+typedef std::vector<std::pair<FlakeRef, FlakeRef>> FlakeCache;
+
+static FlakeRef lookupInFlakeCache(
+ const FlakeCache & flakeCache,
+ const FlakeRef & flakeRef)
+{
+ // FIXME: inefficient.
+ for (auto & i : flakeCache) {
+ if (flakeRef == i.first) {
+            debug("mapping '%s' to previously seen input '%s' -> '%s'",
+ flakeRef, i.first, i.second);
+ return i.second;
+ }
+ }
+
+ return flakeRef;
+}
+
+static std::tuple<fetchers::Tree, FlakeRef, FlakeRef> fetchOrSubstituteTree(
+ EvalState & state,
+ const FlakeRef & originalRef,
+ std::optional<TreeInfo> treeInfo,
+ bool allowLookup,
+ FlakeCache & flakeCache)
+{
+ /* The tree may already be in the Nix store, or it could be
+ substituted (which is often faster than fetching from the
+ original source). So check that. */
+ if (treeInfo && originalRef.input->isDirect() && originalRef.input->isImmutable()) {
+ try {
+ auto storePath = treeInfo->computeStorePath(*state.store);
+
+ state.store->ensurePath(storePath);
+
+ debug("using substituted/cached input '%s' in '%s'",
+ originalRef, state.store->printStorePath(storePath));
+
+ auto actualPath = state.store->toRealPath(storePath);
+
+ if (state.allowedPaths)
+ state.allowedPaths->insert(actualPath);
+
+ return {
+ Tree {
+ .actualPath = actualPath,
+ .storePath = std::move(storePath),
+ .info = *treeInfo,
+ },
+ originalRef,
+ originalRef
+ };
+ } catch (Error & e) {
+ debug("substitution of input '%s' failed: %s", originalRef, e.what());
+ }
+ }
+
+ auto resolvedRef = lookupInFlakeCache(flakeCache,
+ maybeLookupFlake(state.store,
+ lookupInFlakeCache(flakeCache, originalRef), allowLookup));
+
+ auto [tree, lockedRef] = resolvedRef.fetchTree(state.store);
+
+ debug("got tree '%s' from '%s'",
+ state.store->printStorePath(tree.storePath), lockedRef);
+
+ flakeCache.push_back({originalRef, lockedRef});
+ flakeCache.push_back({resolvedRef, lockedRef});
+
+ if (state.allowedPaths)
+ state.allowedPaths->insert(tree.actualPath);
+
+ if (treeInfo)
+ assert(tree.storePath == treeInfo->computeStorePath(*state.store));
+
+ return {std::move(tree), resolvedRef, lockedRef};
+}
+
+static void expectType(EvalState & state, ValueType type,
+ Value & value, const Pos & pos)
+{
+ if (value.type == tThunk && value.isTrivial())
+ state.forceValue(value, pos);
+ if (value.type != type)
+ throw Error("expected %s but got %s at %s",
+ showType(type), showType(value.type), pos);
+}
+
+static std::map<FlakeId, FlakeInput> parseFlakeInputs(
+ EvalState & state, Value * value, const Pos & pos);
+
+static FlakeInput parseFlakeInput(EvalState & state,
+ const std::string & inputName, Value * value, const Pos & pos)
+{
+ expectType(state, tAttrs, *value, pos);
+
+ FlakeInput input {
+ .ref = FlakeRef::fromAttrs({{"type", "indirect"}, {"id", inputName}})
+ };
+
+ auto sInputs = state.symbols.create("inputs");
+ auto sUrl = state.symbols.create("url");
+ auto sUri = state.symbols.create("uri"); // FIXME: remove soon
+ auto sFlake = state.symbols.create("flake");
+ auto sFollows = state.symbols.create("follows");
+
+ fetchers::Attrs attrs;
+ std::optional<std::string> url;
+
+ for (nix::Attr attr : *(value->attrs)) {
+ try {
+ if (attr.name == sUrl || attr.name == sUri) {
+ expectType(state, tString, *attr.value, *attr.pos);
+ url = attr.value->string.s;
+ attrs.emplace("url", *url);
+ } else if (attr.name == sFlake) {
+ expectType(state, tBool, *attr.value, *attr.pos);
+ input.isFlake = attr.value->boolean;
+ } else if (attr.name == sInputs) {
+ input.overrides = parseFlakeInputs(state, attr.value, *attr.pos);
+ } else if (attr.name == sFollows) {
+ expectType(state, tString, *attr.value, *attr.pos);
+ input.follows = parseInputPath(attr.value->string.s);
+ } else {
+ state.forceValue(*attr.value);
+ if (attr.value->type == tString)
+ attrs.emplace(attr.name, attr.value->string.s);
+ else
+ throw TypeError("flake input attribute '%s' is %s while a string is expected",
+ attr.name, showType(*attr.value));
+ }
+ } catch (Error & e) {
+ e.addPrefix(fmt("in flake attribute '%s' at '%s':\n", attr.name, *attr.pos));
+ throw;
+ }
+ }
+
+ if (attrs.count("type"))
+ try {
+ input.ref = FlakeRef::fromAttrs(attrs);
+ } catch (Error & e) {
+ e.addPrefix(fmt("in flake input at '%s':\n", pos));
+ throw;
+ }
+ else {
+ attrs.erase("url");
+ if (!attrs.empty())
+ throw Error("unexpected flake input attribute '%s', at %s", attrs.begin()->first, pos);
+ if (url)
+ input.ref = parseFlakeRef(*url, {}, true);
+ }
+
+ return input;
+}
+
+static std::map<FlakeId, FlakeInput> parseFlakeInputs(
+ EvalState & state, Value * value, const Pos & pos)
+{
+ std::map<FlakeId, FlakeInput> inputs;
+
+ expectType(state, tAttrs, *value, pos);
+
+ for (nix::Attr & inputAttr : *(*value).attrs) {
+ inputs.emplace(inputAttr.name,
+ parseFlakeInput(state,
+ inputAttr.name,
+ inputAttr.value,
+ *inputAttr.pos));
+ }
+
+ return inputs;
+}
+
+static Flake getFlake(
+ EvalState & state,
+ const FlakeRef & originalRef,
+ std::optional<TreeInfo> treeInfo,
+ bool allowLookup,
+ FlakeCache & flakeCache)
+{
+ auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
+ state, originalRef, treeInfo, allowLookup, flakeCache);
+
+ // Guard against symlink attacks.
+ auto flakeFile = canonPath(sourceInfo.actualPath + "/" + lockedRef.subdir + "/flake.nix");
+ if (!isInDir(flakeFile, sourceInfo.actualPath))
+ throw Error("'flake.nix' file of flake '%s' escapes from '%s'",
+ lockedRef, state.store->printStorePath(sourceInfo.storePath));
+
+ Flake flake {
+ .originalRef = originalRef,
+ .resolvedRef = resolvedRef,
+ .lockedRef = lockedRef,
+ .sourceInfo = std::make_shared<fetchers::Tree>(std::move(sourceInfo))
+ };
+
+ if (!pathExists(flakeFile))
+ throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", lockedRef, lockedRef.subdir);
+
+ Value vInfo;
+ state.evalFile(flakeFile, vInfo, true); // FIXME: symlink attack
+
+ expectType(state, tAttrs, vInfo, Pos(state.symbols.create(flakeFile), 0, 0));
+
+ auto sEdition = state.symbols.create("edition"); // FIXME: remove soon
+ auto sEpoch = state.symbols.create("epoch"); // FIXME: remove soon
+
+ if (vInfo.attrs->get(sEdition))
+ warn("flake '%s' has deprecated attribute 'edition'", lockedRef);
+
+ if (vInfo.attrs->get(sEpoch))
+ warn("flake '%s' has deprecated attribute 'epoch'", lockedRef);
+
+ if (auto description = vInfo.attrs->get(state.sDescription)) {
+ expectType(state, tString, *description->value, *description->pos);
+ flake.description = description->value->string.s;
+ }
+
+ auto sInputs = state.symbols.create("inputs");
+
+ if (auto inputs = vInfo.attrs->get(sInputs))
+ flake.inputs = parseFlakeInputs(state, inputs->value, *inputs->pos);
+
+ auto sOutputs = state.symbols.create("outputs");
+
+ if (auto outputs = vInfo.attrs->get(sOutputs)) {
+ expectType(state, tLambda, *outputs->value, *outputs->pos);
+ flake.vOutputs = allocRootValue(outputs->value);
+
+ if ((*flake.vOutputs)->lambda.fun->matchAttrs) {
+ for (auto & formal : (*flake.vOutputs)->lambda.fun->formals->formals) {
+ if (formal.name != state.sSelf)
+ flake.inputs.emplace(formal.name, FlakeInput {
+ .ref = parseFlakeRef(formal.name)
+ });
+ }
+ }
+
+ } else
+ throw Error("flake '%s' lacks attribute 'outputs'", lockedRef);
+
+ for (auto & attr : *vInfo.attrs) {
+ if (attr.name != sEdition &&
+ attr.name != sEpoch &&
+ attr.name != state.sDescription &&
+ attr.name != sInputs &&
+ attr.name != sOutputs)
+ throw Error("flake '%s' has an unsupported attribute '%s', at %s",
+ lockedRef, attr.name, *attr.pos);
+ }
+
+ return flake;
+}
+
+Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup)
+{
+ FlakeCache flakeCache;
+ return getFlake(state, originalRef, {}, allowLookup, flakeCache);
+}
+
+/* Compute an in-memory lock file for the specified top-level flake,
+   and optionally write it to 'flake.lock', if the flake is writable. */
+LockedFlake lockFlake(
+ EvalState & state,
+ const FlakeRef & topRef,
+ const LockFlags & lockFlags)
+{
+ settings.requireExperimentalFeature("flakes");
+
+ FlakeCache flakeCache;
+
+ auto flake = getFlake(state, topRef, {}, lockFlags.useRegistries, flakeCache);
+
+ // FIXME: symlink attack
+ auto oldLockFile = LockFile::read(
+ flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir + "/flake.lock");
+
+ debug("old lock file: %s", oldLockFile);
+
+ // FIXME: check whether all overrides are used.
+ std::map<InputPath, FlakeInput> overrides;
+ std::set<InputPath> overridesUsed, updatesUsed;
+
+ for (auto & i : lockFlags.inputOverrides)
+ overrides.insert_or_assign(i.first, FlakeInput { .ref = i.second });
+
+ LockFile newLockFile;
+
+ std::vector<FlakeRef> parents;
+ std::map<InputPath, InputPath> follows;
+
+ std::function<void(
+ const FlakeInputs & flakeInputs,
+ std::shared_ptr<Node> node,
+ const InputPath & inputPathPrefix,
+ std::shared_ptr<const Node> oldNode)>
+ computeLocks;
+
+ computeLocks = [&](
+ const FlakeInputs & flakeInputs,
+ std::shared_ptr<Node> node,
+ const InputPath & inputPathPrefix,
+ std::shared_ptr<const Node> oldNode)
+ {
+ debug("computing lock file node '%s'", concatStringsSep("/", inputPathPrefix));
+
+ /* Get the overrides (i.e. attributes of the form
+ 'inputs.nixops.inputs.nixpkgs.url = ...'). */
+ // FIXME: check this
+ for (auto & [id, input] : flake.inputs) {
+ for (auto & [idOverride, inputOverride] : input.overrides) {
+ auto inputPath(inputPathPrefix);
+ inputPath.push_back(id);
+ inputPath.push_back(idOverride);
+ overrides.insert_or_assign(inputPath, inputOverride);
+ }
+ }
+
+ /* Go over the flake inputs, resolve/fetch them if
+ necessary (i.e. if they're new or the flakeref changed
+ from what's in the lock file). */
+ for (auto & [id, input2] : flakeInputs) {
+ auto inputPath(inputPathPrefix);
+ inputPath.push_back(id);
+ auto inputPathS = concatStringsSep("/", inputPath);
+ debug("computing input '%s'", concatStringsSep("/", inputPath));
+
+ /* Do we have an override for this input from one of the
+ ancestors? */
+ auto i = overrides.find(inputPath);
+ bool hasOverride = i != overrides.end();
+ if (hasOverride) overridesUsed.insert(inputPath);
+ auto & input = hasOverride ? i->second : input2;
+
+            /* Resolve 'follows' later (since it may refer to an input
+               path we haven't processed yet). */
+ if (input.follows) {
+ if (hasOverride)
+ /* 'follows' from an override is relative to the
+ root of the graph. */
+ follows.insert_or_assign(inputPath, *input.follows);
+ else {
+ /* Otherwise, it's relative to the current flake. */
+ InputPath path(inputPathPrefix);
+ for (auto & i : *input.follows) path.push_back(i);
+ follows.insert_or_assign(inputPath, path);
+ }
+ continue;
+ }
+
+            /* Do we have an entry in the existing lock file, and no
+               --update-input flag for this input? */
+ std::shared_ptr<const LockedNode> oldLock;
+
+ updatesUsed.insert(inputPath);
+
+ if (oldNode && !lockFlags.inputUpdates.count(inputPath)) {
+ auto oldLockIt = oldNode->inputs.find(id);
+ if (oldLockIt != oldNode->inputs.end())
+ oldLock = std::dynamic_pointer_cast<const LockedNode>(oldLockIt->second);
+ }
+
+ if (oldLock
+ && oldLock->originalRef == input.ref
+ && !hasOverride)
+ {
+ debug("keeping existing input '%s'", inputPathS);
+
+ /* Copy the input from the old lock since its flakeref
+ didn't change and there is no override from a
+ higher level flake. */
+ auto childNode = std::make_shared<LockedNode>(
+ oldLock->lockedRef, oldLock->originalRef, oldLock->info, oldLock->isFlake);
+
+ node->inputs.insert_or_assign(id, childNode);
+
+ /* If we have an --update-input flag for an input
+ of this input, then we must fetch the flake to
+                       update it. */
+ auto lb = lockFlags.inputUpdates.lower_bound(inputPath);
+
+ auto hasChildUpdate =
+ lb != lockFlags.inputUpdates.end()
+ && lb->size() > inputPath.size()
+ && std::equal(inputPath.begin(), inputPath.end(), lb->begin());
+
+ if (hasChildUpdate) {
+ auto inputFlake = getFlake(
+ state, oldLock->lockedRef, oldLock->info, false, flakeCache);
+ computeLocks(inputFlake.inputs, childNode, inputPath, oldLock);
+ } else {
+ /* No need to fetch this flake, we can be
+ lazy. However there may be new overrides on the
+ inputs of this flake, so we need to check
+ those. */
+ FlakeInputs fakeInputs;
+
+ for (auto & i : oldLock->inputs) {
+ auto lockedNode = std::dynamic_pointer_cast<LockedNode>(i.second);
+ // Note: this node is not locked in case
+ // of a circular reference back to the root.
+ if (lockedNode)
+ fakeInputs.emplace(i.first, FlakeInput {
+ .ref = lockedNode->originalRef
+ });
+ else {
+ InputPath path(inputPath);
+ path.push_back(i.first);
+ follows.insert_or_assign(path, InputPath());
+ }
+ }
+
+ computeLocks(fakeInputs, childNode, inputPath, oldLock);
+ }
+
+ } else {
+ /* We need to create a new lock file entry. So fetch
+ this input. */
+
+ if (!lockFlags.allowMutable && !input.ref.input->isImmutable())
+ throw Error("cannot update flake input '%s' in pure mode", inputPathS);
+
+ if (input.isFlake) {
+ auto inputFlake = getFlake(state, input.ref, {}, lockFlags.useRegistries, flakeCache);
+
+ /* Note: in case of an --override-input, we use
+ the *original* ref (input2.ref) for the
+ "original" field, rather than the
+ override. This ensures that the override isn't
+ nuked the next time we update the lock
+ file. That is, overrides are sticky unless you
+ use --no-write-lock-file. */
+ auto childNode = std::make_shared<LockedNode>(
+ inputFlake.lockedRef, input2.ref, inputFlake.sourceInfo->info);
+
+ node->inputs.insert_or_assign(id, childNode);
+
+ /* Guard against circular flake imports. */
+ for (auto & parent : parents)
+ if (parent == input.ref)
+ throw Error("found circular import of flake '%s'", parent);
+ parents.push_back(input.ref);
+ Finally cleanup([&]() { parents.pop_back(); });
+
+ /* Recursively process the inputs of this
+ flake. Also, unless we already have this flake
+ in the top-level lock file, use this flake's
+ own lock file. */
+ computeLocks(
+ inputFlake.inputs, childNode, inputPath,
+ oldLock
+ ? std::dynamic_pointer_cast<const Node>(oldLock)
+ : LockFile::read(
+ inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root);
+ }
+
+ else {
+ auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
+ state, input.ref, {}, lockFlags.useRegistries, flakeCache);
+ node->inputs.insert_or_assign(id,
+ std::make_shared<LockedNode>(lockedRef, input.ref, sourceInfo.info, false));
+ }
+ }
+ }
+ };
+
+ computeLocks(
+ flake.inputs, newLockFile.root, {},
+ lockFlags.recreateLockFile ? nullptr : oldLockFile.root);
+
+ /* Insert edges for 'follows' overrides. */
+ for (auto & [from, to] : follows) {
+ debug("adding 'follows' node from '%s' to '%s'",
+ concatStringsSep("/", from),
+ concatStringsSep("/", to));
+
+ assert(!from.empty());
+
+ InputPath fromParent(from);
+ fromParent.pop_back();
+
+ auto fromParentNode = newLockFile.root->findInput(fromParent);
+ assert(fromParentNode);
+
+ auto toNode = newLockFile.root->findInput(to);
+ if (!toNode)
+ throw Error("flake input '%s' follows non-existent flake input '%s'",
+ concatStringsSep("/", from),
+ concatStringsSep("/", to));
+
+ fromParentNode->inputs.insert_or_assign(from.back(), toNode);
+ }
+
+ for (auto & i : lockFlags.inputOverrides)
+ if (!overridesUsed.count(i.first))
+ warn("the flag '--override-input %s %s' does not match any input",
+ concatStringsSep("/", i.first), i.second);
+
+ for (auto & i : lockFlags.inputUpdates)
+ if (!updatesUsed.count(i))
+ warn("the flag '--update-input %s' does not match any input", concatStringsSep("/", i));
+
+ debug("new lock file: %s", newLockFile);
+
+ /* Check whether we need to / can write the new lock file. */
+ if (!(newLockFile == oldLockFile)) {
+
+ auto diff = diffLockFiles(oldLockFile, newLockFile);
+
+ if (!(oldLockFile == LockFile()))
+ printInfo("inputs of flake '%s' changed:\n%s", topRef, chomp(diff));
+
+ if (lockFlags.writeLockFile) {
+ if (auto sourcePath = topRef.input->getSourcePath()) {
+ if (!newLockFile.isImmutable()) {
+ if (settings.warnDirty)
+ warn("will not write lock file of flake '%s' because it has a mutable input", topRef);
+ } else {
+ if (!lockFlags.updateLockFile)
+ throw Error("flake '%s' requires lock file changes but they're not allowed due to '--no-update-lock-file'", topRef);
+
+ auto relPath = (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock";
+
+ auto path = *sourcePath + "/" + relPath;
+
+ bool lockFileExists = pathExists(path);
+
+ if (lockFileExists)
+ warn("updating lock file '%s'", path);
+ else
+ warn("creating lock file '%s'", path);
+
+ newLockFile.write(path);
+
+ topRef.input->markChangedFile(
+ (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock",
+ lockFlags.commitLockFile
+ ? std::optional<std::string>(fmt("%s: %s\n\nFlake input changes:\n\n%s",
+ relPath, lockFileExists ? "Update" : "Add", diff))
+ : std::nullopt);
+
+ /* Rewriting the lockfile changed the top-level
+ repo, so we should re-read it. FIXME: we could
+ also just clear the 'rev' field... */
+ auto prevLockedRef = flake.lockedRef;
+ FlakeCache dummyCache;
+ flake = getFlake(state, topRef, {}, lockFlags.useRegistries, dummyCache);
+
+ if (lockFlags.commitLockFile &&
+ flake.lockedRef.input->getRev() &&
+ prevLockedRef.input->getRev() != flake.lockedRef.input->getRev())
+ warn("committed new revision '%s'", flake.lockedRef.input->getRev()->gitRev());
+
+ /* Make sure that we picked up the change,
+ i.e. the tree should usually be dirty
+ now. Corner case: we could have reverted from a
+ dirty to a clean tree! */
+ if (flake.lockedRef.input == prevLockedRef.input
+ && !flake.lockedRef.input->isImmutable())
+ throw Error("'%s' did not change after I updated its 'flake.lock' file; is 'flake.lock' under version control?", flake.originalRef);
+ }
+ } else
+ throw Error("cannot write modified lock file of flake '%s' (use '--no-write-lock-file' to ignore)", topRef);
+ } else
+ warn("not writing modified lock file of flake '%s'", topRef);
+ }
+
+ return LockedFlake { .flake = std::move(flake), .lockFile = std::move(newLockFile) };
+}
+
+void callFlake(EvalState & state,
+ const LockedFlake & lockedFlake,
+ Value & vRes)
+{
+ auto vLocks = state.allocValue();
+ auto vRootSrc = state.allocValue();
+ auto vRootSubdir = state.allocValue();
+ auto vTmp1 = state.allocValue();
+ auto vTmp2 = state.allocValue();
+
+ mkString(*vLocks, lockedFlake.lockFile.to_string());
+
+ emitTreeAttrs(state, *lockedFlake.flake.sourceInfo, lockedFlake.flake.lockedRef.input, *vRootSrc);
+
+ mkString(*vRootSubdir, lockedFlake.flake.lockedRef.subdir);
+
+ static RootValue vCallFlake = nullptr;
+
+ if (!vCallFlake) {
+ vCallFlake = allocRootValue(state.allocValue());
+ state.eval(state.parseExprFromString(
+ #include "call-flake.nix.gen.hh"
+ , "/"), **vCallFlake);
+ }
+
+ state.callFunction(**vCallFlake, *vLocks, *vTmp1, noPos);
+ state.callFunction(*vTmp1, *vRootSrc, *vTmp2, noPos);
+ state.callFunction(*vTmp2, *vRootSubdir, vRes, noPos);
+}
+
+static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ auto flakeRefS = state.forceStringNoCtx(*args[0], pos);
+ auto flakeRef = parseFlakeRef(flakeRefS, {}, true);
+ if (evalSettings.pureEval && !flakeRef.input->isImmutable())
+ throw Error("cannot call 'getFlake' on mutable flake reference '%s', at %s (use --impure to override)", flakeRefS, pos);
+
+ callFlake(state,
+ lockFlake(state, flakeRef,
+ LockFlags {
+ .updateLockFile = false,
+ .useRegistries = !evalSettings.pureEval,
+ .allowMutable = !evalSettings.pureEval,
+ }),
+ v);
+}
+
+static RegisterPrimOp r2("getFlake", 1, prim_getFlake);
+
+}
+
+Fingerprint LockedFlake::getFingerprint() const
+{
+ // FIXME: as an optimization, if the flake contains a lock file
+ // and we haven't changed it, then it's sufficient to use
+ // flake.sourceInfo.storePath for the fingerprint.
+ return hashString(htSHA256,
+ fmt("%s;%d;%d;%s",
+ flake.sourceInfo->storePath.to_string(),
+ flake.sourceInfo->info.revCount.value_or(0),
+ flake.sourceInfo->info.lastModified.value_or(0),
+ lockFile));
+}
+
+Flake::~Flake() { }
+
+}
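For illustration, a minimal sketch of driving the entry points defined above from other code in the Nix tree (evalFlakeOutputs is a hypothetical helper, and constructing the EvalState is outside the scope of this sketch; the flag choices mirror prim_getFlake):

    // Sketch: lock a flake reference and evaluate its outputs.
    // Assumes the headers added in this commit and an existing EvalState.
    #include "flake.hh"

    using namespace nix;
    using namespace nix::flake;

    void evalFlakeOutputs(EvalState & state, const std::string & flakeRefS, Value & vRes)
    {
        auto flakeRef = parseFlakeRef(flakeRefS, {}, true);

        auto lockedFlake = lockFlake(state, flakeRef,
            LockFlags {
                .updateLockFile = false,  // fail instead of changing flake.lock
                .writeLockFile = false,   // never touch the on-disk lock file
            });

        // The fingerprint can serve as a cache key for evaluation results.
        auto fingerprint = lockedFlake.getFingerprint();
        (void) fingerprint;

        callFlake(state, lockedFlake, vRes);
    }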
diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh
new file mode 100644
index 000000000..59a1adb3b
--- /dev/null
+++ b/src/libexpr/flake/flake.hh
@@ -0,0 +1,110 @@
+#pragma once
+
+#include "types.hh"
+#include "flakeref.hh"
+#include "lockfile.hh"
+#include "value.hh"
+
+namespace nix {
+
+class EvalState;
+
+namespace fetchers { struct Tree; }
+
+namespace flake {
+
+struct FlakeInput;
+
+typedef std::map<FlakeId, FlakeInput> FlakeInputs;
+
+struct FlakeInput
+{
+ FlakeRef ref;
+ bool isFlake = true;
+ std::optional<InputPath> follows;
+ FlakeInputs overrides;
+};
+
+struct Flake
+{
+ FlakeRef originalRef;
+ FlakeRef resolvedRef;
+ FlakeRef lockedRef;
+ std::optional<std::string> description;
+ std::shared_ptr<const fetchers::Tree> sourceInfo;
+ FlakeInputs inputs;
+ RootValue vOutputs;
+ ~Flake();
+};
+
+Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool allowLookup);
+
+/* Fingerprint of a locked flake; used as a cache key. */
+typedef Hash Fingerprint;
+
+struct LockedFlake
+{
+ Flake flake;
+ LockFile lockFile;
+
+ Fingerprint getFingerprint() const;
+};
+
+struct LockFlags
+{
+ /* Whether to ignore the existing lock file, creating a new one
+ from scratch. */
+ bool recreateLockFile = false;
+
+ /* Whether to update the lock file at all. If set to false, if any
+ change to the lock file is needed (e.g. when an input has been
+ added to flake.nix), you get a fatal error. */
+ bool updateLockFile = true;
+
+    /* Whether to write the lock file to disk. If set to true and any
+       changes to the lock file are needed for a flake that is not
+       writable (i.e. not a local Git working tree or similar), you get
+       a fatal error. If set to false, Nix will use the modified lock
+       file in memory only, without writing it to disk. */
+ bool writeLockFile = true;
+
+    /* Whether to use the registries to look up indirect flake
+       references like 'nixpkgs'. */
+ bool useRegistries = true;
+
+ /* Whether mutable flake references (i.e. those without a Git
+ revision or similar) without a corresponding lock are
+ allowed. Mutable flake references with a lock are always
+ allowed. */
+ bool allowMutable = true;
+
+ /* Whether to commit changes to flake.lock. */
+ bool commitLockFile = false;
+
+    /* Flake inputs to be overridden. */
+ std::map<InputPath, FlakeRef> inputOverrides;
+
+ /* Flake inputs to be updated. This means that any existing lock
+ for those inputs will be ignored. */
+ std::set<InputPath> inputUpdates;
+};
+
+LockedFlake lockFlake(
+ EvalState & state,
+ const FlakeRef & flakeRef,
+ const LockFlags & lockFlags);
+
+void callFlake(
+ EvalState & state,
+ const LockedFlake & lockedFlake,
+ Value & v);
+
+}
+
+void emitTreeAttrs(
+ EvalState & state,
+ const fetchers::Tree & tree,
+ std::shared_ptr<const fetchers::Input> input,
+ Value & v);
+
+}
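As a further illustration, the LockFlags above map onto the command-line flags mentioned in flake.cc ('--no-update-lock-file', '--no-write-lock-file', '--update-input', '--override-input'). A hedged sketch of how a CLI front end might fill the struct in (exampleLockFlags is a hypothetical helper):

    // Sketch: building LockFlags from CLI-style options, using only the
    // fields declared above plus parseInputPath/parseFlakeRef.
    #include "flake.hh"

    using namespace nix;
    using namespace nix::flake;

    LockFlags exampleLockFlags()
    {
        LockFlags flags;

        // --no-update-lock-file / --no-write-lock-file
        flags.updateLockFile = false;
        flags.writeLockFile = false;

        // --update-input nixpkgs: ignore the existing lock for this input path.
        flags.inputUpdates.insert(parseInputPath("nixpkgs"));

        // --override-input nixops/nixpkgs <flakeref>: pin a transitive input.
        flags.inputOverrides.insert_or_assign(
            parseInputPath("nixops/nixpkgs"),
            parseFlakeRef("github:NixOS/nixpkgs"));

        return flags;
    }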
diff --git a/src/libexpr/flake/flakeref.cc b/src/libexpr/flake/flakeref.cc
new file mode 100644
index 000000000..a70261a41
--- /dev/null
+++ b/src/libexpr/flake/flakeref.cc
@@ -0,0 +1,196 @@
+#include "flakeref.hh"
+#include "store-api.hh"
+#include "url.hh"
+#include "fetchers.hh"
+#include "registry.hh"
+
+namespace nix {
+
+#if 0
+// 'dir' path elements cannot start with a '.'. We also reject
+// potentially dangerous characters like ';'.
+const static std::string subDirElemRegex = "(?:[a-zA-Z0-9_-]+[a-zA-Z0-9._-]*)";
+const static std::string subDirRegex = subDirElemRegex + "(?:/" + subDirElemRegex + ")*";
+#endif
+
+std::string FlakeRef::to_string() const
+{
+ auto url = input->toURL();
+ if (subdir != "")
+ url.query.insert_or_assign("dir", subdir);
+ return url.to_string();
+}
+
+fetchers::Attrs FlakeRef::toAttrs() const
+{
+ auto attrs = input->toAttrs();
+ if (subdir != "")
+ attrs.emplace("dir", subdir);
+ return attrs;
+}
+
+std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef)
+{
+ str << flakeRef.to_string();
+ return str;
+}
+
+bool FlakeRef::operator ==(const FlakeRef & other) const
+{
+ return *input == *other.input && subdir == other.subdir;
+}
+
+FlakeRef FlakeRef::resolve(ref<Store> store) const
+{
+ auto [input2, extraAttrs] = lookupInRegistries(store, input);
+ return FlakeRef(input2, fetchers::maybeGetStrAttr(extraAttrs, "dir").value_or(subdir));
+}
+
+FlakeRef parseFlakeRef(
+ const std::string & url, const std::optional<Path> & baseDir, bool allowMissing)
+{
+ auto [flakeRef, fragment] = parseFlakeRefWithFragment(url, baseDir, allowMissing);
+ if (fragment != "")
+ throw Error("unexpected fragment '%s' in flake reference '%s'", fragment, url);
+ return flakeRef;
+}
+
+std::optional<FlakeRef> maybeParseFlakeRef(
+ const std::string & url, const std::optional<Path> & baseDir)
+{
+ try {
+ return parseFlakeRef(url, baseDir);
+ } catch (Error &) {
+ return {};
+ }
+}
+
+std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
+ const std::string & url, const std::optional<Path> & baseDir, bool allowMissing)
+{
+ using namespace fetchers;
+
+ static std::string fnRegex = "[0-9a-zA-Z-._~!$&'\"()*+,;=]+";
+
+ static std::regex pathUrlRegex(
+ "(/?" + fnRegex + "(?:/" + fnRegex + ")*/?)"
+ + "(?:\\?(" + queryRegex + "))?"
+ + "(?:#(" + queryRegex + "))?",
+ std::regex::ECMAScript);
+
+ static std::regex flakeRegex(
+ "((" + flakeIdRegexS + ")(?:/(?:" + refAndOrRevRegex + "))?)"
+ + "(?:#(" + queryRegex + "))?",
+ std::regex::ECMAScript);
+
+ std::smatch match;
+
+ /* Check if 'url' is a flake ID. This is an abbreviated syntax for
+ 'flake:<flake-id>?ref=<ref>&rev=<rev>'. */
+
+ if (std::regex_match(url, match, flakeRegex)) {
+ auto parsedURL = ParsedURL{
+ .url = url,
+ .base = "flake:" + std::string(match[1]),
+ .scheme = "flake",
+ .authority = "",
+ .path = match[1],
+ };
+
+ return std::make_pair(
+ FlakeRef(inputFromURL(parsedURL), ""),
+ percentDecode(std::string(match[6])));
+ }
+
+ /* Check if 'url' is a path (either absolute or relative to
+ 'baseDir'). If so, search upward to the root of the repo
+ (i.e. the directory containing .git). */
+
+ else if (std::regex_match(url, match, pathUrlRegex)) {
+ std::string path = match[1];
+ if (!baseDir && !hasPrefix(path, "/"))
+ throw BadURL("flake reference '%s' is not an absolute path", url);
+ path = absPath(path, baseDir, true);
+
+ if (!S_ISDIR(lstat(path).st_mode))
+ throw BadURL("path '%s' is not a flake (because it's not a directory)", path);
+
+ if (!allowMissing && !pathExists(path + "/flake.nix"))
+ throw BadURL("path '%s' is not a flake (because it doesn't contain a 'flake.nix' file)", path);
+
+ auto fragment = percentDecode(std::string(match[3]));
+
+ auto flakeRoot = path;
+ std::string subdir;
+
+ while (flakeRoot != "/") {
+ if (pathExists(flakeRoot + "/.git")) {
+ auto base = std::string("git+file://") + flakeRoot;
+
+ auto parsedURL = ParsedURL{
+ .url = base, // FIXME
+ .base = base,
+ .scheme = "git+file",
+ .authority = "",
+ .path = flakeRoot,
+ .query = decodeQuery(match[2]),
+ };
+
+ if (subdir != "") {
+ if (parsedURL.query.count("dir"))
+ throw Error("flake URL '%s' has an inconsistent 'dir' parameter", url);
+ parsedURL.query.insert_or_assign("dir", subdir);
+ }
+
+ return std::make_pair(
+ FlakeRef(inputFromURL(parsedURL), get(parsedURL.query, "dir").value_or("")),
+ fragment);
+ }
+
+ subdir = std::string(baseNameOf(flakeRoot)) + (subdir.empty() ? "" : "/" + subdir);
+ flakeRoot = dirOf(flakeRoot);
+ }
+
+ fetchers::Attrs attrs;
+ attrs.insert_or_assign("type", "path");
+ attrs.insert_or_assign("path", path);
+
+ return std::make_pair(FlakeRef(inputFromAttrs(attrs), ""), fragment);
+ }
+
+ else {
+ auto parsedURL = parseURL(url);
+ std::string fragment;
+ std::swap(fragment, parsedURL.fragment);
+ return std::make_pair(
+ FlakeRef(inputFromURL(parsedURL), get(parsedURL.query, "dir").value_or("")),
+ fragment);
+ }
+}
+
+std::optional<std::pair<FlakeRef, std::string>> maybeParseFlakeRefWithFragment(
+ const std::string & url, const std::optional<Path> & baseDir)
+{
+ try {
+ return parseFlakeRefWithFragment(url, baseDir);
+ } catch (Error & e) {
+ return {};
+ }
+}
+
+FlakeRef FlakeRef::fromAttrs(const fetchers::Attrs & attrs)
+{
+ auto attrs2(attrs);
+ attrs2.erase("dir");
+ return FlakeRef(
+ fetchers::inputFromAttrs(attrs2),
+ fetchers::maybeGetStrAttr(attrs, "dir").value_or(""));
+}
+
+std::pair<fetchers::Tree, FlakeRef> FlakeRef::fetchTree(ref<Store> store) const
+{
+ auto [tree, lockedInput] = input->fetchTree(store);
+ return {std::move(tree), FlakeRef(lockedInput, subdir)};
+}
+
+}
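parseFlakeRefWithFragment accepts the three shapes handled above: a bare flake ID (registry style), a filesystem path that is searched upward for a .git directory, and a generic URL whose fragment is split off. A small sketch (showParse and demo are hypothetical helpers; the exact to_string() output depends on the fetcher backends, so it is only printed):

    // Sketch: the flake-reference shapes recognised by parseFlakeRefWithFragment.
    #include "flakeref.hh"
    #include <iostream>

    using namespace nix;

    static void showParse(const std::string & url)
    {
        auto [flakeRef, fragment] = parseFlakeRefWithFragment(url, {}, /* allowMissing */ true);
        std::cout << url << " -> " << flakeRef.to_string()
                  << " (fragment '" << fragment << "')\n";
    }

    void demo()
    {
        showParse("nixpkgs#hello");                        // flake ID + fragment
        showParse("git+https://example.org/repo?dir=sub"); // URL; 'dir' becomes the subdir
        showParse("/path/to/checkout");                    // must exist; searched upward for .git
    }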
diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh
new file mode 100644
index 000000000..72cbb2908
--- /dev/null
+++ b/src/libexpr/flake/flakeref.hh
@@ -0,0 +1,55 @@
+#pragma once
+
+#include "types.hh"
+#include "hash.hh"
+#include "fetchers.hh"
+
+#include <variant>
+
+namespace nix {
+
+class Store;
+
+typedef std::string FlakeId;
+
+struct FlakeRef
+{
+ std::shared_ptr<const fetchers::Input> input;
+
+ Path subdir;
+
+ bool operator==(const FlakeRef & other) const;
+
+ FlakeRef(const std::shared_ptr<const fetchers::Input> & input, const Path & subdir)
+ : input(input), subdir(subdir)
+ {
+ assert(input);
+ }
+
+ // FIXME: change to operator <<.
+ std::string to_string() const;
+
+ fetchers::Attrs toAttrs() const;
+
+ FlakeRef resolve(ref<Store> store) const;
+
+ static FlakeRef fromAttrs(const fetchers::Attrs & attrs);
+
+ std::pair<fetchers::Tree, FlakeRef> fetchTree(ref<Store> store) const;
+};
+
+std::ostream & operator << (std::ostream & str, const FlakeRef & flakeRef);
+
+FlakeRef parseFlakeRef(
+ const std::string & url, const std::optional<Path> & baseDir = {}, bool allowMissing = false);
+
+std::optional<FlakeRef> maybeParseFlakeRef(
+ const std::string & url, const std::optional<Path> & baseDir = {});
+
+std::pair<FlakeRef, std::string> parseFlakeRefWithFragment(
+ const std::string & url, const std::optional<Path> & baseDir = {}, bool allowMissing = false);
+
+std::optional<std::pair<FlakeRef, std::string>> maybeParseFlakeRefWithFragment(
+ const std::string & url, const std::optional<Path> & baseDir = {});
+
+}
diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc
new file mode 100644
index 000000000..a40637824
--- /dev/null
+++ b/src/libexpr/flake/lockfile.cc
@@ -0,0 +1,284 @@
+#include "lockfile.hh"
+#include "store-api.hh"
+
+#include <nlohmann/json.hpp>
+
+namespace nix::flake {
+
+FlakeRef flakeRefFromJson(const nlohmann::json & json)
+{
+ return FlakeRef::fromAttrs(jsonToAttrs(json));
+}
+
+FlakeRef getFlakeRef(
+ const nlohmann::json & json,
+ const char * version3Attr1,
+ const char * version3Attr2,
+ const char * version4Attr)
+{
+ auto i = json.find(version4Attr);
+ if (i != json.end())
+ return flakeRefFromJson(*i);
+
+ // FIXME: remove these.
+ i = json.find(version3Attr1);
+ if (i != json.end())
+ return parseFlakeRef(*i);
+
+ i = json.find(version3Attr2);
+ if (i != json.end())
+ return parseFlakeRef(*i);
+
+ throw Error("attribute '%s' missing in lock file", version4Attr);
+}
+
+LockedNode::LockedNode(const nlohmann::json & json)
+ : lockedRef(getFlakeRef(json, "url", "uri", "locked"))
+ , originalRef(getFlakeRef(json, "originalUrl", "originalUri", "original"))
+ , info(TreeInfo::fromJson(json))
+ , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
+{
+ if (!lockedRef.input->isImmutable())
+ throw Error("lockfile contains mutable flakeref '%s'", lockedRef);
+}
+
+StorePath LockedNode::computeStorePath(Store & store) const
+{
+ return info.computeStorePath(store);
+}
+
+std::shared_ptr<Node> Node::findInput(const InputPath & path)
+{
+ auto pos = shared_from_this();
+
+ for (auto & elem : path) {
+ auto i = pos->inputs.find(elem);
+ if (i == pos->inputs.end())
+ return {};
+ pos = i->second;
+ }
+
+ return pos;
+}
+
+LockFile::LockFile(const nlohmann::json & json, const Path & path)
+{
+ auto version = json.value("version", 0);
+ if (version < 3 || version > 5)
+ throw Error("lock file '%s' has unsupported version %d", path, version);
+
+ if (version < 5) {
+ std::function<void(Node & node, const nlohmann::json & json)> getInputs;
+
+ getInputs = [&](Node & node, const nlohmann::json & json)
+ {
+ for (auto & i : json["inputs"].items()) {
+ auto input = std::make_shared<LockedNode>(i.value());
+ getInputs(*input, i.value());
+ node.inputs.insert_or_assign(i.key(), input);
+ }
+ };
+
+ getInputs(*root, json);
+ }
+
+ else {
+ std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap;
+
+ std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;
+
+ getInputs = [&](Node & node, const nlohmann::json & jsonNode)
+ {
+ if (jsonNode.find("inputs") == jsonNode.end()) return;
+ for (auto & i : jsonNode["inputs"].items()) {
+ std::string inputKey = i.value();
+ auto k = nodeMap.find(inputKey);
+ if (k == nodeMap.end()) {
+ auto jsonNode2 = json["nodes"][inputKey];
+ auto input = std::make_shared<LockedNode>(jsonNode2);
+ k = nodeMap.insert_or_assign(inputKey, input).first;
+ getInputs(*input, jsonNode2);
+ }
+ node.inputs.insert_or_assign(i.key(), k->second);
+ }
+ };
+
+ std::string rootKey = json["root"];
+ nodeMap.insert_or_assign(rootKey, root);
+ getInputs(*root, json["nodes"][rootKey]);
+ }
+}
+
+nlohmann::json LockFile::toJson() const
+{
+ nlohmann::json nodes;
+ std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys;
+ std::unordered_set<std::string> keys;
+
+ std::function<std::string(const std::string & key, std::shared_ptr<const Node> node)> dumpNode;
+
+ dumpNode = [&](std::string key, std::shared_ptr<const Node> node) -> std::string
+ {
+ auto k = nodeKeys.find(node);
+ if (k != nodeKeys.end())
+ return k->second;
+
+ if (!keys.insert(key).second) {
+ for (int n = 2; ; ++n) {
+ auto k = fmt("%s_%d", key, n);
+ if (keys.insert(k).second) {
+ key = k;
+ break;
+ }
+ }
+ }
+
+ nodeKeys.insert_or_assign(node, key);
+
+ auto n = nlohmann::json::object();
+
+ if (!node->inputs.empty()) {
+ auto inputs = nlohmann::json::object();
+ for (auto & i : node->inputs)
+ inputs[i.first] = dumpNode(i.first, i.second);
+ n["inputs"] = std::move(inputs);
+ }
+
+ if (auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(node)) {
+ n["original"] = fetchers::attrsToJson(lockedNode->originalRef.toAttrs());
+ n["locked"] = fetchers::attrsToJson(lockedNode->lockedRef.toAttrs());
+ n["info"] = lockedNode->info.toJson();
+ if (!lockedNode->isFlake) n["flake"] = false;
+ }
+
+ nodes[key] = std::move(n);
+
+ return key;
+ };
+
+ nlohmann::json json;
+ json["version"] = 5;
+ json["root"] = dumpNode("root", root);
+ json["nodes"] = std::move(nodes);
+
+ return json;
+}
+
+std::string LockFile::to_string() const
+{
+ return toJson().dump(2);
+}
+
+LockFile LockFile::read(const Path & path)
+{
+ if (!pathExists(path)) return LockFile();
+ return LockFile(nlohmann::json::parse(readFile(path)), path);
+}
+
+std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
+{
+ stream << lockFile.toJson().dump(2);
+ return stream;
+}
+
+void LockFile::write(const Path & path) const
+{
+ createDirs(dirOf(path));
+ writeFile(path, fmt("%s\n", *this));
+}
+
+bool LockFile::isImmutable() const
+{
+ std::unordered_set<std::shared_ptr<const Node>> nodes;
+
+ std::function<void(std::shared_ptr<const Node> node)> visit;
+
+ visit = [&](std::shared_ptr<const Node> node)
+ {
+ if (!nodes.insert(node).second) return;
+ for (auto & i : node->inputs) visit(i.second);
+ };
+
+ visit(root);
+
+ for (auto & i : nodes) {
+ if (i == root) continue;
+ auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(i);
+ if (lockedNode && !lockedNode->lockedRef.input->isImmutable()) return false;
+ }
+
+ return true;
+}
+
+bool LockFile::operator ==(const LockFile & other) const
+{
+ // FIXME: slow
+ return toJson() == other.toJson();
+}
+
+InputPath parseInputPath(std::string_view s)
+{
+ InputPath path;
+
+ for (auto & elem : tokenizeString<std::vector<std::string>>(s, "/")) {
+ if (!std::regex_match(elem, flakeIdRegex))
+ throw Error("invalid flake input path element '%s'", elem);
+ path.push_back(elem);
+ }
+
+ return path;
+}
+
+static void flattenLockFile(
+ std::shared_ptr<const Node> node,
+ const InputPath & prefix,
+ std::unordered_set<std::shared_ptr<const Node>> & done,
+ std::map<InputPath, std::shared_ptr<const LockedNode>> & res)
+{
+ if (!done.insert(node).second) return;
+
+ for (auto &[id, input] : node->inputs) {
+ auto inputPath(prefix);
+ inputPath.push_back(id);
+ if (auto lockedInput = std::dynamic_pointer_cast<const LockedNode>(input))
+ res.emplace(inputPath, lockedInput);
+ flattenLockFile(input, inputPath, done, res);
+ }
+}
+
+std::string diffLockFiles(const LockFile & oldLocks, const LockFile & newLocks)
+{
+ std::unordered_set<std::shared_ptr<const Node>> done;
+ std::map<InputPath, std::shared_ptr<const LockedNode>> oldFlat, newFlat;
+ flattenLockFile(oldLocks.root, {}, done, oldFlat);
+ done.clear();
+ flattenLockFile(newLocks.root, {}, done, newFlat);
+
+ auto i = oldFlat.begin();
+ auto j = newFlat.begin();
+ std::string res;
+
+ while (i != oldFlat.end() || j != newFlat.end()) {
+ if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) {
+ res += fmt("* Added '%s': '%s'\n", concatStringsSep("/", j->first), j->second->lockedRef);
+ ++j;
+ } else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) {
+ res += fmt("* Removed '%s'\n", concatStringsSep("/", i->first));
+ ++i;
+ } else {
+ if (!(i->second->lockedRef == j->second->lockedRef)) {
+ assert(i->second->lockedRef.to_string() != j->second->lockedRef.to_string());
+ res += fmt("* Updated '%s': '%s' -> '%s'\n",
+ concatStringsSep("/", i->first),
+ i->second->lockedRef,
+ j->second->lockedRef);
+ }
+ ++i;
+ ++j;
+ }
+ }
+
+ return res;
+}
+
+}
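A sketch of the lock-file helpers above in use: LockFile::read() returns an empty lock file for a missing path, diffLockFiles() produces the "Added/Removed/Updated" summary that lockFlake() prints, and isImmutable() is what gates writing the file (reportLockChanges is a hypothetical helper; the new lock file would normally come from lockFlake()):

    // Sketch: comparing an on-disk lock file against a freshly computed one.
    #include "lockfile.hh"
    #include <iostream>

    using namespace nix;
    using namespace nix::flake;

    void reportLockChanges(const Path & path, const LockFile & newLockFile)
    {
        auto oldLockFile = LockFile::read(path); // empty LockFile if 'path' does not exist

        if (!(newLockFile == oldLockFile)) {
            std::cout << diffLockFiles(oldLockFile, newLockFile);

            if (!newLockFile.isImmutable())
                std::cout << "note: the new lock file still contains mutable inputs\n";
        }
    }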
diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh
new file mode 100644
index 000000000..c34939ebc
--- /dev/null
+++ b/src/libexpr/flake/lockfile.hh
@@ -0,0 +1,77 @@
+#pragma once
+
+#include "flakeref.hh"
+
+#include <nlohmann/json_fwd.hpp>
+
+namespace nix {
+class Store;
+struct StorePath;
+}
+
+namespace nix::flake {
+
+using namespace fetchers;
+
+typedef std::vector<FlakeId> InputPath;
+
+/* A node in the lock file. It has outgoing edges to other nodes (its
+ inputs). Only the root node has this type; all other nodes have
+ type LockedNode. */
+struct Node : std::enable_shared_from_this<Node>
+{
+ std::map<FlakeId, std::shared_ptr<Node>> inputs;
+
+ virtual ~Node() { }
+
+ std::shared_ptr<Node> findInput(const InputPath & path);
+};
+
+/* A non-root node in the lock file. */
+struct LockedNode : Node
+{
+ FlakeRef lockedRef, originalRef;
+ TreeInfo info;
+ bool isFlake = true;
+
+ LockedNode(
+ const FlakeRef & lockedRef,
+ const FlakeRef & originalRef,
+ const TreeInfo & info,
+ bool isFlake = true)
+ : lockedRef(lockedRef), originalRef(originalRef), info(info), isFlake(isFlake)
+ { }
+
+ LockedNode(const nlohmann::json & json);
+
+ StorePath computeStorePath(Store & store) const;
+};
+
+struct LockFile
+{
+ std::shared_ptr<Node> root = std::make_shared<Node>();
+
+ LockFile() {};
+ LockFile(const nlohmann::json & json, const Path & path);
+
+ nlohmann::json toJson() const;
+
+ std::string to_string() const;
+
+ static LockFile read(const Path & path);
+
+ void write(const Path & path) const;
+
+ bool isImmutable() const;
+
+ bool operator ==(const LockFile & other) const;
+};
+
+std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile);
+
+InputPath parseInputPath(std::string_view s);
+
+std::string diffLockFiles(const LockFile & oldLocks, const LockFile & newLocks);
+
+}
+
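Finally, a sketch of the Node graph itself: inputs form a (possibly cyclic) graph of shared_ptr<Node> keyed by FlakeId, and findInput() walks an InputPath produced by parseInputPath(). Plain Node objects are used here because constructing a LockedNode additionally needs a FlakeRef and TreeInfo (demoFindInput is a hypothetical helper; real graphs are built by the LockFile JSON constructor or by lockFlake()):

    // Sketch: resolving an input path through a hand-built Node graph.
    #include "lockfile.hh"
    #include <cassert>
    #include <memory>

    using namespace nix::flake;

    void demoFindInput()
    {
        auto root = std::make_shared<Node>();
        auto nixpkgs = std::make_shared<Node>();
        auto utils = std::make_shared<Node>();

        root->inputs.insert_or_assign("nixpkgs", nixpkgs);
        root->inputs.insert_or_assign("utils", utils);
        // 'utils/nixpkgs' shares the top-level 'nixpkgs' node, as lockFlake()
        // arranges for 'follows' edges.
        utils->inputs.insert_or_assign("nixpkgs", nixpkgs);

        assert(root->findInput(parseInputPath("utils/nixpkgs")) == nixpkgs);
        assert(!root->findInput(parseInputPath("utils/missing")));
    }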