aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEelco Dolstra <edolstra@gmail.com>2019-06-06 12:28:20 +0200
committerGitHub <noreply@github.com>2019-06-06 12:28:20 +0200
commit671f16aee04a2457f22156873e65715b8c4aa8a9 (patch)
tree4e8f84b5682b0480deb2c4943d244b9a5fe552e0
parentc7c562416c75ed60e024f84e4ac440e29b98e0e3 (diff)
parent54aff8430c4e7739903f6dbed713cc088e38507f (diff)
Merge pull request #2920 from NixOS/lazy-flakes
Lazy flake input fetching
-rw-r--r--flake.lock9
-rw-r--r--src/libexpr/eval.cc2
-rw-r--r--src/libexpr/flake/flake.cc (renamed from src/libexpr/primops/flake.cc)321
-rw-r--r--src/libexpr/flake/flake.hh (renamed from src/libexpr/primops/flake.hh)70
-rw-r--r--src/libexpr/flake/flakeref.cc (renamed from src/libexpr/primops/flakeref.cc)0
-rw-r--r--src/libexpr/flake/flakeref.hh (renamed from src/libexpr/primops/flakeref.hh)0
-rw-r--r--src/libexpr/flake/lockfile.cc102
-rw-r--r--src/libexpr/flake/lockfile.hh112
-rw-r--r--src/libexpr/local.mk7
-rw-r--r--src/nix/flake.cc8
-rw-r--r--src/nix/installables.cc26
-rw-r--r--src/nlohmann/json_fwd.hpp10
-rw-r--r--tests/flakes.sh51
13 files changed, 460 insertions, 258 deletions
diff --git a/flake.lock b/flake.lock
index b85571b61..727d16d73 100644
--- a/flake.lock
+++ b/flake.lock
@@ -1,10 +1,13 @@
{
"inputs": {
"nixpkgs": {
- "narHash": "sha256-rMiWaLXkhizEEMEeMDutUl0Y/c+VEjfjvMkvBwvuQJU=",
- "uri": "github:edolstra/nixpkgs/eeeffd24cd7e407cfaa99e98cfbb8f93bf4cc033"
+ "id": "nixpkgs",
+ "inputs": {},
+ "narHash": "sha256-eYtxncIMFVmOHaHBtTdPGcs/AnJqKqA6tHCm0UmPYQU=",
+ "nonFlakeInputs": {},
+ "uri": "github:edolstra/nixpkgs/e9d5882bb861dc48f8d46960e7c820efdbe8f9c1"
}
},
"nonFlakeInputs": {},
- "version": 1
+ "version": 2
}
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 0f8a105b1..46c622ee8 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -7,7 +7,7 @@
#include "eval-inline.hh"
#include "download.hh"
#include "json.hh"
-#include "primops/flake.hh"
+#include "flake/flake.hh"
#include <algorithm>
#include <cstring>
diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/flake/flake.cc
index e5035c53a..bb0543541 100644
--- a/src/libexpr/primops/flake.cc
+++ b/src/libexpr/flake/flake.cc
@@ -1,7 +1,8 @@
#include "flake.hh"
+#include "lockfile.hh"
#include "primops.hh"
#include "eval-inline.hh"
-#include "fetchGit.hh"
+#include "primops/fetchGit.hh"
#include "download.hh"
#include "args.hh"
@@ -43,106 +44,13 @@ std::shared_ptr<FlakeRegistry> readRegistry(const Path & path)
void writeRegistry(const FlakeRegistry & registry, const Path & path)
{
nlohmann::json json;
- json["version"] = 1;
+ json["version"] = 2;
for (auto elem : registry.entries)
json["flakes"][elem.first.to_string()] = { {"uri", elem.second.to_string()} };
createDirs(dirOf(path));
writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file.
}
-LockFile::FlakeEntry readFlakeEntry(nlohmann::json json)
-{
- FlakeRef flakeRef(json["uri"]);
- if (!flakeRef.isImmutable())
- throw Error("cannot use mutable flake '%s' in pure mode", flakeRef);
-
- LockFile::FlakeEntry entry(flakeRef, Hash((std::string) json["narHash"]));
-
- auto nonFlakeInputs = json["nonFlakeInputs"];
-
- for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) {
- FlakeRef flakeRef(i->value("uri", ""));
- if (!flakeRef.isImmutable())
- throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef);
- LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("narHash", "")));
- entry.nonFlakeEntries.insert_or_assign(i.key(), nonEntry);
- }
-
- auto inputs = json["inputs"];
-
- for (auto i = inputs.begin(); i != inputs.end(); ++i)
- entry.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i));
-
- return entry;
-}
-
-LockFile readLockFile(const Path & path)
-{
- LockFile lockFile;
-
- if (!pathExists(path))
- return lockFile;
-
- auto json = nlohmann::json::parse(readFile(path));
-
- auto version = json.value("version", 0);
- if (version != 1)
- throw Error("lock file '%s' has unsupported version %d", path, version);
-
- auto nonFlakeInputs = json["nonFlakeInputs"];
-
- for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) {
- FlakeRef flakeRef(i->value("uri", ""));
- LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("narHash", "")));
- if (!flakeRef.isImmutable())
- throw Error("found mutable FlakeRef '%s' in lockfile at path %s", flakeRef, path);
- lockFile.nonFlakeEntries.insert_or_assign(i.key(), nonEntry);
- }
-
- auto inputs = json["inputs"];
-
- for (auto i = inputs.begin(); i != inputs.end(); ++i)
- lockFile.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i));
-
- return lockFile;
-}
-
-nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry)
-{
- nlohmann::json json;
- json["uri"] = entry.ref.to_string();
- json["narHash"] = entry.narHash.to_string(SRI);
- for (auto & x : entry.nonFlakeEntries) {
- json["nonFlakeInputs"][x.first]["uri"] = x.second.ref.to_string();
- json["nonFlakeInputs"][x.first]["narHash"] = x.second.narHash.to_string(SRI);
- }
- for (auto & x : entry.flakeEntries)
- json["inputs"][x.first.to_string()] = flakeEntryToJson(x.second);
- return json;
-}
-
-std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
-{
- nlohmann::json json;
- json["version"] = 1;
- json["nonFlakeInputs"] = nlohmann::json::object();
- for (auto & x : lockFile.nonFlakeEntries) {
- json["nonFlakeInputs"][x.first]["uri"] = x.second.ref.to_string();
- json["nonFlakeInputs"][x.first]["narHash"] = x.second.narHash.to_string(SRI);
- }
- json["inputs"] = nlohmann::json::object();
- for (auto & x : lockFile.flakeEntries)
- json["inputs"][x.first.to_string()] = flakeEntryToJson(x.second);
- stream << json.dump(4); // '4' = indentation in json file
- return stream;
-}
-
-void writeLockFile(const LockFile & lockFile, const Path & path)
-{
- createDirs(dirOf(path));
- writeFile(path, fmt("%s\n", lockFile));
-}
-
Path getUserRegistryPath()
{
return getHome() + "/.config/nix/registry.json";
@@ -368,7 +276,7 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe
}
// Get the `NonFlake` corresponding to a `FlakeRef`.
-NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias alias, bool impureIsAllowed = false)
+NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false)
{
auto sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed);
debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string());
@@ -382,38 +290,9 @@ NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias al
if (state.allowedPaths)
state.allowedPaths->insert(nonFlake.sourceInfo.storePath);
- nonFlake.alias = alias;
-
return nonFlake;
}
-LockFile entryToLockFile(const LockFile::FlakeEntry & entry)
-{
- LockFile lockFile;
- lockFile.flakeEntries = entry.flakeEntries;
- lockFile.nonFlakeEntries = entry.nonFlakeEntries;
- return lockFile;
-}
-
-LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake)
-{
- LockFile::FlakeEntry entry(
- resolvedFlake.flake.sourceInfo.resolvedRef,
- resolvedFlake.flake.sourceInfo.narHash);
-
- for (auto & info : resolvedFlake.flakeDeps)
- entry.flakeEntries.insert_or_assign(info.first.to_string(), dependenciesToFlakeEntry(info.second));
-
- for (auto & nonFlake : resolvedFlake.nonFlakeDeps) {
- LockFile::NonFlakeEntry nonEntry(
- nonFlake.sourceInfo.resolvedRef,
- nonFlake.sourceInfo.narHash);
- entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonEntry);
- }
-
- return entry;
-}
-
bool allowedToWrite(HandleLockFile handle)
{
return handle == UpdateLockFile || handle == RecreateLockFile;
@@ -435,70 +314,84 @@ bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef)
else assert(false);
}
-ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flakeRef,
- HandleLockFile handleLockFile, LockFile lockFile = {}, bool topRef = false)
+/* Given a flakeref and its subtree of the lockfile, return an updated
+ subtree of the lockfile. That is, if the 'flake.nix' of the
+ referenced flake has inputs that don't have a corresponding entry
+ in the lockfile, they're added to the lockfile; conversely, any
+ lockfile entries that don't have a corresponding entry in flake.nix
+ are removed.
+
+ Note that this is lazy: we only recursively fetch inputs that are
+ not in the lockfile yet. */
+static std::pair<Flake, FlakeInput> updateLocks(
+ EvalState & state,
+ const Flake & flake,
+ HandleLockFile handleLockFile,
+ const FlakeInputs & oldEntry,
+ bool topRef)
{
- Flake flake = getFlake(state, flakeRef, allowedToUseRegistries(handleLockFile, topRef));
-
- ResolvedFlake deps(flake);
-
- for (auto & nonFlakeInfo : flake.nonFlakeInputs) {
- FlakeRef ref = nonFlakeInfo.second;
- auto i = lockFile.nonFlakeEntries.find(nonFlakeInfo.first);
- if (i != lockFile.nonFlakeEntries.end()) {
- NonFlake nonFlake = getNonFlake(state, i->second.ref, nonFlakeInfo.first);
- if (nonFlake.sourceInfo.narHash != i->second.narHash)
- throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string());
- deps.nonFlakeDeps.push_back(nonFlake);
+ FlakeInput newEntry(
+ flake.id,
+ flake.sourceInfo.resolvedRef,
+ flake.sourceInfo.narHash);
+
+ for (auto & input : flake.nonFlakeInputs) {
+ auto & id = input.first;
+ auto & ref = input.second;
+ auto i = oldEntry.nonFlakeInputs.find(id);
+ if (i != oldEntry.nonFlakeInputs.end()) {
+ newEntry.nonFlakeInputs.insert_or_assign(i->first, i->second);
} else {
if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
- throw Error("cannot update non-flake dependency '%s' in pure mode", nonFlakeInfo.first);
- deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first, allowedToUseRegistries(handleLockFile, false)));
+ throw Error("cannot update non-flake dependency '%s' in pure mode", id);
+ auto nonFlake = getNonFlake(state, ref, allowedToUseRegistries(handleLockFile, false));
+ newEntry.nonFlakeInputs.insert_or_assign(id,
+ NonFlakeInput(
+ nonFlake.sourceInfo.resolvedRef,
+ nonFlake.sourceInfo.narHash));
}
}
- for (auto newFlakeRef : flake.inputs) {
- auto i = lockFile.flakeEntries.find(newFlakeRef);
- if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible
- ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, handleLockFile, entryToLockFile(i->second));
- if (newResFlake.flake.sourceInfo.narHash != i->second.narHash)
- throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string());
- deps.flakeDeps.insert_or_assign(newFlakeRef, newResFlake);
+ for (auto & inputRef : flake.inputs) {
+ auto i = oldEntry.flakeInputs.find(inputRef);
+ if (i != oldEntry.flakeInputs.end()) {
+ newEntry.flakeInputs.insert_or_assign(inputRef, i->second);
} else {
if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
- throw Error("cannot update flake dependency '%s' in pure mode", newFlakeRef.to_string());
- deps.flakeDeps.insert_or_assign(newFlakeRef, resolveFlakeFromLockFile(state, newFlakeRef, handleLockFile));
+ throw Error("cannot update flake dependency '%s' in pure mode", inputRef);
+ newEntry.flakeInputs.insert_or_assign(inputRef,
+ updateLocks(state,
+ getFlake(state, inputRef, allowedToUseRegistries(handleLockFile, false)),
+ handleLockFile, {}, false).second);
}
}
- return deps;
+ return {flake, newEntry};
}
-/* Given a flake reference, recursively fetch it and its dependencies.
- FIXME: this should return a graph of flakes.
-*/
+/* Compute an in-memory lockfile for the specified top-level flake,
+   and optionally write it to file, if the flake is writable. */
ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile)
{
- Flake flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true));
+ auto flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true));
+
LockFile oldLockFile;
if (!recreateLockFile(handleLockFile)) {
// If recreateLockFile, start with an empty lockfile
// FIXME: symlink attack
- oldLockFile = readLockFile(
+ oldLockFile = LockFile::read(
state.store->toRealPath(flake.sourceInfo.storePath)
+ "/" + flake.sourceInfo.resolvedRef.subdir + "/flake.lock");
}
- LockFile lockFile(oldLockFile);
-
- ResolvedFlake resFlake = resolveFlakeFromLockFile(state, topRef, handleLockFile, lockFile, true);
- lockFile = entryToLockFile(dependenciesToFlakeEntry(resFlake));
+ LockFile lockFile(updateLocks(
+ state, flake, handleLockFile, oldLockFile, true).second);
if (!(lockFile == oldLockFile)) {
if (allowedToWrite(handleLockFile)) {
if (auto refData = std::get_if<FlakeRef::IsPath>(&topRef.data)) {
- writeLockFile(lockFile, refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock");
+ lockFile.write(refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock");
// Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store.
runProgram("git", true, { "-C", refData->path, "add",
@@ -509,7 +402,7 @@ ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLoc
warn("using updated lockfile without writing it to file");
}
- return resFlake;
+ return ResolvedFlake(std::move(flake), std::move(lockFile));
}
void updateLockFile(EvalState & state, const FlakeRef & flakeRef, bool recreateLockFile)
@@ -520,7 +413,7 @@ void updateLockFile(EvalState & state, const FlakeRef & flakeRef, bool recreateL
static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo, Value & vAttrs)
{
auto & path = sourceInfo.storePath;
- state.store->isValidPath(path);
+ assert(state.store->isValidPath(path));
mkString(*state.allocAttr(vAttrs, state.sOutPath), path, {path});
if (sourceInfo.resolvedRef.rev) {
@@ -539,40 +432,104 @@ static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo
std::put_time(std::gmtime(&*sourceInfo.lastModified), "%Y%m%d%H%M%S")));
}
-void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v)
+/* Helper primop to make callFlake (below) fetch/call its inputs
+ lazily. Note that this primop cannot be called by user code since
+ it doesn't appear in 'builtins'. */
+static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
- // Construct the resulting attrset '{description, outputs,
- // ...}'. This attrset is passed lazily as an argument to 'outputs'.
+ auto lazyFlake = (FlakeInput *) args[0]->attrs;
+ auto flake = getFlake(state, lazyFlake->ref, false);
- state.mkAttrs(v, resFlake.flakeDeps.size() + resFlake.nonFlakeDeps.size() + 8);
+ if (flake.sourceInfo.narHash != lazyFlake->narHash)
+ throw Error("the content hash of flake '%s' doesn't match the hash recorded in the referring lockfile", flake.sourceInfo.resolvedRef);
- for (auto info : resFlake.flakeDeps) {
- const ResolvedFlake newResFlake = info.second;
- auto vFlake = state.allocAttr(v, newResFlake.flake.id);
- callFlake(state, newResFlake, *vFlake);
- }
+ callFlake(state, flake, *lazyFlake, v);
+}
+
+static void prim_callNonFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ auto lazyNonFlake = (NonFlakeInput *) args[0]->attrs;
+
+ auto nonFlake = getNonFlake(state, lazyNonFlake->ref);
- for (const NonFlake nonFlake : resFlake.nonFlakeDeps) {
- auto vNonFlake = state.allocAttr(v, nonFlake.alias);
- state.mkAttrs(*vNonFlake, 8);
+ if (nonFlake.sourceInfo.narHash != lazyNonFlake->narHash)
+ throw Error("the content hash of repository '%s' doesn't match the hash recorded in the referring lockfile", nonFlake.sourceInfo.resolvedRef);
- state.store->isValidPath(nonFlake.sourceInfo.storePath);
- mkString(*state.allocAttr(*vNonFlake, state.sOutPath),
- nonFlake.sourceInfo.storePath, {nonFlake.sourceInfo.storePath});
+ state.mkAttrs(v, 8);
+
+ assert(state.store->isValidPath(nonFlake.sourceInfo.storePath));
+
+ mkString(*state.allocAttr(v, state.sOutPath),
+ nonFlake.sourceInfo.storePath, {nonFlake.sourceInfo.storePath});
+
+ emitSourceInfoAttrs(state, nonFlake.sourceInfo, v);
+}
- emitSourceInfoAttrs(state, nonFlake.sourceInfo, *vNonFlake);
+void callFlake(EvalState & state,
+ const Flake & flake,
+ const FlakeInputs & inputs,
+ Value & vRes)
+{
+ // Construct the resulting attrset '{outputs, ...}'. This attrset
+ // is passed lazily as an argument to the 'outputs' function.
+
+ auto & v = *state.allocValue();
+
+ state.mkAttrs(v,
+ inputs.flakeInputs.size() +
+ inputs.nonFlakeInputs.size() + 8);
+
+ for (auto & dep : inputs.flakeInputs) {
+ auto vFlake = state.allocAttr(v, dep.second.id);
+ auto vPrimOp = state.allocValue();
+ static auto primOp = new PrimOp(prim_callFlake, 1, state.symbols.create("callFlake"));
+ vPrimOp->type = tPrimOp;
+ vPrimOp->primOp = primOp;
+ auto vArg = state.allocValue();
+ vArg->type = tNull;
+ // FIXME: leak
+ vArg->attrs = (Bindings *) new FlakeInput(dep.second); // evil! also inefficient
+ mkApp(*vFlake, *vPrimOp, *vArg);
+ }
+
+ for (auto & dep : inputs.nonFlakeInputs) {
+ auto vNonFlake = state.allocAttr(v, dep.first);
+ auto vPrimOp = state.allocValue();
+ static auto primOp = new PrimOp(prim_callNonFlake, 1, state.symbols.create("callNonFlake"));
+ vPrimOp->type = tPrimOp;
+ vPrimOp->primOp = primOp;
+ auto vArg = state.allocValue();
+ vArg->type = tNull;
+ // FIXME: leak
+ vArg->attrs = (Bindings *) new NonFlakeInput(dep.second); // evil! also inefficient
+ mkApp(*vNonFlake, *vPrimOp, *vArg);
}
- mkString(*state.allocAttr(v, state.sDescription), resFlake.flake.description);
+ mkString(*state.allocAttr(v, state.sDescription), flake.description);
- emitSourceInfoAttrs(state, resFlake.flake.sourceInfo, v);
+ emitSourceInfoAttrs(state, flake.sourceInfo, v);
auto vOutputs = state.allocAttr(v, state.symbols.create("outputs"));
- mkApp(*vOutputs, *resFlake.flake.vOutputs, v);
+ mkApp(*vOutputs, *flake.vOutputs, v);
v.attrs->push_back(Attr(state.symbols.create("self"), &v));
v.attrs->sort();
+
+ /* For convenience, put the outputs directly in the result, so you
+ can refer to an output of an input as 'inputs.foo.bar' rather
+ than 'inputs.foo.outputs.bar'. */
+ auto v2 = *state.allocValue();
+ state.eval(state.parseExprFromString("res: res.outputs // res", "/"), v2);
+
+ state.callFunction(v2, v, vRes, noPos);
+}
+
+void callFlake(EvalState & state,
+ const ResolvedFlake & resFlake,
+ Value & v)
+{
+ callFlake(state, resFlake.flake, resFlake.lockFile, v);
}
// This function is exposed to be used in nix files.
diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/flake/flake.hh
index 82b0973f6..b8d0da252 100644
--- a/src/libexpr/primops/flake.hh
+++ b/src/libexpr/flake/flake.hh
@@ -1,7 +1,8 @@
+#pragma once
+
#include "types.hh"
#include "flakeref.hh"
-
-#include <variant>
+#include "lockfile.hh"
namespace nix {
@@ -19,50 +20,11 @@ struct FlakeRegistry
std::map<FlakeRef, FlakeRef> entries;
};
-struct LockFile
-{
- struct NonFlakeEntry
- {
- FlakeRef ref;
- Hash narHash;
- NonFlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), narHash(hash) {};
-
- bool operator ==(const NonFlakeEntry & other) const
- {
- return ref == other.ref && narHash == other.narHash;
- }
- };
-
- struct FlakeEntry
- {
- FlakeRef ref;
- Hash narHash;
- std::map<FlakeRef, FlakeEntry> flakeEntries;
- std::map<FlakeAlias, NonFlakeEntry> nonFlakeEntries;
- FlakeEntry(const FlakeRef & flakeRef, const Hash & hash) : ref(flakeRef), narHash(hash) {};
-
- bool operator ==(const FlakeEntry & other) const
- {
- return
- ref == other.ref
- && narHash == other.narHash
- && flakeEntries == other.flakeEntries
- && nonFlakeEntries == other.nonFlakeEntries;
- }
- };
-
- std::map<FlakeRef, FlakeEntry> flakeEntries;
- std::map<FlakeAlias, NonFlakeEntry> nonFlakeEntries;
+typedef std::vector<std::shared_ptr<FlakeRegistry>> Registries;
- bool operator ==(const LockFile & other) const
- {
- return
- flakeEntries == other.flakeEntries
- && nonFlakeEntries == other.nonFlakeEntries;
- }
-};
+std::shared_ptr<FlakeRegistry> readRegistry(const Path &);
-typedef std::vector<std::shared_ptr<FlakeRegistry>> Registries;
+void writeRegistry(const FlakeRegistry &, const Path &);
Path getUserRegistryPath();
@@ -75,10 +37,6 @@ enum HandleLockFile : unsigned int
, UseNewLockFile // `RecreateLockFile` without writing to file
};
-std::shared_ptr<FlakeRegistry> readRegistry(const Path &);
-
-void writeRegistry(const FlakeRegistry &, const Path &);
-
struct SourceInfo
{
// Immutable flakeref that this source tree was obtained from.
@@ -117,7 +75,6 @@ struct Flake
struct NonFlake
{
- FlakeAlias alias;
FlakeRef originalRef;
SourceInfo sourceInfo;
NonFlake(const FlakeRef & origRef, const SourceInfo & sourceInfo)
@@ -129,14 +86,21 @@ Flake getFlake(EvalState &, const FlakeRef &, bool impureIsAllowed);
struct ResolvedFlake
{
Flake flake;
- std::map<FlakeRef, ResolvedFlake> flakeDeps; // The key in this map, is the originalRef as written in flake.nix
- std::vector<NonFlake> nonFlakeDeps;
- ResolvedFlake(const Flake & flake) : flake(flake) {}
+ LockFile lockFile;
+ ResolvedFlake(Flake && flake, LockFile && lockFile)
+ : flake(flake), lockFile(lockFile) {}
};
ResolvedFlake resolveFlake(EvalState &, const FlakeRef &, HandleLockFile);
-void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v);
+void callFlake(EvalState & state,
+ const Flake & flake,
+ const FlakeInputs & inputs,
+ Value & v);
+
+void callFlake(EvalState & state,
+ const ResolvedFlake & resFlake,
+ Value & v);
void updateLockFile(EvalState &, const FlakeRef & flakeRef, bool recreateLockFile);
diff --git a/src/libexpr/primops/flakeref.cc b/src/libexpr/flake/flakeref.cc
index 24af09124..24af09124 100644
--- a/src/libexpr/primops/flakeref.cc
+++ b/src/libexpr/flake/flakeref.cc
diff --git a/src/libexpr/primops/flakeref.hh b/src/libexpr/flake/flakeref.hh
index 52bb82ddb..52bb82ddb 100644
--- a/src/libexpr/primops/flakeref.hh
+++ b/src/libexpr/flake/flakeref.hh
diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc
new file mode 100644
index 000000000..97c748c66
--- /dev/null
+++ b/src/libexpr/flake/lockfile.cc
@@ -0,0 +1,102 @@
+#include "lockfile.hh"
+#include "store-api.hh"
+
+namespace nix::flake {
+
+AbstractInput::AbstractInput(const nlohmann::json & json)
+ : ref(json["uri"])
+ , narHash(Hash((std::string) json["narHash"]))
+{
+ if (!ref.isImmutable())
+ throw Error("lockfile contains mutable flakeref '%s'", ref);
+}
+
+nlohmann::json AbstractInput::toJson() const
+{
+ nlohmann::json json;
+ json["uri"] = ref.to_string();
+ json["narHash"] = narHash.to_string(SRI);
+ return json;
+}
+
+Path AbstractInput::computeStorePath(Store & store) const
+{
+ return store.makeFixedOutputPath(true, narHash, "source");
+}
+
+FlakeInput::FlakeInput(const nlohmann::json & json)
+ : FlakeInputs(json)
+ , AbstractInput(json)
+ , id(json["id"])
+{
+}
+
+nlohmann::json FlakeInput::toJson() const
+{
+ auto json = FlakeInputs::toJson();
+ json.update(AbstractInput::toJson());
+ json["id"] = id;
+ return json;
+}
+
+FlakeInputs::FlakeInputs(const nlohmann::json & json)
+{
+ for (auto & i : json["nonFlakeInputs"].items())
+ nonFlakeInputs.insert_or_assign(i.key(), NonFlakeInput(i.value()));
+
+ for (auto & i : json["inputs"].items())
+ flakeInputs.insert_or_assign(i.key(), FlakeInput(i.value()));
+}
+
+nlohmann::json FlakeInputs::toJson() const
+{
+ nlohmann::json json;
+ {
+ auto j = nlohmann::json::object();
+ for (auto & i : nonFlakeInputs)
+ j[i.first] = i.second.toJson();
+ json["nonFlakeInputs"] = std::move(j);
+ }
+ {
+ auto j = nlohmann::json::object();
+ for (auto & i : flakeInputs)
+ j[i.first.to_string()] = i.second.toJson();
+ json["inputs"] = std::move(j);
+ }
+ return json;
+}
+
+nlohmann::json LockFile::toJson() const
+{
+ auto json = FlakeInputs::toJson();
+ json["version"] = 2;
+ return json;
+}
+
+LockFile LockFile::read(const Path & path)
+{
+ if (pathExists(path)) {
+ auto json = nlohmann::json::parse(readFile(path));
+
+ auto version = json.value("version", 0);
+ if (version != 2)
+ throw Error("lock file '%s' has unsupported version %d", path, version);
+
+ return LockFile(json);
+ } else
+ return LockFile();
+}
+
+std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
+{
+ stream << lockFile.toJson().dump(4); // '4' = indentation in json file
+ return stream;
+}
+
+void LockFile::write(const Path & path) const
+{
+ createDirs(dirOf(path));
+ writeFile(path, fmt("%s\n", *this));
+}
+
+}
diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh
new file mode 100644
index 000000000..b76124190
--- /dev/null
+++ b/src/libexpr/flake/lockfile.hh
@@ -0,0 +1,112 @@
+#pragma once
+
+#include "flakeref.hh"
+
+#include <nlohmann/json.hpp>
+
+namespace nix {
+class Store;
+}
+
+namespace nix::flake {
+
+/* Common lock file information about a flake input, namely the
+ immutable ref and the NAR hash. */
+struct AbstractInput
+{
+ FlakeRef ref;
+ Hash narHash;
+
+ AbstractInput(const FlakeRef & flakeRef, const Hash & narHash)
+ : ref(flakeRef), narHash(narHash)
+ {
+ assert(ref.isImmutable());
+ };
+
+ AbstractInput(const nlohmann::json & json);
+
+ nlohmann::json toJson() const;
+
+ Path computeStorePath(Store & store) const;
+};
+
+/* Lock file information about a non-flake input. */
+struct NonFlakeInput : AbstractInput
+{
+ using AbstractInput::AbstractInput;
+
+ bool operator ==(const NonFlakeInput & other) const
+ {
+ return ref == other.ref && narHash == other.narHash;
+ }
+};
+
+struct FlakeInput;
+
+/* Lock file information about the dependencies of a flake. */
+struct FlakeInputs
+{
+ std::map<FlakeRef, FlakeInput> flakeInputs;
+ std::map<FlakeAlias, NonFlakeInput> nonFlakeInputs;
+
+ FlakeInputs() {};
+ FlakeInputs(const nlohmann::json & json);
+
+ nlohmann::json toJson() const;
+};
+
+/* Lock file information about a flake input. */
+struct FlakeInput : FlakeInputs, AbstractInput
+{
+ FlakeId id;
+
+ FlakeInput(const FlakeId & id, const FlakeRef & flakeRef, const Hash & narHash)
+ : AbstractInput(flakeRef, narHash), id(id) {};
+
+ FlakeInput(const nlohmann::json & json);
+
+ bool operator ==(const FlakeInput & other) const
+ {
+ return
+ id == other.id
+ && ref == other.ref
+ && narHash == other.narHash
+ && flakeInputs == other.flakeInputs
+ && nonFlakeInputs == other.nonFlakeInputs;
+ }
+
+ nlohmann::json toJson() const;
+};
+
+/* An entire lock file. Note that this cannot be a FlakeInput for the
+ top-level flake, because then the lock file would need to contain
+ the hash of the top-level flake, but committing the lock file
+ would invalidate that hash. */
+struct LockFile : FlakeInputs
+{
+ bool operator ==(const LockFile & other) const
+ {
+ return
+ flakeInputs == other.flakeInputs
+ && nonFlakeInputs == other.nonFlakeInputs;
+ }
+
+ LockFile() {}
+ LockFile(const nlohmann::json & json) : FlakeInputs(json) {}
+ LockFile(FlakeInput && dep)
+ {
+ flakeInputs = std::move(dep.flakeInputs);
+ nonFlakeInputs = std::move(dep.nonFlakeInputs);
+ }
+
+ nlohmann::json toJson() const;
+
+ static LockFile read(const Path & path);
+
+ void write(const Path & path) const;
+};
+
+std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile);
+
+}
+
diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk
index ccd5293e4..a9cb6b7b6 100644
--- a/src/libexpr/local.mk
+++ b/src/libexpr/local.mk
@@ -4,7 +4,12 @@ libexpr_NAME = libnixexpr
libexpr_DIR := $(d)
-libexpr_SOURCES := $(wildcard $(d)/*.cc) $(wildcard $(d)/primops/*.cc) $(d)/lexer-tab.cc $(d)/parser-tab.cc
+libexpr_SOURCES := \
+ $(wildcard $(d)/*.cc) \
+ $(wildcard $(d)/primops/*.cc) \
+ $(wildcard $(d)/flake/*.cc) \
+ $(d)/lexer-tab.cc \
+ $(d)/parser-tab.cc
libexpr_LIBS = libutil libstore
diff --git a/src/nix/flake.cc b/src/nix/flake.cc
index 8d6716391..af1a361b3 100644
--- a/src/nix/flake.cc
+++ b/src/nix/flake.cc
@@ -4,7 +4,7 @@
#include "progress-bar.hh"
#include "eval.hh"
#include "eval-inline.hh"
-#include "primops/flake.hh"
+#include "flake/flake.hh"
#include "get-drvs.hh"
#include "store-api.hh"
@@ -122,6 +122,7 @@ static nlohmann::json flakeToJson(const Flake & flake)
return j;
}
+#if 0
static void printNonFlakeInfo(const NonFlake & nonFlake)
{
std::cout << fmt("ID: %s\n", nonFlake.alias);
@@ -173,6 +174,7 @@ struct CmdFlakeDeps : FlakeCommand
}
}
};
+#endif
struct CmdFlakeUpdate : FlakeCommand
{
@@ -232,6 +234,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON
if (json) {
auto json = flakeToJson(flake);
+#if 0
auto state = getEvalState();
auto vFlake = state->allocValue();
@@ -254,6 +257,7 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON
});
json["outputs"] = std::move(outputs);
+#endif
std::cout << json.dump() << std::endl;
} else
@@ -518,7 +522,7 @@ struct CmdFlake : virtual MultiCommand, virtual Command
, make_ref<CmdFlakeUpdate>()
, make_ref<CmdFlakeInfo>()
, make_ref<CmdFlakeCheck>()
- , make_ref<CmdFlakeDeps>()
+ //, make_ref<CmdFlakeDeps>()
, make_ref<CmdFlakeAdd>()
, make_ref<CmdFlakeRemove>()
, make_ref<CmdFlakePin>()
diff --git a/src/nix/installables.cc b/src/nix/installables.cc
index b6f05b314..a85295a09 100644
--- a/src/nix/installables.cc
+++ b/src/nix/installables.cc
@@ -7,7 +7,7 @@
#include "get-drvs.hh"
#include "store-api.hh"
#include "shared.hh"
-#include "primops/flake.hh"
+#include "flake/flake.hh"
#include <regex>
#include <queue>
@@ -195,16 +195,28 @@ void makeFlakeClosureGCRoot(Store & store,
/* Get the store paths of all non-local flakes. */
PathSet closure;
- std::queue<std::reference_wrapper<const flake::ResolvedFlake>> queue;
- queue.push(resFlake);
+ assert(store.isValidPath(resFlake.flake.sourceInfo.storePath));
+ closure.insert(resFlake.flake.sourceInfo.storePath);
+
+ std::queue<std::reference_wrapper<const flake::FlakeInputs>> queue;
+ queue.push(resFlake.lockFile);
while (!queue.empty()) {
- const flake::ResolvedFlake & flake = queue.front();
+ const flake::FlakeInputs & flake = queue.front();
queue.pop();
- if (!std::get_if<FlakeRef::IsPath>(&flake.flake.sourceInfo.resolvedRef.data))
- closure.insert(flake.flake.sourceInfo.storePath);
- for (const auto & dep : flake.flakeDeps)
+ /* Note: due to lazy fetching, these paths might not exist
+ yet. */
+ for (auto & dep : flake.flakeInputs) {
+ auto path = dep.second.computeStorePath(store);
+ if (store.isValidPath(path))
+ closure.insert(path);
queue.push(dep.second);
+ }
+ for (auto & dep : flake.nonFlakeInputs) {
+ auto path = dep.second.computeStorePath(store);
+ if (store.isValidPath(path))
+ closure.insert(path);
+ }
}
if (closure.empty()) return;
diff --git a/src/nlohmann/json_fwd.hpp b/src/nlohmann/json_fwd.hpp
new file mode 100644
index 000000000..ae6e4c64f
--- /dev/null
+++ b/src/nlohmann/json_fwd.hpp
@@ -0,0 +1,10 @@
+#pragma once
+
+namespace nlohmann {
+
+struct json : basic_json<>
+{
+ using basic_json<>::basic_json;
+};
+
+}
diff --git a/tests/flakes.sh b/tests/flakes.sh
index 998abfd09..c380b405b 100644
--- a/tests/flakes.sh
+++ b/tests/flakes.sh
@@ -55,7 +55,7 @@ cat > $flake2Dir/flake.nix <<EOF
description = "Fnord";
outputs = inputs: rec {
- packages.bar = inputs.flake1.outputs.packages.foo;
+ packages.bar = inputs.flake1.packages.foo;
};
}
EOF
@@ -74,7 +74,7 @@ cat > $flake3Dir/flake.nix <<EOF
description = "Fnord";
outputs = inputs: rec {
- packages.xyzzy = inputs.flake2.outputs.packages.bar;
+ packages.xyzzy = inputs.flake2.packages.bar;
};
}
EOF
@@ -83,7 +83,7 @@ git -C $flake3Dir add flake.nix
git -C $flake3Dir commit -m 'Initial'
cat > $nonFlakeDir/README.md <<EOF
-Not much
+FNORD
EOF
git -C $nonFlakeDir add README.md
@@ -176,8 +176,8 @@ cat > $flake3Dir/flake.nix <<EOF
description = "Fnord";
outputs = inputs: rec {
- packages.xyzzy = inputs.flake2.outputs.packages.bar;
- packages.sth = inputs.flake1.outputs.packages.foo;
+ packages.xyzzy = inputs.flake2.packages.bar;
+ packages.sth = inputs.flake1.packages.foo;
};
}
EOF
@@ -191,6 +191,8 @@ nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth
# Check whether it saved the lockfile
[[ ! (-z $(git -C $flake3Dir diff master)) ]]
+git -C $flake3Dir commit -m 'Add lockfile'
+
# Unsupported epochs should be an error.
sed -i $flake3Dir/flake.nix -e s/201906/201909/
nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth 2>&1 | grep 'unsupported epoch'
@@ -202,6 +204,7 @@ nix flake list --flake-registry file://$registry --tarball-ttl 0 | grep -q flake
mv $registry.tmp $registry
# Test whether flakes are registered as GC roots for offline use.
+# FIXME: use tarballs rather than git.
rm -rf $TEST_HOME/.cache
nix build -o $TEST_ROOT/result --flake-registry file://$registry file://$flake2Dir:bar
mv $flake1Dir $flake1Dir.tmp
@@ -230,14 +233,44 @@ cat > $flake3Dir/flake.nix <<EOF
description = "Fnord";
outputs = inputs: rec {
- packages.xyzzy = inputs.flake2.outputs.packages.bar;
- packages.sth = inputs.flake1.outputs.packages.foo;
+ packages.xyzzy = inputs.flake2.packages.bar;
+ packages.sth = inputs.flake1.packages.foo;
+ packages.fnord =
+ with import ./config.nix;
+ mkDerivation {
+ inherit system;
+ name = "fnord";
+ buildCommand = ''
+ cat \${inputs.nonFlake}/README.md > \$out
+ '';
+ };
};
}
EOF
-git -C $flake3Dir add flake.nix
+cp ./config.nix $flake3Dir
+
+git -C $flake3Dir add flake.nix config.nix
git -C $flake3Dir commit -m 'Add nonFlakeInputs'
-# Check whether `nix build` works with a lockfile which is missing a nonFlakeInputs
+# Check whether `nix build` works with a lockfile which is missing a
+# nonFlakeInputs.
nix build -o $TEST_ROOT/result --flake-registry $registry $flake3Dir:sth
+
+git -C $flake3Dir commit -m 'Update nonFlakeInputs'
+
+nix build -o $TEST_ROOT/result --flake-registry $registry flake3:fnord
+[[ $(cat $TEST_ROOT/result) = FNORD ]]
+
+# Check whether flake input fetching is lazy: flake3:sth does not
+# depend on flake2, so this shouldn't fail.
+rm -rf $TEST_HOME/.cache
+clearStore
+mv $flake2Dir $flake2Dir.tmp
+mv $nonFlakeDir $nonFlakeDir.tmp
+nix build -o $TEST_ROOT/result --flake-registry $registry flake3:sth
+(! nix build -o $TEST_ROOT/result --flake-registry $registry flake3:xyzzy)
+(! nix build -o $TEST_ROOT/result --flake-registry $registry flake3:fnord)
+mv $flake2Dir.tmp $flake2Dir
+mv $nonFlakeDir.tmp $nonFlakeDir
+nix build -o $TEST_ROOT/result --flake-registry $registry flake3:xyzzy flake3:fnord