Diffstat (limited to 'src')
-rw-r--r--  src/libexpr/primops/flake.cc | 90
-rw-r--r--  src/libexpr/primops/flake.hh |  6
-rw-r--r--  src/nix/flake.cc             | 20
-rw-r--r--  src/nix/installables.cc      | 14
4 files changed, 65 insertions(+), 65 deletions(-)
diff --git a/src/libexpr/primops/flake.cc b/src/libexpr/primops/flake.cc
index 0d762f2a1..fdbdc83bc 100644
--- a/src/libexpr/primops/flake.cc
+++ b/src/libexpr/primops/flake.cc
@@ -56,21 +56,21 @@ LockFile::FlakeEntry readFlakeEntry(nlohmann::json json)
if (!flakeRef.isImmutable())
throw Error("cannot use mutable flake '%s' in pure mode", flakeRef);
- LockFile::FlakeEntry entry(flakeRef, Hash((std::string) json["contentHash"]));
+ LockFile::FlakeEntry entry(flakeRef, Hash((std::string) json["narHash"]));
- auto nonFlakeRequires = json["nonFlakeRequires"];
+ auto nonFlakeInputs = json["nonFlakeInputs"];
- for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) {
+ for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) {
FlakeRef flakeRef(i->value("uri", ""));
if (!flakeRef.isImmutable())
throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef);
- LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("contentHash", "")));
+ LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("narHash", "")));
entry.nonFlakeEntries.insert_or_assign(i.key(), nonEntry);
}
- auto requires = json["requires"];
+ auto inputs = json["inputs"];
- for (auto i = requires.begin(); i != requires.end(); ++i)
+ for (auto i = inputs.begin(); i != inputs.end(); ++i)
entry.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i));
return entry;
@@ -89,19 +89,19 @@ LockFile readLockFile(const Path & path)
if (version != 1)
throw Error("lock file '%s' has unsupported version %d", path, version);
- auto nonFlakeRequires = json["nonFlakeRequires"];
+ auto nonFlakeInputs = json["nonFlakeInputs"];
- for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) {
+ for (auto i = nonFlakeInputs.begin(); i != nonFlakeInputs.end(); ++i) {
FlakeRef flakeRef(i->value("uri", ""));
- LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("contentHash", "")));
+ LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("narHash", "")));
if (!flakeRef.isImmutable())
throw Error("found mutable FlakeRef '%s' in lockfile at path %s", flakeRef, path);
lockFile.nonFlakeEntries.insert_or_assign(i.key(), nonEntry);
}
- auto requires = json["requires"];
+ auto inputs = json["inputs"];
- for (auto i = requires.begin(); i != requires.end(); ++i)
+ for (auto i = inputs.begin(); i != inputs.end(); ++i)
lockFile.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i));
return lockFile;
@@ -111,13 +111,13 @@ nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry)
{
nlohmann::json json;
json["uri"] = entry.ref.to_string();
- json["contentHash"] = entry.narHash.to_string(SRI);
+ json["narHash"] = entry.narHash.to_string(SRI);
for (auto & x : entry.nonFlakeEntries) {
- json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string();
- json["nonFlakeRequires"][x.first]["contentHash"] = x.second.narHash.to_string(SRI);
+ json["nonFlakeInputs"][x.first]["uri"] = x.second.ref.to_string();
+ json["nonFlakeInputs"][x.first]["narHash"] = x.second.narHash.to_string(SRI);
}
for (auto & x : entry.flakeEntries)
- json["requires"][x.first.to_string()] = flakeEntryToJson(x.second);
+ json["inputs"][x.first.to_string()] = flakeEntryToJson(x.second);
return json;
}
@@ -125,14 +125,14 @@ void writeLockFile(const LockFile & lockFile, const Path & path)
{
nlohmann::json json;
json["version"] = 1;
- json["nonFlakeRequires"] = nlohmann::json::object();
+ json["nonFlakeInputs"] = nlohmann::json::object();
for (auto & x : lockFile.nonFlakeEntries) {
- json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string();
- json["nonFlakeRequires"][x.first]["contentHash"] = x.second.narHash.to_string(SRI);
+ json["nonFlakeInputs"][x.first]["uri"] = x.second.ref.to_string();
+ json["nonFlakeInputs"][x.first]["narHash"] = x.second.narHash.to_string(SRI);
}
- json["requires"] = nlohmann::json::object();
+ json["inputs"] = nlohmann::json::object();
for (auto & x : lockFile.flakeEntries)
- json["requires"][x.first.to_string()] = flakeEntryToJson(x.second);
+ json["inputs"][x.first.to_string()] = flakeEntryToJson(x.second);
createDirs(dirOf(path));
writeFile(path, json.dump(4) + "\n"); // '4' = indentation in json file
}
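
To make the key renames above concrete, here is a minimal, self-contained sketch (not part of this patch) of the lock-file JSON that writeLockFile() now emits and readLockFile()/readFlakeEntry() expect, built with the same nlohmann::json calls the patch uses; the flake URIs and NAR hashes are hypothetical placeholders:

    // Sketch only: illustrates the renamed lock-file keys, not the real code path.
    #include <nlohmann/json.hpp>
    #include <iostream>

    int main()
    {
        nlohmann::json json;
        json["version"] = 1;

        // "nonFlakeRequires"/"contentHash" are now "nonFlakeInputs"/"narHash".
        json["nonFlakeInputs"]["nixpkgs-lib"]["uri"] = "github:example/lib/0123abcd";   // hypothetical
        json["nonFlakeInputs"]["nixpkgs-lib"]["narHash"] = "sha256-placeholder";        // hypothetical

        // "requires" is now "inputs"; real entries nest their own "inputs" recursively.
        json["inputs"]["github:example/dep"]["uri"] = "github:example/dep/4567ef01";    // hypothetical
        json["inputs"]["github:example/dep"]["narHash"] = "sha256-placeholder";         // hypothetical

        std::cout << json.dump(4) << "\n"; // '4' = indentation, as in writeLockFile()

        // Reading the keys back mirrors the iteration in readLockFile().
        auto inputs = json["inputs"];
        for (auto i = inputs.begin(); i != inputs.end(); ++i)
            std::cout << i.key() << " -> " << i->value("narHash", "") << "\n";
        return 0;
    }
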
@@ -319,41 +319,41 @@ Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowe
if (auto description = vInfo.attrs->get(state.sDescription))
flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos);
- auto sRequires = state.symbols.create("requires");
+ auto sInputs = state.symbols.create("inputs");
- if (auto requires = vInfo.attrs->get(sRequires)) {
- state.forceList(*(**requires).value, *(**requires).pos);
- for (unsigned int n = 0; n < (**requires).value->listSize(); ++n)
- flake.requires.push_back(FlakeRef(state.forceStringNoCtx(
- *(**requires).value->listElems()[n], *(**requires).pos)));
+ if (auto inputs = vInfo.attrs->get(sInputs)) {
+ state.forceList(*(**inputs).value, *(**inputs).pos);
+ for (unsigned int n = 0; n < (**inputs).value->listSize(); ++n)
+ flake.inputs.push_back(FlakeRef(state.forceStringNoCtx(
+ *(**inputs).value->listElems()[n], *(**inputs).pos)));
}
- auto sNonFlakeRequires = state.symbols.create("nonFlakeRequires");
+ auto sNonFlakeInputs = state.symbols.create("nonFlakeInputs");
- if (std::optional<Attr *> nonFlakeRequires = vInfo.attrs->get(sNonFlakeRequires)) {
- state.forceAttrs(*(**nonFlakeRequires).value, *(**nonFlakeRequires).pos);
- for (Attr attr : *(*(**nonFlakeRequires).value).attrs) {
+ if (std::optional<Attr *> nonFlakeInputs = vInfo.attrs->get(sNonFlakeInputs)) {
+ state.forceAttrs(*(**nonFlakeInputs).value, *(**nonFlakeInputs).pos);
+ for (Attr attr : *(*(**nonFlakeInputs).value).attrs) {
std::string myNonFlakeUri = state.forceStringNoCtx(*attr.value, *attr.pos);
FlakeRef nonFlakeRef = FlakeRef(myNonFlakeUri);
- flake.nonFlakeRequires.insert_or_assign(attr.name, nonFlakeRef);
+ flake.nonFlakeInputs.insert_or_assign(attr.name, nonFlakeRef);
}
}
- auto sProvides = state.symbols.create("provides");
+ auto sOutputs = state.symbols.create("outputs");
- if (auto provides = vInfo.attrs->get(sProvides)) {
- state.forceFunction(*(**provides).value, *(**provides).pos);
- flake.vProvides = (**provides).value;
+ if (auto outputs = vInfo.attrs->get(sOutputs)) {
+ state.forceFunction(*(**outputs).value, *(**outputs).pos);
+ flake.vOutputs = (**outputs).value;
} else
- throw Error("flake '%s' lacks attribute 'provides'", flakeRef);
+ throw Error("flake '%s' lacks attribute 'outputs'", flakeRef);
for (auto & attr : *vInfo.attrs) {
if (attr.name != sEpoch &&
attr.name != state.sName &&
attr.name != state.sDescription &&
- attr.name != sRequires &&
- attr.name != sNonFlakeRequires &&
- attr.name != sProvides)
+ attr.name != sInputs &&
+ attr.name != sNonFlakeInputs &&
+ attr.name != sOutputs)
throw Error("flake '%s' has an unsupported attribute '%s', at %s",
flakeRef, attr.name, *attr.pos);
}
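
For flake.nix authors, the net effect of this hunk is the set of accepted top-level attributes; the following sketch (not part of the patch) restates the same membership check over plain strings, with the allowed names taken directly from the conditional above and the sample names made up for illustration:

    // Sketch only: mirrors the attribute check getFlake() now performs.
    #include <set>
    #include <string>
    #include <iostream>

    int main()
    {
        // Accepted after this change; 'requires', 'nonFlakeRequires' and
        // 'provides' are no longer recognized.
        const std::set<std::string> allowed =
            { "epoch", "name", "description", "inputs", "nonFlakeInputs", "outputs" };

        // Hypothetical attribute names found in a flake.nix.
        for (const std::string name : { "description", "outputs", "provides" })
            if (!allowed.count(name))
                std::cout << "unsupported attribute '" << name << "'\n";
        return 0;
    }
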
@@ -436,7 +436,7 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake
ResolvedFlake deps(flake);
- for (auto & nonFlakeInfo : flake.nonFlakeRequires) {
+ for (auto & nonFlakeInfo : flake.nonFlakeInputs) {
FlakeRef ref = nonFlakeInfo.second;
auto i = lockFile.nonFlakeEntries.find(nonFlakeInfo.first);
if (i != lockFile.nonFlakeEntries.end()) {
@@ -451,7 +451,7 @@ ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flake
}
}
- for (auto newFlakeRef : flake.requires) {
+ for (auto newFlakeRef : flake.inputs) {
auto i = lockFile.flakeEntries.find(newFlakeRef);
if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible
ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, handleLockFile, entryToLockFile(i->second));
@@ -532,8 +532,8 @@ static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo
void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v)
{
- // Construct the resulting attrset '{description, provides,
- // ...}'. This attrset is passed lazily as an argument to 'provides'.
+ // Construct the resulting attrset '{description, outputs,
+ // ...}'. This attrset is passed lazily as an argument to 'outputs'.
state.mkAttrs(v, resFlake.flakeDeps.size() + resFlake.nonFlakeDeps.size() + 8);
@@ -558,8 +558,8 @@ void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v)
emitSourceInfoAttrs(state, resFlake.flake.sourceInfo, v);
- auto vProvides = state.allocAttr(v, state.symbols.create("provides"));
- mkApp(*vProvides, *resFlake.flake.vProvides, v);
+ auto vOutputs = state.allocAttr(v, state.symbols.create("outputs"));
+ mkApp(*vOutputs, *resFlake.flake.vOutputs, v);
v.attrs->push_back(Attr(state.symbols.create("self"), &v));
diff --git a/src/libexpr/primops/flake.hh b/src/libexpr/primops/flake.hh
index 340b97c65..82b0973f6 100644
--- a/src/libexpr/primops/flake.hh
+++ b/src/libexpr/primops/flake.hh
@@ -106,9 +106,9 @@ struct Flake
FlakeRef originalRef;
std::string description;
SourceInfo sourceInfo;
- std::vector<FlakeRef> requires;
- std::map<FlakeAlias, FlakeRef> nonFlakeRequires;
- Value * vProvides; // FIXME: gc
+ std::vector<FlakeRef> inputs;
+ std::map<FlakeAlias, FlakeRef> nonFlakeInputs;
+ Value * vOutputs; // FIXME: gc
unsigned int epoch;
Flake(const FlakeRef & origRef, const SourceInfo & sourceInfo)
diff --git a/src/nix/flake.cc b/src/nix/flake.cc
index 872ec2849..8d6716391 100644
--- a/src/nix/flake.cc
+++ b/src/nix/flake.cc
@@ -199,16 +199,16 @@ struct CmdFlakeUpdate : FlakeCommand
}
};
-static void enumerateProvides(EvalState & state, Value & vFlake,
+static void enumerateOutputs(EvalState & state, Value & vFlake,
std::function<void(const std::string & name, Value & vProvide)> callback)
{
state.forceAttrs(vFlake);
- auto vProvides = (*vFlake.attrs->get(state.symbols.create("provides")))->value;
+ auto vOutputs = (*vFlake.attrs->get(state.symbols.create("outputs")))->value;
- state.forceAttrs(*vProvides);
+ state.forceAttrs(*vOutputs);
- for (auto & attr : *vProvides->attrs)
+ for (auto & attr : *vOutputs->attrs)
callback(attr.name, *attr.value);
}
@@ -237,9 +237,9 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON
auto vFlake = state->allocValue();
flake::callFlake(*state, flake, *vFlake);
- auto provides = nlohmann::json::object();
+ auto outputs = nlohmann::json::object();
- enumerateProvides(*state,
+ enumerateOutputs(*state,
*vFlake,
[&](const std::string & name, Value & vProvide) {
auto provide = nlohmann::json::object();
@@ -250,10 +250,10 @@ struct CmdFlakeInfo : FlakeCommand, MixJSON
provide[aCheck.name] = nlohmann::json::object();
}
- provides[name] = provide;
+ outputs[name] = provide;
});
- json["provides"] = std::move(provides);
+ json["outputs"] = std::move(outputs);
std::cout << json.dump() << std::endl;
} else
@@ -298,7 +298,7 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON
// FIXME: check meta attributes
return drvInfo->queryDrvPath();
} catch (Error & e) {
- e.addPrefix(fmt("while checking flake attribute '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath));
+ e.addPrefix(fmt("while checking flake output attribute '" ANSI_BOLD "%s" ANSI_NORMAL "':\n", attrPath));
throw;
}
};
@@ -311,7 +311,7 @@ struct CmdFlakeCheck : FlakeCommand, MixJSON
auto vFlake = state->allocValue();
flake::callFlake(*state, flake, *vFlake);
- enumerateProvides(*state,
+ enumerateOutputs(*state,
*vFlake,
[&](const std::string & name, Value & vProvide) {
Activity act(*logger, lvlChatty, actUnknown,
diff --git a/src/nix/installables.cc b/src/nix/installables.cc
index ea12cd79c..fe89a6bb4 100644
--- a/src/nix/installables.cc
+++ b/src/nix/installables.cc
@@ -230,16 +230,16 @@ struct InstallableFlake : InstallableValue
makeFlakeClosureGCRoot(*state.store, flakeRef, resFlake);
- auto vProvides = (*vFlake->attrs->get(state.symbols.create("provides")))->value;
+ auto vOutputs = (*vFlake->attrs->get(state.symbols.create("outputs")))->value;
- state.forceValue(*vProvides);
+ state.forceValue(*vOutputs);
auto emptyArgs = state.allocBindings(0);
if (searchPackages) {
// As a convenience, look for the attribute in
- // 'provides.packages'.
- if (auto aPackages = *vProvides->attrs->get(state.symbols.create("packages"))) {
+ // 'outputs.packages'.
+ if (auto aPackages = *vOutputs->attrs->get(state.symbols.create("packages"))) {
try {
auto * v = findAlongAttrPath(state, *attrPaths.begin(), *emptyArgs, *aPackages->value);
state.forceValue(*v);
@@ -250,7 +250,7 @@ struct InstallableFlake : InstallableValue
// As a temporary hack until Nixpkgs is properly converted
// to provide a clean 'packages' set, look in 'legacyPackages'.
- if (auto aPackages = *vProvides->attrs->get(state.symbols.create("legacyPackages"))) {
+ if (auto aPackages = *vOutputs->attrs->get(state.symbols.create("legacyPackages"))) {
try {
auto * v = findAlongAttrPath(state, *attrPaths.begin(), *emptyArgs, *aPackages->value);
state.forceValue(*v);
@@ -260,10 +260,10 @@ struct InstallableFlake : InstallableValue
}
}
- // Otherwise, look for it in 'provides'.
+ // Otherwise, look for it in 'outputs'.
for (auto & attrPath : attrPaths) {
try {
- auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *vProvides);
+ auto * v = findAlongAttrPath(state, attrPath, *emptyArgs, *vOutputs);
state.forceValue(*v);
return v;
} catch (AttrPathNotFound & e) {