-rw-r--r-- | scripts/install-multi-user.sh       |  43
-rw-r--r-- | scripts/install-nix-from-closure.sh |   4
-rw-r--r-- | src/libexpr/primops.cc              |   2
-rw-r--r-- | src/libstore/binary-cache-store.cc  |   5
-rw-r--r-- | src/libstore/local-store.cc         |  14
-rw-r--r-- | src/libstore/local-store.hh         |  10
-rw-r--r-- | src/libstore/path-info.hh           | 111
-rw-r--r-- | src/libstore/s3-binary-cache-store.cc |   9
-rw-r--r-- | src/libstore/s3-binary-cache-store.hh |   1
-rw-r--r-- | src/libstore/store-api.cc           |  14
-rw-r--r-- | src/libstore/store-api.hh           |  96
-rw-r--r-- | src/libutil/util.cc                 |   1
-rw-r--r-- | src/nix/build.cc                    |   9
-rw-r--r-- | src/nix/bundle.cc                   | 129
-rw-r--r-- | src/nix/command.hh                  |   3
-rw-r--r-- | src/nix/flake.cc                    |  25
-rw-r--r-- | src/nix/installables.cc             |   4
-rw-r--r-- | tests/binary-cache.sh               |   8
-rw-r--r-- | tests/filter-source.sh              |  18
-rw-r--r-- | tests/nar-access.sh                 |  24
-rw-r--r-- | tests/path.nix                      |  14
21 files changed, 398 insertions, 146 deletions
diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh
index 00c9d540b..e5cc4d7ed 100644
--- a/scripts/install-multi-user.sh
+++ b/scripts/install-multi-user.sh
@@ -37,6 +37,8 @@ readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-
 readonly NIX_INSTALLED_NIX="@nix@"
 readonly NIX_INSTALLED_CACERT="@cacert@"
+#readonly NIX_INSTALLED_NIX="/nix/store/j8dbv5w6jl34caywh2ygdy88knx1mdf7-nix-2.3.6"
+#readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2"
 readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
 
 readonly ROOT_HOME=$(echo ~root)
@@ -69,9 +71,11 @@ uninstall_directions() {
     subheader "Uninstalling nix:"
     local step=0
 
-    if poly_service_installed_check; then
+    if [ -e /run/systemd/system ] && poly_service_installed_check; then
         step=$((step + 1))
         poly_service_uninstall_directions "$step"
+    else
+        step=$((step + 1))
     fi
 
     for profile_target in "${PROFILE_TARGETS[@]}"; do
@@ -250,20 +254,42 @@ function finish_success {
         echo "But fetching the nixpkgs channel failed. (Are you offline?)"
         echo "To try again later, run \"sudo -i nix-channel --update nixpkgs\"."
     fi
-    cat <<EOF
+
+    if [ -e /run/systemd/system ]; then
+        cat <<EOF
+
+Before Nix will work in your existing shells, you'll need to close
+them and open them again. Other than that, you should be ready to go.
+
+Try it! Open a new terminal, and type:
+
+  $ nix-shell -p nix-info --run "nix-info -m"
+
+Thank you for using this installer. If you have any feedback, don't
+hesitate:
+
+$(contactme)
+EOF
+    else
+        cat <<EOF
 
 Before Nix will work in your existing shells, you'll need to close
 them and open them again. Other than that, you should be ready to go.
 
 Try it! Open a new terminal, and type:
 
+  $ sudo nix-daemon
   $ nix-shell -p nix-info --run "nix-info -m"
 
+Additionally, you may want to add nix-daemon to your init-system.
+
 Thank you for using this installer. If you have any feedback, don't
 hesitate:
 
 $(contactme)
 EOF
+    fi
+
 }
 
 
@@ -664,12 +690,8 @@ main() {
         # shellcheck source=./install-darwin-multi-user.sh
         . "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"
     elif [ "$(uname -s)" = "Linux" ]; then
-        if [ -e /run/systemd/system ]; then
-            # shellcheck source=./install-systemd-multi-user.sh
-            . "$EXTRACTED_NIX_PATH/install-systemd-multi-user.sh"
-        else
-            failure "Sorry, the multi-user installation requires systemd on Linux (detected using /run/systemd/system)"
-        fi
+        # shellcheck source=./install-systemd-multi-user.sh
+        . "$EXTRACTED_NIX_PATH/install-systemd-multi-user.sh" # most of this works on non-systemd distros also
     else
         failure "Sorry, I don't know what to do on $(uname)"
     fi
@@ -702,7 +724,10 @@ main() {
 
     setup_default_profile
     place_nix_configuration
-    poly_configure_nix_daemon_service
+
+    if [ -e /run/systemd/system ]; then
+        poly_configure_nix_daemon_service
+    fi
 
     trap finish_success EXIT
 }
diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh
index 6fb0beb2b..6efd8af18 100644
--- a/scripts/install-nix-from-closure.sh
+++ b/scripts/install-nix-from-closure.sh
@@ -35,7 +35,7 @@ fi
 # Determine if we could use the multi-user installer or not
 if [ "$(uname -s)" = "Darwin" ]; then
     echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
-elif [ "$(uname -s)" = "Linux" ] && [ -e /run/systemd/system ]; then
+elif [ "$(uname -s)" = "Linux" ]; then
     echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
 fi
 
@@ -122,7 +122,7 @@ if [ "$(uname -s)" = "Darwin" ]; then
 fi
 
 if [ "$INSTALL_MODE" = "daemon" ]; then
-    printf '\e[1;31mSwitching to the Daemon-based Installer\e[0m\n'
+    printf '\e[1;31mSwitching to the Multi-user Installer\e[0m\n'
     exec "$self/install-multi-user"
     exit 0
 fi
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index a98cadc80..05d499d1f 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -1199,7 +1199,7 @@ static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value
     string name;
     Value * filterFun = nullptr;
     auto method = FileIngestionMethod::Recursive;
-    Hash expectedHash(htSHA256);
+    std::optional<Hash> expectedHash;
 
     for (auto & attr : *args[0]->attrs) {
         const string & n(attr.name);
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 30150eeba..9682db730 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -153,6 +153,8 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
 
     auto [fdTemp, fnTemp] = createTempFile();
 
+    AutoDelete autoDelete(fnTemp);
+
     auto now1 = std::chrono::steady_clock::now();
 
     /* Read the NAR simultaneously into a CompressionSink+FileSink (to
@@ -167,6 +169,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
         TeeSource teeSource(narSource, *compressionSink);
         narAccessor = makeNarAccessor(teeSource);
         compressionSink->finish();
+        fileSink.flush();
     }
 
     auto now2 = std::chrono::steady_clock::now();
@@ -280,7 +283,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource
     if (repair || !fileExists(narInfo->url)) {
         stats.narWrite++;
         upsertFile(narInfo->url,
-            std::make_shared<std::fstream>(fnTemp, std::ios_base::in),
+            std::make_shared<std::fstream>(fnTemp, std::ios_base::in | std::ios_base::binary),
             "application/x-nix-nar");
     } else
         stats.narWriteAverted++;
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 4c4d0bdc2..6b0548538 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -1038,20 +1038,6 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
 }
 
 
-StorePath LocalStore::addToStore(const string & name, const Path & _srcPath,
-    FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
-{
-    Path srcPath(absPath(_srcPath));
-    auto source = sinkToSource([&](Sink & sink) {
-        if (method == FileIngestionMethod::Recursive)
-            dumpPath(srcPath, sink, filter);
-        else
-            readFile(srcPath, sink);
-    });
-    return addToStoreFromDump(*source, name, method, hashAlgo, repair);
-}
-
-
 StorePath LocalStore::addToStoreFromDump(Source & source0, const string & name,
     FileIngestionMethod method, HashType hashAlgo, RepairFlag repair)
 {
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 5a87ec2f8..31e6587ac 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -145,16 +145,8 @@ public:
     void addToStore(const ValidPathInfo & info, Source & source,
         RepairFlag repair, CheckSigsFlag checkSigs) override;
 
-    StorePath addToStore(const string & name, const Path & srcPath,
-        FileIngestionMethod method, HashType hashAlgo,
-        PathFilter & filter, RepairFlag repair) override;
-
-    /* Like addToStore(), but the contents of the path are contained
-       in `dump', which is either a NAR serialisation (if recursive ==
-       true) or simply the contents of a regular file (if recursive ==
-       false). */
     StorePath addToStoreFromDump(Source & dump, const string & name,
-        FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override;
+        FileIngestionMethod method, HashType hashAlgo, RepairFlag repair) override;
 
     StorePath addTextToStore(const string & name, const string & s,
         const StorePathSet & references, RepairFlag repair) override;
diff --git a/src/libstore/path-info.hh b/src/libstore/path-info.hh
new file mode 100644
index 000000000..2a015ea3c
--- /dev/null
+++ b/src/libstore/path-info.hh
@@ -0,0 +1,111 @@
+#pragma once
+
+#include "path.hh"
+#include "hash.hh"
+#include "content-address.hh"
+
+#include <string>
+#include <optional>
+
+namespace nix {
+
+
+class Store;
+
+
+struct SubstitutablePathInfo
+{
+    std::optional<StorePath> deriver;
+    StorePathSet references;
+    uint64_t downloadSize; /* 0 = unknown or inapplicable */
+    uint64_t narSize; /* 0 = unknown */
+};
+
+typedef std::map<StorePath, SubstitutablePathInfo> SubstitutablePathInfos;
+
+
+struct ValidPathInfo
+{
+    StorePath path;
+    std::optional<StorePath> deriver;
+    // TODO document this
+    std::optional<Hash> narHash;
+    StorePathSet references;
+    time_t registrationTime = 0;
+    uint64_t narSize = 0; // 0 = unknown
+    uint64_t id; // internal use only
+
+    /* Whether the path is ultimately trusted, that is, it's a
+       derivation output that was built locally. */
+    bool ultimate = false;
+
+    StringSet sigs; // note: not necessarily verified
+
+    /* If non-empty, an assertion that the path is content-addressed,
+       i.e., that the store path is computed from a cryptographic hash
+       of the contents of the path, plus some other bits of data like
+       the "name" part of the path. Such a path doesn't need
+       signatures, since we don't have to trust anybody's claim that
+       the path is the output of a particular derivation. (In the
+       extensional store model, we have to trust that the *contents*
+       of an output path of a derivation were actually produced by
+       that derivation. In the intensional model, we have to trust
+       that a particular output path was produced by a derivation; the
+       path then implies the contents.)
+
+       Ideally, the content-addressability assertion would just be a Boolean,
+       and the store path would be computed from the name component, ‘narHash’
+       and ‘references’. However, we support many types of content addresses.
+    */
+    std::optional<ContentAddress> ca;
+
+    bool operator == (const ValidPathInfo & i) const
+    {
+        return
+            path == i.path
+            && narHash == i.narHash
+            && references == i.references;
+    }
+
+    /* Return a fingerprint of the store path to be used in binary
+       cache signatures. It contains the store path, the base-32
+       SHA-256 hash of the NAR serialisation of the path, the size of
+       the NAR, and the sorted references. The size field is strictly
+       speaking superfluous, but might prevent endless/excessive data
+       attacks. */
+    std::string fingerprint(const Store & store) const;
+
+    void sign(const Store & store, const SecretKey & secretKey);
+
+    /* Return true iff the path is verifiably content-addressed. */
+    bool isContentAddressed(const Store & store) const;
+
+    /* Functions to view references + hasSelfReference as one set, mainly for
+       compatibility's sake. */
+    StorePathSet referencesPossiblyToSelf() const;
+    void insertReferencePossiblyToSelf(StorePath && ref);
+    void setReferencesPossiblyToSelf(StorePathSet && refs);
+
+    static const size_t maxSigs = std::numeric_limits<size_t>::max();
+
+    /* Return the number of signatures on this .narinfo that were
+       produced by one of the specified keys, or maxSigs if the path
+       is content-addressed. */
+    size_t checkSignatures(const Store & store, const PublicKeys & publicKeys) const;
+
+    /* Verify a single signature. */
+    bool checkSignature(const Store & store, const PublicKeys & publicKeys, const std::string & sig) const;
+
+    Strings shortRefs() const;
+
+    ValidPathInfo(const ValidPathInfo & other) = default;
+
+    ValidPathInfo(StorePath && path) : path(std::move(path)) { };
+    ValidPathInfo(const StorePath & path) : path(path) { };
+
+    virtual ~ValidPathInfo() { }
+};
+
+typedef list<ValidPathInfo> ValidPathInfos;
+
+}
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 67935f3ba..a0a446bd3 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -266,6 +266,10 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         const std::string & mimeType,
         const std::string & contentEncoding)
     {
+        istream->seekg(0, istream->end);
+        auto size = istream->tellg();
+        istream->seekg(0, istream->beg);
+
         auto maxThreads = std::thread::hardware_concurrency();
 
         static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor>
@@ -343,10 +347,11 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
             std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1)
                 .count();
 
-        printInfo("uploaded 's3://%s/%s' in %d ms",
-            bucketName, path, duration);
+        printInfo("uploaded 's3://%s/%s' (%d bytes) in %d ms",
+            bucketName, path, size, duration);
 
         stats.putTimeMs += duration;
+        stats.putBytes += std::max(size, (decltype(size)) 0);
         stats.put++;
     }
 
diff --git a/src/libstore/s3-binary-cache-store.hh b/src/libstore/s3-binary-cache-store.hh
index b2b75d498..4d43fe4d2 100644
--- a/src/libstore/s3-binary-cache-store.hh
+++ b/src/libstore/s3-binary-cache-store.hh
@@ -19,6 +19,7 @@ public:
     struct Stats
     {
         std::atomic<uint64_t> put{0};
+        std::atomic<uint64_t> putBytes{0};
         std::atomic<uint64_t> putTimeMs{0};
         std::atomic<uint64_t> get{0};
         std::atomic<uint64_t> getBytes{0};
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 76324cf08..ea666f098 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -239,6 +239,20 @@ StorePath Store::computeStorePathForText(const string & name, const string & s,
 }
 
 
+StorePath Store::addToStore(const string & name, const Path & _srcPath,
+    FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
+{
+    Path srcPath(absPath(_srcPath));
+    auto source = sinkToSource([&](Sink & sink) {
+        if (method == FileIngestionMethod::Recursive)
+            dumpPath(srcPath, sink, filter);
+        else
+            readFile(srcPath, sink);
+    });
+    return addToStoreFromDump(*source, name, method, hashAlgo, repair);
+}
+
+
 /* The aim of this function is to compute in one pass the correct
    ValidPathInfo for the files that we are trying to add to the store.
    To accomplish that in one
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 49da19496..e94d975c5 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -10,6 +10,7 @@
 #include "globals.hh"
 #include "config.hh"
 #include "derivations.hh"
+#include "path-info.hh"
 
 #include <atomic>
 #include <limits>
@@ -101,95 +102,6 @@
 };
 
-
-struct SubstitutablePathInfo
-{
-    std::optional<StorePath> deriver;
-    StorePathSet references;
-    uint64_t downloadSize; /* 0 = unknown or inapplicable */
-    uint64_t narSize; /* 0 = unknown */
-};
-
-typedef std::map<StorePath, SubstitutablePathInfo> SubstitutablePathInfos;
-
-
-struct ValidPathInfo
-{
-    StorePath path;
-    std::optional<StorePath> deriver;
-    // TODO document this
-    std::optional<Hash> narHash;
-    StorePathSet references;
-    time_t registrationTime = 0;
-    uint64_t narSize = 0; // 0 = unknown
-    uint64_t id; // internal use only
-
-    /* Whether the path is ultimately trusted, that is, it's a
-       derivation output that was built locally. */
-    bool ultimate = false;
-
-    StringSet sigs; // note: not necessarily verified
-
-    /* If non-empty, an assertion that the path is content-addressed,
-       i.e., that the store path is computed from a cryptographic hash
-       of the contents of the path, plus some other bits of data like
-       the "name" part of the path. Such a path doesn't need
-       signatures, since we don't have to trust anybody's claim that
-       the path is the output of a particular derivation. (In the
-       extensional store model, we have to trust that the *contents*
-       of an output path of a derivation were actually produced by
-       that derivation. In the intensional model, we have to trust
-       that a particular output path was produced by a derivation; the
-       path then implies the contents.)
-
-       Ideally, the content-addressability assertion would just be a Boolean,
-       and the store path would be computed from the name component, ‘narHash’
-       and ‘references’. However, we support many types of content addresses.
-    */
-    std::optional<ContentAddress> ca;
-
-    bool operator == (const ValidPathInfo & i) const
-    {
-        return
-            path == i.path
-            && narHash == i.narHash
-            && references == i.references;
-    }
-
-    /* Return a fingerprint of the store path to be used in binary
-       cache signatures. It contains the store path, the base-32
-       SHA-256 hash of the NAR serialisation of the path, the size of
-       the NAR, and the sorted references. The size field is strictly
-       speaking superfluous, but might prevent endless/excessive data
-       attacks. */
-    std::string fingerprint(const Store & store) const;
-
-    void sign(const Store & store, const SecretKey & secretKey);
-
-    /* Return true iff the path is verifiably content-addressed. */
-    bool isContentAddressed(const Store & store) const;
-
-    static const size_t maxSigs = std::numeric_limits<size_t>::max();
-
-    /* Return the number of signatures on this .narinfo that were
-       produced by one of the specified keys, or maxSigs if the path
-       is content-addressed. */
-    size_t checkSignatures(const Store & store, const PublicKeys & publicKeys) const;
-
-    /* Verify a single signature. */
-    bool checkSignature(const Store & store, const PublicKeys & publicKeys, const std::string & sig) const;
-
-    Strings shortRefs() const;
-
-    ValidPathInfo(const ValidPathInfo & other) = default;
-
-    ValidPathInfo(StorePath && path) : path(std::move(path)) { };
-    ValidPathInfo(const StorePath & path) : path(path) { };
-
-    virtual ~ValidPathInfo() { }
-};
-
-typedef list<ValidPathInfo> ValidPathInfos;
-
-
 enum BuildMode { bmNormal, bmRepair, bmCheck };
 
 
@@ -456,7 +368,7 @@ public:
        libutil/archive.hh). */
     virtual StorePath addToStore(const string & name, const Path & srcPath,
         FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
-        PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) = 0;
+        PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair);
 
     /* Copy the contents of a path to the store and register the
        validity the resulting path, using a constant amount of
@@ -465,6 +377,10 @@ public:
         FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
         std::optional<Hash> expectedCAHash = {});
 
+    /* Like addToStore(), but the contents of the path are contained
+       in `dump', which is either a NAR serialisation (if recursive ==
+       true) or simply the contents of a regular file (if recursive ==
+       false). */
     // FIXME: remove?
     virtual StorePath addToStoreFromDump(Source & dump, const string & name,
         FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair)
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 690169184..c0b9698ee 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -494,6 +494,7 @@ std::pair<AutoCloseFD, Path> createTempFile(const Path & prefix)
 {
     Path tmpl(getEnv("TMPDIR").value_or("/tmp") + "/" + prefix + ".XXXXXX");
     // Strictly speaking, this is UB, but who cares...
+    // FIXME: use O_TMPFILE.
     AutoCloseFD fd(mkstemp((char *) tmpl.c_str()));
     if (!fd)
         throw SysError("creating temporary file '%s'", tmpl);
diff --git a/src/nix/build.cc b/src/nix/build.cc
index 0f7e0e123..613cc15eb 100644
--- a/src/nix/build.cc
+++ b/src/nix/build.cc
@@ -9,6 +9,7 @@ using namespace nix;
 struct CmdBuild : InstallablesCommand, MixDryRun, MixProfile
 {
     Path outLink = "result";
+    BuildMode buildMode = bmNormal;
 
     CmdBuild()
     {
@@ -26,6 +27,12 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixProfile
             .description = "do not create a symlink to the build result",
             .handler = {&outLink, Path("")},
         });
+
+        addFlag({
+            .longName = "rebuild",
+            .description = "rebuild an already built package and compare the result to the existing store paths",
+            .handler = {&buildMode, bmCheck},
+        });
     }
 
     std::string description() override
@@ -53,7 +60,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixProfile
 
     void run(ref<Store> store) override
     {
-        auto buildables = build(store, dryRun ? Realise::Nothing : Realise::Outputs, installables);
+        auto buildables = build(store, dryRun ? Realise::Nothing : Realise::Outputs, installables, buildMode);
 
         if (dryRun) return;
diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc
new file mode 100644
index 000000000..eb3339f5d
--- /dev/null
+++ b/src/nix/bundle.cc
@@ -0,0 +1,129 @@
+#include "command.hh"
+#include "common-args.hh"
+#include "shared.hh"
+#include "store-api.hh"
+#include "fs-accessor.hh"
+
+using namespace nix;
+
+struct CmdBundle : InstallableCommand
+{
+    std::string bundler = "github:matthewbauer/nix-bundle";
+    std::optional<Path> outLink;
+
+    CmdBundle()
+    {
+        addFlag({
+            .longName = "bundler",
+            .description = "use custom bundler",
+            .labels = {"flake-url"},
+            .handler = {&bundler},
+            .completer = {[&](size_t, std::string_view prefix) {
+                completeFlakeRef(getStore(), prefix);
+            }}
+        });
+
+        addFlag({
+            .longName = "out-link",
+            .shortName = 'o',
+            .description = "path of the symlink to the build result",
+            .labels = {"path"},
+            .handler = {&outLink},
+            .completer = completePath
+        });
+    }
+
+    std::string description() override
+    {
+        return "bundle an application so that it works outside of the Nix store";
+    }
+
+    Examples examples() override
+    {
+        return {
+            Example{
+                "To bundle Hello:",
+                "nix bundle hello"
+            },
+        };
+    }
+
+    Category category() override { return catSecondary; }
+
+    Strings getDefaultFlakeAttrPaths() override
+    {
+        Strings res{"defaultApp." + settings.thisSystem.get()};
+        for (auto & s : SourceExprCommand::getDefaultFlakeAttrPaths())
+            res.push_back(s);
+        return res;
+    }
+
+    Strings getDefaultFlakeAttrPathPrefixes() override
+    {
+        Strings res{"apps." + settings.thisSystem.get() + ".", "packages"};
+        for (auto & s : SourceExprCommand::getDefaultFlakeAttrPathPrefixes())
+            res.push_back(s);
+        return res;
+    }
+
+    void run(ref<Store> store) override
+    {
+        auto evalState = getEvalState();
+
+        auto app = installable->toApp(*evalState);
+        store->buildPaths(app.context);
+
+        auto [bundlerFlakeRef, bundlerName] = parseFlakeRefWithFragment(bundler, absPath("."));
+        const flake::LockFlags lockFlags{ .writeLockFile = false };
+        auto bundler = InstallableFlake(
+            evalState, std::move(bundlerFlakeRef),
+            Strings{bundlerName == "" ? "defaultBundler" : bundlerName},
+            Strings({"bundlers."}), lockFlags);
+
+        Value * arg = evalState->allocValue();
+        evalState->mkAttrs(*arg, 2);
+
+        PathSet context;
+        for (auto & i : app.context)
+            context.insert("=" + store->printStorePath(i.path));
+        mkString(*evalState->allocAttr(*arg, evalState->symbols.create("program")), app.program, context);
+
+        mkString(*evalState->allocAttr(*arg, evalState->symbols.create("system")), settings.thisSystem.get());
+
+        arg->attrs->sort();
+
+        auto vRes = evalState->allocValue();
+        evalState->callFunction(*bundler.toValue(*evalState).first, *arg, *vRes, noPos);
+
+        if (!evalState->isDerivation(*vRes))
+            throw Error("the bundler '%s' does not produce a derivation", bundler.what());
+
+        auto attr1 = vRes->attrs->find(evalState->sDrvPath);
+        if (!attr1)
+            throw Error("the bundler '%s' does not produce a derivation", bundler.what());
+
+        PathSet context2;
+        StorePath drvPath = store->parseStorePath(evalState->coerceToPath(*attr1->pos, *attr1->value, context2));
+
+        auto attr2 = vRes->attrs->find(evalState->sOutPath);
+        if (!attr2)
+            throw Error("the bundler '%s' does not produce a derivation", bundler.what());
+
+        StorePath outPath = store->parseStorePath(evalState->coerceToPath(*attr2->pos, *attr2->value, context2));
+
+        store->buildPaths({{drvPath}});
+
+        auto outPathS = store->printStorePath(outPath);
+
+        auto info = store->queryPathInfo(outPath);
+        if (!info->references.empty())
+            throw Error("'%s' has references; a bundler must not leave any references", outPathS);
+
+        if (!outLink)
+            outLink = baseNameOf(app.program);
+
+        store.dynamic_pointer_cast<LocalFSStore>()->addPermRoot(outPath, absPath(*outLink), true);
+    }
+};
+
+static auto r2 = registerCommand<CmdBundle>("bundle");
diff --git a/src/nix/command.hh b/src/nix/command.hh
index 856721ebf..bc46a2028 100644
--- a/src/nix/command.hh
+++ b/src/nix/command.hh
@@ -5,6 +5,7 @@
 #include "common-eval-args.hh"
 #include "path.hh"
 #include "flake/lockfile.hh"
+#include "store-api.hh"
 
 #include <optional>
 
@@ -185,7 +186,7 @@ static RegisterCommand registerCommand(const std::string & name)
 }
 
 Buildables build(ref<Store> store, Realise mode,
-    std::vector<std::shared_ptr<Installable>> installables);
+    std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode = bmNormal);
 
 std::set<StorePath> toStorePaths(ref<Store> store,
     Realise mode, OperateOn operateOn,
diff --git a/src/nix/flake.cc b/src/nix/flake.cc
index 027a9871e..80d8654bc 100644
--- a/src/nix/flake.cc
+++ b/src/nix/flake.cc
@@ -368,6 +368,21 @@ struct CmdFlakeCheck : FlakeCommand
         }
     };
 
+    auto checkBundler = [&](const std::string & attrPath, Value & v, const Pos & pos) {
+        try {
+            state->forceValue(v, pos);
+            if (v.type != tLambda)
+                throw Error("bundler must be a function");
+            if (!v.lambda.fun->formals ||
+                v.lambda.fun->formals->argNames.find(state->symbols.create("program")) == v.lambda.fun->formals->argNames.end() ||
+                v.lambda.fun->formals->argNames.find(state->symbols.create("system")) == v.lambda.fun->formals->argNames.end())
+                throw Error("bundler must take formal arguments 'program' and 'system'");
+        } catch (Error & e) {
+            e.addTrace(pos, hintfmt("while checking the template '%s'", attrPath));
+            throw;
+        }
+    };
+
     {
         Activity act(*logger, lvlInfo, actUnknown, "evaluating flake");
 
@@ -490,6 +505,16 @@ struct CmdFlakeCheck : FlakeCommand
                             *attr.value, *attr.pos);
                 }
 
+                else if (name == "defaultBundler")
+                    checkBundler(name, vOutput, pos);
+
+                else if (name == "bundlers") {
+                    state->forceAttrs(vOutput, pos);
+                    for (auto & attr : *vOutput.attrs)
+                        checkBundler(fmt("%s.%s", name, attr.name),
+                            *attr.value, *attr.pos);
+                }
+
                 else
                     warn("unknown flake output '%s'", name);
 
diff --git a/src/nix/installables.cc b/src/nix/installables.cc
index a17477c8a..59b52ce95 100644
--- a/src/nix/installables.cc
+++ b/src/nix/installables.cc
@@ -645,7 +645,7 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
 }
 
 Buildables build(ref<Store> store, Realise mode,
-    std::vector<std::shared_ptr<Installable>> installables)
+    std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode)
 {
     if (mode == Realise::Nothing)
         settings.readOnlyMode = true;
@@ -671,7 +671,7 @@ Buildables build(ref<Store> store, Realise mode,
     if (mode == Realise::Nothing)
         printMissing(store, pathsToBuild, lvlError);
     else if (mode == Realise::Outputs)
-        store->buildPaths(pathsToBuild);
+        store->buildPaths(pathsToBuild, bMode);
 
     return buildables;
 }
diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh
index 40f1a4f76..b05a7d167 100644
--- a/tests/binary-cache.sh
+++ b/tests/binary-cache.sh
@@ -218,7 +218,9 @@ outPath=$(nix-build --no-out-link -E '
 
 nix copy --to file://$cacheDir?write-nar-listing=1 $outPath
 
-[[ $(cat $cacheDir/$(basename $outPath).ls) = '{"version":1,"root":{"type":"directory","entries":{"bar":{"type":"regular","size":4,"narOffset":232},"link":{"type":"symlink","target":"xyzzy"}}}}' ]]
+diff -u \
+    <(jq -S < $cacheDir/$(basename $outPath).ls) \
+    <(echo '{"version":1,"root":{"type":"directory","entries":{"bar":{"type":"regular","size":4,"narOffset":232},"link":{"type":"symlink","target":"xyzzy"}}}}' | jq -S)
 
 # Test debug info index generation.
@@ -234,4 +236,6 @@ outPath=$(nix-build --no-out-link -E '
 
 nix copy --to "file://$cacheDir?index-debug-info=1&compression=none" $outPath
 
-[[ $(cat $cacheDir/debuginfo/02623eda209c26a59b1a8638ff7752f6b945c26b.debug) = '{"archive":"../nar/100vxs724qr46phz8m24iswmg9p3785hsyagz0kchf6q6gf06sw6.nar","member":"lib/debug/.build-id/02/623eda209c26a59b1a8638ff7752f6b945c26b.debug"}' ]]
+diff -u \
+    <(cat $cacheDir/debuginfo/02623eda209c26a59b1a8638ff7752f6b945c26b.debug | jq -S) \
+    <(echo '{"archive":"../nar/100vxs724qr46phz8m24iswmg9p3785hsyagz0kchf6q6gf06sw6.nar","member":"lib/debug/.build-id/02/623eda209c26a59b1a8638ff7752f6b945c26b.debug"}' | jq -S)
diff --git a/tests/filter-source.sh b/tests/filter-source.sh
index 1f8dceee5..ba34d2eac 100644
--- a/tests/filter-source.sh
+++ b/tests/filter-source.sh
@@ -10,10 +10,16 @@ touch $TEST_ROOT/filterin/bak
 touch $TEST_ROOT/filterin/bla.c.bak
 ln -s xyzzy $TEST_ROOT/filterin/link
 
-nix-build ./filter-source.nix -o $TEST_ROOT/filterout
+checkFilter() {
+    test ! -e $1/foo/bar
+    test -e $1/xyzzy
+    test -e $1/bak
+    test ! -e $1/bla.c.bak
+    test ! -L $1/link
+}
 
-test ! -e $TEST_ROOT/filterout/foo/bar
-test -e $TEST_ROOT/filterout/xyzzy
-test -e $TEST_ROOT/filterout/bak
-test ! -e $TEST_ROOT/filterout/bla.c.bak
-test ! -L $TEST_ROOT/filterout/link
+nix-build ./filter-source.nix -o $TEST_ROOT/filterout1
+checkFilter $TEST_ROOT/filterout1
+
+nix-build ./path.nix -o $TEST_ROOT/filterout2
+checkFilter $TEST_ROOT/filterout2
diff --git a/tests/nar-access.sh b/tests/nar-access.sh
index 553d6ca89..88b997ca6 100644
--- a/tests/nar-access.sh
+++ b/tests/nar-access.sh
@@ -26,12 +26,24 @@ nix cat-store $storePath/foo/baz > baz.cat-nar
 diff -u baz.cat-nar $storePath/foo/baz
 
 # Test --json.
-[[ $(nix ls-nar --json $narFile /) = '{"type":"directory","entries":{"foo":{},"foo-x":{},"qux":{},"zyx":{}}}' ]]
-[[ $(nix ls-nar --json -R $narFile /foo) = '{"type":"directory","entries":{"bar":{"type":"regular","size":0,"narOffset":368},"baz":{"type":"regular","size":0,"narOffset":552},"data":{"type":"regular","size":58,"narOffset":736}}}' ]]
-[[ $(nix ls-nar --json -R $narFile /foo/bar) = '{"type":"regular","size":0,"narOffset":368}' ]]
-[[ $(nix ls-store --json $storePath) = '{"type":"directory","entries":{"foo":{},"foo-x":{},"qux":{},"zyx":{}}}' ]]
-[[ $(nix ls-store --json -R $storePath/foo) = '{"type":"directory","entries":{"bar":{"type":"regular","size":0},"baz":{"type":"regular","size":0},"data":{"type":"regular","size":58}}}' ]]
-[[ $(nix ls-store --json -R $storePath/foo/bar) = '{"type":"regular","size":0}' ]]
+diff -u \
+    <(nix ls-nar --json $narFile / | jq -S) \
+    <(echo '{"type":"directory","entries":{"foo":{},"foo-x":{},"qux":{},"zyx":{}}}' | jq -S)
+diff -u \
+    <(nix ls-nar --json -R $narFile /foo | jq -S) \
+    <(echo '{"type":"directory","entries":{"bar":{"type":"regular","size":0,"narOffset":368},"baz":{"type":"regular","size":0,"narOffset":552},"data":{"type":"regular","size":58,"narOffset":736}}}' | jq -S)
+diff -u \
+    <(nix ls-nar --json -R $narFile /foo/bar | jq -S) \
+    <(echo '{"type":"regular","size":0,"narOffset":368}' | jq -S)
+diff -u \
+    <(nix ls-store --json $storePath | jq -S) \
+    <(echo '{"type":"directory","entries":{"foo":{},"foo-x":{},"qux":{},"zyx":{}}}' | jq -S)
+diff -u \
+    <(nix ls-store --json -R $storePath/foo | jq -S) \
+    <(echo '{"type":"directory","entries":{"bar":{"type":"regular","size":0},"baz":{"type":"regular","size":0},"data":{"type":"regular","size":58}}}' | jq -S)
+diff -u \
+    <(nix ls-store --json -R $storePath/foo/bar| jq -S) \
+    <(echo '{"type":"regular","size":0}' | jq -S)
 
 # Test missing files.
 nix ls-store --json -R $storePath/xyzzy 2>&1 | grep 'does not exist in NAR'
diff --git a/tests/path.nix b/tests/path.nix
new file mode 100644
index 000000000..883c3c41b
--- /dev/null
+++ b/tests/path.nix
@@ -0,0 +1,14 @@
+with import ./config.nix;
+
+mkDerivation {
+  name = "filter";
+  builder = builtins.toFile "builder" "ln -s $input $out";
+  input =
+    builtins.path {
+      path = ((builtins.getEnv "TEST_ROOT") + "/filterin");
+      filter = path: type:
+        type != "symlink"
+        && baseNameOf path != "foo"
+        && !((import ./lang/lib.nix).hasSuffix ".bak" (baseNameOf path));
+    };
+}
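
A quick usage sketch for the new `nix bundle' command added in src/nix/bundle.cc. The 'hello' installable mirrors the Examples block in that file; any other flake app should work the same way, and the --bundler and -o/--out-link flags are the ones defined above:

  # Bundle the 'hello' app into a self-contained executable, using the
  # default bundler (github:matthewbauer/nix-bundle).
  $ nix bundle hello

  # Pick the bundler flake explicitly and choose where the result symlink goes.
  $ nix bundle hello --bundler github:matthewbauer/nix-bundle -o hello-bundle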
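Similarly, the new `--rebuild' flag of `nix build' (src/nix/build.cc) sets the build mode to bmCheck, i.e. it rebuilds a path that is already valid and compares the result against the existing store path. A sketch, with 'nixpkgs#hello' as an illustrative installable only:

  # Rebuild an already-built package; a mismatch with the existing output
  # is reported, which makes this useful for reproducibility checks.
  $ nix build nixpkgs#hello --rebuild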
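The test changes above replace literal string comparisons of JSON output with `diff -u' over `jq -S', so the checks no longer depend on the key order Nix happens to emit. The same pattern in isolation (the file name and inline JSON here are made up for illustration):

  # jq -S sorts object keys, so two JSON documents that differ only in key
  # order compare equal; diff -u prints a readable report when they differ.
  $ diff -u <(jq -S < actual.json) <(echo '{"b":2,"a":1}' | jq -S)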