diff options
-rw-r--r-- | doc/manual/src/contributing/hacking.md | 221 | ||||
-rw-r--r-- | flake.nix | 22 | ||||
-rw-r--r-- | meson.build | 42 | ||||
-rw-r--r-- | package.nix | 45 | ||||
-rw-r--r-- | src/libfetchers/github.cc | 20 | ||||
-rw-r--r-- | src/libstore/builtins/fetchurl.cc | 1 | ||||
-rw-r--r-- | src/libstore/filetransfer.cc | 161 | ||||
-rw-r--r-- | src/libstore/filetransfer.hh | 1 | ||||
-rw-r--r-- | src/libstore/gc-store.hh | 7 | ||||
-rw-r--r-- | src/libstore/gc.cc | 111 | ||||
-rw-r--r-- | src/libstore/local-store.cc | 2 | ||||
-rw-r--r-- | src/libstore/local-store.hh | 29 | ||||
-rw-r--r-- | src/libstore/local.mk | 5 | ||||
-rw-r--r-- | src/libstore/meson.build | 11 | ||||
-rw-r--r-- | src/libstore/parsed-derivations.cc | 2 | ||||
-rw-r--r-- | src/libstore/platform.cc | 18 | ||||
-rw-r--r-- | src/libstore/platform/fallback.cc | 5 | ||||
-rw-r--r-- | src/libstore/platform/fallback.hh | 31 | ||||
-rw-r--r-- | src/libstore/platform/linux.cc | 123 | ||||
-rw-r--r-- | src/libstore/platform/linux.hh | 35 | ||||
-rw-r--r-- | src/libstore/store-api.cc | 12 | ||||
-rw-r--r-- | src/nix/prefetch.cc | 1 | ||||
-rw-r--r-- | tests/nixos/github-flakes.nix | 7 | ||||
-rw-r--r-- | tests/nixos/tarball-flakes.nix | 13 |
24 files changed, 527 insertions, 398 deletions
diff --git a/doc/manual/src/contributing/hacking.md b/doc/manual/src/contributing/hacking.md index 133a61b5c..a7d707438 100644 --- a/doc/manual/src/contributing/hacking.md +++ b/doc/manual/src/contributing/hacking.md @@ -1,113 +1,138 @@ # Hacking -This section provides some notes on how to hack on Nix. To get the -latest version of Nix from GitHub: +This section provides some notes on how to hack on Nix. To get the latest version of Lix from Forgejo: ```console -$ git clone https://github.com/NixOS/nix.git -$ cd nix +$ git clone https://git.lix.systems/lix-project/lix +$ cd lix ``` -The following instructions assume you already have some version of Nix installed locally, so that you can use it to set up the development environment. If you don't have it installed, follow the [installation instructions]. +The following instructions assume you already have some version of Nix or Lix installed locally, so that you can use it to set up the development environment. If you don't have it installed, follow the [installation instructions]. [installation instructions]: ../installation/installation.md -## Building Nix with flakes +## Building Lix in a development shell -This section assumes you are using Nix with the [`flakes`] and [`nix-command`] experimental features enabled. -See the [Building Nix](#building-nix) section for equivalent instructions using stable Nix interfaces. 
+### Setting up the development shell -[`flakes`]: @docroot@/contributing/experimental-features.md#xp-feature-flakes -[`nix-command`]: @docroot@/contributing/experimental-features.md#xp-nix-command - -To build all dependencies and start a shell in which all environment variables are set up so that those dependencies can be found: +If you are using Lix or Nix with the [`flakes`] and [`nix-command`] experimental features enabled, the following command will build all dependencies and start a shell in which all environment variables are setup for those dependencies to be found: -```console +```bash $ nix develop ``` -This shell also adds `./outputs/bin/nix` to your `$PATH` so you can run `nix` immediately after building it. +That will use the default stdenv for your system. To get a shell with one of the other [supported compilation environments](#compilation-environments), specify its attribute name after a hash (which you may need to quote, depending on your shell): -To get a shell with one of the other [supported compilation environments](#compilation-environments): +```bash +$ nix develop ".#native-clangStdenvPackages" +``` -```console -$ nix develop .#native-clangStdenvPackages +For classic Nix, use: + +```bash +$ nix-shell -A native-clangStdenvPackages ``` -> **Note** -> -> Use `ccacheStdenv` to drastically improve rebuild time. -> By default, [ccache](https://ccache.dev) keeps artifacts in `~/.cache/ccache/`. 
+[`flakes`]: @docroot@/contributing/experimental-features.md#xp-feature-flakes +[`nix-command`]: @docroot@/contributing/experimental-features.md#xp-nix-command -To build Nix itself in this shell: -```console -[nix-shell]$ autoreconfPhase -[nix-shell]$ configurePhase -[nix-shell]$ make -j $NIX_BUILD_CORES +### Building from the development shell + +As always you may run [stdenv's phases by name](https://nixos.org/manual/nixpkgs/unstable/#sec-building-stdenv-package-in-nix-shell), e.g.: + +```bash +$ configurePhase +$ buildPhase +$ checkPhase +$ installPhase +$ installCheckPhase ``` -To install it in `$(pwd)/outputs` and test it: +To build manually, however, use the following: -```console -[nix-shell]$ make install -[nix-shell]$ make installcheck -j $NIX_BUILD_CORES -[nix-shell]$ nix --version -nix (Nix) 2.12 +```bash +$ meson setup ./build "--prefix=$out" $mesonFlags ``` -To build a release version of Nix for the current operating system and CPU architecture: +(A simple `meson setup ./build` will also build, but will do a different thing, not having the settings from package.nix applied). -```console -$ nix build +```bash +$ meson compile -C build +$ meson test -C build --suite=check +$ meson install -C build +$ meson test -C build --suite=installcheck ``` -You can also build Nix for one of the [supported platforms](#platforms). +(Check and installcheck may both be done after install, allowing you to omit the --suite argument entirely, but this is the order package.nix runs them in.) -## Building Nix +This will install Lix to `$PWD/outputs`, the `/bin` of which is prepended to PATH in the development shells. -To build all dependencies and start a shell in which all environment variables are set up so that those dependencies can be found: +If the tests fail and Meson helpfully has no output for why, use the `--print-error-logs` option to `meson test`. 
-```console -$ nix-shell +If you change a setting in the buildsystem (i.e., any of the `meson.build` files), the Meson configuration will in most cases be regenerated automatically just before compiling. +In some cases, however, such as trying to build a specific target whose name is new to the buildsystem (e.g. `meson compile -C build src/libmelt/libmelt.dylib`, when `libmelt.dylib` did not exist as a target the last time the buildsystem was generated), you need to reconfigure explicitly, which applies the new settings while keeping the existing options and recompiles only what is affected by the changes: + +```bash +$ meson setup --reconfigure build ``` -To get a shell with one of the other [supported compilation environments](#compilation-environments): +Note that changes to the default values in `meson.options` or in the `default_options :` argument to `project()` are **not** propagated with `--reconfigure`. -```console -$ nix-shell --attr devShells.x86_64-linux.native-clangStdenvPackages +If you want a totally clean build, you can use: + +```bash +$ meson setup --wipe build ``` -> **Note** -> -> You can use `native-ccacheStdenvPackages` to drastically improve rebuild time. -> By default, [ccache](https://ccache.dev) keeps artifacts in `~/.cache/ccache/`. +That will work regardless of whether `./build` exists or not. 
-To build Nix itself in this shell: +Specific, named targets may be addressed in `meson compile -C build <target>`, with the "target ID", if there is one, which is the first string argument passed to target functions that have one, and unrelated to the variable name, e.g.: -```console -[nix-shell]$ autoreconfPhase -[nix-shell]$ ./configure $configureFlags --prefix=$(pwd)/outputs/out -[nix-shell]$ make -j $NIX_BUILD_CORES +```meson libexpr_dylib = library('nixexpr', …) ``` -To install it in `$(pwd)/outputs` and test it: +can be addressed with: -```console -[nix-shell]$ make install -[nix-shell]$ make installcheck -j $NIX_BUILD_CORES -[nix-shell]$ ./outputs/out/bin/nix --version -nix (Nix) 2.12 +```bash +$ meson compile -C build nixexpr +``` + +All targets may be addressed as their output, relative to the build directory, e.g.: + +```bash +$ meson compile -C build src/libexpr/libnixexpr.so +``` +But Meson does not consider intermediate files like object files to be targets. +To build a specific object file, use Ninja directly and specify the output file relative to the build directory: + +```bash +$ ninja -C build src/libexpr/libnixexpr.so.p/nixexpr.cc.o +``` + +To inspect the canonical source of truth on what the state of the buildsystem configuration is, use: + +```bash +$ meson introspect +``` + +## Building Lix outside of development shells + To build a release version of Nix for the current operating system and CPU architecture: ```console -$ nix-build +$ nix build ``` You can also build Nix for one of the [supported platforms](#platforms). +> **Note** +> +> You can use `native-ccacheStdenvPackages` to drastically improve rebuild time. +> By default, [ccache](https://ccache.dev) keeps artifacts in `~/.cache/ccache/`. 
+ ## Platforms Nix can be built for various platforms, as specified in [`flake.nix`]: @@ -148,55 +173,38 @@ Add more [system types](#system-type) to `crossSystems` in `flake.nix` to bootst ### Building for multiple platforms at once -It is useful to perform multiple cross and native builds on the same source tree, -for example to ensure that better support for one platform doesn't break the build for another. -In order to facilitate this, Nix has some support for being built out of tree – that is, placing build artefacts in a different directory than the source code: - -1. Create a directory for the build, e.g. - - ```bash - mkdir build - ``` - -2. Run the configure script from that directory, e.g. +It is useful to perform multiple cross and native builds on the same source tree, for example to ensure that better support for one platform doesn't break the build for another. +As Lix now uses Meson, out-of-tree builds are supported first class. In the invocation - ```bash - cd build - ../configure <configure flags> - ``` +```bash +$ meson setup build +``` -3. Run make from the source directory, but with the build directory specified, e.g. +the argument after `setup` specifies the directory for this build, conventionally simply called "build", but it may be called anything, and you may run `meson setup <somedir>` for as many different directories as you want. 
+To compile the configuration for a given build directory, pass that build directory to the `-C` argument of `meson compile`: - ```bash - make builddir=build <make flags> - ``` +```bash +$ meson setup some-custom-build +$ meson compile -C some-custom-build +``` ## System type -Nix uses a string with he following format to identify the *system type* or *platform* it runs on: +Lix uses a string with the following format to identify the *system type* or *platform* it runs on: ``` <cpu>-<os>[-<abi>] ``` -It is set when Nix is compiled for the given system, and based on the output of [`config.guess`](https://github.com/nixos/nix/blob/master/config/config.guess) ([upstream](https://git.savannah.gnu.org/cgit/config.git/tree/config.guess)): - -``` -<cpu>-<vendor>-<os>[<version>][-<abi>] -``` - -When Nix is built such that `./configure` is passed any of the `--host`, `--build`, `--target` options, the value is based on the output of [`config.sub`](https://github.com/nixos/nix/blob/master/config/config.sub) ([upstream](https://git.savannah.gnu.org/cgit/config.git/tree/config.sub)): - -``` -<cpu>-<vendor>[-<kernel>]-<os> -``` +It is set when Nix is compiled for the given system, and determined by [Meson's `host_machine.cpu_family()` and `host_machine.system()` values](https://mesonbuild.com/Reference-manual_builtin_host_machine.html). 
-For historic reasons and backward-compatibility, some CPU and OS identifiers are translated from the GNU Autotools naming convention in [`configure.ac`](https://github.com/nixos/nix/blob/master/configure.ac) as follows: +For historic reasons and backward-compatibility, some CPU and OS identifiers are translated from the GNU Autotools naming convention in [`meson.build`](https://git.lix.systems/lix-project/lix/blob/main/meson.build) as follows: -| `config.guess` | Nix | +| `host_machine.cpu_family()` | Nix | |----------------------------|---------------------| -| `amd64` | `x86_64` | -| `i*86` | `i686` | +| `x86` | `i686` | +| `i686` | `i686` | +| `i386` | `i686` | | `arm6` | `arm6l` | | `arm7` | `arm7l` | | `linux-gnu*` | `linux` | @@ -229,13 +237,14 @@ You can use any of the other supported environments in place of `nix-ccacheStden ## Editor integration -The `clangd` LSP server is installed by default on the `clang`-based `devShell`s. +The `clangd` LSP server is installed by default in each development shell. See [supported compilation environments](#compilation-environments) and instructions how to set up a shell [with flakes](#nix-with-flakes) or in [classic Nix](#classic-nix). -To use the LSP with your editor, you first need to [set up `clangd`](https://clangd.llvm.org/installation#project-setup) by running: +Clangd requires a compilation database, which Meson generates by default. After running `meson setup`, there will already be a `compile_commands.json` file in the build directory. 
+Some editor configurations may prefer that file to be in the root directory, which you can accomplish with a simple: -```console -make clean && bear -- make -j$NIX_BUILD_CORES install +```bash +$ ln -sf ./build/compile_commands.json ./compile_commands.json ``` Configure your editor to use the `clangd` from the shell, either by running it inside the development shell, or by using [nix-direnv](https://github.com/nix-community/nix-direnv) and [the appropriate editor plugin](https://github.com/direnv/direnv/wiki#editor-integration). @@ -253,15 +262,7 @@ This happens late in the process, so `nix build` is not suitable for iterating. To build the manual incrementally, run: ```console -make html -j $NIX_BUILD_CORES -``` - -In order to reflect changes to the [Makefile], clear all generated files before re-building: - -[Makefile]: https://github.com/NixOS/nix/blob/master/doc/manual/local.mk - -```console -rm $(git ls-files doc/manual/ -o | grep -F '.md') && rmdir doc/manual/src/command-ref/new-cli && make html -j $NIX_BUILD_CORES +meson compile -C build manual ``` [`mdbook-linkcheck`] does not implement checking [URI fragments] yet. @@ -292,9 +293,9 @@ can also build and view it yourself: or inside a `nix develop` shell by running: -``` -# make internal-api-html -# xdg-open ./outputs/doc/share/doc/nix/internal-api/html/index.html +```bash +$ meson compile -C build internal-api-docs +$ xdg-open ./outputs/doc/share/doc/nix/internal-api/html/index.html ``` ## Coverage analysis @@ -196,24 +196,6 @@ } ); - # FIXME(Qyriad): remove this when the migration to Meson has been completed. - # NOTE: mesonBuildClang depends on mesonBuild depends on build to avoid OOMs - # on aarch64 builders caused by too many parallel compiler/linker processes. 
- mesonBuild = forAllSystems ( - system: - (self.packages.${system}.nix.override { buildWithMeson = true; }).overrideAttrs (prev: { - buildInputs = prev.buildInputs ++ [ self.packages.${system}.nix ]; - }) - ); - mesonBuildClang = forAllSystems ( - system: - (nixpkgsFor.${system}.stdenvs.clangStdenvPackages.nix.override { buildWithMeson = true; }) - .overrideAttrs - (prev: { - buildInputs = prev.buildInputs ++ [ self.hydraJobs.mesonBuild.${system} ]; - }) - ); - # Perl bindings for various platforms. perlBindings = forAllSystems (system: nixpkgsFor.${system}.native.nix.perl-bindings); @@ -237,7 +219,6 @@ inherit (pkgs) build-release-notes; internalApiDocs = true; busybox-sandbox-shell = pkgs.busybox-sandbox-shell; - buildWithMeson = true; }; in nix.overrideAttrs (prev: { @@ -367,9 +348,6 @@ checks = forAllSystems ( system: { - # FIXME(Qyriad): remove this when the migration to Meson has been completed. - mesonBuild = self.hydraJobs.mesonBuild.${system}; - mesonBuildClang = self.hydraJobs.mesonBuildClang.${system}; binaryTarball = self.hydraJobs.binaryTarball.${system}; perlBindings = self.hydraJobs.perlBindings.${system}; nixpkgsLibTests = self.hydraJobs.tests.nixpkgsLibTests.${system}; diff --git a/meson.build b/meson.build index 2128ec6e2..d40a9029a 100644 --- a/meson.build +++ b/meson.build @@ -85,8 +85,8 @@ endif enable_docs = get_option('enable-docs') enable_internal_api_docs = get_option('internal-api-docs') -doxygen = find_program('doxygen', required : enable_internal_api_docs) -bash = find_program('bash') +doxygen = find_program('doxygen', required : enable_internal_api_docs, native : true) +bash = find_program('bash', native : true) rapidcheck_meson = dependency('rapidcheck', required : enable_internal_api_docs) @@ -114,6 +114,25 @@ endif cxx = meson.get_compiler('cpp') +# Translate some historical and Mesony CPU names to Lixy CPU names. 
+# FIXME(Qyriad): the 32-bit x86 code is not tested right now, because cross compilation for Lix +# to those architectures is currently broken for other reasons, namely: +# - nixos-23.11's x86_64-linux -> i686-linux glibc does not build (also applies to cppnix) +# - nixpkgs-unstable (as of 2024/04)'s boehmgc is not compatible with our patches +# It's also broken in cppnix, though. +host_cpu = host_machine.cpu_family() +if host_cpu in ['x86', 'i686', 'i386'] + # Meson considers 32-bit x86 CPUs to be "x86", and does not consider 64-bit + # x86 CPUs to be "x86" (instead using "x86_64", which needs no translation). + host_cpu = 'i686' +elif host_cpu == 'amd64' + # This should not be needed under normal circumstances, but someone could pass a --cross-file + # that sets the cpu_family to this. + host_cpu = 'x86_64' +elif host_cpu in ['armv6', 'armv7'] + host_cpu += 'l' +endif + host_system = host_machine.cpu_family() + '-' + host_machine.system() message('canonical Nix system name:', host_system) @@ -181,6 +200,7 @@ openssl = dependency('libcrypto', 'openssl', required : true) deps += openssl aws_sdk = dependency('aws-cpp-sdk-core', required : false) +aws_sdk_transfer = dependency('aws-cpp-sdk-transfer', required : aws_sdk.found()) if aws_sdk.found() # The AWS pkg-config adds -std=c++11. 
# https://github.com/aws/aws-sdk-cpp/issues/2673 @@ -198,7 +218,7 @@ if aws_sdk.found() 'AWS_VERSION_MINOR': s[1].to_int(), 'AWS_VERSION_PATCH': s[2].to_int(), } - aws_sdk_transfer = dependency('aws-cpp-sdk-transfer', required : true).partial_dependency( + aws_sdk_transfer = aws_sdk_transfer.partial_dependency( compile_args : false, includes : true, link_args : true, @@ -255,7 +275,7 @@ gtest = [ ] deps += gtest -toml11 = dependency('toml11', version : '>=3.7.0', required : true) +toml11 = dependency('toml11', version : '>=3.7.0', required : true, method : 'cmake') deps += toml11 nlohmann_json = dependency('nlohmann_json', required : true) @@ -272,17 +292,17 @@ deps += lix_doc # # Build-time tools # -coreutils = find_program('coreutils') -dot = find_program('dot', required : false) +coreutils = find_program('coreutils', native : true) +dot = find_program('dot', required : false, native : true) pymod = import('python') python = pymod.find_installation('python3') if enable_docs - mdbook = find_program('mdbook') + mdbook = find_program('mdbook', native : true) endif # Used to workaround https://github.com/mesonbuild/meson/issues/2320 in src/nix/meson.build. -installcmd = find_program('install') +installcmd = find_program('install', native : true) enable_embedded_sandbox_shell = get_option('enable-embedded-sandbox-shell') if enable_embedded_sandbox_shell @@ -307,9 +327,9 @@ endif # FIXME(Qyriad): the autoconf system checks that busybox has the "standalone" feature, indicating # that busybox sh won't run busybox applets as builtins (which would break our sandbox). -lsof = find_program('lsof') -bison = find_program('bison') -flex = find_program('flex') +lsof = find_program('lsof', native : true) +bison = find_program('bison', native : true) +flex = find_program('flex', native : true) # This is how Nix does generated headers... # other instances of header generation use a very similar command. 
diff --git a/package.nix b/package.nix index 9a2e08038..c9cc17c29 100644 --- a/package.nix +++ b/package.nix @@ -62,7 +62,7 @@ # FIXME(Qyriad): build Lix using Meson instead of autoconf and make. # This flag will be removed when the migration to Meson is complete. - buildWithMeson ? false, + buildWithMeson ? true, # Not a real argument, just the only way to approximate let-binding some # stuff for argument defaults. @@ -100,6 +100,34 @@ let testConfigureFlags = [ "RAPIDCHECK_HEADERS=${lib.getDev rapidcheck}/extras/gtest/include" ]; + # Reimplementation of Nixpkgs' Meson cross file, with some additions to make + # it actually work. + mesonCrossFile = + let + cpuFamily = + platform: + with platform; + if isAarch32 then + "arm" + else if isx86_32 then + "x86" + else + platform.uname.processor; + in + builtins.toFile "lix-cross-file.conf" '' + [properties] + # Meson is convinced that if !buildPlatform.canExecute hostPlatform then we cannot + # build anything at all, which is not at all correct. If we can't execute the host + # platform, we'll just disable tests and doc gen. + needs_exe_wrapper = false + + [binaries] + # Meson refuses to consider any CMake binary during cross compilation if it's + # not explicitly specified here, in the cross file. + # https://github.com/mesonbuild/meson/blob/0ed78cf6fa6d87c0738f67ae43525e661b50a8a2/mesonbuild/cmake/executor.py#L72 + cmake = 'cmake' + ''; + # The internal API docs need these for the build, but if we're not building # Nix itself, then these don't need to be propagated. maybePropagatedInputs = [ @@ -184,10 +212,15 @@ stdenv.mkDerivation (finalAttrs: { ] ++ lib.optional stdenv.hostPlatform.isStatic "-Denable-embedded-sandbox-shell=true" ++ lib.optional (finalAttrs.dontBuild) "-Denable-build=false" - # mesonConfigurePhase automatically passes -Dauto_features=enabled, - # so we must explicitly enable or disable features that we are not passing - # dependencies for. 
- ++ lib.singleton (lib.mesonEnable "internal-api-docs" internalApiDocs); + ++ [ + # mesonConfigurePhase automatically passes -Dauto_features=enabled, + # so we must explicitly enable or disable features that we are not passing + # dependencies for. + (lib.mesonEnable "internal-api-docs" internalApiDocs) + (lib.mesonBool "enable-tests" finalAttrs.doCheck) + (lib.mesonBool "enable-docs" canRunInstalled) + ] + ++ lib.optional (stdenv.hostPlatform != stdenv.buildPlatform) "--cross-file=${mesonCrossFile}"; # We only include CMake so that Meson can locate toml11, which only ships CMake dependency metadata. dontUseCmakeConfigure = true; @@ -315,7 +348,7 @@ stdenv.mkDerivation (finalAttrs: { makeFlags = "profiledir=$(out)/etc/profile.d PRECOMPILE_HEADERS=1"; - doCheck = true; + doCheck = canRunInstalled; mesonCheckFlags = lib.optionals (buildWithMeson || forDevShell) [ "--suite=check" ]; diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index 291f457f0..6f997885d 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -195,32 +195,12 @@ struct GitArchiveInputScheme : InputScheme input.attrs.erase("ref"); input.attrs.insert_or_assign("rev", rev->gitRev()); - Attrs lockedAttrs({ - {"type", "git-tarball"}, - {"rev", rev->gitRev()}, - }); - - if (auto res = getCache()->lookup(store, lockedAttrs)) { - input.attrs.insert_or_assign("lastModified", getIntAttr(res->first, "lastModified")); - return {std::move(res->second), input}; - } - auto url = getDownloadUrl(input); auto result = downloadTarball(store, url.url, input.getName(), true, url.headers); input.attrs.insert_or_assign("lastModified", uint64_t(result.lastModified)); - getCache()->add( - store, - lockedAttrs, - { - {"rev", rev->gitRev()}, - {"lastModified", uint64_t(result.lastModified)} - }, - result.tree.storePath, - true); - return {result.tree.storePath, input}; } }; diff --git a/src/libstore/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc index 3d87bdc21..6bf46dad8 100644 
--- a/src/libstore/builtins/fetchurl.cc +++ b/src/libstore/builtins/fetchurl.cc @@ -38,7 +38,6 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData) the result anyway. */ FileTransferRequest request(url); request.verifyTLS = false; - request.decompress = false; auto decompressor = makeDecompressionSink( unpack && mainUrl.ends_with(".xz") ? "xz" : "none", sink); diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index ef2480863..9dc742220 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -51,6 +51,7 @@ struct curlFileTransfer : public FileTransfer Callback<FileTransferResult> callback; CURL * req = 0; bool active = false; // whether the handle has been added to the multi object + bool headersProcessed = false; std::string statusMsg; unsigned int attempt = 0; @@ -89,19 +90,12 @@ struct curlFileTransfer : public FileTransfer {request.uri}, request.parentAct) , callback(std::move(callback)) , finalSink([this](std::string_view data) { - if (errorSink) { - (*errorSink)(data); - } - - if (this->request.dataCallback) { - auto httpStatus = getHTTPStatus(); - + auto httpStatus = getHTTPStatus(); /* Only write data to the sink if this is a successful response. 
*/ - if (successfulStatuses.count(httpStatus)) { - writtenToSink += data.size(); - this->request.dataCallback(data); - } + if (successfulStatuses.count(httpStatus) && this->request.dataCallback) { + writtenToSink += data.size(); + this->request.dataCallback(data); } else this->result.data.append(data); }) @@ -147,25 +141,41 @@ struct curlFileTransfer : public FileTransfer LambdaSink finalSink; std::shared_ptr<FinishSink> decompressionSink; - std::optional<StringSink> errorSink; std::exception_ptr writeException; + std::optional<std::string> getHeader(const char * name) + { + curl_header * result; + auto e = curl_easy_header(req, name, 0, CURLH_HEADER, -1, &result); + if (e == CURLHE_OK) { + return result->value; + } else if (e == CURLHE_MISSING || e == CURLHE_NOHEADERS) { + return std::nullopt; + } else { + throw nix::Error("unexpected error from curl_easy_header(): %i", e); + } + } + size_t writeCallback(void * contents, size_t size, size_t nmemb) { try { + if (!headersProcessed) { + if (auto h = getHeader("content-encoding")) { + encoding = std::move(*h); + } + if (auto h = getHeader("accept-ranges"); h && *h == "bytes") { + acceptRanges = true; + } + + headersProcessed = true; + } + size_t realSize = size * nmemb; result.bodySize += realSize; if (!decompressionSink) { decompressionSink = makeDecompressionSink(encoding, finalSink); - if (! successfulStatuses.count(getHTTPStatus())) { - // In this case we want to construct a TeeSink, to keep - // the response around (which we figure won't be big - // like an actual download should be) to improve error - // messages. 
- errorSink = StringSink { }; - } } (*decompressionSink)({(char *) contents, realSize}); @@ -196,42 +206,7 @@ struct curlFileTransfer : public FileTransfer statusMsg = trim(match.str(1)); acceptRanges = false; encoding = ""; - } else { - - auto i = line.find(':'); - if (i != std::string::npos) { - std::string name = toLower(trim(line.substr(0, i))); - - if (name == "etag") { - result.etag = trim(line.substr(i + 1)); - /* Hack to work around a GitHub bug: it sends - ETags, but ignores If-None-Match. So if we get - the expected ETag on a 200 response, then shut - down the connection because we already have the - data. */ - long httpStatus = 0; - curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus); - if (result.etag == request.expectedETag && httpStatus == 200) { - debug("shutting down on 200 HTTP response with expected ETag"); - return 0; - } - } - - else if (name == "content-encoding") - encoding = trim(line.substr(i + 1)); - - else if (name == "accept-ranges" && toLower(trim(line.substr(i + 1))) == "bytes") - acceptRanges = true; - - else if (name == "link" || name == "x-amz-meta-link") { - auto value = trim(line.substr(i + 1)); - static std::regex linkRegex("<([^>]*)>; rel=\"immutable\"", std::regex::extended | std::regex::icase); - if (std::smatch match; std::regex_match(value, match, linkRegex)) - result.immutableUrl = match.str(1); - else - debug("got invalid link header '%s'", value); - } - } + headersProcessed = false; } return realSize; } @@ -301,15 +276,11 @@ struct curlFileTransfer : public FileTransfer curl_easy_setopt(req, CURLOPT_USERAGENT, ("curl/" LIBCURL_VERSION " Lix/" + nixVersion + (fileTransferSettings.userAgentSuffix != "" ? 
" " + fileTransferSettings.userAgentSuffix.get() : "")).c_str()); - #if LIBCURL_VERSION_NUM >= 0x072b00 curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1); - #endif - #if LIBCURL_VERSION_NUM >= 0x072f00 if (fileTransferSettings.enableHttp2) curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS); else curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1); - #endif curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, TransferItem::writeCallbackWrapper); curl_easy_setopt(req, CURLOPT_WRITEDATA, this); curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, TransferItem::headerCallbackWrapper); @@ -379,9 +350,23 @@ struct curlFileTransfer : public FileTransfer } } - if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) { - code = CURLE_OK; - httpStatus = 304; + auto link = getHeader("link"); + if (!link) { + link = getHeader("x-amz-meta-link"); + } + if (link) { + static std::regex linkRegex( + "<([^>]*)>; rel=\"immutable\"", std::regex::extended | std::regex::icase + ); + if (std::smatch match; std::regex_match(*link, match, linkRegex)) { + result.immutableUrl = match.str(1); + } else { + debug("got invalid link header '%s'", *link); + } + } + + if (auto etag = getHeader("etag")) { + result.etag = std::move(*etag); } if (writeException) @@ -390,13 +375,6 @@ struct curlFileTransfer : public FileTransfer else if (code == CURLE_OK && successfulStatuses.count(httpStatus)) { result.cached = httpStatus == 304; - - // In 2021, GitHub responds to If-None-Match with 304, - // but omits ETag. We just use the If-None-Match etag - // since 304 implies they are the same. 
- if (httpStatus == 304 && result.etag == "") - result.etag = request.expectedETag; - act.progress(result.bodySize, result.bodySize); done = true; callback(std::move(result)); @@ -455,16 +433,16 @@ struct curlFileTransfer : public FileTransfer attempt++; std::optional<std::string> response; - if (errorSink) - response = std::move(errorSink->s); + if (!successfulStatuses.count(httpStatus)) + response = std::move(result.data); auto exc = code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted ? FileTransferError(Interrupted, std::move(response), "%s of '%s' was interrupted", request.verb(), request.uri) : httpStatus != 0 ? FileTransferError(err, std::move(response), - "unable to %s '%s': HTTP error %d%s", - request.verb(), request.uri, httpStatus, + "unable to %s '%s': HTTP error %d (%s)%s", + request.verb(), request.uri, httpStatus, statusMsg, code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code))) : FileTransferError(err, std::move(response), @@ -508,11 +486,6 @@ struct curlFileTransfer : public FileTransfer Sync<State> state_; - /* We can't use a std::condition_variable to wake up the curl - thread, because it only monitors file descriptors. So use a - pipe instead. 
*/ - Pipe wakeupPipe; - std::thread workerThread; curlFileTransfer() @@ -523,16 +496,9 @@ struct curlFileTransfer : public FileTransfer curlm = curl_multi_init(); - #if LIBCURL_VERSION_NUM >= 0x072b00 // Multiplex requires >= 7.43.0 curl_multi_setopt(curlm, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX); - #endif - #if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0 curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS, fileTransferSettings.httpConnections.get()); - #endif - - wakeupPipe.create(); - fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK); workerThread = std::thread([&]() { workerThreadEntry(); }); } @@ -546,6 +512,12 @@ struct curlFileTransfer : public FileTransfer if (curlm) curl_multi_cleanup(curlm); } + void wakeup() + { + if (auto mc = curl_multi_wakeup(curlm)) + throw nix::Error("unexpected error from curl_multi_wakeup(): %s", curl_multi_strerror(mc)); + } + void stopWorkerThread() { /* Signal the worker thread to exit. */ @@ -553,7 +525,7 @@ struct curlFileTransfer : public FileTransfer auto state(state_.lock()); state->quit = true; } - writeFull(wakeupPipe.writeSide.get(), " ", false); + wakeup(); } void workerThreadMain() @@ -595,32 +567,21 @@ struct curlFileTransfer : public FileTransfer } /* Wait for activity, including wakeup events. */ - int numfds = 0; - struct curl_waitfd extraFDs[1]; - extraFDs[0].fd = wakeupPipe.readSide.get(); - extraFDs[0].events = CURL_WAIT_POLLIN; - extraFDs[0].revents = 0; long maxSleepTimeMs = items.empty() ? 10000 : 100; auto sleepTimeMs = nextWakeup != std::chrono::steady_clock::time_point() ? 
std::max(0, (int) std::chrono::duration_cast<std::chrono::milliseconds>(nextWakeup - std::chrono::steady_clock::now()).count()) : maxSleepTimeMs; vomit("download thread waiting for %d ms", sleepTimeMs); - mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds); + mc = curl_multi_poll(curlm, nullptr, 0, sleepTimeMs, nullptr); if (mc != CURLM_OK) - throw nix::Error("unexpected error from curl_multi_wait(): %s", curl_multi_strerror(mc)); + throw nix::Error("unexpected error from curl_multi_poll(): %s", curl_multi_strerror(mc)); nextWakeup = std::chrono::steady_clock::time_point(); /* Add new curl requests from the incoming requests queue, except for requests that are embargoed (waiting for a retry timeout to expire). */ - if (extraFDs[0].revents & CURL_WAIT_POLLIN) { - char buf[1024]; - auto res = read(extraFDs[0].fd, buf, sizeof(buf)); - if (res == -1 && errno != EINTR) - throw SysError("reading curl wakeup socket"); - } std::vector<std::shared_ptr<TransferItem>> incoming; auto now = std::chrono::steady_clock::now(); @@ -683,7 +644,7 @@ struct curlFileTransfer : public FileTransfer throw nix::Error("cannot enqueue download request because the download thread is shutting down"); state->incoming.push(item); } - writeFull(wakeupPipe.writeSide.get(), " "); + wakeup(); } #if ENABLE_S3 diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh index a3b0dde1f..6c11c14ee 100644 --- a/src/libstore/filetransfer.hh +++ b/src/libstore/filetransfer.hh @@ -59,7 +59,6 @@ struct FileTransferRequest size_t tries = fileTransferSettings.tries; unsigned int baseRetryTimeMs = 250; ActivityId parentAct; - bool decompress = true; std::optional<std::string> data; std::string mimeType; std::function<void(std::string_view data)> dataCallback; diff --git a/src/libstore/gc-store.hh b/src/libstore/gc-store.hh index ab1059fb1..88c997247 100644 --- a/src/libstore/gc-store.hh +++ b/src/libstore/gc-store.hh @@ -7,7 +7,14 @@ namespace nix { +/** + * Garbage-collector roots, 
referring to a store path + */ typedef std::unordered_map<StorePath, std::unordered_set<std::string>> Roots; +/** + * Possible garbage collector roots, referring to any path + */ +typedef std::unordered_map<Path, std::unordered_set<std::string>> UncheckedRoots; struct GCOptions diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 20519c1a2..535bbd251 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -321,105 +321,8 @@ Roots LocalStore::findRoots(bool censor) return roots; } -typedef std::unordered_map<Path, std::unordered_set<std::string>> UncheckedRoots; - -static void readProcLink(const std::string & file, UncheckedRoots & roots) -{ - constexpr auto bufsiz = PATH_MAX; - char buf[bufsiz]; - auto res = readlink(file.c_str(), buf, bufsiz); - if (res == -1) { - if (errno == ENOENT || errno == EACCES || errno == ESRCH) - return; - throw SysError("reading symlink"); - } - if (res == bufsiz) { - throw Error("overly long symlink starting with '%1%'", std::string_view(buf, bufsiz)); - } - if (res > 0 && buf[0] == '/') - roots[std::string(static_cast<char *>(buf), res)] - .emplace(file); -} - -static std::string quoteRegexChars(const std::string & raw) -{ - static auto specialRegex = std::regex(R"([.^$\\*+?()\[\]{}|])"); - return std::regex_replace(raw, specialRegex, R"(\$&)"); -} - -#if __linux__ -static void readFileRoots(const char * path, UncheckedRoots & roots) +void LocalStore::findPlatformRoots(UncheckedRoots & unchecked) { - try { - roots[readFile(path)].emplace(path); - } catch (SysError & e) { - if (e.errNo != ENOENT && e.errNo != EACCES) - throw; - } -} -#endif - -void LocalStore::findRuntimeRoots(Roots & roots, bool censor) -{ - UncheckedRoots unchecked; - - auto procDir = AutoCloseDir{opendir("/proc")}; - if (procDir) { - struct dirent * ent; - auto digitsRegex = std::regex(R"(^\d+$)"); - auto mapRegex = std::regex(R"(^\s*\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(/\S+)\s*$)"); - auto storePathRegex = std::regex(quoteRegexChars(storeDir) + 
R"(/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*)"); - while (errno = 0, ent = readdir(procDir.get())) { - checkInterrupt(); - if (std::regex_match(ent->d_name, digitsRegex)) { - try { - readProcLink(fmt("/proc/%s/exe" ,ent->d_name), unchecked); - readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked); - - auto fdStr = fmt("/proc/%s/fd", ent->d_name); - auto fdDir = AutoCloseDir(opendir(fdStr.c_str())); - if (!fdDir) { - if (errno == ENOENT || errno == EACCES) - continue; - throw SysError("opening %1%", fdStr); - } - struct dirent * fd_ent; - while (errno = 0, fd_ent = readdir(fdDir.get())) { - if (fd_ent->d_name[0] != '.') - readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked); - } - if (errno) { - if (errno == ESRCH) - continue; - throw SysError("iterating /proc/%1%/fd", ent->d_name); - } - fdDir.reset(); - - auto mapFile = fmt("/proc/%s/maps", ent->d_name); - auto mapLines = tokenizeString<std::vector<std::string>>(readFile(mapFile), "\n"); - for (const auto & line : mapLines) { - auto match = std::smatch{}; - if (std::regex_match(line, match, mapRegex)) - unchecked[match[1]].emplace(mapFile); - } - - auto envFile = fmt("/proc/%s/environ", ent->d_name); - auto envString = readFile(envFile); - auto env_end = std::sregex_iterator{}; - for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i) - unchecked[i->str()].emplace(envFile); - } catch (SysError & e) { - if (errno == ENOENT || errno == EACCES || errno == ESRCH) - continue; - throw; - } - } - } - if (errno) - throw SysError("iterating /proc"); - } - -#if !defined(__linux__) // lsof is really slow on OS X. This actually causes the gc-concurrent.sh test to fail. // See: https://github.com/NixOS/nix/issues/3011 // Because of this we disable lsof when running the tests. 
@@ -437,13 +340,13 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor) /* lsof not installed, lsof failed */ } } -#endif +} + +void LocalStore::findRuntimeRoots(Roots & roots, bool censor) +{ + UncheckedRoots unchecked; -#if __linux__ - readFileRoots("/proc/sys/kernel/modprobe", unchecked); - readFileRoots("/proc/sys/kernel/fbsplash", unchecked); - readFileRoots("/proc/sys/kernel/poweroff_cmd", unchecked); -#endif + findPlatformRoots(unchecked); for (auto & [target, links] : unchecked) { if (!isInStore(target)) continue; diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index f252b449c..2f59b3591 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -1940,6 +1940,4 @@ std::optional<std::string> LocalStore::getVersion() return nixVersion; } -static RegisterStoreImplementation<LocalStore, LocalStoreConfig> regLocalStore; - } // namespace nix diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index fe26a0f27..b8d1f02ab 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -127,26 +127,35 @@ private: const PublicKeys & getPublicKeys(); -public: - - /** - * Hack for build-remote.cc. - */ - PathSet locksHeld; +protected: /** * Initialise the local store, upgrading the schema if * necessary. + * Protected so that users don't accidentally create a LocalStore + * instead of a platform's subclass. */ LocalStore(const Params & params); LocalStore(std::string scheme, std::string path, const Params & params); - ~LocalStore(); +public: + + /** + * Hack for build-remote.cc. + */ + PathSet locksHeld; + + virtual ~LocalStore(); static std::set<std::string> uriSchemes() { return {}; } /** + * Create a LocalStore, possibly a platform-specific subclass + */ + static std::shared_ptr<LocalStore> makeLocalStore(const Params & params); + + /** * Implementations of abstract store API methods. 
*/ @@ -330,6 +339,12 @@ private: void findRootsNoTemp(Roots & roots, bool censor); + /** + * Find possible garbage collector roots in a platform-specific manner, + * e.g. by looking in `/proc` or using `lsof` + */ + virtual void findPlatformRoots(UncheckedRoots & unchecked); + void findRuntimeRoots(Roots & roots, bool censor); std::pair<Path, AutoCloseFD> createTempDirInStore(); diff --git a/src/libstore/local.mk b/src/libstore/local.mk index 68ccdc409..6bd73965d 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -5,6 +5,11 @@ libstore_NAME = libnixstore libstore_DIR := $(d) libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc) +ifdef HOST_LINUX +libstore_SOURCES += $(d)/platform/linux.cc +else +libstore_SOURCES += $(d)/platform/fallback.cc +endif libstore_LIBS = libutil diff --git a/src/libstore/meson.build b/src/libstore/meson.build index e1c6c267a..94471dc29 100644 --- a/src/libstore/meson.build +++ b/src/libstore/meson.build @@ -11,7 +11,7 @@ foreach header : [ 'schema.sql', 'ca-specific-schema.sql' ] endforeach if enable_embedded_sandbox_shell - hexdump = find_program('hexdump', required : true) + hexdump = find_program('hexdump', required : true, native : true) embedded_sandbox_shell_gen = custom_target( 'embedded-sandbox-shell.gen.hh', command : [ @@ -66,6 +66,7 @@ libstore_sources = files( 'path-with-outputs.cc', 'path.cc', 'pathlocks.cc', + 'platform.cc', 'profiles.cc', 'realisation.cc', 'remote-fs-accessor.cc', @@ -158,6 +159,14 @@ libstore_headers = files( 'worker-protocol.hh', ) +if host_machine.system() == 'linux' + libstore_sources += files('platform/linux.cc') + libstore_headers += files('platform/linux.hh') +else + libstore_sources += files('platform/fallback.cc') + libstore_headers += files('platform/fallback.hh') +endif + # These variables (aside from LSOF) are created pseudo-dynamically, near the beginning of # the top-level meson.build. 
Aside from prefix itself, each of these was # made into an absolute path by joining it with prefix, unless it was already diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index 1d900c272..992a79c6e 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -151,7 +151,7 @@ std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & s for (auto i = e->begin(); i != e->end(); ++i) { StorePathSet storePaths; for (auto & p : *i) - storePaths.insert(store.parseStorePath(p.get<std::string>())); + storePaths.insert(store.toStorePath(p.get<std::string>()).first); json[i.key()] = store.pathInfoToJSON( store.exportReferences(storePaths, inputPaths), false, true); } diff --git a/src/libstore/platform.cc b/src/libstore/platform.cc new file mode 100644 index 000000000..9c389ef55 --- /dev/null +++ b/src/libstore/platform.cc @@ -0,0 +1,18 @@ +#include "local-store.hh" + +#if __linux__ +#include "platform/linux.hh" +#else +#include "platform/fallback.hh" +#endif + +namespace nix { +std::shared_ptr<LocalStore> LocalStore::makeLocalStore(const Params & params) +{ +#if __linux__ + return std::shared_ptr<LocalStore>(new LinuxLocalStore(params)); +#else + return std::shared_ptr<LocalStore>(new FallbackLocalStore(params)); +#endif +} +} diff --git a/src/libstore/platform/fallback.cc b/src/libstore/platform/fallback.cc new file mode 100644 index 000000000..5a01d64c8 --- /dev/null +++ b/src/libstore/platform/fallback.cc @@ -0,0 +1,5 @@ +#include "platform/fallback.hh" + +namespace nix { +static RegisterStoreImplementation<FallbackLocalStore, LocalStoreConfig> regLocalStore; +} diff --git a/src/libstore/platform/fallback.hh b/src/libstore/platform/fallback.hh new file mode 100644 index 000000000..fd27edbe6 --- /dev/null +++ b/src/libstore/platform/fallback.hh @@ -0,0 +1,31 @@ +#pragma once +///@file + +#include "local-store.hh" + +namespace nix { + +/** + * Fallback platform implementation of LocalStore 
+ * Exists so we can make LocalStore constructor protected + */ +class FallbackLocalStore : public LocalStore +{ +public: + FallbackLocalStore(const Params & params) + : StoreConfig(params) + , LocalFSStoreConfig(params) + , LocalStoreConfig(params) + , Store(params) + , LocalFSStore(params) + , LocalStore(params) + { + } + FallbackLocalStore(const std::string scheme, std::string path, const Params & params) + : FallbackLocalStore(params) + { + throw UnimplementedError("FallbackLocalStore"); + } +}; + +} diff --git a/src/libstore/platform/linux.cc b/src/libstore/platform/linux.cc new file mode 100644 index 000000000..9be3e47da --- /dev/null +++ b/src/libstore/platform/linux.cc @@ -0,0 +1,123 @@ +#include "gc-store.hh" +#include "signals.hh" +#include "platform/linux.hh" + +#include <regex> + +namespace nix { +static RegisterStoreImplementation<LinuxLocalStore, LocalStoreConfig> regLocalStore; + +static void readProcLink(const std::string & file, UncheckedRoots & roots) +{ + constexpr auto bufsiz = PATH_MAX; + char buf[bufsiz]; + auto res = readlink(file.c_str(), buf, bufsiz); + if (res == -1) { + if (errno == ENOENT || errno == EACCES || errno == ESRCH) { + return; + } + throw SysError("reading symlink"); + } + if (res == bufsiz) { + throw Error("overly long symlink starting with '%1%'", std::string_view(buf, bufsiz)); + } + if (res > 0 && buf[0] == '/') { + roots[std::string(static_cast<char *>(buf), res)].emplace(file); + } +} + +static std::string quoteRegexChars(const std::string & raw) +{ + static auto specialRegex = std::regex(R"([.^$\\*+?()\[\]{}|])"); + return std::regex_replace(raw, specialRegex, R"(\$&)"); +} + +static void readFileRoots(const char * path, UncheckedRoots & roots) +{ + try { + roots[readFile(path)].emplace(path); + } catch (SysError & e) { + if (e.errNo != ENOENT && e.errNo != EACCES) { + throw; + } + } +} + +void LinuxLocalStore::findPlatformRoots(UncheckedRoots & unchecked) +{ + auto procDir = AutoCloseDir{opendir("/proc")}; + if 
(procDir) { + struct dirent * ent; + auto digitsRegex = std::regex(R"(^\d+$)"); + auto mapRegex = std::regex(R"(^\s*\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(/\S+)\s*$)"); + auto storePathRegex = + std::regex(quoteRegexChars(storeDir) + R"(/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*)"); + while (errno = 0, ent = readdir(procDir.get())) { + checkInterrupt(); + if (std::regex_match(ent->d_name, digitsRegex)) { + try { + readProcLink(fmt("/proc/%s/exe", ent->d_name), unchecked); + readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked); + + auto fdStr = fmt("/proc/%s/fd", ent->d_name); + auto fdDir = AutoCloseDir(opendir(fdStr.c_str())); + if (!fdDir) { + if (errno == ENOENT || errno == EACCES) { + continue; + } + throw SysError("opening %1%", fdStr); + } + struct dirent * fd_ent; + while (errno = 0, fd_ent = readdir(fdDir.get())) { + if (fd_ent->d_name[0] != '.') { + readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked); + } + } + if (errno) { + if (errno == ESRCH) { + continue; + } + throw SysError("iterating /proc/%1%/fd", ent->d_name); + } + fdDir.reset(); + + auto mapFile = fmt("/proc/%s/maps", ent->d_name); + auto mapLines = + tokenizeString<std::vector<std::string>>(readFile(mapFile), "\n"); + for (const auto & line : mapLines) { + auto match = std::smatch{}; + if (std::regex_match(line, match, mapRegex)) { + unchecked[match[1]].emplace(mapFile); + } + } + + auto envFile = fmt("/proc/%s/environ", ent->d_name); + auto envString = readFile(envFile); + auto env_end = std::sregex_iterator{}; + for (auto i = + std::sregex_iterator{ + envString.begin(), envString.end(), storePathRegex + }; + i != env_end; + ++i) + { + unchecked[i->str()].emplace(envFile); + } + } catch (SysError & e) { + if (errno == ENOENT || errno == EACCES || errno == ESRCH) { + continue; + } + throw; + } + } + } + if (errno) { + throw SysError("iterating /proc"); + } + } + + readFileRoots("/proc/sys/kernel/modprobe", unchecked); + readFileRoots("/proc/sys/kernel/fbsplash", unchecked); + 
readFileRoots("/proc/sys/kernel/poweroff_cmd", unchecked); +} +} diff --git a/src/libstore/platform/linux.hh b/src/libstore/platform/linux.hh new file mode 100644 index 000000000..8b97e17c5 --- /dev/null +++ b/src/libstore/platform/linux.hh @@ -0,0 +1,35 @@ +#pragma once +///@file + +#include "gc-store.hh" +#include "local-store.hh" + +namespace nix { + +/** + * Linux-specific implementation of LocalStore + */ +class LinuxLocalStore : public LocalStore +{ +public: + LinuxLocalStore(const Params & params) + : StoreConfig(params) + , LocalFSStoreConfig(params) + , LocalStoreConfig(params) + , Store(params) + , LocalFSStore(params) + , LocalStore(params) + { + } + LinuxLocalStore(const std::string scheme, std::string path, const Params & params) + : LinuxLocalStore(params) + { + throw UnimplementedError("LinuxLocalStore"); + } + +private: + + void findPlatformRoots(UncheckedRoots & unchecked) override; +}; + +} diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 94202d46e..69e89263b 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -1426,7 +1426,7 @@ std::shared_ptr<Store> openFromNonUri(const std::string & uri, const Store::Para if (uri == "" || uri == "auto") { auto stateDir = getOr(params, "state", settings.nixStateDir); if (access(stateDir.c_str(), R_OK | W_OK) == 0) - return std::make_shared<LocalStore>(params); + return LocalStore::makeLocalStore(params); else if (pathExists(settings.nixDaemonSocketFile)) return std::make_shared<UDSRemoteStore>(params); #if __linux__ @@ -1444,26 +1444,26 @@ std::shared_ptr<Store> openFromNonUri(const std::string & uri, const Store::Para try { createDirs(chrootStore); } catch (Error & e) { - return std::make_shared<LocalStore>(params); + return LocalStore::makeLocalStore(params); } warn("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore); } else debug("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore); Store::Params 
params2; params2["root"] = chrootStore; - return std::make_shared<LocalStore>(params2); + return LocalStore::makeLocalStore(params2); } #endif else - return std::make_shared<LocalStore>(params); + return LocalStore::makeLocalStore(params); } else if (uri == "daemon") { return std::make_shared<UDSRemoteStore>(params); } else if (uri == "local") { - return std::make_shared<LocalStore>(params); + return LocalStore::makeLocalStore(params); } else if (isNonUriPath(uri)) { Store::Params params2 = params; params2["root"] = absPath(uri); - return std::make_shared<LocalStore>(params2); + return LocalStore::makeLocalStore(params2); } else { return nullptr; } diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index 8f74984e0..2457e4cc8 100644 --- a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -98,7 +98,6 @@ std::tuple<StorePath, Hash> prefetchFile( FdSink sink(fd.get()); FileTransferRequest req(url); - req.decompress = false; getFileTransfer()->download(std::move(req), sink); } diff --git a/tests/nixos/github-flakes.nix b/tests/nixos/github-flakes.nix index 4830be6ac..1954208b9 100644 --- a/tests/nixos/github-flakes.nix +++ b/tests/nixos/github-flakes.nix @@ -119,6 +119,9 @@ in [ { urlPath = "/repos/NixOS/nixpkgs"; dir = nixpkgs-api; } + { urlPath = "/repos/fork/nixpkgs"; + dir = nixpkgs-api; + } { urlPath = "/repos/fancy-enterprise/private-flake"; dir = private-flake-api; } @@ -190,6 +193,10 @@ in client.succeed("nix registry pin nixpkgs") client.succeed("nix flake metadata nixpkgs --tarball-ttl 0 >&2") + # fetching a fork with the same commit ID should fail, even if the revision is cached + client.succeed("nix flake metadata github:NixOS/nixpkgs") + client.fail("nix flake metadata github:fork/nixpkgs") + # Shut down the web server. The flake should be cached on the client. 
github.succeed("systemctl stop httpd.service") diff --git a/tests/nixos/tarball-flakes.nix b/tests/nixos/tarball-flakes.nix index e30d15739..2c0df60ee 100644 --- a/tests/nixos/tarball-flakes.nix +++ b/tests/nixos/tarball-flakes.nix @@ -64,15 +64,18 @@ in info = json.loads(out) # Check that we got redirected to the immutable URL. - assert info["locked"]["url"] == "http://localhost/stable/${nixpkgs.rev}.tar.gz" + locked_url = info["locked"]["url"] + assert locked_url == "http://localhost/stable/${nixpkgs.rev}.tar.gz", f"{locked_url=} != http://localhost/stable/${nixpkgs.rev}.tar.gz" # Check that we got the rev and revCount attributes. - assert info["revision"] == "${nixpkgs.rev}" - assert info["revCount"] == 1234 + revision = info["revision"] + rev_count = info["revCount"] + assert revision == "${nixpkgs.rev}", f"{revision=} != ${nixpkgs.rev}" + assert rev_count == 1234, f"{rev_count=} != 1234" # Check that fetching with rev/revCount/narHash succeeds. - machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?rev=" + info["revision"]) - machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?revCount=" + str(info["revCount"])) + machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?rev=" + revision) + machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?revCount=" + str(rev_count)) machine.succeed("nix flake metadata --json http://localhost/latest.tar.gz?narHash=" + info["locked"]["narHash"]) # Check that fetching fails if we provide incorrect attributes. |