author | Eelco Dolstra <edolstra@gmail.com> | 2020-05-28 12:55:24 +0200 |
---|---|---|
committer | Eelco Dolstra <edolstra@gmail.com> | 2020-05-28 12:55:24 +0200 |
commit | 17ca997fc6875071c7c0027b7a23c732c3ad02ee (patch) | |
tree | cf820d38955dee7cf33b36502f5b62b78b466a95 | |
parent | c3eff22f46cc05ddae4f648413ab115edb817ef9 (diff) | |
parent | d2a537568a7ed3fa8bee63c4298b771a27fdad89 (diff) | |
Merge remote-tracking branch 'origin/master' into flakes
46 files changed, 2022 insertions, 190 deletions
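
For orientation before the diff body: the bulk of this merge adds a gtest-based unit-test suite for libutil, macOS Catalina support in the installer (an APFS volume for the store, created by the new scripts/create-darwin-volume.sh), and several new installer flags in scripts/install-nix-from-closure.sh. A minimal sketch of an invocation that exercises those new flags follows; the flag names and the install URL are taken from the patch below, while the user count of 8 and the ./extra-nix.conf path are purely illustrative placeholders.

```sh
# Sketch: multi-user install using the flags introduced in this merge.
# "8" and "./extra-nix.conf" are hypothetical example values.
sh <(curl https://nixos.org/nix/install) \
    --daemon \
    --daemon-user-count 8 \
    --no-channel-add \
    --nix-extra-conf-file ./extra-nix.conf

# On macOS 10.15+ (read-only system volume), the installer additionally
# needs the new flag that creates an APFS volume mounted at /nix:
sh <(curl https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume
```
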
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index f9b1d6093..447a6d43b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -6,7 +6,7 @@ jobs: tests: strategy: matrix: - os: [ubuntu-18.04, macos] + os: [ubuntu-latest, macos-latest] runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v2 @@ -3,6 +3,7 @@ makefiles = \ local.mk \ nix-rust/local.mk \ src/libutil/local.mk \ + src/libutil/tests/local.mk \ src/libstore/local.mk \ src/libfetchers/local.mk \ src/libmain/local.mk \ diff --git a/Makefile.config.in b/Makefile.config.in index e7a12089a..b632444e8 100644 --- a/Makefile.config.in +++ b/Makefile.config.in @@ -1,36 +1,38 @@ AR = @AR@ BDW_GC_LIBS = @BDW_GC_LIBS@ +BOOST_LDFLAGS = @BOOST_LDFLAGS@ BUILD_SHARED_LIBS = @BUILD_SHARED_LIBS@ CC = @CC@ CFLAGS = @CFLAGS@ CXX = @CXX@ CXXFLAGS = @CXXFLAGS@ -LDFLAGS = @LDFLAGS@ +EDITLINE_LIBS = @EDITLINE_LIBS@ ENABLE_S3 = @ENABLE_S3@ -HAVE_SODIUM = @HAVE_SODIUM@ +GTEST_LIBS = @GTEST_LIBS@ HAVE_SECCOMP = @HAVE_SECCOMP@ -BOOST_LDFLAGS = @BOOST_LDFLAGS@ +HAVE_SODIUM = @HAVE_SODIUM@ +LDFLAGS = @LDFLAGS@ +LIBARCHIVE_LIBS = @LIBARCHIVE_LIBS@ +LIBBROTLI_LIBS = @LIBBROTLI_LIBS@ LIBCURL_LIBS = @LIBCURL_LIBS@ +LIBLZMA_LIBS = @LIBLZMA_LIBS@ OPENSSL_LIBS = @OPENSSL_LIBS@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ SODIUM_LIBS = @SODIUM_LIBS@ -LIBLZMA_LIBS = @LIBLZMA_LIBS@ SQLITE3_LIBS = @SQLITE3_LIBS@ -LIBBROTLI_LIBS = @LIBBROTLI_LIBS@ -LIBARCHIVE_LIBS = @LIBARCHIVE_LIBS@ -EDITLINE_LIBS = @EDITLINE_LIBS@ bash = @bash@ bindir = @bindir@ -lsof = @lsof@ datadir = @datadir@ datarootdir = @datarootdir@ +doc_generate = @doc_generate@ docdir = @docdir@ exec_prefix = @exec_prefix@ includedir = @includedir@ libdir = @libdir@ libexecdir = @libexecdir@ localstatedir = @localstatedir@ +lsof = @lsof@ mandir = @mandir@ pkglibdir = $(libdir)/$(PACKAGE_NAME) prefix = @prefix@ @@ -38,6 +40,5 @@ sandbox_shell = @sandbox_shell@ storedir = @storedir@ sysconfdir = @sysconfdir@ system = @system@ -doc_generate = @doc_generate@ xmllint = @xmllint@ xsltproc = @xsltproc@ diff --git a/configure.ac b/configure.ac index 1af96736c..2f29cf864 100644 --- a/configure.ac +++ b/configure.ac @@ -267,6 +267,10 @@ if test "$gc" = yes; then fi +# Look for gtest. +PKG_CHECK_MODULES([GTEST], [gtest_main]) + + # documentation generation switch AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen], [disable documentation generation]), diff --git a/doc/manual/advanced-topics/post-build-hook.xml b/doc/manual/advanced-topics/post-build-hook.xml index 08a7a772f..acfe9e3cc 100644 --- a/doc/manual/advanced-topics/post-build-hook.xml +++ b/doc/manual/advanced-topics/post-build-hook.xml @@ -61,7 +61,7 @@ substituters = https://cache.nixos.org/ s3://example-nix-cache trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM= </programlisting> -<para>we will restart the Nix daemon a later step.</para> +<para>We will restart the Nix daemon in a later step.</para> </section> <section> @@ -139,7 +139,7 @@ $ nix-store --delete /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example <para>Now, copy the path back from the cache:</para> <screen> -$ nix store --realize /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example +$ nix-store --realise /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example copying path '/nix/store/m8bmqwrch6l3h8s0k3d673xpmipcdpsa-example from 's3://example-nix-cache'... 
warning: you did not specify '--add-root'; the result might be removed by the garbage collector /nix/store/m8bmqwrch6l3h8s0k3d673xpmipcdpsa-example diff --git a/doc/manual/command-ref/conf-file.xml b/doc/manual/command-ref/conf-file.xml index 1820598e5..1fa74a143 100644 --- a/doc/manual/command-ref/conf-file.xml +++ b/doc/manual/command-ref/conf-file.xml @@ -386,7 +386,7 @@ false</literal>.</para> <programlisting> builtins.fetchurl { - url = https://example.org/foo-1.2.3.tar.xz; + url = "https://example.org/foo-1.2.3.tar.xz"; sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"; } </programlisting> diff --git a/doc/manual/command-ref/env-common.xml b/doc/manual/command-ref/env-common.xml index 0217de7b2..8466cc463 100644 --- a/doc/manual/command-ref/env-common.xml +++ b/doc/manual/command-ref/env-common.xml @@ -53,7 +53,7 @@ nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos</screen> <envar>NIX_PATH</envar> to <screen> -nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-15.09.tar.gz</screen> +nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-15.09.tar.gz</screen> tells Nix to download the latest revision in the Nixpkgs/NixOS 15.09 channel.</para> diff --git a/doc/manual/command-ref/nix-env.xml b/doc/manual/command-ref/nix-env.xml index 9c03ccce1..2b95b6819 100644 --- a/doc/manual/command-ref/nix-env.xml +++ b/doc/manual/command-ref/nix-env.xml @@ -526,13 +526,10 @@ these paths will be fetched (0.04 MiB download, 0.19 MiB unpacked): 14.12 channel: <screen> -$ nix-env -f https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz -iA firefox +$ nix-env -f https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz -iA firefox </screen> -(The GitHub repository <literal>nixpkgs-channels</literal> is updated -automatically from the main <literal>nixpkgs</literal> repository -after certain tests have succeeded and binaries have been built and -uploaded to the binary cache at <uri>cache.nixos.org</uri>.)</para> +</para> </refsection> diff --git a/doc/manual/command-ref/nix-shell.xml b/doc/manual/command-ref/nix-shell.xml index 766482460..2fef323c5 100644 --- a/doc/manual/command-ref/nix-shell.xml +++ b/doc/manual/command-ref/nix-shell.xml @@ -258,7 +258,7 @@ path. You can override it by passing <option>-I</option> or setting containing the Pan package from a specific revision of Nixpkgs: <screen> -$ nix-shell -p pan -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/8a3eea054838b55aca962c3fbde9c83c102b8bf2.tar.gz +$ nix-shell -p pan -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/8a3eea054838b55aca962c3fbde9c83c102b8bf2.tar.gz [nix-shell:~]$ pan --version Pan 0.139 @@ -352,7 +352,7 @@ following Haskell script uses a specific branch of Nixpkgs/NixOS (the <programlisting><![CDATA[ #! /usr/bin/env nix-shell #! nix-shell -i runghc -p "haskellPackages.ghcWithPackages (ps: [ps.HTTP ps.tagsoup])" -#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-18.03.tar.gz +#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-18.03.tar.gz import Network.HTTP import Text.HTML.TagSoup @@ -370,7 +370,7 @@ If you want to be even more precise, you can specify a specific revision of Nixpkgs: <programlisting> -#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/0672315759b3e15e2121365f067c1c8c56bb4722.tar.gz +#! 
nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/0672315759b3e15e2121365f067c1c8c56bb4722.tar.gz </programlisting> </para> diff --git a/doc/manual/command-ref/nix-store.xml b/doc/manual/command-ref/nix-store.xml index 1ddb5408d..d71f9d8e4 100644 --- a/doc/manual/command-ref/nix-store.xml +++ b/doc/manual/command-ref/nix-store.xml @@ -936,7 +936,7 @@ $ nix-store --add ./foo.c <para>The operation <option>--add-fixed</option> adds the specified paths to the Nix store. Unlike <option>--add</option> paths are registered using the -specified hashing algorithm, resulting in the same output path as a fixed output +specified hashing algorithm, resulting in the same output path as a fixed-output derivation. This can be used for sources that are not available from a public url or broke since the download expression was written. </para> diff --git a/doc/manual/expressions/advanced-attributes.xml b/doc/manual/expressions/advanced-attributes.xml index 372d03de7..5759ff50e 100644 --- a/doc/manual/expressions/advanced-attributes.xml +++ b/doc/manual/expressions/advanced-attributes.xml @@ -178,7 +178,7 @@ impureEnvVars = [ "http_proxy" "https_proxy" <replaceable>...</replaceable> ]; <programlisting> fetchurl { - url = http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz; + url = "http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz"; sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; } </programlisting> @@ -189,7 +189,7 @@ fetchurl { <programlisting> fetchurl { - url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz; + url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz"; sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; } </programlisting> diff --git a/doc/manual/expressions/builtins.xml b/doc/manual/expressions/builtins.xml index f71a8f3be..a18c5801a 100644 --- a/doc/manual/expressions/builtins.xml +++ b/doc/manual/expressions/builtins.xml @@ -324,7 +324,7 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting> particular version of Nixpkgs, e.g. 
<programlisting> -with import (fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz) {}; +with import (fetchTarball https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz) {}; stdenv.mkDerivation { … } </programlisting> @@ -349,7 +349,7 @@ stdenv.mkDerivation { … } <programlisting> with import (fetchTarball { - url = https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz; + url = "https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz"; sha256 = "1jppksrfvbk5ypiqdz4cddxdl8z6zyzdb2srq8fcffr327ld5jj2"; }) {}; @@ -1406,7 +1406,7 @@ stdenv.mkDerivation { "; src = fetchurl { - url = http://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz; + url = "http://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz"; sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; }; inherit perl; diff --git a/doc/manual/expressions/expression-syntax.xml b/doc/manual/expressions/expression-syntax.xml index 42b9dca36..a3de20713 100644 --- a/doc/manual/expressions/expression-syntax.xml +++ b/doc/manual/expressions/expression-syntax.xml @@ -15,7 +15,7 @@ stdenv.mkDerivation { <co xml:id='ex-hello-nix-co-2' /> name = "hello-2.1.1"; <co xml:id='ex-hello-nix-co-3' /> builder = ./builder.sh; <co xml:id='ex-hello-nix-co-4' /> src = fetchurl { <co xml:id='ex-hello-nix-co-5' /> - url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz; + url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz"; sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465"; }; inherit perl; <co xml:id='ex-hello-nix-co-6' /> diff --git a/doc/manual/expressions/simple-building-testing.xml b/doc/manual/expressions/simple-building-testing.xml index 7326a3e76..ce0a1636d 100644 --- a/doc/manual/expressions/simple-building-testing.xml +++ b/doc/manual/expressions/simple-building-testing.xml @@ -73,12 +73,4 @@ waiting for lock on `/nix/store/0h5b7hp8d4hqfrw8igvx97x1xawrjnac-hello-2.1.1x'</ So it is always safe to run multiple instances of Nix in parallel (which isn’t the case with, say, <command>make</command>).</para> -<para>If you have a system with multiple CPUs, you may want to have -Nix build different derivations in parallel (insofar as possible). -Just pass the option <link linkend='opt-max-jobs'><option>-j -<replaceable>N</replaceable></option></link>, where -<replaceable>N</replaceable> is the maximum number of jobs to be run -in parallel, or set. Typically this should be the number of -CPUs.</para> - </section> diff --git a/doc/manual/installation/installing-binary.xml b/doc/manual/installation/installing-binary.xml index 3f57f47b5..8d548f0ea 100644 --- a/doc/manual/installation/installing-binary.xml +++ b/doc/manual/installation/installing-binary.xml @@ -6,16 +6,30 @@ <title>Installing a Binary Distribution</title> -<para>If you are using Linux or macOS, the easiest way to install Nix -is to run the following command: +<para> + If you are using Linux or macOS versions up to 10.14 (Mojave), the + easiest way to install Nix is to run the following command: +</para> <screen> $ sh <(curl https://nixos.org/nix/install) </screen> -As of Nix 2.1.0, the Nix installer will always default to creating a -single-user installation, however opting in to the multi-user -installation is highly recommended. +<para> + If you're using macOS 10.15 (Catalina) or newer, consult + <link linkend="sect-macos-installation">the macOS installation instructions</link> + before installing. 
+</para> + +<para> + As of Nix 2.1.0, the Nix installer will always default to creating a + single-user installation, however opting in to the multi-user + installation is highly recommended. + <!-- TODO: this explains *neither* why the default version is + single-user, nor why we'd recommend multi-user over the default. + True prospective users don't have much basis for evaluating this. + What's it to me? Who should pick which? Why? What if I pick wrong? + --> </para> <section xml:id="sect-single-user-installation"> @@ -36,7 +50,7 @@ run this under your usual user account, <emphasis>not</emphasis> as root. The script will invoke <command>sudo</command> to create <filename>/nix</filename> if it doesn’t already exist. If you don’t have <command>sudo</command>, you should manually create -<command>/nix</command> first as root, e.g.: +<filename>/nix</filename> first as root, e.g.: <screen> $ mkdir /nix @@ -47,7 +61,7 @@ The install script will modify the first writable file from amongst <filename>.bash_profile</filename>, <filename>.bash_login</filename> and <filename>.profile</filename> to source <filename>~/.nix-profile/etc/profile.d/nix.sh</filename>. You can set -the <command>NIX_INSTALLER_NO_MODIFY_PROFILE</command> environment +the <envar>NIX_INSTALLER_NO_MODIFY_PROFILE</envar> environment variable before executing the install script to disable this behaviour. </para> @@ -81,12 +95,10 @@ $ rm -rf /nix <para> You can instruct the installer to perform a multi-user installation on your system: - - <screen> - sh <(curl https://nixos.org/nix/install) --daemon -</screen> </para> + <screen>sh <(curl https://nixos.org/nix/install) --daemon</screen> + <para> The multi-user installation of Nix will create build users between the user IDs 30001 and 30032, and a group with the group ID 30000. @@ -136,6 +148,273 @@ sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist </section> +<section xml:id="sect-macos-installation"> + <title>macOS Installation</title> + + <para> + Starting with macOS 10.15 (Catalina), the root filesystem is read-only. + This means <filename>/nix</filename> can no longer live on your system + volume, and that you'll need a workaround to install Nix. + </para> + + <para> + The recommended approach, which creates an unencrypted APFS volume + for your Nix store and a "synthetic" empty directory to mount it + over at <filename>/nix</filename>, is least likely to impair Nix + or your system. + </para> + + <note><para> + With all separate-volume approaches, it's possible something on + your system (particularly daemons/services and restored apps) may + need access to your Nix store before the volume is mounted. Adding + additional encryption makes this more likely. + </para></note> + + <para> + If you're using a recent Mac with a + <link xlink:href="https://www.apple.com/euro/mac/shared/docs/Apple_T2_Security_Chip_Overview.pdf">T2 chip</link>, + your drive will still be encrypted at rest (in which case "unencrypted" + is a bit of a misnomer). To use this approach, just install Nix with: + </para> + + <screen>$ sh <(curl https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume</screen> + + <para> + If you don't like the sound of this, you'll want to weigh the + other approaches and tradeoffs detailed in this section. + </para> + + <note> + <title>Eventual solutions?</title> + <para> + All of the known workarounds have drawbacks, but we hope + better solutions will be available in the future. 
Some that + we have our eye on are: + </para> + <orderedlist> + <listitem> + <para> + A true firmlink would enable the Nix store to live on the + primary data volume without the build problems caused by + the symlink approach. End users cannot currently + create true firmlinks. + </para> + </listitem> + <listitem> + <para> + If the Nix store volume shared FileVault encryption + with the primary data volume (probably by using the same + volume group and role), FileVault encryption could be + easily supported by the installer without requiring + manual setup by each user. + </para> + </listitem> + </orderedlist> + </note> + + <section xml:id="sect-macos-installation-change-store-prefix"> + <title>Change the Nix store path prefix</title> + <para> + Changing the default prefix for the Nix store is a simple + approach which enables you to leave it on your root volume, + where it can take full advantage of FileVault encryption if + enabled. Unfortunately, this approach also opts your device out + of some benefits that are enabled by using the same prefix + across systems: + + <itemizedlist> + <listitem> + <para> + Your system won't be able to take advantage of the binary + cache (unless someone is able to stand up and support + duplicate caching infrastructure), which means you'll + spend more time waiting for builds. + </para> + </listitem> + <listitem> + <para> + It's harder to build and deploy packages to Linux systems. + </para> + </listitem> + <!-- TODO: may be more here --> + </itemizedlist> + + <!-- TODO: Yes, but how?! --> + + It would also possible (and often requested) to just apply this + change ecosystem-wide, but it's an intrusive process that has + side effects we want to avoid for now. + <!-- magnificent hand-wavy gesture --> + </para> + <para> + </para> + </section> + + <section xml:id="sect-macos-installation-encrypted-volume"> + <title>Use a separate encrypted volume</title> + <para> + If you like, you can also add encryption to the recommended + approach taken by the installer. You can do this by pre-creating + an encrypted volume before you run the installer--or you can + run the installer and encrypt the volume it creates later. + <!-- TODO: see later note about whether this needs both add-encryption and from-scratch directions --> + </para> + <para> + In either case, adding encryption to a second volume isn't quite + as simple as enabling FileVault for your boot volume. Before you + dive in, there are a few things to weigh: + </para> + <orderedlist> + <listitem> + <para> + The additional volume won't be encrypted with your existing + FileVault key, so you'll need another mechanism to decrypt + the volume. + </para> + </listitem> + <listitem> + <para> + You can store the password in Keychain to automatically + decrypt the volume on boot--but it'll have to wait on Keychain + and may not mount before your GUI apps restore. If any of + your launchd agents or apps depend on Nix-installed software + (for example, if you use a Nix-installed login shell), the + restore may fail or break. + </para> + <para> + On a case-by-case basis, you may be able to work around this + problem by using <command>wait4path</command> to block + execution until your executable is available. + </para> + <para> + It's also possible to decrypt and mount the volume earlier + with a login hook--but this mechanism appears to be + deprecated and its future is unclear. 
+ </para> + </listitem> + <listitem> + <para> + You can hard-code the password in the clear, so that your + store volume can be decrypted before Keychain is available. + </para> + </listitem> + </orderedlist> + <para> + If you are comfortable navigating these tradeoffs, you can encrypt the volume with + something along the lines of: + <!-- TODO: + I don't know if this also needs from-scratch instructions? + can we just recommend use-the-installer-and-then-encrypt? + --> + </para> + <!-- + TODO: it looks like this option can be encryptVolume|encrypt|enableFileVault + + It may be more clear to use encryptVolume, here? FileVault seems + heavily associated with the boot-volume behavior; I worry + a little that it can mislead here, especially as it gets + copied around minus doc context...? + --> + <screen>alice$ diskutil apfs enableFileVault /nix -user disk</screen> + + <!-- TODO: and then go into detail on the mount/decrypt approaches? --> + </section> + + <section xml:id="sect-macos-installation-symlink"> + <!-- + Maybe a good razor is: if we'd hate having to support someone who + installed Nix this way, it shouldn't even be detailed? + --> + <title>Symlink the Nix store to a custom location</title> + <para> + Another simple approach is using <filename>/etc/synthetic.conf</filename> + to symlink the Nix store to the data volume. This option also + enables your store to share any configured FileVault encryption. + Unfortunately, builds that resolve the symlink may leak the + canonical path or even fail. + </para> + <para> + Because of these downsides, we can't recommend this approach. + </para> + <!-- Leaving out instructions for this one. --> + </section> + + <section xml:id="sect-macos-installation-recommended-notes"> + <title>Notes on the recommended approach</title> + <para> + This section goes into a little more detail on the recommended + approach. You don't need to understand it to run the installer, + but it can serve as a helpful reference if you run into trouble. + </para> + <orderedlist> + <listitem> + <para> + In order to compose user-writable locations into the new + read-only system root, Apple introduced a new concept called + <literal>firmlinks</literal>, which it describes as a + "bi-directional wormhole" between two filesystems. You can + see the current firmlinks in <filename>/usr/share/firmlinks</filename>. + Unfortunately, firmlinks aren't (currently?) user-configurable. + </para> + + <para> + For special cases like NFS mount points or package manager roots, + <link xlink:href="https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man5/synthetic.conf.5.html">synthetic.conf(5)</link> + supports limited user-controlled file-creation (of symlinks, + and synthetic empty directories) at <filename>/</filename>. 
+ To create a synthetic empty directory for mounting at <filename>/nix</filename>, + add the following line to <filename>/etc/synthetic.conf</filename> + (create it if necessary): + </para> + + <screen>nix</screen> + </listitem> + + <listitem> + <para> + This configuration is applied at boot time, but you can use + <command>apfs.util</command> to trigger creation (not deletion) + of new entries without a reboot: + </para> + + <screen>alice$ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B</screen> + </listitem> + + <listitem> + <para> + Create the new APFS volume with diskutil: + </para> + + <screen>alice$ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix</screen> + </listitem> + + <listitem> + <para> + Using <command>vifs</command>, add the new mount to + <filename>/etc/fstab</filename>. If it doesn't already have + other entries, it should look something like: + </para> + +<screen> +# +# Warning - this file should only be modified with vifs(8) +# +# Failure to do so is unsupported and may be destructive. +# +LABEL=Nix\040Store /nix apfs rw,nobrowse +</screen> + + <para> + The nobrowse setting will keep Spotlight from indexing this + volume, and keep it from showing up on your desktop. + </para> + </listitem> + </orderedlist> + </section> + +</section> + <section xml:id="sect-nix-install-pinned-version-url"> <title>Installing a pinned Nix version from a URL</title> diff --git a/doc/manual/installation/upgrading.xml b/doc/manual/installation/upgrading.xml index 30670d7fe..592f63895 100644 --- a/doc/manual/installation/upgrading.xml +++ b/doc/manual/installation/upgrading.xml @@ -17,6 +17,11 @@ <para> Single-user installations of Nix should run this: - <command>nix-channel --update; nix-env -iA nixpkgs.nix</command> + <command>nix-channel --update; nix-env -iA nixpkgs.nix nixpkgs.cacert</command> + </para> + + <para> + Multi-user Nix users on Linux should run this with sudo: + <command>nix-channel --update; nix-env -iA nixpkgs.nix nixpkgs.cacert; systemctl daemon-reload; systemctl restart nix-daemon</command> </para> </chapter> diff --git a/doc/manual/release-notes/rl-0.8.xml b/doc/manual/release-notes/rl-0.8.xml index 784b26c6b..825798fa9 100644 --- a/doc/manual/release-notes/rl-0.8.xml +++ b/doc/manual/release-notes/rl-0.8.xml @@ -8,7 +8,7 @@ <para>NOTE: the hashing scheme in Nix 0.8 changed (as detailed below). As a result, <command>nix-pull</command> manifests and channels built -for Nix 0.7 and below will now work anymore. However, the Nix +for Nix 0.7 and below will not work anymore. However, the Nix expression language has not changed, so you can still build from source. Also, existing user environments continue to work. 
Nix 0.8 will automatically upgrade the database schema of previous @@ -86,6 +86,7 @@ git mercurial jq + gmock ] ++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium @@ -150,15 +151,15 @@ installFlags = "sysconfdir=$(out)/etc"; + postInstall = '' + mkdir -p $doc/nix-support + echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products + ''; + doInstallCheck = true; installCheckFlags = "sysconfdir=$(out)/etc"; separateDebugInfo = true; - - preDist = '' - mkdir -p $doc/nix-support - echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products - ''; }) // { perl-bindings = with final; stdenv.mkDerivation { diff --git a/mk/programs.mk b/mk/programs.mk index d93df4468..3fa9685c3 100644 --- a/mk/programs.mk +++ b/mk/programs.mk @@ -35,24 +35,28 @@ define build-program $$(trace-ld) $(CXX) -o $$@ $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) $(1)_INSTALL_DIR ?= $$(bindir) - $(1)_INSTALL_PATH := $$($(1)_INSTALL_DIR)/$(1) - $$(eval $$(call create-dir, $$($(1)_INSTALL_DIR))) + ifdef $(1)_INSTALL_DIR - install: $(DESTDIR)$$($(1)_INSTALL_PATH) + $(1)_INSTALL_PATH := $$($(1)_INSTALL_DIR)/$(1) - ifeq ($(BUILD_SHARED_LIBS), 1) + $$(eval $$(call create-dir, $$($(1)_INSTALL_DIR))) - _libs_final := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_INSTALL_PATH)) + install: $(DESTDIR)$$($(1)_INSTALL_PATH) - $(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_OBJS) $$(_libs_final) | $(DESTDIR)$$($(1)_INSTALL_DIR)/ + ifeq ($(BUILD_SHARED_LIBS), 1) + + _libs_final := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_INSTALL_PATH)) + + $(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_OBJS) $$(_libs_final) | $(DESTDIR)$$($(1)_INSTALL_DIR)/ $$(trace-ld) $(CXX) -o $$@ $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED)) - else + else - $(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_PATH) | $(DESTDIR)$$($(1)_INSTALL_DIR)/ + $(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_PATH) | $(DESTDIR)$$($(1)_INSTALL_DIR)/ install -t $(DESTDIR)$$($(1)_INSTALL_DIR) $$< + endif endif # Propagate CFLAGS and CXXFLAGS to the individual object files. @@ -76,4 +80,10 @@ define build-program programs-list += $$($(1)_PATH) clean-files += $$($(1)_PATH) $$(_d)/*.o $$(_d)/.*.dep $$($(1)_DEPS) $$($(1)_OBJS) dist-files += $$(_srcs) + + # Phony target to run this program (typically as a dependency of 'check'). 
+ .PHONY: $(1)_RUN + $(1)_RUN: $$($(1)_PATH) + $(trace-test) $$($(1)_PATH) + endef diff --git a/mk/tracing.mk b/mk/tracing.mk index 13912d3c7..54c77ab60 100644 --- a/mk/tracing.mk +++ b/mk/tracing.mk @@ -11,6 +11,7 @@ ifeq ($(V), 0) trace-javac = @echo " JAVAC " $@; trace-jar = @echo " JAR " $@; trace-mkdir = @echo " MKDIR " $@; + trace-test = @echo " TEST " $@; suppress = @ diff --git a/nix-rust/local.mk b/nix-rust/local.mk index 1e006e500..e4bfde31b 100644 --- a/nix-rust/local.mk +++ b/nix-rust/local.mk @@ -41,5 +41,5 @@ ifneq ($(OS), Darwin) check: rust-tests rust-tests: - cd nix-rust && CARGO_HOME=$$(if [[ -d vendor ]]; then echo vendor; fi) cargo test --release $$(if [[ -d vendor ]]; then echo --offline; fi) + $(trace-test) cd nix-rust && CARGO_HOME=$$(if [[ -d vendor ]]; then echo vendor; fi) cargo test --release $$(if [[ -d vendor ]]; then echo --offline; fi) endif diff --git a/scripts/create-darwin-volume.sh b/scripts/create-darwin-volume.sh new file mode 100755 index 000000000..dac30d72d --- /dev/null +++ b/scripts/create-darwin-volume.sh @@ -0,0 +1,185 @@ +#!/bin/sh +set -e + +root_disk() { + diskutil info -plist / +} + +apfs_volumes_for() { + disk=$1 + diskutil apfs list -plist "$disk" +} + +disk_identifier() { + xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" 2>/dev/null +} + +volume_list_true() { + key=$1 + xpath "/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='$key']/following-sibling::true[1]" 2> /dev/null +} + +volume_get_string() { + key=$1 i=$2 + xpath "/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict[$i]/key[text()='$key']/following-sibling::string[1]/text()" 2> /dev/null +} + +find_nix_volume() { + disk=$1 + i=1 + volumes=$(apfs_volumes_for "$disk") + while true; do + name=$(echo "$volumes" | volume_get_string "Name" "$i") + if [ -z "$name" ]; then + break + fi + case "$name" in + [Nn]ix*) + echo "$name" + break + ;; + esac + i=$((i+1)) + done +} + +test_fstab() { + grep -q "/nix apfs rw" /etc/fstab 2>/dev/null +} + +test_nix_symlink() { + [ -L "/nix" ] || grep -q "^nix." /etc/synthetic.conf 2>/dev/null +} + +test_synthetic_conf() { + grep -q "^nix$" /etc/synthetic.conf 2>/dev/null +} + +test_nix() { + test -d "/nix" +} + +test_t2_chip_present(){ + # Use xartutil to see if system has a t2 chip. + # + # This isn't well-documented on its own; until it is, + # let's keep track of knowledge/assumptions. + # + # Warnings: + # - Don't search "xart" if porn will cause you trouble :) + # - Other xartutil flags do dangerous things. Don't run them + # naively. If you must, search "xartutil" first. + # + # Assumptions: + # - the "xART session seeds recovery utility" + # appears to interact with xartstorageremoted + # - `sudo xartutil --list` lists xART sessions + # and their seeds and exits 0 if successful. If + # not, it exits 1 and prints an error such as: + # xartutil: ERROR: No supported link to the SEP present + # - xART sessions/seeds are present when a T2 chip is + # (and not, otherwise) + # - the presence of a T2 chip means a newly-created + # volume on the primary drive will be + # encrypted at rest + # - all together: `sudo xartutil --list` + # should exit 0 if a new Nix Store volume will + # be encrypted at rest, and exit 1 if not. 
+ sudo xartutil --list >/dev/null 2>/dev/null +} + +test_filevault_in_use() { + disk=$1 + # list vols on disk | get value of Filevault key | value is true + apfs_volumes_for "$disk" | volume_list_true FileVault | grep -q true +} + +# use after error msg for conditions we don't understand +suggest_report_error(){ + # ex "error: something sad happened :(" >&2 + echo " please report this @ https://github.com/nixos/nix/issues" >&2 +} + +main() { + ( + echo "" + echo " ------------------------------------------------------------------ " + echo " | This installer will create a volume for the nix store and |" + echo " | configure it to mount at /nix. Follow these steps to uninstall. |" + echo " ------------------------------------------------------------------ " + echo "" + echo " 1. Remove the entry from fstab using 'sudo vifs'" + echo " 2. Destroy the data volume using 'diskutil apfs deleteVolume'" + echo " 3. Remove the 'nix' line from /etc/synthetic.conf or the file" + echo "" + ) >&2 + + if test_nix_symlink; then + echo "error: /nix is a symlink, please remove it and make sure it's not in synthetic.conf (in which case a reboot is required)" >&2 + echo " /nix -> $(readlink "/nix")" >&2 + exit 2 + fi + + if ! test_synthetic_conf; then + echo "Configuring /etc/synthetic.conf..." >&2 + echo nix | sudo tee -a /etc/synthetic.conf + if ! test_synthetic_conf; then + echo "error: failed to configure synthetic.conf;" >&2 + suggest_report_error + exit 1 + fi + fi + + if ! test_nix; then + echo "Creating mountpoint for /nix..." >&2 + /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B || true + if ! test_nix; then + sudo mkdir -p /nix 2>/dev/null || true + fi + if ! test_nix; then + echo "error: failed to bootstrap /nix; if a reboot doesn't help," >&2 + suggest_report_error + exit 1 + fi + fi + + disk=$(root_disk | disk_identifier) + volume=$(find_nix_volume "$disk") + if [ -z "$volume" ]; then + echo "Creating a Nix Store volume..." >&2 + + if test_filevault_in_use "$disk"; then + # TODO: Not sure if it's in-scope now, but `diskutil apfs list` + # shows both filevault and encrypted at rest status, and it + # may be the more semantic way to test for this? It'll show + # `FileVault: No (Encrypted at rest)` + # `FileVault: No` + # `FileVault: Yes (Unlocked)` + # and so on. + if test_t2_chip_present; then + echo "warning: boot volume is FileVault-encrypted, but the Nix store volume" >&2 + echo " is only encrypted at rest." >&2 + echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2 + else + echo "error: refusing to create Nix store volume because the boot volume is" >&2 + echo " FileVault encrypted, but encryption-at-rest is not available." >&2 + echo " Manually create a volume for the store and re-run this script." >&2 + echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2 + exit 1 + fi + fi + + sudo diskutil apfs addVolume "$disk" APFS 'Nix Store' -mountpoint /nix + volume="Nix Store" + else + echo "Using existing '$volume' volume" >&2 + fi + + if ! test_fstab; then + echo "Configuring /etc/fstab..." 
>&2 + label=$(echo "$volume" | sed 's/ /\\040/g') + printf "\$a\nLABEL=%s /nix apfs rw,nobrowse\n.\nwq\n" "$label" | EDITOR=ed sudo vifs + fi +} + +main "$@" diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh index a0f1deb98..157e8ddb4 100644 --- a/scripts/install-multi-user.sh +++ b/scripts/install-multi-user.sh @@ -20,15 +20,18 @@ readonly GREEN='\033[32m' readonly GREEN_UL='\033[4;32m' readonly RED='\033[31m' -readonly NIX_USER_COUNT="32" +# installer allows overriding build user count to speed up installation +# as creating each user takes non-trivial amount of time on macos +readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32} readonly NIX_BUILD_GROUP_ID="30000" readonly NIX_BUILD_GROUP_NAME="nixbld" readonly NIX_FIRST_BUILD_UID="30001" # Please don't change this. We don't support it, because the # default shell profile that comes with Nix doesn't support it. readonly NIX_ROOT="/nix" +readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-} -readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc") +readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv") readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix" readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh" @@ -450,9 +453,11 @@ create_directories() { } place_channel_configuration() { - echo "https://nixos.org/channels/nixpkgs-unstable nixpkgs" > "$SCRATCH/.nix-channels" - _sudo "to set up the default system channel (part 1)" \ - install -m 0664 "$SCRATCH/.nix-channels" "$ROOT_HOME/.nix-channels" + if [ -z "${NIX_INSTALLER_NO_CHANNEL_ADD:-}" ]; then + echo "https://nixos.org/channels/nixpkgs-unstable nixpkgs" > "$SCRATCH/.nix-channels" + _sudo "to set up the default system channel (part 1)" \ + install -m 0664 "$SCRATCH/.nix-channels" "$ROOT_HOME/.nix-channels" + fi } welcome_to_nix() { @@ -634,18 +639,20 @@ setup_default_profile() { export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt fi - # Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call, - # otherwise it will be lost in environments where sudo doesn't pass - # all the environment variables by default. - _sudo "to update the default channel in the default profile" \ - HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \ - || channel_update_failed=1 - + if [ -z "${NIX_INSTALLER_NO_CHANNEL_ADD:-}" ]; then + # Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call, + # otherwise it will be lost in environments where sudo doesn't pass + # all the environment variables by default. 
+ _sudo "to update the default channel in the default profile" \ + HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \ + || channel_update_failed=1 + fi } place_nix_configuration() { cat <<EOF > "$SCRATCH/nix.conf" +$NIX_EXTRA_CONF build-users-group = $NIX_BUILD_GROUP_NAME EOF _sudo "to place the default nix daemon configuration (part 2)" \ diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh index e00708f6c..826ca8b8c 100644 --- a/scripts/install-nix-from-closure.sh +++ b/scripts/install-nix-from-closure.sh @@ -40,29 +40,85 @@ elif [ "$(uname -s)" = "Linux" ] && [ -e /run/systemd/system ]; then fi INSTALL_MODE=no-daemon -# Trivially handle the --daemon / --no-daemon options -if [ "x${1:-}" = "x--no-daemon" ]; then - INSTALL_MODE=no-daemon -elif [ "x${1:-}" = "x--daemon" ]; then - INSTALL_MODE=daemon -elif [ "x${1:-}" != "x" ]; then - ( - echo "Nix Installer [--daemon|--no-daemon]" - - echo "Choose installation method." - echo "" - echo " --daemon: Installs and configures a background daemon that manages the store," - echo " providing multi-user support and better isolation for local builds." - echo " Both for security and reproducibility, this method is recommended if" - echo " supported on your platform." - echo " See https://nixos.org/nix/manual/#sect-multi-user-installation" - echo "" - echo " --no-daemon: Simple, single-user installation that does not require root and is" - echo " trivial to uninstall." - echo " (default)" - echo "" - ) >&2 - exit +CREATE_DARWIN_VOLUME=0 +# handle the command line flags +while [ $# -gt 0 ]; do + case $1 in + --daemon) + INSTALL_MODE=daemon;; + --no-daemon) + INSTALL_MODE=no-daemon;; + --no-channel-add) + export NIX_INSTALLER_NO_CHANNEL_ADD=1;; + --daemon-user-count) + export NIX_USER_COUNT=$2 + shift;; + --no-modify-profile) + NIX_INSTALLER_NO_MODIFY_PROFILE=1;; + --darwin-use-unencrypted-nix-store-volume) + CREATE_DARWIN_VOLUME=1;; + --nix-extra-conf-file) + export NIX_EXTRA_CONF="$(cat $2)" + shift;; + *) + ( + echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--darwin-use-unencrypted-nix-store-volume] [--nix-extra-conf-file FILE]" + + echo "Choose installation method." + echo "" + echo " --daemon: Installs and configures a background daemon that manages the store," + echo " providing multi-user support and better isolation for local builds." + echo " Both for security and reproducibility, this method is recommended if" + echo " supported on your platform." + echo " See https://nixos.org/nix/manual/#sect-multi-user-installation" + echo "" + echo " --no-daemon: Simple, single-user installation that does not require root and is" + echo " trivial to uninstall." + echo " (default)" + echo "" + echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default." + echo "" + echo " --no-modify-profile: Skip channel installation. When not provided nixpkgs-unstable" + echo " is installed by default." + echo "" + echo " --daemon-user-count: Number of build users to create. Defaults to 32." + echo "" + echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix.conf" + echo "" + ) >&2 + + # darwin and Catalina+ + if [ "$(uname -s)" = "Darwin" ] && [ "$macos_major" -gt 14 ]; then + ( + echo " --darwin-use-unencrypted-nix-store-volume: Create an APFS volume for the Nix" + echo " store and mount it at /nix. 
This is the recommended way to create" + echo " /nix with a read-only / on macOS >=10.15." + echo " See: https://nixos.org/nix/manual/#sect-macos-installation" + echo "" + ) >&2 + fi + exit;; + esac + shift +done + +if [ "$(uname -s)" = "Darwin" ]; then + if [ "$CREATE_DARWIN_VOLUME" = 1 ]; then + printf '\e[1;31mCreating volume and mountpoint /nix.\e[0m\n' + "$self/create-darwin-volume.sh" + fi + + info=$(diskutil info -plist / | xpath "/plist/dict/key[text()='Writable']/following-sibling::true[1]" 2> /dev/null) + if ! [ -e $dest ] && [ -n "$info" ] && [ "$macos_major" -gt 14 ]; then + ( + echo "" + echo "Installing on macOS >=10.15 requires relocating the store to an apfs volume." + echo "Use sh <(curl https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume or run the preparation steps manually." + echo "See https://nixos.org/nix/manual/#sect-macos-installation" + echo "" + ) >&2 + exit 1 + fi fi if [ "$INSTALL_MODE" = "daemon" ]; then @@ -130,13 +186,15 @@ if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then fi # Subscribe the user to the Nixpkgs channel and fetch it. -if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then - $nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable -fi -if [ -z "$_NIX_INSTALLER_TEST" ]; then - if ! $nix/bin/nix-channel --update nixpkgs; then - echo "Fetching the nixpkgs channel failed. (Are you offline?)" - echo "To try again later, run \"nix-channel --update nixpkgs\"." +if [ -z "$NIX_INSTALLER_NO_CHANNEL_ADD" ]; then + if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then + $nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable + fi + if [ -z "$_NIX_INSTALLER_TEST" ]; then + if ! $nix/bin/nix-channel --update nixpkgs; then + echo "Fetching the nixpkgs channel failed. (Are you offline?)" + echo "To try again later, run \"nix-channel --update nixpkgs\"." + fi fi fi @@ -155,6 +213,17 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then break fi done + for i in .zshenv .zshrc; do + fn="$HOME/$i" + if [ -w "$fn" ]; then + if ! grep -q "$p" "$fn"; then + echo "modifying $fn..." >&2 + echo "if [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn" + fi + added=1 + break + fi + done fi if [ -z "$added" ]; then diff --git a/scripts/install.in b/scripts/install.in index 6709f00d4..1d26c4ff0 100644 --- a/scripts/install.in +++ b/scripts/install.in @@ -36,7 +36,9 @@ tarball="$tmpDir/$(basename "$tmpDir/nix-@nixVersion@-$system.tar.xz")" require_util curl "download the binary tarball" require_util tar "unpack the binary tarball" -require_util xz "unpack the binary tarball" +if [ "$(uname -s)" != "Darwin" ]; then + require_util xz "unpack the binary tarball" +fi echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..." 
curl -L "$url" -o "$tarball" || oops "failed to download '$url'" diff --git a/src/libstore/build.cc b/src/libstore/build.cc index 147093fae..a2b16f95c 100644 --- a/src/libstore/build.cc +++ b/src/libstore/build.cc @@ -507,9 +507,10 @@ private: Path fnUserLock; AutoCloseFD fdUserLock; + bool isEnabled = false; string user; - uid_t uid; - gid_t gid; + uid_t uid = 0; + gid_t gid = 0; std::vector<gid_t> supplementaryGIDs; public: @@ -522,7 +523,9 @@ public: uid_t getGID() { assert(gid); return gid; } std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; } - bool enabled() { return uid != 0; } + bool findFreeUser(); + + bool enabled() { return isEnabled; } }; @@ -530,6 +533,11 @@ public: UserLock::UserLock() { assert(settings.buildUsersGroup != ""); + createDirs(settings.nixStateDir + "/userpool"); +} + +bool UserLock::findFreeUser() { + if (enabled()) return true; /* Get the members of the build-users-group. */ struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str()); @@ -559,7 +567,6 @@ UserLock::UserLock() throw Error(format("the user '%1%' in the group '%2%' does not exist") % i % settings.buildUsersGroup); - createDirs(settings.nixStateDir + "/userpool"); fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str(); @@ -590,16 +597,13 @@ UserLock::UserLock() supplementaryGIDs.resize(ngroups); #endif - return; + isEnabled = true; + return true; } } - - throw Error(format("all build users are currently in use; " - "consider creating additional users and adding them to the '%1%' group") - % settings.buildUsersGroup); + return false; } - void UserLock::kill() { killUser(uid); @@ -928,6 +932,7 @@ private: void closureRepaired(); void inputsRealised(); void tryToBuild(); + void tryLocalBuild(); void buildDone(); /* Is the build hook willing to perform the build? */ @@ -999,6 +1004,8 @@ private: Goal::amDone(result); } + void started(); + void done(BuildResult::Status status, const string & msg = ""); StorePathSet exportReferences(const StorePathSet & storePaths); @@ -1386,6 +1393,19 @@ void DerivationGoal::inputsRealised() result = BuildResult(); } +void DerivationGoal::started() { + auto msg = fmt( + buildMode == bmRepair ? "repairing outputs of '%s'" : + buildMode == bmCheck ? "checking outputs of '%s'" : + nrRounds > 1 ? "building '%s' (round %d/%d)" : + "building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds); + fmt("building '%s'", worker.store.printStorePath(drvPath)); + if (hook) msg += fmt(" on '%s'", machineName); + act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg, + Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds}); + mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds); + worker.updateProgress(); +} void DerivationGoal::tryToBuild() { @@ -1437,20 +1457,6 @@ void DerivationGoal::tryToBuild() supported for local builds. */ bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally(); - auto started = [&]() { - auto msg = fmt( - buildMode == bmRepair ? "repairing outputs of '%s'" : - buildMode == bmCheck ? "checking outputs of '%s'" : - nrRounds > 1 ? "building '%s' (round %d/%d)" : - "building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds); - fmt("building '%s'", worker.store.printStorePath(drvPath)); - if (hook) msg += fmt(" on '%s'", machineName); - act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg, - Logger::Fields{worker.store.printStorePath(drvPath), hook ? 
machineName : "", curRound, nrRounds}); - mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds); - worker.updateProgress(); - }; - /* Is the build hook willing to accept this job? */ if (!buildLocally) { switch (tryBuildHook()) { @@ -1483,6 +1489,34 @@ void DerivationGoal::tryToBuild() return; } + state = &DerivationGoal::tryLocalBuild; + worker.wakeUp(shared_from_this()); +} + +void DerivationGoal::tryLocalBuild() { + + /* If `build-users-group' is not empty, then we have to build as + one of the members of that group. */ + if (settings.buildUsersGroup != "" && getuid() == 0) { +#if defined(__linux__) || defined(__APPLE__) + if (!buildUser) buildUser = std::make_unique<UserLock>(); + + if (buildUser->findFreeUser()) { + /* Make sure that no other processes are executing under this + uid. */ + buildUser->kill(); + } else { + debug("waiting for build users"); + worker.waitForAWhile(shared_from_this()); + return; + } +#else + /* Don't know how to block the creation of setuid/setgid + binaries on this platform. */ + throw Error("build users are not supported on this platform for security reasons"); +#endif + } + try { /* Okay, we have to build. */ @@ -1943,22 +1977,6 @@ void DerivationGoal::startBuilder() #endif } - /* If `build-users-group' is not empty, then we have to build as - one of the members of that group. */ - if (settings.buildUsersGroup != "" && getuid() == 0) { -#if defined(__linux__) || defined(__APPLE__) - buildUser = std::make_unique<UserLock>(); - - /* Make sure that no other processes are executing under this - uid. */ - buildUser->kill(); -#else - /* Don't know how to block the creation of setuid/setgid - binaries on this platform. */ - throw Error("build users are not supported on this platform for security reasons"); -#endif - } - /* Create a temporary directory where the build will take place. */ tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700); @@ -3155,7 +3173,7 @@ void DerivationGoal::runChild() // Only use nss functions to resolve hosts and // services. Don’t use it for anything else that may // be configured for this system. This limits the - // potential impurities introduced in fixed outputs. + // potential impurities introduced in fixed-outputs. writeFile(chrootRootDir + "/etc/nsswitch.conf", "hosts: files dns\nservices: files\n"); ss.push_back("/etc/services"); @@ -3681,7 +3699,8 @@ void DerivationGoal::registerOutputs() /* The output path should be a regular file without execute permission. */ if (!S_ISREG(st.st_mode) || (st.st_mode & S_IXUSR) != 0) throw BuildError( - format("output path '%1%' should be a non-executable regular file") % path); + format("output path '%1%' should be a non-executable regular file " + "since recursive hashing is not enabled (outputHashMode=flat)") % path); } /* Check the hash. 
In hash mode, move the path produced by @@ -4819,7 +4838,7 @@ void Worker::waitForInput() if (!waitingForAWhile.empty()) { useTimeout = true; if (lastWokenUp == steady_time_point::min()) - printError("waiting for locks or build slots..."); + printError("waiting for locks, build slots or build users..."); if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before; timeout = std::max(1L, (long) std::chrono::duration_cast<std::chrono::seconds>( diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index a0a2d850e..bee94cbd8 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -130,7 +130,7 @@ bool Settings::isExperimentalFeatureEnabled(const std::string & name) void Settings::requireExperimentalFeature(const std::string & name) { if (!isExperimentalFeatureEnabled(name)) - throw Error("experimental Nix feature '%s' is disabled", name); + throw Error("experimental Nix feature '%1%' is disabled; use '--experimental-features %1%' to override", name); } bool Settings::isWSL1() diff --git a/src/libutil/tests/hash.cc b/src/libutil/tests/hash.cc new file mode 100644 index 000000000..7cb439817 --- /dev/null +++ b/src/libutil/tests/hash.cc @@ -0,0 +1,80 @@ +#include "hash.hh" +#include <gtest/gtest.h> + +namespace nix { + + /* ---------------------------------------------------------------------------- + * hashString + * --------------------------------------------------------------------------*/ + + TEST(hashString, testKnownMD5Hashes1) { + // values taken from: https://tools.ietf.org/html/rfc1321 + auto s1 = ""; + auto hash = hashString(HashType::htMD5, s1); + ASSERT_EQ(hash.to_string(Base::Base16), "md5:d41d8cd98f00b204e9800998ecf8427e"); + } + + TEST(hashString, testKnownMD5Hashes2) { + // values taken from: https://tools.ietf.org/html/rfc1321 + auto s2 = "abc"; + auto hash = hashString(HashType::htMD5, s2); + ASSERT_EQ(hash.to_string(Base::Base16), "md5:900150983cd24fb0d6963f7d28e17f72"); + } + + TEST(hashString, testKnownSHA1Hashes1) { + // values taken from: https://tools.ietf.org/html/rfc3174 + auto s = "abc"; + auto hash = hashString(HashType::htSHA1, s); + ASSERT_EQ(hash.to_string(Base::Base16),"sha1:a9993e364706816aba3e25717850c26c9cd0d89d"); + } + + TEST(hashString, testKnownSHA1Hashes2) { + // values taken from: https://tools.ietf.org/html/rfc3174 + auto s = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"; + auto hash = hashString(HashType::htSHA1, s); + ASSERT_EQ(hash.to_string(Base::Base16),"sha1:84983e441c3bd26ebaae4aa1f95129e5e54670f1"); + } + + TEST(hashString, testKnownSHA256Hashes1) { + // values taken from: https://tools.ietf.org/html/rfc4634 + auto s = "abc"; + + auto hash = hashString(HashType::htSHA256, s); + ASSERT_EQ(hash.to_string(Base::Base16), + "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"); + } + + TEST(hashString, testKnownSHA256Hashes2) { + // values taken from: https://tools.ietf.org/html/rfc4634 + auto s = "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"; + auto hash = hashString(HashType::htSHA256, s); + ASSERT_EQ(hash.to_string(Base::Base16), + "sha256:248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1"); + } + + TEST(hashString, testKnownSHA512Hashes1) { + // values taken from: https://tools.ietf.org/html/rfc4634 + auto s = "abc"; + auto hash = hashString(HashType::htSHA512, s); + ASSERT_EQ(hash.to_string(Base::Base16), + "sha512:ddaf35a193617abacc417349ae20413112e6fa4e89a9" + "7ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd" + 
"454d4423643ce80e2a9ac94fa54ca49f"); + } + + TEST(hashString, testKnownSHA512Hashes2) { + // values taken from: https://tools.ietf.org/html/rfc4634 + auto s = "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"; + + auto hash = hashString(HashType::htSHA512, s); + ASSERT_EQ(hash.to_string(Base::Base16), + "sha512:8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa1" + "7299aeadb6889018501d289e4900f7e4331b99dec4b5433a" + "c7d329eeb6dd26545e96e55b874be909"); + } + + TEST(hashString, hashingWithUnknownAlgoExits) { + auto s = "unknown"; + ASSERT_DEATH(hashString(HashType::htUnknown, s), ""); + } +} diff --git a/src/libutil/tests/json.cc b/src/libutil/tests/json.cc new file mode 100644 index 000000000..dea73f53a --- /dev/null +++ b/src/libutil/tests/json.cc @@ -0,0 +1,193 @@ +#include "json.hh" +#include <gtest/gtest.h> +#include <sstream> + +namespace nix { + + /* ---------------------------------------------------------------------------- + * toJSON + * --------------------------------------------------------------------------*/ + + TEST(toJSON, quotesCharPtr) { + const char* input = "test"; + std::stringstream out; + toJSON(out, input); + + ASSERT_EQ(out.str(), "\"test\""); + } + + TEST(toJSON, quotesStdString) { + std::string input = "test"; + std::stringstream out; + toJSON(out, input); + + ASSERT_EQ(out.str(), "\"test\""); + } + + TEST(toJSON, convertsNullptrtoNull) { + auto input = nullptr; + std::stringstream out; + toJSON(out, input); + + ASSERT_EQ(out.str(), "null"); + } + + TEST(toJSON, convertsNullToNull) { + const char* input = 0; + std::stringstream out; + toJSON(out, input); + + ASSERT_EQ(out.str(), "null"); + } + + + TEST(toJSON, convertsFloat) { + auto input = 1.024f; + std::stringstream out; + toJSON(out, input); + + ASSERT_EQ(out.str(), "1.024"); + } + + TEST(toJSON, convertsDouble) { + const double input = 1.024; + std::stringstream out; + toJSON(out, input); + + ASSERT_EQ(out.str(), "1.024"); + } + + TEST(toJSON, convertsBool) { + auto input = false; + std::stringstream out; + toJSON(out, input); + + ASSERT_EQ(out.str(), "false"); + } + + TEST(toJSON, quotesTab) { + std::stringstream out; + toJSON(out, "\t"); + + ASSERT_EQ(out.str(), "\"\\t\""); + } + + TEST(toJSON, quotesNewline) { + std::stringstream out; + toJSON(out, "\n"); + + ASSERT_EQ(out.str(), "\"\\n\""); + } + + TEST(toJSON, quotesCreturn) { + std::stringstream out; + toJSON(out, "\r"); + + ASSERT_EQ(out.str(), "\"\\r\""); + } + + TEST(toJSON, quotesCreturnNewLine) { + std::stringstream out; + toJSON(out, "\r\n"); + + ASSERT_EQ(out.str(), "\"\\r\\n\""); + } + + TEST(toJSON, quotesDoublequotes) { + std::stringstream out; + toJSON(out, "\""); + + ASSERT_EQ(out.str(), "\"\\\"\""); + } + + TEST(toJSON, substringEscape) { + std::stringstream out; + const char *s = "foo\t"; + toJSON(out, s+3, s + strlen(s)); + + ASSERT_EQ(out.str(), "\"\\t\""); + } + + /* ---------------------------------------------------------------------------- + * JSONObject + * --------------------------------------------------------------------------*/ + + TEST(JSONObject, emptyObject) { + std::stringstream out; + { + JSONObject t(out); + } + ASSERT_EQ(out.str(), "{}"); + } + + TEST(JSONObject, objectWithList) { + std::stringstream out; + { + JSONObject t(out); + auto l = t.list("list"); + l.elem("element"); + } + ASSERT_EQ(out.str(), R"#({"list":["element"]})#"); + } + + TEST(JSONObject, objectWithListIndent) { + std::stringstream out; + { + JSONObject t(out, true); + auto l = 
t.list("list"); + l.elem("element"); + } + ASSERT_EQ(out.str(), +R"#({ + "list": [ + "element" + ] +})#"); + } + + TEST(JSONObject, objectWithPlaceholderAndList) { + std::stringstream out; + { + JSONObject t(out); + auto l = t.placeholder("list"); + l.list().elem("element"); + } + + ASSERT_EQ(out.str(), R"#({"list":["element"]})#"); + } + + TEST(JSONObject, objectWithPlaceholderAndObject) { + std::stringstream out; + { + JSONObject t(out); + auto l = t.placeholder("object"); + l.object().attr("key", "value"); + } + + ASSERT_EQ(out.str(), R"#({"object":{"key":"value"}})#"); + } + + /* ---------------------------------------------------------------------------- + * JSONList + * --------------------------------------------------------------------------*/ + + TEST(JSONList, empty) { + std::stringstream out; + { + JSONList l(out); + } + ASSERT_EQ(out.str(), R"#([])#"); + } + + TEST(JSONList, withElements) { + std::stringstream out; + { + JSONList l(out); + l.elem("one"); + l.object(); + l.placeholder().write("three"); + } + ASSERT_EQ(out.str(), R"#(["one",{},"three"])#"); + } +} + diff --git a/src/libutil/tests/local.mk b/src/libutil/tests/local.mk new file mode 100644 index 000000000..a297edb64 --- /dev/null +++ b/src/libutil/tests/local.mk @@ -0,0 +1,15 @@ +check: libutil-tests_RUN + +programs += libutil-tests + +libutil-tests_DIR := $(d) + +libutil-tests_INSTALL_DIR := + +libutil-tests_SOURCES := $(wildcard $(d)/*.cc) + +libutil-tests_CXXFLAGS += -I src/libutil + +libutil-tests_LIBS = libutil + +libutil-tests_LDFLAGS := $(GTEST_LIBS) diff --git a/src/libutil/tests/tests.cc b/src/libutil/tests/tests.cc new file mode 100644 index 000000000..8e77ccbe1 --- /dev/null +++ b/src/libutil/tests/tests.cc @@ -0,0 +1,589 @@ +#include "util.hh" +#include "types.hh" + +#include <gtest/gtest.h> + +namespace nix { + +/* ----------- tests for util.hh ------------------------------------------------*/ + + /* ---------------------------------------------------------------------------- + * absPath + * --------------------------------------------------------------------------*/ + + TEST(absPath, doesntChangeRoot) { + auto p = absPath("/"); + + ASSERT_EQ(p, "/"); + } + + + + + TEST(absPath, turnsEmptyPathIntoCWD) { + char cwd[PATH_MAX+1]; + auto p = absPath(""); + + ASSERT_EQ(p, getcwd((char*)&cwd, PATH_MAX)); + } + + TEST(absPath, usesOptionalBasePathWhenGiven) { + char _cwd[PATH_MAX+1]; + char* cwd = getcwd((char*)&_cwd, PATH_MAX); + + auto p = absPath("", cwd); + + ASSERT_EQ(p, cwd); + } + + TEST(absPath, isIdempotent) { + char _cwd[PATH_MAX+1]; + char* cwd = getcwd((char*)&_cwd, PATH_MAX); + auto p1 = absPath(cwd); + auto p2 = absPath(p1); + + ASSERT_EQ(p1, p2); + } + + + TEST(absPath, pathIsCanonicalised) { + auto path = "/some/path/with/trailing/dot/."; + auto p1 = absPath(path); + auto p2 = absPath(p1); + + ASSERT_EQ(p1, "/some/path/with/trailing/dot"); + ASSERT_EQ(p1, p2); + } + + /* ---------------------------------------------------------------------------- + * canonPath + * --------------------------------------------------------------------------*/ + + TEST(canonPath, removesTrailingSlashes) { + auto path = "/this/is/a/path//"; + auto p = canonPath(path); + + ASSERT_EQ(p, "/this/is/a/path"); + } + + TEST(canonPath, removesDots) { + auto path = "/this/./is/a/path/./"; + auto p = canonPath(path); + + ASSERT_EQ(p, "/this/is/a/path"); + } + + TEST(canonPath, removesDots2) { + auto path = "/this/a/../is/a////path/foo/.."; + auto p = canonPath(path); + + ASSERT_EQ(p, "/this/is/a/path"); + } + + 
TEST(canonPath, requiresAbsolutePath) { + ASSERT_ANY_THROW(canonPath(".")); + ASSERT_ANY_THROW(canonPath("..")); + ASSERT_ANY_THROW(canonPath("../")); + ASSERT_DEATH({ canonPath(""); }, "path != \"\""); + } + + /* ---------------------------------------------------------------------------- + * dirOf + * --------------------------------------------------------------------------*/ + + TEST(dirOf, returnsEmptyStringForRoot) { + auto p = dirOf("/"); + + ASSERT_EQ(p, "/"); + } + + TEST(dirOf, returnsFirstPathComponent) { + auto p1 = dirOf("/dir/"); + ASSERT_EQ(p1, "/dir"); + auto p2 = dirOf("/dir"); + ASSERT_EQ(p2, "/"); + auto p3 = dirOf("/dir/.."); + ASSERT_EQ(p3, "/dir"); + auto p4 = dirOf("/dir/../"); + ASSERT_EQ(p4, "/dir/.."); + } + + /* ---------------------------------------------------------------------------- + * baseNameOf + * --------------------------------------------------------------------------*/ + + TEST(baseNameOf, emptyPath) { + auto p1 = baseNameOf(""); + ASSERT_EQ(p1, ""); + } + + TEST(baseNameOf, pathOnRoot) { + auto p1 = baseNameOf("/dir"); + ASSERT_EQ(p1, "dir"); + } + + TEST(baseNameOf, relativePath) { + auto p1 = baseNameOf("dir/foo"); + ASSERT_EQ(p1, "foo"); + } + + TEST(baseNameOf, pathWithTrailingSlashRoot) { + auto p1 = baseNameOf("/"); + ASSERT_EQ(p1, ""); + } + + TEST(baseNameOf, trailingSlash) { + auto p1 = baseNameOf("/dir/"); + ASSERT_EQ(p1, "dir"); + } + + /* ---------------------------------------------------------------------------- + * isInDir + * --------------------------------------------------------------------------*/ + + TEST(isInDir, trivialCase) { + auto p1 = isInDir("/foo/bar", "/foo"); + ASSERT_EQ(p1, true); + } + + TEST(isInDir, notInDir) { + auto p1 = isInDir("/zes/foo/bar", "/foo"); + ASSERT_EQ(p1, false); + } + + // XXX: hm, bug or feature? :) Looking at the implementation + // this might be problematic. + TEST(isInDir, emptyDir) { + auto p1 = isInDir("/zes/foo/bar", ""); + ASSERT_EQ(p1, true); + } + + /* ---------------------------------------------------------------------------- + * isDirOrInDir + * --------------------------------------------------------------------------*/ + + TEST(isDirOrInDir, trueForSameDirectory) { + ASSERT_EQ(isDirOrInDir("/nix", "/nix"), true); + ASSERT_EQ(isDirOrInDir("/", "/"), true); + } + + TEST(isDirOrInDir, trueForEmptyPaths) { + ASSERT_EQ(isDirOrInDir("", ""), true); + } + + TEST(isDirOrInDir, falseForDisjunctPaths) { + ASSERT_EQ(isDirOrInDir("/foo", "/bar"), false); + } + + TEST(isDirOrInDir, relativePaths) { + ASSERT_EQ(isDirOrInDir("/foo/..", "/foo"), true); + } + + // XXX: while it is possible to use "." or ".." in the + // first argument this doesn't seem to work in the second. 
+ TEST(isDirOrInDir, DISABLED_shouldWork) { + ASSERT_EQ(isDirOrInDir("/foo/..", "/foo/."), true); + + } + + /* ---------------------------------------------------------------------------- + * pathExists + * --------------------------------------------------------------------------*/ + + TEST(pathExists, rootExists) { + ASSERT_TRUE(pathExists("/")); + } + + TEST(pathExists, cwdExists) { + ASSERT_TRUE(pathExists(".")); + } + + TEST(pathExists, bogusPathDoesNotExist) { + ASSERT_FALSE(pathExists("/home/schnitzel/darmstadt/pommes")); + } + + /* ---------------------------------------------------------------------------- + * concatStringsSep + * --------------------------------------------------------------------------*/ + + TEST(concatStringsSep, buildCommaSeparatedString) { + Strings strings; + strings.push_back("this"); + strings.push_back("is"); + strings.push_back("great"); + + ASSERT_EQ(concatStringsSep(",", strings), "this,is,great"); + } + + TEST(concatStringsSep, buildStringWithEmptySeparator) { + Strings strings; + strings.push_back("this"); + strings.push_back("is"); + strings.push_back("great"); + + ASSERT_EQ(concatStringsSep("", strings), "thisisgreat"); + } + + TEST(concatStringsSep, buildSingleString) { + Strings strings; + strings.push_back("this"); + + ASSERT_EQ(concatStringsSep(",", strings), "this"); + } + + /* ---------------------------------------------------------------------------- + * hasPrefix + * --------------------------------------------------------------------------*/ + + TEST(hasPrefix, emptyStringHasNoPrefix) { + ASSERT_FALSE(hasPrefix("", "foo")); + } + + TEST(hasPrefix, emptyStringIsAlwaysPrefix) { + ASSERT_TRUE(hasPrefix("foo", "")); + ASSERT_TRUE(hasPrefix("jshjkfhsadf", "")); + } + + TEST(hasPrefix, trivialCase) { + ASSERT_TRUE(hasPrefix("foobar", "foo")); + } + + /* ---------------------------------------------------------------------------- + * hasSuffix + * --------------------------------------------------------------------------*/ + + TEST(hasSuffix, emptyStringHasNoSuffix) { + ASSERT_FALSE(hasSuffix("", "foo")); + } + + TEST(hasSuffix, trivialCase) { + ASSERT_TRUE(hasSuffix("foo", "foo")); + ASSERT_TRUE(hasSuffix("foobar", "bar")); + } + + /* ---------------------------------------------------------------------------- + * base64Encode + * --------------------------------------------------------------------------*/ + + TEST(base64Encode, emptyString) { + ASSERT_EQ(base64Encode(""), ""); + } + + TEST(base64Encode, encodesAString) { + ASSERT_EQ(base64Encode("quod erat demonstrandum"), "cXVvZCBlcmF0IGRlbW9uc3RyYW5kdW0="); + } + + TEST(base64Encode, encodeAndDecode) { + auto s = "quod erat demonstrandum"; + auto encoded = base64Encode(s); + auto decoded = base64Decode(encoded); + + ASSERT_EQ(decoded, s); + } + + /* ---------------------------------------------------------------------------- + * base64Decode + * --------------------------------------------------------------------------*/ + + TEST(base64Decode, emptyString) { + ASSERT_EQ(base64Decode(""), ""); + } + + TEST(base64Decode, decodeAString) { + ASSERT_EQ(base64Decode("cXVvZCBlcmF0IGRlbW9uc3RyYW5kdW0="), "quod erat demonstrandum"); + } + + /* ---------------------------------------------------------------------------- + * toLower + * --------------------------------------------------------------------------*/ + + TEST(toLower, emptyString) { + ASSERT_EQ(toLower(""), ""); + } + + TEST(toLower, nonLetters) { + auto s = "!@(*$#)(@#=\\234_"; + ASSERT_EQ(toLower(s), s); + } + + // std::tolower() doesn't 
handle unicode characters. In the context of + // store paths this isn't relevant but doesn't hurt to record this behavior + // here. + TEST(toLower, umlauts) { + auto s = "ÄÖÜ"; + ASSERT_EQ(toLower(s), "ÄÖÜ"); + } + + /* ---------------------------------------------------------------------------- + * string2Float + * --------------------------------------------------------------------------*/ + + TEST(string2Float, emptyString) { + double n; + ASSERT_EQ(string2Float("", n), false); + } + + TEST(string2Float, trivialConversions) { + double n; + ASSERT_EQ(string2Float("1.0", n), true); + ASSERT_EQ(n, 1.0); + + ASSERT_EQ(string2Float("0.0", n), true); + ASSERT_EQ(n, 0.0); + + ASSERT_EQ(string2Float("-100.25", n), true); + ASSERT_EQ(n, (-100.25)); + } + + /* ---------------------------------------------------------------------------- + * string2Int + * --------------------------------------------------------------------------*/ + + TEST(string2Int, emptyString) { + double n; + ASSERT_EQ(string2Int("", n), false); + } + + TEST(string2Int, trivialConversions) { + double n; + ASSERT_EQ(string2Int("1", n), true); + ASSERT_EQ(n, 1); + + ASSERT_EQ(string2Int("0", n), true); + ASSERT_EQ(n, 0); + + ASSERT_EQ(string2Int("-100", n), true); + ASSERT_EQ(n, (-100)); + } + + /* ---------------------------------------------------------------------------- + * statusOk + * --------------------------------------------------------------------------*/ + + TEST(statusOk, zeroIsOk) { + ASSERT_EQ(statusOk(0), true); + ASSERT_EQ(statusOk(1), false); + } + + + /* ---------------------------------------------------------------------------- + * rewriteStrings + * --------------------------------------------------------------------------*/ + + TEST(rewriteStrings, emptyString) { + StringMap rewrites; + rewrites["this"] = "that"; + + ASSERT_EQ(rewriteStrings("", rewrites), ""); + } + + TEST(rewriteStrings, emptyRewrites) { + StringMap rewrites; + + ASSERT_EQ(rewriteStrings("this and that", rewrites), "this and that"); + } + + TEST(rewriteStrings, successfulRewrite) { + StringMap rewrites; + rewrites["this"] = "that"; + + ASSERT_EQ(rewriteStrings("this and that", rewrites), "that and that"); + } + + TEST(rewriteStrings, doesntOccur) { + StringMap rewrites; + rewrites["foo"] = "bar"; + + ASSERT_EQ(rewriteStrings("this and that", rewrites), "this and that"); + } + + /* ---------------------------------------------------------------------------- + * replaceStrings + * --------------------------------------------------------------------------*/ + + TEST(replaceStrings, emptyString) { + ASSERT_EQ(replaceStrings("", "this", "that"), ""); + ASSERT_EQ(replaceStrings("this and that", "", ""), "this and that"); + } + + TEST(replaceStrings, successfulReplace) { + ASSERT_EQ(replaceStrings("this and that", "this", "that"), "that and that"); + } + + TEST(replaceStrings, doesntOccur) { + ASSERT_EQ(replaceStrings("this and that", "foo", "bar"), "this and that"); + } + + /* ---------------------------------------------------------------------------- + * trim + * --------------------------------------------------------------------------*/ + + TEST(trim, emptyString) { + ASSERT_EQ(trim(""), ""); + } + + TEST(trim, removesWhitespace) { + ASSERT_EQ(trim("foo"), "foo"); + ASSERT_EQ(trim(" foo "), "foo"); + ASSERT_EQ(trim(" foo bar baz"), "foo bar baz"); + ASSERT_EQ(trim(" \t foo bar baz\n"), "foo bar baz"); + } + + /* ---------------------------------------------------------------------------- + * chomp + * 
--------------------------------------------------------------------------*/ + + TEST(chomp, emptyString) { + ASSERT_EQ(chomp(""), ""); + } + + TEST(chomp, removesWhitespace) { + ASSERT_EQ(chomp("foo"), "foo"); + ASSERT_EQ(chomp("foo "), "foo"); + ASSERT_EQ(chomp(" foo "), " foo"); + ASSERT_EQ(chomp(" foo bar baz "), " foo bar baz"); + ASSERT_EQ(chomp("\t foo bar baz\n"), "\t foo bar baz"); + } + + /* ---------------------------------------------------------------------------- + * quoteStrings + * --------------------------------------------------------------------------*/ + + TEST(quoteStrings, empty) { + Strings s = { }; + Strings expected = { }; + + ASSERT_EQ(quoteStrings(s), expected); + } + + TEST(quoteStrings, emptyStrings) { + Strings s = { "", "", "" }; + Strings expected = { "''", "''", "''" }; + ASSERT_EQ(quoteStrings(s), expected); + + } + + TEST(quoteStrings, trivialQuote) { + Strings s = { "foo", "bar", "baz" }; + Strings expected = { "'foo'", "'bar'", "'baz'" }; + + ASSERT_EQ(quoteStrings(s), expected); + } + + TEST(quoteStrings, quotedStrings) { + Strings s = { "'foo'", "'bar'", "'baz'" }; + Strings expected = { "''foo''", "''bar''", "''baz''" }; + + ASSERT_EQ(quoteStrings(s), expected); + } + + /* ---------------------------------------------------------------------------- + * tokenizeString + * --------------------------------------------------------------------------*/ + + TEST(tokenizeString, empty) { + Strings expected = { }; + + ASSERT_EQ(tokenizeString<Strings>(""), expected); + } + + TEST(tokenizeString, tokenizeSpacesWithDefaults) { + auto s = "foo bar baz"; + Strings expected = { "foo", "bar", "baz" }; + + ASSERT_EQ(tokenizeString<Strings>(s), expected); + } + + TEST(tokenizeString, tokenizeTabsWithDefaults) { + auto s = "foo\tbar\tbaz"; + Strings expected = { "foo", "bar", "baz" }; + + ASSERT_EQ(tokenizeString<Strings>(s), expected); + } + + TEST(tokenizeString, tokenizeTabsSpacesWithDefaults) { + auto s = "foo\t bar\t baz"; + Strings expected = { "foo", "bar", "baz" }; + + ASSERT_EQ(tokenizeString<Strings>(s), expected); + } + + TEST(tokenizeString, tokenizeTabsSpacesNewlineWithDefaults) { + auto s = "foo\t\n bar\t\n baz"; + Strings expected = { "foo", "bar", "baz" }; + + ASSERT_EQ(tokenizeString<Strings>(s), expected); + } + + TEST(tokenizeString, tokenizeTabsSpacesNewlineRetWithDefaults) { + auto s = "foo\t\n\r bar\t\n\r baz"; + Strings expected = { "foo", "bar", "baz" }; + + ASSERT_EQ(tokenizeString<Strings>(s), expected); + + auto s2 = "foo \t\n\r bar \t\n\r baz"; + Strings expected2 = { "foo", "bar", "baz" }; + + ASSERT_EQ(tokenizeString<Strings>(s2), expected2); + } + + TEST(tokenizeString, tokenizeWithCustomSep) { + auto s = "foo\n,bar\n,baz\n"; + Strings expected = { "foo\n", "bar\n", "baz\n" }; + + ASSERT_EQ(tokenizeString<Strings>(s, ","), expected); + } + + /* ---------------------------------------------------------------------------- + * get + * --------------------------------------------------------------------------*/ + + TEST(get, emptyContainer) { + StringMap s = { }; + auto expected = std::nullopt; + + ASSERT_EQ(get(s, "one"), expected); + } + + TEST(get, getFromContainer) { + StringMap s; + s["one"] = "yi"; + s["two"] = "er"; + auto expected = "yi"; + + ASSERT_EQ(get(s, "one"), expected); + } + + /* ---------------------------------------------------------------------------- + * filterANSIEscapes + * --------------------------------------------------------------------------*/ + + TEST(filterANSIEscapes, emptyString) { + auto s = ""; + auto 
expected = ""; + + ASSERT_EQ(filterANSIEscapes(s), expected); + } + + TEST(filterANSIEscapes, doesntChangePrintableChars) { + auto s = "09 2q304ruyhr slk2-19024 kjsadh sar f"; + + ASSERT_EQ(filterANSIEscapes(s), s); + } + + TEST(filterANSIEscapes, filtersColorCodes) { + auto s = "\u001b[30m A \u001b[31m B \u001b[32m C \u001b[33m D \u001b[0m"; + + ASSERT_EQ(filterANSIEscapes(s, true, 2), " A" ); + ASSERT_EQ(filterANSIEscapes(s, true, 3), " A " ); + ASSERT_EQ(filterANSIEscapes(s, true, 4), " A " ); + ASSERT_EQ(filterANSIEscapes(s, true, 5), " A B" ); + ASSERT_EQ(filterANSIEscapes(s, true, 8), " A B C" ); + } + + TEST(filterANSIEscapes, expandsTabs) { + auto s = "foo\tbar\tbaz"; + + ASSERT_EQ(filterANSIEscapes(s, true), "foo bar baz" ); + } +} diff --git a/src/libutil/tests/url.cc b/src/libutil/tests/url.cc new file mode 100644 index 000000000..80646ad3e --- /dev/null +++ b/src/libutil/tests/url.cc @@ -0,0 +1,266 @@ +#include "url.hh" +#include <gtest/gtest.h> + +namespace nix { + +/* ----------- tests for url.hh --------------------------------------------------*/ + + string print_map(std::map<string, string> m) { + std::map<string, string>::iterator it; + string s = "{ "; + for (it = m.begin(); it != m.end(); ++it) { + s += "{ "; + s += it->first; + s += " = "; + s += it->second; + s += " } "; + } + s += "}"; + return s; + } + + + std::ostream& operator<<(std::ostream& os, const ParsedURL& p) { + return os << "\n" + << "url: " << p.url << "\n" + << "base: " << p.base << "\n" + << "scheme: " << p.scheme << "\n" + << "authority: " << p.authority.value() << "\n" + << "path: " << p.path << "\n" + << "query: " << print_map(p.query) << "\n" + << "fragment: " << p.fragment << "\n"; + } + + TEST(parseURL, parsesSimpleHttpUrl) { + auto s = "http://www.example.org/file.tar.gz"; + auto parsed = parseURL(s); + + ParsedURL expected { + .url = "http://www.example.org/file.tar.gz", + .base = "http://www.example.org/file.tar.gz", + .scheme = "http", + .authority = "www.example.org", + .path = "/file.tar.gz", + .query = (StringMap) { }, + .fragment = "", + }; + + ASSERT_EQ(parsed, expected); + } + + TEST(parseURL, parsesSimpleHttpsUrl) { + auto s = "https://www.example.org/file.tar.gz"; + auto parsed = parseURL(s); + + ParsedURL expected { + .url = "https://www.example.org/file.tar.gz", + .base = "https://www.example.org/file.tar.gz", + .scheme = "https", + .authority = "www.example.org", + .path = "/file.tar.gz", + .query = (StringMap) { }, + .fragment = "", + }; + + ASSERT_EQ(parsed, expected); + } + + TEST(parseURL, parsesSimpleHttpUrlWithQueryAndFragment) { + auto s = "https://www.example.org/file.tar.gz?download=fast&when=now#hello"; + auto parsed = parseURL(s); + + ParsedURL expected { + .url = "https://www.example.org/file.tar.gz", + .base = "https://www.example.org/file.tar.gz", + .scheme = "https", + .authority = "www.example.org", + .path = "/file.tar.gz", + .query = (StringMap) { { "download", "fast" }, { "when", "now" } }, + .fragment = "hello", + }; + + ASSERT_EQ(parsed, expected); + } + + TEST(parseURL, parsesSimpleHttpUrlWithComplexFragment) { + auto s = "http://www.example.org/file.tar.gz?field=value#?foo=bar%23"; + auto parsed = parseURL(s); + + ParsedURL expected { + .url = "http://www.example.org/file.tar.gz", + .base = "http://www.example.org/file.tar.gz", + .scheme = "http", + .authority = "www.example.org", + .path = "/file.tar.gz", + .query = (StringMap) { { "field", "value" } }, + .fragment = "?foo=bar#", + }; + + ASSERT_EQ(parsed, expected); + } + + + TEST(parseURL, 
parseIPv4Address) { + auto s = "http://127.0.0.1:8080/file.tar.gz?download=fast&when=now#hello"; + auto parsed = parseURL(s); + + ParsedURL expected { + .url = "http://127.0.0.1:8080/file.tar.gz", + .base = "https://127.0.0.1:8080/file.tar.gz", + .scheme = "http", + .authority = "127.0.0.1:8080", + .path = "/file.tar.gz", + .query = (StringMap) { { "download", "fast" }, { "when", "now" } }, + .fragment = "hello", + }; + + ASSERT_EQ(parsed, expected); + } + + TEST(parseURL, parseIPv6Address) { + auto s = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080"; + auto parsed = parseURL(s); + + ParsedURL expected { + .url = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080", + .base = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080", + .scheme = "http", + .authority = "[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080", + .path = "", + .query = (StringMap) { }, + .fragment = "", + }; + + ASSERT_EQ(parsed, expected); + + } + + TEST(parseURL, parseEmptyQueryParams) { + auto s = "http://127.0.0.1:8080/file.tar.gz?&&&&&"; + auto parsed = parseURL(s); + ASSERT_EQ(parsed.query, (StringMap) { }); + } + + TEST(parseURL, parseUserPassword) { + auto s = "http://user:pass@www.example.org:8080/file.tar.gz"; + auto parsed = parseURL(s); + + ParsedURL expected { + .url = "http://user:pass@www.example.org/file.tar.gz", + .base = "http://user:pass@www.example.org/file.tar.gz", + .scheme = "http", + .authority = "user:pass@www.example.org:8080", + .path = "/file.tar.gz", + .query = (StringMap) { }, + .fragment = "", + }; + + + ASSERT_EQ(parsed, expected); + } + + TEST(parseURL, parseFileURLWithQueryAndFragment) { + auto s = "file:///none/of/your/business"; + auto parsed = parseURL(s); + + ParsedURL expected { + .url = "", + .base = "", + .scheme = "file", + .authority = "", + .path = "/none/of/your/business", + .query = (StringMap) { }, + .fragment = "", + }; + + ASSERT_EQ(parsed, expected); + + } + + TEST(parseURL, parsedUrlsIsEqualToItself) { + auto s = "http://www.example.org/file.tar.gz"; + auto url = parseURL(s); + + ASSERT_TRUE(url == url); + } + + TEST(parseURL, parseFTPUrl) { + auto s = "ftp://ftp.nixos.org/downloads/nixos.iso"; + auto parsed = parseURL(s); + + ParsedURL expected { + .url = "ftp://ftp.nixos.org/downloads/nixos.iso", + .base = "ftp://ftp.nixos.org/downloads/nixos.iso", + .scheme = "ftp", + .authority = "ftp.nixos.org", + .path = "/downloads/nixos.iso", + .query = (StringMap) { }, + .fragment = "", + }; + + ASSERT_EQ(parsed, expected); + } + + TEST(parseURL, parsesAnythingInUriFormat) { + auto s = "whatever://github.com/NixOS/nixpkgs.git"; + auto parsed = parseURL(s); + } + + TEST(parseURL, parsesAnythingInUriFormatWithoutDoubleSlash) { + auto s = "whatever:github.com/NixOS/nixpkgs.git"; + auto parsed = parseURL(s); + } + + TEST(parseURL, emptyStringIsInvalidURL) { + ASSERT_THROW(parseURL(""), Error); + } + + /* ---------------------------------------------------------------------------- + * decodeQuery + * --------------------------------------------------------------------------*/ + + TEST(decodeQuery, emptyStringYieldsEmptyMap) { + auto d = decodeQuery(""); + ASSERT_EQ(d, (StringMap) { }); + } + + TEST(decodeQuery, simpleDecode) { + auto d = decodeQuery("yi=one&er=two"); + ASSERT_EQ(d, ((StringMap) { { "yi", "one" }, { "er", "two" } })); + } + + TEST(decodeQuery, decodeUrlEncodedArgs) { + auto d = decodeQuery("arg=%3D%3D%40%3D%3D"); + ASSERT_EQ(d, ((StringMap) { { "arg", "==@==" } })); + } + + TEST(decodeQuery, decodeArgWithEmptyValue) { + auto d = decodeQuery("arg="); + 
ASSERT_EQ(d, ((StringMap) { { "arg", ""} })); + } + + /* ---------------------------------------------------------------------------- + * percentDecode + * --------------------------------------------------------------------------*/ + + TEST(percentDecode, decodesUrlEncodedString) { + string s = "==@=="; + string d = percentDecode("%3D%3D%40%3D%3D"); + ASSERT_EQ(d, s); + } + + TEST(percentDecode, multipleDecodesAreIdempotent) { + string once = percentDecode("%3D%3D%40%3D%3D"); + string twice = percentDecode(once); + + ASSERT_EQ(once, twice); + } + + TEST(percentDecode, trailingPercent) { + string s = "==@==%"; + string d = percentDecode("%3D%3D%40%3D%3D%25"); + + ASSERT_EQ(d, s); + } + +} diff --git a/src/libutil/tests/xml-writer.cc b/src/libutil/tests/xml-writer.cc new file mode 100644 index 000000000..adcde25c9 --- /dev/null +++ b/src/libutil/tests/xml-writer.cc @@ -0,0 +1,105 @@ +#include "xml-writer.hh" +#include <gtest/gtest.h> +#include <sstream> + +namespace nix { + + /* ---------------------------------------------------------------------------- + * XMLWriter + * --------------------------------------------------------------------------*/ + + TEST(XMLWriter, emptyObject) { + std::stringstream out; + { + XMLWriter t(false, out); + } + + ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n"); + } + + TEST(XMLWriter, objectWithEmptyElement) { + std::stringstream out; + { + XMLWriter t(false, out); + t.openElement("foobar"); + } + + ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar></foobar>"); + } + + TEST(XMLWriter, objectWithElementWithAttrs) { + std::stringstream out; + { + XMLWriter t(false, out); + XMLAttrs attrs = { + { "foo", "bar" } + }; + t.openElement("foobar", attrs); + } + + ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar foo=\"bar\"></foobar>"); + } + + TEST(XMLWriter, objectWithElementWithEmptyAttrs) { + std::stringstream out; + { + XMLWriter t(false, out); + XMLAttrs attrs = {}; + t.openElement("foobar", attrs); + } + + ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar></foobar>"); + } + + TEST(XMLWriter, objectWithElementWithAttrsEscaping) { + std::stringstream out; + { + XMLWriter t(false, out); + XMLAttrs attrs = { + { "<key>", "<value>" } + }; + t.openElement("foobar", attrs); + } + + // XXX: While "<value>" is escaped, "<key>" isn't which I think is a bug. 
+ ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar <key>=\"<value>\"></foobar>"); + } + + TEST(XMLWriter, objectWithElementWithAttrsIndented) { + std::stringstream out; + { + XMLWriter t(true, out); + XMLAttrs attrs = { + { "foo", "bar" } + }; + t.openElement("foobar", attrs); + } + + ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar foo=\"bar\">\n</foobar>\n"); + } + + TEST(XMLWriter, writeEmptyElement) { + std::stringstream out; + { + XMLWriter t(false, out); + t.writeEmptyElement("foobar"); + } + + ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar />"); + } + + TEST(XMLWriter, writeEmptyElementWithAttributes) { + std::stringstream out; + { + XMLWriter t(false, out); + XMLAttrs attrs = { + { "foo", "bar" } + }; + t.writeEmptyElement("foobar", attrs); + + } + + ASSERT_EQ(out.str(), "<?xml version='1.0' encoding='utf-8'?>\n<foobar foo=\"bar\" />"); + } + +} diff --git a/src/libutil/util.hh b/src/libutil/util.hh index a861d5aa6..52ca804a0 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -60,12 +60,12 @@ Path canonPath(const Path & path, bool resolveSymlinks = false); /* Return the directory part of the given canonical path, i.e., everything before the final `/'. If the path is the root or an - immediate child thereof (e.g., `/foo'), this means an empty string - is returned. */ + immediate child thereof (e.g., `/foo'), this means `/' + is returned.*/ Path dirOf(const Path & path); /* Return the base name of the given canonical path, i.e., everything - following the final `/'. */ + following the final `/' (trailing slashes are removed). */ std::string_view baseNameOf(std::string_view path); /* Check whether 'path' is a descendant of 'dir'. */ @@ -393,17 +393,6 @@ string replaceStrings(const std::string & s, std::string rewriteStrings(const std::string & s, const StringMap & rewrites); -/* If a set contains 'from', remove it and insert 'to'. */ -template<typename T> -void replaceInSet(std::set<T> & set, const T & from, const T & to) -{ - auto i = set.find(from); - if (i == set.end()) return; - set.erase(i); - set.insert(to); -} - - /* Convert the exit status of a child as returned by wait() into an error string. */ string statusToString(int status); diff --git a/src/nix/main.cc b/src/nix/main.cc index fffdeab90..94fd33244 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -183,12 +183,15 @@ void mainWrapped(int argc, char * * argv) if (completions) return; - settings.requireExperimentalFeature("nix-command"); - initPlugins(); if (!args.command) args.showHelpAndExit(); + if (args.command->first != "repl" + && args.command->first != "doctor" + && args.command->first != "upgrade-nix") + settings.requireExperimentalFeature("nix-command"); + Finally f([]() { stopProgressBar(); }); startProgressBar(args.printBuildLogs); diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh index a3c3c7847..17b63d978 100644 --- a/tests/binary-cache.sh +++ b/tests/binary-cache.sh @@ -105,10 +105,24 @@ mv $cacheDir/nar2 $cacheDir/nar # incomplete closure. 
clearStore -rm $(grep -l "StorePath:.*dependencies-input-2" $cacheDir/*.narinfo) +rm -v $(grep -l "StorePath:.*dependencies-input-2" $cacheDir/*.narinfo) nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log -grep -q "copying path" $TEST_ROOT/log +grep -q "copying path.*input-0" $TEST_ROOT/log +grep -q "copying path.*input-2" $TEST_ROOT/log +grep -q "copying path.*top" $TEST_ROOT/log + + +# Idem, but without cached .narinfo. +clearStore +clearCacheCache + +nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log +grep -q "don't know how to build" $TEST_ROOT/log +grep -q "building.*input-1" $TEST_ROOT/log +grep -q "building.*input-2" $TEST_ROOT/log +grep -q "copying path.*input-0" $TEST_ROOT/log +grep -q "copying path.*top" $TEST_ROOT/log if [ -n "$HAVE_SODIUM" ]; then diff --git a/tests/build-hook.nix b/tests/build-hook.nix index 8bff0fe79..8c5ca8cd3 100644 --- a/tests/build-hook.nix +++ b/tests/build-hook.nix @@ -4,13 +4,13 @@ let input1 = mkDerivation { name = "build-hook-input-1"; - builder = ./dependencies.builder1.sh; + buildCommand = "mkdir $out; echo FOO > $out/foo"; requiredSystemFeatures = ["foo"]; }; input2 = mkDerivation { name = "build-hook-input-2"; - builder = ./dependencies.builder2.sh; + buildCommand = "mkdir $out; echo BAR > $out/bar"; }; in diff --git a/tests/build-remote.sh b/tests/build-remote.sh index ddd68f327..a550f4460 100644 --- a/tests/build-remote.sh +++ b/tests/build-remote.sh @@ -20,5 +20,5 @@ cat $outPath/foobar | grep FOOBAR # Ensure that input1 was built on store1 due to the required feature. p=$(readlink -f $outPath/input-2) -(! nix path-info --store $TEST_ROOT/store0 --all | grep dependencies.builder1.sh) -nix path-info --store $TEST_ROOT/store1 --all | grep dependencies.builder1.sh +(! 
nix path-info --store $TEST_ROOT/store0 --all | grep builder-build-hook-input-1.sh) +nix path-info --store $TEST_ROOT/store1 --all | grep builder-build-hook-input-1.sh diff --git a/tests/config.nix.in b/tests/config.nix.in index 0ec2eba6b..a57a8c596 100644 --- a/tests/config.nix.in +++ b/tests/config.nix.in @@ -11,7 +11,7 @@ rec { derivation ({ inherit system; builder = shell; - args = ["-e" args.builder or (builtins.toFile "builder.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")]; + args = ["-e" args.builder or (builtins.toFile "builder-${args.name}.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")]; PATH = path; } // removeAttrs args ["builder" "meta"]) // { meta = args.meta or {}; }; diff --git a/tests/dependencies.builder1.sh b/tests/dependencies.builder1.sh deleted file mode 100644 index 4b006a17d..000000000 --- a/tests/dependencies.builder1.sh +++ /dev/null @@ -1,2 +0,0 @@ -mkdir $out -echo FOO > $out/foo diff --git a/tests/dependencies.builder2.sh b/tests/dependencies.builder2.sh deleted file mode 100644 index 4f886fdb3..000000000 --- a/tests/dependencies.builder2.sh +++ /dev/null @@ -1,2 +0,0 @@ -mkdir $out -echo BAR > $out/bar diff --git a/tests/dependencies.nix b/tests/dependencies.nix index eca4b2964..e320d81c9 100644 --- a/tests/dependencies.nix +++ b/tests/dependencies.nix @@ -2,18 +2,27 @@ with import ./config.nix; let { + input0 = mkDerivation { + name = "dependencies-input-0"; + buildCommand = "mkdir $out; echo foo > $out/bar"; + }; + input1 = mkDerivation { name = "dependencies-input-1"; - builder = ./dependencies.builder1.sh; + buildCommand = "mkdir $out; echo FOO > $out/foo"; }; input2 = mkDerivation { name = "dependencies-input-2"; - builder = "${./dependencies.builder2.sh}"; + buildCommand = '' + mkdir $out + echo BAR > $out/bar + echo ${input0} > $out/input0 + ''; }; body = mkDerivation { - name = "dependencies"; + name = "dependencies-top"; builder = ./dependencies.builder0.sh + "/FOOBAR/../."; input1 = input1 + "/."; input2 = "${input2}/."; diff --git a/tests/dependencies.sh b/tests/dependencies.sh index 8d0fdc10f..092950aa7 100644 --- a/tests/dependencies.sh +++ b/tests/dependencies.sh @@ -6,7 +6,7 @@ drvPath=$(nix-instantiate dependencies.nix) echo "derivation is $drvPath" -nix-store -q --tree "$drvPath" | grep '───.*builder1.sh' +nix-store -q --tree "$drvPath" | grep '───.*builder-dependencies-input-1.sh' # Test Graphviz graph generation. 
nix-store -q --graph "$drvPath" > $TEST_ROOT/graph diff --git a/tests/export-graph.sh b/tests/export-graph.sh index a6fd69054..a1449b34e 100644 --- a/tests/export-graph.sh +++ b/tests/export-graph.sh @@ -11,7 +11,7 @@ checkRef() { outPath=$(nix-build ./export-graph.nix -A 'foo."bar.runtimeGraph"' -o $TEST_ROOT/result) -test $(nix-store -q --references $TEST_ROOT/result | wc -l) = 2 || fail "bad nr of references" +test $(nix-store -q --references $TEST_ROOT/result | wc -l) = 3 || fail "bad nr of references" checkRef input-2 for i in $(cat $outPath); do checkRef $i; done diff --git a/tests/gc-concurrent.nix b/tests/gc-concurrent.nix index c0595cc47..21671ea2c 100644 --- a/tests/gc-concurrent.nix +++ b/tests/gc-concurrent.nix @@ -4,12 +4,12 @@ rec { input1 = mkDerivation { name = "dependencies-input-1"; - builder = ./dependencies.builder1.sh; + buildCommand = "mkdir $out; echo FOO > $out/foo"; }; input2 = mkDerivation { name = "dependencies-input-2"; - builder = ./dependencies.builder2.sh; + buildCommand = "mkdir $out; echo BAR > $out/bar"; }; test1 = mkDerivation { @@ -23,5 +23,5 @@ rec { builder = ./gc-concurrent2.builder.sh; inherit input1 input2; }; - + } diff --git a/tests/nix-channel.sh b/tests/nix-channel.sh index 93f837bef..49c68981a 100644 --- a/tests/nix-channel.sh +++ b/tests/nix-channel.sh @@ -32,10 +32,10 @@ if [ "$xmllint" != false ]; then $xmllint --noout $TEST_ROOT/meta.xml || fail "malformed XML" fi grep -q 'meta.*description.*Random test package' $TEST_ROOT/meta.xml -grep -q 'item.*attrPath="foo".*name="dependencies"' $TEST_ROOT/meta.xml +grep -q 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml # Do an install. -nix-env -i dependencies +nix-env -i dependencies-top [ -e $TEST_HOME/.nix-profile/foobar ] clearProfiles @@ -51,9 +51,9 @@ if [ "$xmllint" != false ]; then $xmllint --noout $TEST_ROOT/meta.xml || fail "malformed XML" fi grep -q 'meta.*description.*Random test package' $TEST_ROOT/meta.xml -grep -q 'item.*attrPath="foo".*name="dependencies"' $TEST_ROOT/meta.xml +grep -q 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml # Do an install. -nix-env -i dependencies +nix-env -i dependencies-top [ -e $TEST_HOME/.nix-profile/foobar ] |
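
The unit tests added in this merge all follow one pattern: every src/libutil/tests/*.cc file is picked up by the $(wildcard $(d)/*.cc) rule in the new local.mk, compiled with -I src/libutil, linked against libutil and $(GTEST_LIBS) (provided by the PKG_CHECK_MODULES([GTEST], [gtest_main]) check in configure.ac), and wired into the build's check target via "check: libutil-tests_RUN". As a minimal sketch (not part of this commit), a further test file dropped into the same directory could look like the following; the file name, test names and scenarios are hypothetical, but the helpers used (hasPrefix, trim, baseNameOf) and their behaviour are taken from the util.hh tests introduced above.

// Hypothetical example, e.g. src/libutil/tests/extra.cc; it would be picked up
// automatically by the wildcard rule in src/libutil/tests/local.mk shown above.
#include "util.hh"

#include <gtest/gtest.h>

namespace nix {

    TEST(hasPrefix, storePathPrefix) {
        // hasPrefix(s, prefix) is true when s starts with prefix,
        // as exercised by the hasPrefix tests in tests.cc above.
        ASSERT_TRUE(hasPrefix("/nix/store/abc", "/nix/store"));
        ASSERT_FALSE(hasPrefix("/tmp/abc", "/nix/store"));
    }

    TEST(trim, thenBaseNameOf) {
        // trim() strips surrounding whitespace; baseNameOf() keeps everything
        // after the final '/' (trailing slashes removed), per the tests above.
        auto p = trim("  /nix/store/abc \n");
        ASSERT_EQ(baseNameOf(p), "abc");
    }

}

Because the check target depends on libutil-tests_RUN, such a file would be built and run as part of "make check" without any further makefile changes.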