aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--Makefile1
-rw-r--r--Makefile.config.in5
-rw-r--r--README.md2
-rw-r--r--configure.ac56
-rw-r--r--corepkgs/config.nix.in3
-rw-r--r--corepkgs/unpack-channel.nix4
-rw-r--r--doc/manual/advanced-topics/distributed-builds.xml10
-rw-r--r--doc/manual/command-ref/conf-file.xml37
-rw-r--r--doc/manual/command-ref/nix-store.xml7
-rw-r--r--doc/manual/command-ref/opt-common.xml5
-rw-r--r--doc/manual/installation/prerequisites-source.xml7
-rw-r--r--doc/manual/release-notes/rl-1.11.xml7
-rw-r--r--doc/manual/release-notes/rl-1.12.xml7
-rw-r--r--doc/manual/release-notes/rl-1.8.xml4
-rw-r--r--local.mk2
-rw-r--r--misc/docker/Dockerfile2
-rw-r--r--misc/launchd/org.nixos.nix-daemon.plist.in2
-rw-r--r--nix.spec.in12
-rw-r--r--perl/Makefile14
-rw-r--r--perl/Makefile.config.in18
-rw-r--r--perl/configure.ac104
-rw-r--r--perl/lib/Nix/Config.pm.in24
-rw-r--r--perl/lib/Nix/Store.xs1
-rw-r--r--perl/local.mk57
-rw-r--r--release.nix64
-rw-r--r--shell.nix6
-rw-r--r--src/build-remote/build-remote.cc109
-rw-r--r--src/libexpr/primops.cc45
-rw-r--r--src/libexpr/primops/fetchgit.cc10
-rw-r--r--src/libexpr/value.hh2
-rw-r--r--src/libmain/shared.cc13
-rw-r--r--src/libstore/binary-cache-store.cc36
-rw-r--r--src/libstore/binary-cache-store.hh16
-rw-r--r--src/libstore/build.cc98
-rw-r--r--src/libstore/derivations.cc12
-rw-r--r--src/libstore/download.cc53
-rw-r--r--src/libstore/download.hh5
-rw-r--r--src/libstore/export-import.cc34
-rw-r--r--src/libstore/gc.cc2
-rw-r--r--src/libstore/globals.cc32
-rw-r--r--src/libstore/globals.hh13
-rw-r--r--src/libstore/http-binary-cache-store.cc4
-rw-r--r--src/libstore/legacy-ssh-store.cc103
-rw-r--r--src/libstore/local-binary-cache-store.cc8
-rw-r--r--src/libstore/local-fs-store.cc49
-rw-r--r--src/libstore/local-store.cc13
-rw-r--r--src/libstore/local-store.hh22
-rw-r--r--src/libstore/nar-info-disk-cache.cc219
-rw-r--r--src/libstore/nar-info.cc8
-rw-r--r--src/libstore/remote-store.cc81
-rw-r--r--src/libstore/remote-store.hh17
-rw-r--r--src/libstore/s3-binary-cache-store.cc59
-rw-r--r--src/libstore/s3.hh4
-rw-r--r--src/libstore/sqlite.cc72
-rw-r--r--src/libstore/sqlite.hh6
-rw-r--r--src/libstore/ssh-store.cc88
-rw-r--r--src/libstore/ssh.cc102
-rw-r--r--src/libstore/ssh.hh49
-rw-r--r--src/libstore/store-api.cc56
-rw-r--r--src/libstore/store-api.hh70
-rw-r--r--src/libstore/worker-protocol.hh5
-rw-r--r--src/libutil/archive.hh7
-rw-r--r--src/libutil/compression.cc63
-rw-r--r--src/libutil/compression.hh2
-rw-r--r--src/libutil/config.cc112
-rw-r--r--src/libutil/config.hh151
-rw-r--r--src/libutil/hash.cc4
-rw-r--r--src/libutil/istringstream_nocopy.hh92
-rw-r--r--src/libutil/local.mk2
-rw-r--r--src/libutil/logging.cc9
-rw-r--r--src/libutil/logging.hh10
-rw-r--r--src/libutil/lru-cache.hh8
-rw-r--r--src/libutil/pool.hh8
-rw-r--r--src/libutil/serialise.cc49
-rw-r--r--src/libutil/serialise.hh63
-rw-r--r--src/libutil/sync.hh1
-rw-r--r--src/libutil/types.hh2
-rw-r--r--src/libutil/util.cc50
-rw-r--r--src/libutil/util.hh8
-rwxr-xr-xsrc/nix-build/nix-build.cc10
-rwxr-xr-xsrc/nix-copy-closure/nix-copy-closure.cc10
-rw-r--r--src/nix-daemon/nix-daemon.cc57
-rw-r--r--src/nix-prefetch-url/nix-prefetch-url.cc4
-rw-r--r--src/nix-store/local.mk2
-rw-r--r--src/nix-store/nix-store.cc66
-rw-r--r--src/nix/command.cc7
-rw-r--r--src/nix/command.hh1
-rw-r--r--src/nix/copy.cc12
-rw-r--r--src/nix/log.cc57
-rw-r--r--src/nix/main.cc1
-rw-r--r--tests/binary-cache.sh10
-rw-r--r--tests/nix-shell.sh1
-rw-r--r--tests/optimise-store.sh12
-rw-r--r--tests/repair.sh4
-rw-r--r--tests/shell.nix1
-rw-r--r--tests/timeout.nix3
-rw-r--r--tests/timeout.sh5
98 files changed, 1936 insertions, 945 deletions
diff --git a/.gitignore b/.gitignore
index 951efb4c9..4f7e668e7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
Makefile.config
+perl/Makefile.config
# /
/aclocal.m4
diff --git a/Makefile b/Makefile
index d26cf8d99..40ac4e72d 100644
--- a/Makefile
+++ b/Makefile
@@ -18,7 +18,6 @@ makefiles = \
src/nix-channel/local.mk \
src/nix-build/local.mk \
src/build-remote/local.mk \
- perl/local.mk \
scripts/local.mk \
corepkgs/local.mk \
misc/systemd/local.mk \
diff --git a/Makefile.config.in b/Makefile.config.in
index 15e943804..53dca1fcf 100644
--- a/Makefile.config.in
+++ b/Makefile.config.in
@@ -14,7 +14,7 @@ LIBLZMA_LIBS = @LIBLZMA_LIBS@
SQLITE3_LIBS = @SQLITE3_LIBS@
bash = @bash@
bindir = @bindir@
-curl = @curl@
+bro = @bro@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
@@ -24,9 +24,6 @@ libdir = @libdir@
libexecdir = @libexecdir@
localstatedir = @localstatedir@
mandir = @mandir@
-perl = @perl@
-perlbindings = @perlbindings@
-perllibdir = @perllibdir@
pkglibdir = $(libdir)/$(PACKAGE_NAME)
prefix = @prefix@
storedir = @storedir@
diff --git a/README.md b/README.md
index 1eb73b256..3173c6c44 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
Nix, the purely functional package manager
------------------------------------------
-Nix is a new take on package management that is fairly unique. Because of it's
+Nix is a new take on package management that is fairly unique. Because of its
purity aspects, a lot of issues found in traditional package managers don't
appear with Nix.
diff --git a/configure.ac b/configure.ac
index e6b11be2d..3e6a894e3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -114,14 +114,12 @@ if test -z "$$1"; then
fi
])
-NEED_PROG(curl, curl)
NEED_PROG(bash, bash)
NEED_PROG(patch, patch)
AC_PATH_PROG(xmllint, xmllint, false)
AC_PATH_PROG(xsltproc, xsltproc, false)
AC_PATH_PROG(flex, flex, false)
AC_PATH_PROG(bison, bison, false)
-NEED_PROG(perl, perl)
NEED_PROG(sed, sed)
NEED_PROG(tar, tar)
NEED_PROG(bzip2, bzip2)
@@ -129,23 +127,7 @@ NEED_PROG(gzip, gzip)
NEED_PROG(xz, xz)
AC_PATH_PROG(dot, dot)
AC_PATH_PROG(pv, pv, pv)
-
-
-# Test that Perl has the open/fork feature (Perl 5.8.0 and beyond).
-AC_MSG_CHECKING([whether Perl is recent enough])
-if ! $perl -e 'open(FOO, "-|", "true"); while (<FOO>) { print; }; close FOO or die;'; then
- AC_MSG_RESULT(no)
- AC_MSG_ERROR([Your Perl version is too old. Nix requires Perl 5.8.0 or newer.])
-fi
-AC_MSG_RESULT(yes)
-
-
-# Figure out where to install Perl modules.
-AC_MSG_CHECKING([for the Perl installation prefix])
-perlversion=$($perl -e 'use Config; print $Config{version};')
-perlarchname=$($perl -e 'use Config; print $Config{archname};')
-AC_SUBST(perllibdir, [${libdir}/perl5/site_perl/$perlversion/$perlarchname])
-AC_MSG_RESULT($perllibdir)
+AC_PATH_PROG(bro, bro, bro)
NEED_PROG(cat, cat)
@@ -213,40 +195,6 @@ if test "$gc" = yes; then
fi
-# Check for the required Perl dependencies (DBI, DBD::SQLite).
-perlFlags="-I$perllibdir"
-
-AC_ARG_WITH(dbi, AC_HELP_STRING([--with-dbi=PATH],
- [prefix of the Perl DBI library]),
- perlFlags="$perlFlags -I$withval")
-
-AC_ARG_WITH(dbd-sqlite, AC_HELP_STRING([--with-dbd-sqlite=PATH],
- [prefix of the Perl DBD::SQLite library]),
- perlFlags="$perlFlags -I$withval")
-
-AC_MSG_CHECKING([whether DBD::SQLite works])
-if ! $perl $perlFlags -e 'use DBI; use DBD::SQLite;' 2>&5; then
- AC_MSG_RESULT(no)
- AC_MSG_FAILURE([The Perl modules DBI and/or DBD::SQLite are missing.])
-fi
-AC_MSG_RESULT(yes)
-
-AC_SUBST(perlFlags)
-
-
-# Whether to build the Perl bindings
-AC_MSG_CHECKING([whether to build the Perl bindings])
-AC_ARG_ENABLE(perl-bindings, AC_HELP_STRING([--enable-perl-bindings],
- [whether to build the Perl bindings (recommended) [default=yes]]),
- perlbindings=$enableval, perlbindings=yes)
-if test "$enable_shared" = no; then
- # Perl bindings require shared libraries.
- perlbindings=no
-fi
-AC_SUBST(perlbindings)
-AC_MSG_RESULT($perlbindings)
-
-
AC_ARG_ENABLE(init-state, AC_HELP_STRING([--disable-init-state],
[do not initialise DB etc. in `make install']),
init_state=$enableval, init_state=yes)
@@ -265,7 +213,7 @@ AC_CHECK_FUNCS([setresuid setreuid lchown])
# Nice to have, but not essential.
-AC_CHECK_FUNCS([strsignal posix_fallocate nanosleep sysconf])
+AC_CHECK_FUNCS([strsignal posix_fallocate sysconf])
# This is needed if bzip2 is a static library, and the Nix libraries
diff --git a/corepkgs/config.nix.in b/corepkgs/config.nix.in
index f0f4890a3..32ce6b399 100644
--- a/corepkgs/config.nix.in
+++ b/corepkgs/config.nix.in
@@ -14,6 +14,9 @@ in rec {
nixBinDir = fromEnv "NIX_BIN_DIR" "@bindir@";
nixPrefix = "@prefix@";
nixLibexecDir = fromEnv "NIX_LIBEXEC_DIR" "@libexecdir@";
+ nixLocalstateDir = "@localstatedir@";
+ nixSysconfDir = "@sysconfdir@";
+ nixStoreDir = fromEnv "NIX_STORE_DIR" "@storedir@";
# If Nix is installed in the Nix store, then automatically add it as
# a dependency to the core packages. This ensures that they work
diff --git a/corepkgs/unpack-channel.nix b/corepkgs/unpack-channel.nix
index 9445532de..a654db40e 100644
--- a/corepkgs/unpack-channel.nix
+++ b/corepkgs/unpack-channel.nix
@@ -15,7 +15,9 @@ let
else
${bzip2} -d < $src | ${tar} xf - ${tarFlags}
fi
- mv * $out/$channelName
+ if [ * != $channelName ]; then
+ mv * $out/$channelName
+ fi
if [ -n "$binaryCacheURL" ]; then
mkdir $out/binary-caches
echo -n "$binaryCacheURL" > $out/binary-caches/$channelName
diff --git a/doc/manual/advanced-topics/distributed-builds.xml b/doc/manual/advanced-topics/distributed-builds.xml
index f85837003..d5bc1c592 100644
--- a/doc/manual/advanced-topics/distributed-builds.xml
+++ b/doc/manual/advanced-topics/distributed-builds.xml
@@ -42,7 +42,7 @@ purposes. It uses <command>ssh</command> and
<command>nix-copy-closure</command> to copy the build inputs and
outputs and perform the remote build. To use it, you should set
<envar>NIX_BUILD_HOOK</envar> to
-<filename><replaceable>prefix</replaceable>/libexec/nix/build-remote.pl</filename>.
+<filename><replaceable>prefix</replaceable>/libexec/nix/build-remote</filename>.
You should also define a list of available build machines and point
the environment variable <envar>NIX_REMOTE_SYSTEMS</envar> to
it. <envar>NIX_REMOTE_SYSTEMS</envar> must be an absolute path. An
@@ -68,7 +68,7 @@ bits of information:
should not have a passphrase!</para></listitem>
<listitem><para>The maximum number of builds that
- <filename>build-remote.pl</filename> will execute in parallel on the
+ <filename>build-remote</filename> will execute in parallel on the
machine. Typically this should be equal to the number of CPU cores.
For instance, the machine <literal>itchy</literal> in the example
will execute up to 8 builds in parallel.</para></listitem>
@@ -80,7 +80,7 @@ bits of information:
<listitem><para>A comma-separated list of <emphasis>supported
features</emphasis>. If a derivation has the
<varname>requiredSystemFeatures</varname> attribute, then
- <filename>build-remote.pl</filename> will only perform the
+ <filename>build-remote</filename> will only perform the
derivation on a machine that has the specified features. For
instance, the attribute
@@ -106,11 +106,11 @@ requiredSystemFeatures = [ "kvm" ];
You should also set up the environment variable
<envar>NIX_CURRENT_LOAD</envar> to point at a directory (e.g.,
<filename>/var/run/nix/current-load</filename>) that
-<filename>build-remote.pl</filename> uses to remember how many builds
+<filename>build-remote</filename> uses to remember how many builds
it is currently executing remotely. It doesn't look at the actual
load on the remote machine, so if you have multiple instances of Nix
running, they should use the same <envar>NIX_CURRENT_LOAD</envar>
-file. Maybe in the future <filename>build-remote.pl</filename> will
+file. Maybe in the future <filename>build-remote</filename> will
look at the actual remote load.</para>
</chapter>
diff --git a/doc/manual/command-ref/conf-file.xml b/doc/manual/command-ref/conf-file.xml
index 0f7a2deba..6952829e8 100644
--- a/doc/manual/command-ref/conf-file.xml
+++ b/doc/manual/command-ref/conf-file.xml
@@ -101,9 +101,9 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
<listitem><para>This option defines the maximum number of jobs
that Nix will try to build in parallel. The default is
- <literal>1</literal>. You should generally set it to the number
- of CPUs in your system (e.g., <literal>2</literal> on an Athlon 64
- X2). It can be overridden using the <option
+ <literal>1</literal>. The special value <literal>auto</literal>
+ causes Nix to use the number of CPUs in your system. It can be
+ overridden using the <option
linkend='opt-max-jobs'>--max-jobs</option> (<option>-j</option>)
command line switch.</para></listitem>
@@ -394,9 +394,10 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
<varlistentry><term><literal>signed-binary-caches</literal></term>
- <listitem><para>If set to <literal>*</literal>, Nix will only
- download binaries if they are signed using one of the keys listed
- in <option>binary-cache-public-keys</option>.</para></listitem>
+ <listitem><para>If set to <literal>*</literal> (the default), Nix
+ will only download binaries if they are signed using one of the
+ keys listed in <option>binary-cache-public-keys</option>. Set to
+ the empty string to disable signature checking.</para></listitem>
</varlistentry>
@@ -512,20 +513,6 @@ password <replaceable>my-password</replaceable>
</varlistentry>
- <varlistentry xml:id="conf-log-servers"><term><literal>log-servers</literal></term>
-
- <listitem>
-
- <para>A list of URL prefixes (such as
- <literal>http://hydra.nixos.org/log</literal>) from which
- <command>nix-store -l</command> will try to fetch build logs if
- they’re not available locally.</para>
-
- </listitem>
-
- </varlistentry>
-
-
<varlistentry xml:id="conf-trusted-users"><term><literal>trusted-users</literal></term>
<listitem>
@@ -644,6 +631,16 @@ password <replaceable>my-password</replaceable>
</varlistentry>
+ <varlistentry xml:id="conf-allow-import-from-derivation"><term><literal>allow-import-from-derivation</literal></term>
+
+ <listitem><para>By default, Nix allows you to <function>import</function> from a derivation,
+ allowing building at evaluation time. With this option set to false, Nix will throw an error
+ when evaluating an expression that uses this feature, allowing users to ensure their evaluation
+ will not require any builds to take place.</para></listitem>
+
+ </varlistentry>
+
+
</variablelist>
</para>
diff --git a/doc/manual/command-ref/nix-store.xml b/doc/manual/command-ref/nix-store.xml
index 0f6172def..fb017b741 100644
--- a/doc/manual/command-ref/nix-store.xml
+++ b/doc/manual/command-ref/nix-store.xml
@@ -1236,12 +1236,7 @@ the store path is used.</para>
<filename>/nix/var/log/nix/drvs</filename>. However, there is no
guarantee that a build log is available for any particular store path.
For instance, if the path was downloaded as a pre-built binary through
-a substitute, then the log is unavailable. If the log is not available
-locally, then <command>nix-store</command> will try to download the
-log from the servers specified in the Nix option
-<option>log-servers</option>. For example, if it’s set to
-<literal>http://hydra.nixos.org/log</literal>, then Nix will check
-<literal>http://hydra.nixos.org/log/<replaceable>base-name</replaceable></literal>.</para>
+a substitute, then the log is unavailable.</para>
</refsection>
diff --git a/doc/manual/command-ref/opt-common.xml b/doc/manual/command-ref/opt-common.xml
index 2a076877a..2aa41c4d4 100644
--- a/doc/manual/command-ref/opt-common.xml
+++ b/doc/manual/command-ref/opt-common.xml
@@ -93,8 +93,9 @@
<term><option>-j</option></term>
<listitem><para>Sets the maximum number of build jobs that Nix will
- perform in parallel to the specified number. The default is
- specified by the <link
+ perform in parallel to the specified number. Specify
+ <literal>auto</literal> to use the number of CPUs in the system.
+ The default is specified by the <link
linkend='conf-build-max-jobs'><literal>build-max-jobs</literal></link>
configuration setting, which itself defaults to
<literal>1</literal>. A higher value is useful on SMP systems or to
diff --git a/doc/manual/installation/prerequisites-source.xml b/doc/manual/installation/prerequisites-source.xml
index cd6d61356..7311e4885 100644
--- a/doc/manual/installation/prerequisites-source.xml
+++ b/doc/manual/installation/prerequisites-source.xml
@@ -12,8 +12,6 @@
<listitem><para>A version of GCC or Clang that supports C++11.</para></listitem>
- <listitem><para>Perl 5.8 or higher.</para></listitem>
-
<listitem><para><command>pkg-config</command> to locate
dependencies. If your distribution does not provide it, you can get
it from <link
@@ -34,11 +32,6 @@
or higher. If your distribution does not provide it, please install
it from <link xlink:href="http://www.sqlite.org/" />.</para></listitem>
- <listitem><para>The Perl DBI and DBD::SQLite libraries, which are
- available from <link
- xlink:href="http://search.cpan.org/">CPAN</link> if your
- distribution does not provide them.</para></listitem>
-
<listitem><para>The <link
xlink:href="http://www.hboehm.info/gc/">Boehm
garbage collector</link> to reduce the evaluator’s memory
diff --git a/doc/manual/release-notes/rl-1.11.xml b/doc/manual/release-notes/rl-1.11.xml
index efb03d613..fe422dd1f 100644
--- a/doc/manual/release-notes/rl-1.11.xml
+++ b/doc/manual/release-notes/rl-1.11.xml
@@ -122,13 +122,6 @@ $ diffoscope /nix/store/11a27shh6n2i…-zlib-1.2.8 /nix/store/11a27shh6n2i…-zl
</listitem>
<listitem>
- <para>The Nix language now supports floating point numbers. They are
- based on regular C++ <literal>float</literal> and compatible with
- existing integers and number-related operations. Export and import to and
- from JSON and XML works, too.
- </para>
- </listitem>
- <listitem>
<para>All "chroot"-containing strings got renamed to "sandbox".
In particular, some Nix options got renamed, but the old names
are still accepted as lower-priority aliases.
diff --git a/doc/manual/release-notes/rl-1.12.xml b/doc/manual/release-notes/rl-1.12.xml
index d6864b3f5..b7f45fc44 100644
--- a/doc/manual/release-notes/rl-1.12.xml
+++ b/doc/manual/release-notes/rl-1.12.xml
@@ -17,6 +17,13 @@
have write access to the Nix database.</para>
</listitem>
+ <listitem>
+ <para>The Nix language now supports floating point numbers. They are
+ based on regular C++ <literal>float</literal> and compatible with
+ existing integers and number-related operations. Export and import to and
+ from JSON and XML works, too.
+ </para>
+ </listitem>
</itemizedlist>
<para>This release has contributions from TBD.</para>
diff --git a/doc/manual/release-notes/rl-1.8.xml b/doc/manual/release-notes/rl-1.8.xml
index 48caac2c6..c854c5c5f 100644
--- a/doc/manual/release-notes/rl-1.8.xml
+++ b/doc/manual/release-notes/rl-1.8.xml
@@ -83,8 +83,8 @@ $ nix-store -l $(which xterm)
caches).</para></listitem>
<listitem><para>The configuration option
- <option>build-max-jobs</option> now defaults to the number of
- available CPU cores.</para></listitem>
+ <option>build-cores</option> now defaults to the number of available
+ CPU cores.</para></listitem>
<listitem><para>Build users are now used by default when Nix is
invoked as root. This prevents builds from accidentally running as
diff --git a/local.mk b/local.mk
index eebd71961..dc10e6870 100644
--- a/local.mk
+++ b/local.mk
@@ -3,7 +3,7 @@ ifeq ($(MAKECMDGOALS), dist)
dist-files += $(shell git --git-dir=.git ls-files || find * -type f)
endif
-dist-files += configure config.h.in nix.spec
+dist-files += configure config.h.in nix.spec perl/configure
clean-files += Makefile.config
diff --git a/misc/docker/Dockerfile b/misc/docker/Dockerfile
index 7b2865c94..85bd32e19 100644
--- a/misc/docker/Dockerfile
+++ b/misc/docker/Dockerfile
@@ -1,6 +1,6 @@
FROM alpine
-RUN wget -O- http://nixos.org/releases/nix/nix-1.11.2/nix-1.11.2-x86_64-linux.tar.bz2 | bzcat - | tar xf - \
+RUN wget -O- http://nixos.org/releases/nix/nix-1.11.7/nix-1.11.7-x86_64-linux.tar.bz2 | bzcat - | tar xf - \
&& echo "nixbld:x:30000:nixbld1,nixbld2,nixbld3,nixbld4,nixbld5,nixbld6,nixbld7,nixbld8,nixbld9,nixbld10,nixbld11,nixbld12,nixbld13,nixbld14,nixbld15,nixbld16,nixbld17,nixbld18,nixbld19,nixbld20,nixbld21,nixbld22,nixbld23,nixbld24,nixbld25,nixbld26,nixbld27,nixbld28,nixbld29,nixbld30" >> /etc/group \
&& for i in $(seq 1 30); do echo "nixbld$i:x:$((30000 + $i)):30000:::" >> /etc/passwd; done \
&& mkdir -m 0755 /nix && USER=root sh nix-*-x86_64-linux/install \
diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in
index c5ef97ee9..5d57a5ec8 100644
--- a/misc/launchd/org.nixos.nix-daemon.plist.in
+++ b/misc/launchd/org.nixos.nix-daemon.plist.in
@@ -16,6 +16,8 @@
<dict>
<key>NIX_SSL_CERT_FILE</key>
<string>/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt</string>
+ <key>XDG_CACHE_HOME</key>
+ <string>/root/.cache</string>
</dict>
</dict>
</plist>
diff --git a/nix.spec.in b/nix.spec.in
index 0c9b9ab20..390893d64 100644
--- a/nix.spec.in
+++ b/nix.spec.in
@@ -16,12 +16,7 @@ Source0: %{name}-%{version}.tar.bz2
%if 0%{?el5}
BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
%endif
-BuildRequires: perl(DBD::SQLite)
-BuildRequires: perl(DBI)
-BuildRequires: perl(ExtUtils::ParseXS)
-Requires: /usr/bin/perl
Requires: curl
-Requires: perl-DBD-SQLite
Requires: bzip2
Requires: gzip
Requires: xz
@@ -92,11 +87,6 @@ the emacs-%{name} package to edit Nix expressions with GNU Emacs.
%prep
%setup -q
-# Install Perl modules to vendor_perl
-# configure.ac need to be changed to make this global; however, this will
-# also affect NixOS. Use discretion.
-%{__sed} -i 's|perl5/site_perl/$perlversion/$perlarchname|perl5/vendor_perl|' \
- configure
%build
@@ -169,8 +159,6 @@ systemctl start nix-daemon.socket
%files
%{_bindir}/nix*
%{_libdir}/*.so
-%{perl_vendorarch}/*
-%exclude %dir %{perl_vendorarch}/auto/
%{_prefix}/libexec/*
%if ! 0%{?rhel} || 0%{?rhel} >= 7
%{_prefix}/lib/systemd/system/nix-daemon.socket
diff --git a/perl/Makefile b/perl/Makefile
new file mode 100644
index 000000000..cf655ae3d
--- /dev/null
+++ b/perl/Makefile
@@ -0,0 +1,14 @@
+makefiles = local.mk
+
+GLOBAL_CXXFLAGS += -std=c++14 -g -Wall -include nix/config.h
+
+-include Makefile.config
+
+OPTIMIZE = 1
+
+ifeq ($(OPTIMIZE), 1)
+ GLOBAL_CFLAGS += -O3
+ GLOBAL_CXXFLAGS += -O3
+endif
+
+include mk/lib.mk
diff --git a/perl/Makefile.config.in b/perl/Makefile.config.in
new file mode 100644
index 000000000..c87d4817e
--- /dev/null
+++ b/perl/Makefile.config.in
@@ -0,0 +1,18 @@
+CC = @CC@
+CFLAGS = @CFLAGS@
+CXX = @CXX@
+CXXFLAGS = @CXXFLAGS@
+HAVE_SODIUM = @HAVE_SODIUM@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+SODIUM_LIBS = @SODIUM_LIBS@
+NIX_CFLAGS = @NIX_CFLAGS@
+NIX_LIBS = @NIX_LIBS@
+nixbindir = @nixbindir@
+curl = @curl@
+nixlibexecdir = @nixlibexecdir@
+nixlocalstatedir = @nixlocalstatedir@
+perl = @perl@
+perllibdir = @perllibdir@
+nixstoredir = @nixstoredir@
+nixsysconfdir = @nixsysconfdir@
diff --git a/perl/configure.ac b/perl/configure.ac
new file mode 100644
index 000000000..7a6b28be2
--- /dev/null
+++ b/perl/configure.ac
@@ -0,0 +1,104 @@
+AC_INIT(nix-perl, m4_esyscmd([bash -c "echo -n $(cat ../version)$VERSION_SUFFIX"]))
+AC_CONFIG_SRCDIR(MANIFEST)
+AC_CONFIG_AUX_DIR(../config)
+
+CFLAGS=
+CXXFLAGS=
+AC_PROG_CC
+AC_PROG_CXX
+AX_CXX_COMPILE_STDCXX_11
+
+# Use 64-bit file system calls so that we can support files > 2 GiB.
+AC_SYS_LARGEFILE
+
+AC_DEFUN([NEED_PROG],
+[
+AC_PATH_PROG($1, $2)
+if test -z "$$1"; then
+ AC_MSG_ERROR([$2 is required])
+fi
+])
+
+NEED_PROG(perl, perl)
+NEED_PROG(curl, curl)
+NEED_PROG(bzip2, bzip2)
+NEED_PROG(xz, xz)
+
+# Test that Perl has the open/fork feature (Perl 5.8.0 and beyond).
+AC_MSG_CHECKING([whether Perl is recent enough])
+if ! $perl -e 'open(FOO, "-|", "true"); while (<FOO>) { print; }; close FOO or die;'; then
+ AC_MSG_RESULT(no)
+ AC_MSG_ERROR([Your Perl version is too old. Nix requires Perl 5.8.0 or newer.])
+fi
+AC_MSG_RESULT(yes)
+
+
+# Figure out where to install Perl modules.
+AC_MSG_CHECKING([for the Perl installation prefix])
+perlversion=$($perl -e 'use Config; print $Config{version};')
+perlarchname=$($perl -e 'use Config; print $Config{archname};')
+AC_SUBST(perllibdir, [${libdir}/perl5/site_perl/$perlversion/$perlarchname])
+AC_MSG_RESULT($perllibdir)
+
+AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
+ [path of the Nix store (defaults to /nix/store)]),
+ storedir=$withval, storedir='/nix/store')
+AC_SUBST(storedir)
+
+# Look for libsodium, an optional dependency.
+PKG_CHECK_MODULES([SODIUM], [libsodium],
+ [AC_DEFINE([HAVE_SODIUM], [1], [Whether to use libsodium for cryptography.])
+ CXXFLAGS="$SODIUM_CFLAGS $CXXFLAGS"
+ have_sodium=1], [have_sodium=])
+AC_SUBST(HAVE_SODIUM, [$have_sodium])
+
+# Check for the required Perl dependencies (DBI and DBD::SQLite).
+perlFlags="-I$perllibdir"
+
+AC_ARG_WITH(dbi, AC_HELP_STRING([--with-dbi=PATH],
+ [prefix of the Perl DBI library]),
+ perlFlags="$perlFlags -I$withval")
+
+AC_ARG_WITH(dbd-sqlite, AC_HELP_STRING([--with-dbd-sqlite=PATH],
+ [prefix of the Perl DBD::SQLite library]),
+ perlFlags="$perlFlags -I$withval")
+
+AC_MSG_CHECKING([whether DBD::SQLite works])
+if ! $perl $perlFlags -e 'use DBI; use DBD::SQLite;' 2>&5; then
+ AC_MSG_RESULT(no)
+ AC_MSG_FAILURE([The Perl modules DBI and/or DBD::SQLite are missing.])
+fi
+AC_MSG_RESULT(yes)
+
+AC_SUBST(perlFlags)
+
+PKG_CHECK_MODULES([NIX], [nix-store])
+
+NEED_PROG([NIX_INSTANTIATE_PROGRAM], [nix-instantiate])
+
+# Get nix configure values
+nixbindir=$("$NIX_INSTANTIATE_PROGRAM" --eval '<nix/config.nix>' -A nixBinDir | tr -d \")
+nixlibexecdir=$("$NIX_INSTANTIATE_PROGRAM" --eval '<nix/config.nix>' -A nixLibexecDir | tr -d \")
+nixlocalstatedir=$("$NIX_INSTANTIATE_PROGRAM" --eval '<nix/config.nix>' -A nixLocalstateDir | tr -d \")
+nixsysconfdir=$("$NIX_INSTANTIATE_PROGRAM" --eval '<nix/config.nix>' -A nixSysconfDir | tr -d \")
+nixstoredir=$("$NIX_INSTANTIATE_PROGRAM" --eval '<nix/config.nix>' -A nixStoreDir | tr -d \")
+AC_SUBST(nixbindir)
+AC_SUBST(nixlibexecdir)
+AC_SUBST(nixlocalstatedir)
+AC_SUBST(nixsysconfdir)
+AC_SUBST(nixstoredir)
+
+# Expand all variables in config.status.
+test "$prefix" = NONE && prefix=$ac_default_prefix
+test "$exec_prefix" = NONE && exec_prefix='${prefix}'
+for name in $ac_subst_vars; do
+ declare $name="$(eval echo "${!name}")"
+ declare $name="$(eval echo "${!name}")"
+ declare $name="$(eval echo "${!name}")"
+done
+
+rm -f Makefile.config
+ln -s ../mk mk
+
+AC_CONFIG_FILES([])
+AC_OUTPUT
diff --git a/perl/lib/Nix/Config.pm.in b/perl/lib/Nix/Config.pm.in
index 3575d99cb..f494e34a5 100644
--- a/perl/lib/Nix/Config.pm.in
+++ b/perl/lib/Nix/Config.pm.in
@@ -4,25 +4,21 @@ use MIME::Base64;
$version = "@PACKAGE_VERSION@";
-$binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@";
-$libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@libexecdir@";
-$stateDir = $ENV{"NIX_STATE_DIR"} || "@localstatedir@/nix";
-$logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix";
-$confDir = $ENV{"NIX_CONF_DIR"} || "@sysconfdir@/nix";
-$storeDir = $ENV{"NIX_STORE_DIR"} || "@storedir@";
+$binDir = $ENV{"NIX_BIN_DIR"} || "@nixbindir@";
+$libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@nixlibexecdir@";
+$stateDir = $ENV{"NIX_STATE_DIR"} || "@nixlocalstatedir@/nix";
+$logDir = $ENV{"NIX_LOG_DIR"} || "@nixlocalstatedir@/log/nix";
+$confDir = $ENV{"NIX_CONF_DIR"} || "@nixsysconfdir@/nix";
+$storeDir = $ENV{"NIX_STORE_DIR"} || "@nixstoredir@";
$bzip2 = "@bzip2@";
$xz = "@xz@";
$curl = "@curl@";
-$useBindings = "@perlbindings@" eq "yes";
+$useBindings = 1;
%config = ();
-%binaryCachePublicKeys = ();
-
-$defaultPublicKeys = "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=";
-
sub readConfig {
if (defined $ENV{'_NIX_OPTIONS'}) {
foreach my $s (split '\n', $ENV{'_NIX_OPTIONS'}) {
@@ -40,12 +36,6 @@ sub readConfig {
}
close CONFIG;
}
-
- foreach my $s (split(/ /, $config{"binary-cache-public-keys"} // $defaultPublicKeys)) {
- my ($keyName, $publicKey) = split ":", $s;
- next unless defined $keyName && defined $publicKey;
- $binaryCachePublicKeys{$keyName} = decode_base64($publicKey);
- }
}
return 1;
diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs
index f613e3df3..46b41f923 100644
--- a/perl/lib/Nix/Store.xs
+++ b/perl/lib/Nix/Store.xs
@@ -25,7 +25,6 @@ static ref<Store> store()
static std::shared_ptr<Store> _store;
if (!_store) {
try {
- logger = makeDefaultLogger();
settings.loadConfFile();
settings.update();
settings.lockCPU = false;
diff --git a/perl/local.mk b/perl/local.mk
index 5b43c4b71..35113bd96 100644
--- a/perl/local.mk
+++ b/perl/local.mk
@@ -1,48 +1,43 @@
nix_perl_sources := \
- $(d)/lib/Nix/Store.pm \
- $(d)/lib/Nix/Manifest.pm \
- $(d)/lib/Nix/SSH.pm \
- $(d)/lib/Nix/CopyClosure.pm \
- $(d)/lib/Nix/Config.pm.in \
- $(d)/lib/Nix/Utils.pm
+ lib/Nix/Store.pm \
+ lib/Nix/Manifest.pm \
+ lib/Nix/SSH.pm \
+ lib/Nix/CopyClosure.pm \
+ lib/Nix/Config.pm.in \
+ lib/Nix/Utils.pm
nix_perl_modules := $(nix_perl_sources:.in=)
$(foreach x, $(nix_perl_modules), $(eval $(call install-data-in, $(x), $(perllibdir)/Nix)))
-ifeq ($(perlbindings), yes)
-
- $(d)/lib/Nix/Store.cc: $(d)/lib/Nix/Store.xs
+lib/Nix/Store.cc: lib/Nix/Store.xs
$(trace-gen) xsubpp $^ -output $@
- libraries += Store
-
- Store_DIR := $(d)/lib/Nix
-
- Store_SOURCES := $(Store_DIR)/Store.cc
+libraries += Store
- Store_CXXFLAGS = \
- -I$(shell $(perl) -e 'use Config; print $$Config{archlibexp};')/CORE \
- -D_FILE_OFFSET_BITS=64 \
- -Wno-unknown-warning-option -Wno-unused-variable -Wno-literal-suffix \
- -Wno-reserved-user-defined-literal -Wno-duplicate-decl-specifier -Wno-pointer-bool-conversion
+Store_DIR := lib/Nix
- Store_LIBS = libstore libutil
+Store_SOURCES := $(Store_DIR)/Store.cc
- Store_LDFLAGS := $(SODIUM_LIBS)
+Store_CXXFLAGS = \
+ -I$(shell $(perl) -e 'use Config; print $$Config{archlibexp};')/CORE \
+ -D_FILE_OFFSET_BITS=64 \
+ -Wno-unknown-warning-option -Wno-unused-variable -Wno-literal-suffix \
+ -Wno-reserved-user-defined-literal -Wno-duplicate-decl-specifier -Wno-pointer-bool-conversion \
+ $(NIX_CFLAGS)
- ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
- archlib = $(shell perl -E 'use Config; print $$Config{archlib};')
- libperl = $(shell perl -E 'use Config; print $$Config{libperl};')
- Store_LDFLAGS += $(shell find ${archlib} -name ${libperl})
- endif
+Store_LDFLAGS := $(SODIUM_LIBS) $(NIX_LIBS)
- Store_ALLOW_UNDEFINED = 1
+ifeq (CYGWIN,$(findstring CYGWIN,$(OS)))
+ archlib = $(shell perl -E 'use Config; print $$Config{archlib};')
+ libperl = $(shell perl -E 'use Config; print $$Config{libperl};')
+ Store_LDFLAGS += $(shell find ${archlib} -name ${libperl})
+endif
- Store_FORCE_INSTALL = 1
+Store_ALLOW_UNDEFINED = 1
- Store_INSTALL_DIR = $(perllibdir)/auto/Nix/Store
+Store_FORCE_INSTALL = 1
-endif
+Store_INSTALL_DIR = $(perllibdir)/auto/Nix/Store
-clean-files += $(d)/lib/Nix/Config.pm $(d)/lib/Nix/Store.cc
+clean-files += lib/Nix/Config.pm lib/Nix/Store.cc Makefile.config
diff --git a/release.nix b/release.nix
index dc49b20cc..6136f650d 100644
--- a/release.nix
+++ b/release.nix
@@ -24,18 +24,15 @@ let
inherit officialRelease;
buildInputs =
- [ curl bison flex perl libxml2 libxslt bzip2 xz
+ [ curl bison flex libxml2 libxslt
+ bzip2 xz brotli
pkgconfig sqlite libsodium boehmgc
docbook5 docbook5_xsl
autoconf-archive
git
];
- configureFlags = ''
- --with-dbi=${perlPackages.DBI}/${perl.libPrefix}
- --with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
- --enable-gc
- '';
+ configureFlags = "--enable-gc";
postUnpack = ''
# Clean up when building from a working tree.
@@ -45,6 +42,7 @@ let
'';
preConfigure = ''
+ (cd perl ; autoreconf --install --force --verbose)
# TeX needs a writable font cache.
export VARTEXFONTS=$TMPDIR/texfonts
'';
@@ -73,9 +71,12 @@ let
src = tarball;
buildInputs =
- [ curl perl bzip2 xz openssl pkgconfig sqlite boehmgc ]
- ++ lib.optional stdenv.isLinux libsodium
- ++ lib.optional stdenv.isLinux
+ [ curl
+ bzip2 xz brotli
+ openssl pkgconfig sqlite boehmgc
+ ]
+ ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
+ ++ lib.optional (stdenv.isLinux || stdenv.isDarwin)
(aws-sdk-cpp.override {
apis = ["s3"];
customMemoryManagement = false;
@@ -83,8 +84,6 @@ let
configureFlags = ''
--disable-init-state
- --with-dbi=${perlPackages.DBI}/${perl.libPrefix}
- --with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
--enable-gc
--sysconfdir=/etc
'';
@@ -102,6 +101,31 @@ let
});
+ perlBindings = pkgs.lib.genAttrs systems (system:
+
+ let pkgs = import <nixpkgs> { inherit system; }; in with pkgs;
+
+ releaseTools.nixBuild {
+ name = "nix-perl";
+ src = tarball;
+
+ buildInputs =
+ [ (builtins.getAttr system jobs.build) curl bzip2 xz pkgconfig pkgs.perl ]
+ ++ lib.optional stdenv.isLinux libsodium;
+
+ configureFlags = ''
+ --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
+ --with-dbd-sqlite=${perlPackages.DBDSQLite}/${pkgs.perl.libPrefix}
+ '';
+
+ enableParallelBuilding = true;
+
+ postUnpack = "sourceRoot=$sourceRoot/perl";
+
+ preBuild = "unset NIX_INDENT_MAKE";
+ });
+
+
binaryTarball = pkgs.lib.genAttrs systems (system:
# FIXME: temporarily use a different branch for the Darwin build.
@@ -147,15 +171,13 @@ let
src = tarball;
buildInputs =
- [ curl perl bzip2 openssl pkgconfig sqlite xz libsodium
+ [ curl bzip2 openssl pkgconfig sqlite xz libsodium
# These are for "make check" only:
graphviz libxml2 libxslt
];
configureFlags = ''
--disable-init-state
- --with-dbi=${perlPackages.DBI}/${perl.libPrefix}
- --with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
'';
dontInstall = false;
@@ -276,12 +298,12 @@ let
src = jobs.tarball;
diskImage = (diskImageFun vmTools.diskImageFuns)
{ extraPackages =
- [ "perl-DBD-SQLite" "perl-devel" "sqlite" "sqlite-devel" "bzip2-devel" "emacs" "libcurl-devel" "openssl-devel" "xz-devel" ]
+ [ "sqlite" "sqlite-devel" "bzip2-devel" "emacs" "libcurl-devel" "openssl-devel" "xz-devel" ]
++ extraPackages; };
- memSize = 8192;
+ memSize = 1024;
meta.schedulingPriority = 50;
postRPMInstall = "cd /tmp/rpmout/BUILD/nix-* && make installcheck";
- enableParallelBuilding = true;
+ #enableParallelBuilding = true;
};
@@ -298,18 +320,18 @@ let
src = jobs.tarball;
diskImage = (diskImageFun vmTools.diskImageFuns)
{ extraPackages =
- [ "libdbd-sqlite3-perl" "libsqlite3-dev" "libbz2-dev" "libwww-curl-perl" "libcurl-dev" "libcurl3-nss" "libssl-dev" "liblzma-dev" ]
+ [ "libsqlite3-dev" "libbz2-dev" "libcurl-dev" "libcurl3-nss" "libssl-dev" "liblzma-dev" ]
++ extraPackages; };
- memSize = 8192;
+ memSize = 1024;
meta.schedulingPriority = 50;
postInstall = "make installcheck";
configureFlags = "--sysconfdir=/etc";
debRequires =
- [ "curl" "libdbd-sqlite3-perl" "libsqlite3-0" "libbz2-1.0" "bzip2" "xz-utils" "libwww-curl-perl" "libssl1.0.0" "liblzma5" ]
+ [ "curl" "libsqlite3-0" "libbz2-1.0" "bzip2" "xz-utils" "libssl1.0.0" "liblzma5" ]
++ extraDebPackages;
debMaintainer = "Eelco Dolstra <eelco.dolstra@logicblox.com>";
doInstallCheck = true;
- enableParallelBuilding = true;
+ #enableParallelBuilding = true;
};
diff --git a/shell.nix b/shell.nix
index 4c1608230..425eb0a19 100644
--- a/shell.nix
+++ b/shell.nix
@@ -6,7 +6,8 @@ with import <nixpkgs> {};
name = "nix";
buildInputs =
- [ curl bison flex perl libxml2 libxslt bzip2 xz
+ [ curl bison flex libxml2 libxslt
+ bzip2 xz brotli
pkgconfig sqlite libsodium boehmgc
docbook5 docbook5_xsl
autoconf-archive
@@ -15,14 +16,11 @@ with import <nixpkgs> {};
customMemoryManagement = false;
})
autoreconfHook
- perlPackages.DBDSQLite
];
configureFlags =
[ "--disable-init-state"
"--enable-gc"
- "--with-dbi=${perlPackages.DBI}/${perl.libPrefix}"
- "--with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}"
];
enableParallelBuilding = true;
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc
index 2ce20882d..d7aee2886 100644
--- a/src/build-remote/build-remote.cc
+++ b/src/build-remote/build-remote.cc
@@ -17,13 +17,12 @@
#include "derivations.hh"
using namespace nix;
-using std::cerr;
using std::cin;
-static void handle_alarm(int sig) {
+static void handleAlarm(int sig) {
}
-class machine {
+class Machine {
const std::set<string> supportedFeatures;
const std::set<string> mandatoryFeatures;
@@ -31,8 +30,8 @@ public:
const string hostName;
const std::vector<string> systemTypes;
const string sshKey;
- const unsigned long long maxJobs;
- const unsigned long long speedFactor;
+ const unsigned int maxJobs;
+ const unsigned int speedFactor;
bool enabled;
bool allSupported(const std::set<string> & features) const {
@@ -50,28 +49,29 @@ public:
});
}
- machine(decltype(hostName) hostName,
+ Machine(decltype(hostName) hostName,
decltype(systemTypes) systemTypes,
decltype(sshKey) sshKey,
decltype(maxJobs) maxJobs,
decltype(speedFactor) speedFactor,
decltype(supportedFeatures) supportedFeatures,
decltype(mandatoryFeatures) mandatoryFeatures) :
- supportedFeatures{std::move(supportedFeatures)},
- mandatoryFeatures{std::move(mandatoryFeatures)},
- hostName{std::move(hostName)},
- systemTypes{std::move(systemTypes)},
- sshKey{std::move(sshKey)},
- maxJobs{std::move(maxJobs)},
- speedFactor{speedFactor == 0 ? 1 : std::move(speedFactor)},
- enabled{true} {};
+ supportedFeatures(supportedFeatures),
+ mandatoryFeatures(mandatoryFeatures),
+ hostName(hostName),
+ systemTypes(systemTypes),
+ sshKey(sshKey),
+ maxJobs(maxJobs),
+ speedFactor(std::max(1U, speedFactor)),
+ enabled(true)
+ {};
};;
-static std::vector<machine> read_conf()
+static std::vector<Machine> readConf()
{
auto conf = getEnv("NIX_REMOTE_SYSTEMS", SYSCONFDIR "/nix/machines");
- auto machines = std::vector<machine>{};
+ auto machines = std::vector<Machine>{};
auto lines = std::vector<string>{};
try {
lines = tokenizeString<std::vector<string>>(readFile(conf), "\n");
@@ -87,10 +87,8 @@ static std::vector<machine> read_conf()
}
auto tokens = tokenizeString<std::vector<string>>(line);
auto sz = tokens.size();
- if (sz < 4) {
- throw new FormatError(format("Bad machines.conf file %1%")
- % conf);
- }
+ if (sz < 4)
+ throw FormatError("bad machines.conf file ‘%1%’", conf);
machines.emplace_back(tokens[0],
tokenizeString<std::vector<string>>(tokens[1], ","),
tokens[2],
@@ -108,7 +106,7 @@ static std::vector<machine> read_conf()
static string currentLoad;
-static AutoCloseFD openSlotLock(const machine & m, unsigned long long slot)
+static AutoCloseFD openSlotLock(const Machine & m, unsigned long long slot)
{
std::ostringstream fn_stream(currentLoad, std::ios_base::ate | std::ios_base::out);
fn_stream << "/";
@@ -126,15 +124,14 @@ int main (int argc, char * * argv)
{
return handleExceptions(argv[0], [&]() {
initNix();
+
/* Ensure we don't get any SSH passphrase or host key popups. */
if (putenv(display_env) == -1 ||
- putenv(ssh_env) == -1) {
- throw SysError("Setting SSH env vars");
- }
+ putenv(ssh_env) == -1)
+ throw SysError("setting SSH env vars");
- if (argc != 4) {
+ if (argc != 4)
throw UsageError("called without required arguments");
- }
auto store = openStore();
@@ -147,15 +144,14 @@ int main (int argc, char * * argv)
std::shared_ptr<Store> sshStore;
AutoCloseFD bestSlotLock;
- auto machines = read_conf();
+ auto machines = readConf();
string drvPath;
string hostName;
for (string line; getline(cin, line);) {
auto tokens = tokenizeString<std::vector<string>>(line);
auto sz = tokens.size();
- if (sz != 3 && sz != 4) {
- throw Error(format("invalid build hook line %1%") % line);
- }
+ if (sz != 3 && sz != 4)
+ throw Error("invalid build hook line ‘%1%’", line);
auto amWilling = tokens[0] == "1";
auto neededSystem = tokens[1];
drvPath = tokens[2];
@@ -174,7 +170,7 @@ int main (int argc, char * * argv)
bool rightType = false;
- machine * bestMachine = nullptr;
+ Machine * bestMachine = nullptr;
unsigned long long bestLoad = 0;
for (auto & m : machines) {
if (m.enabled && std::find(m.systemTypes.begin(),
@@ -221,11 +217,10 @@ int main (int argc, char * * argv)
}
if (!bestSlotLock) {
- if (rightType && !canBuildLocally) {
- cerr << "# postpone\n";
- } else {
- cerr << "# decline\n";
- }
+ if (rightType && !canBuildLocally)
+ std::cerr << "# postpone\n";
+ else
+ std::cerr << "# decline\n";
break;
}
@@ -238,47 +233,51 @@ int main (int argc, char * * argv)
lock = -1;
try {
- sshStore = openStore("ssh://" + bestMachine->hostName + "?key=" + bestMachine->sshKey);
+ sshStore = openStore("ssh-ng://" + bestMachine->hostName,
+ { {"ssh-key", bestMachine->sshKey },
+ {"max-connections", "1" } });
hostName = bestMachine->hostName;
} catch (std::exception & e) {
- cerr << e.what() << '\n';
- cerr << "unable to open SSH connection to ‘" << bestMachine->hostName << "’, trying other available machines...\n";
+ printError("unable to open SSH connection to ‘%s’: %s; trying other available machines...",
+ bestMachine->hostName, e.what());
bestMachine->enabled = false;
continue;
}
goto connected;
}
}
+
connected:
- cerr << "# accept\n";
+ std::cerr << "# accept\n";
string line;
- if (!getline(cin, line)) {
+ if (!getline(cin, line))
throw Error("hook caller didn't send inputs");
- }
- auto inputs = tokenizeString<std::list<string>>(line);
- if (!getline(cin, line)) {
+ auto inputs = tokenizeString<PathSet>(line);
+ if (!getline(cin, line))
throw Error("hook caller didn't send outputs");
- }
- auto outputs = tokenizeString<Strings>(line);
+ auto outputs = tokenizeString<PathSet>(line);
AutoCloseFD uploadLock = openLockFile(currentLoad + "/" + hostName + ".upload-lock", true);
- auto old = signal(SIGALRM, handle_alarm);
+ auto old = signal(SIGALRM, handleAlarm);
alarm(15 * 60);
- if (!lockFile(uploadLock.get(), ltWrite, true)) {
- cerr << "somebody is hogging the upload lock for " << hostName << ", continuing...\n";
- }
+ if (!lockFile(uploadLock.get(), ltWrite, true))
+ printError("somebody is hogging the upload lock for ‘%s’, continuing...");
alarm(0);
signal(SIGALRM, old);
copyPaths(store, ref<Store>(sshStore), inputs);
uploadLock = -1;
- cerr << "building ‘" << drvPath << "’ on ‘" << hostName << "’\n";
+ printError("building ‘%s’ on ‘%s’", drvPath, hostName);
sshStore->buildDerivation(drvPath, readDerivation(drvPath));
- std::remove_if(outputs.begin(), outputs.end(), [=](const Path & path) { return store->isValidPath(path); });
- if (!outputs.empty()) {
- setenv("NIX_HELD_LOCKS", concatStringsSep(" ", outputs).c_str(), 1); /* FIXME: ugly */
- copyPaths(ref<Store>(sshStore), store, outputs);
+ PathSet missing;
+ for (auto & path : outputs)
+ if (!store->isValidPath(path)) missing.insert(path);
+
+ if (!missing.empty()) {
+ setenv("NIX_HELD_LOCKS", concatStringsSep(" ", missing).c_str(), 1); /* FIXME: ugly */
+ copyPaths(ref<Store>(sshStore), store, missing);
}
+
return;
});
}
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index 5a570cefb..615cc8138 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -59,6 +59,8 @@ void EvalState::realiseContext(const PathSet & context)
drvs.insert(decoded.first + "!" + decoded.second);
}
if (!drvs.empty()) {
+ if (!settings.enableImportFromDerivation)
+ throw EvalError(format("attempted to realize ‘%1%’ during evaluation but 'allow-import-from-derivation' is false") % *(drvs.begin()));
/* For performance, prefetch all substitute info. */
PathSet willBuild, willSubstitute, unknown;
unsigned long long downloadSize, narSize;
@@ -176,6 +178,45 @@ static void prim_importNative(EvalState & state, const Pos & pos, Value * * args
}
+/* Execute a program and parse its output */
+static void prim_exec(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+ state.forceList(*args[0], pos);
+ auto elems = args[0]->listElems();
+ auto count = args[0]->listSize();
+ if (count == 0) {
+ throw EvalError(format("at least one argument to 'exec' required, at %1%") % pos);
+ }
+ PathSet context;
+ auto program = state.coerceToString(pos, *elems[0], context, false, false);
+ Strings commandArgs;
+ for (unsigned int i = 1; i < args[0]->listSize(); ++i) {
+ commandArgs.emplace_back(state.coerceToString(pos, *elems[i], context, false, false));
+ }
+ try {
+ state.realiseContext(context);
+ } catch (InvalidPathError & e) {
+ throw EvalError(format("cannot execute ‘%1%’, since path ‘%2%’ is not valid, at %3%")
+ % program % e.path % pos);
+ }
+
+ auto output = runProgram(program, true, commandArgs);
+ Expr * parsed;
+ try {
+ parsed = state.parseExprFromString(output, pos.file);
+ } catch (Error & e) {
+ e.addPrefix(format("While parsing the output from ‘%1%’, at %2%\n") % program % pos);
+ throw;
+ }
+ try {
+ state.eval(parsed, v);
+ } catch (Error & e) {
+ e.addPrefix(format("While evaluating the output from ‘%1%’, at %2%\n") % program % pos);
+ throw;
+ }
+}
+
+
/* Return a string representing the type of the expression. */
static void prim_typeOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
@@ -1901,8 +1942,10 @@ void EvalState::createBaseEnv()
mkApp(v, *baseEnv.values[baseEnvDispl - 1], *v2);
forceValue(v);
addConstant("import", v);
- if (settings.enableImportNative)
+ if (settings.enableNativeCode) {
addPrimOp("__importNative", 2, prim_importNative);
+ addPrimOp("__exec", 1, prim_exec);
+ }
addPrimOp("__typeOf", 1, prim_typeOf);
addPrimOp("isNull", 1, prim_isNull);
addPrimOp("__isFunction", 1, prim_isFunction);
diff --git a/src/libexpr/primops/fetchgit.cc b/src/libexpr/primops/fetchgit.cc
index bd440c8c6..09e2c077b 100644
--- a/src/libexpr/primops/fetchgit.cc
+++ b/src/libexpr/primops/fetchgit.cc
@@ -58,12 +58,14 @@ static void prim_fetchgit(EvalState & state, const Pos & pos, Value * * args, Va
for (auto & attr : *args[0]->attrs) {
string name(attr.name);
- if (name == "url")
- url = state.forceStringNoCtx(*attr.value, *attr.pos);
- else if (name == "rev")
+ if (name == "url") {
+ PathSet context;
+ url = state.coerceToString(*attr.pos, *attr.value, context, false, false);
+ if (hasPrefix(url, "/")) url = "file://" + url;
+ } else if (name == "rev")
rev = state.forceStringNoCtx(*attr.value, *attr.pos);
else
- throw EvalError(format("unsupported argument ‘%1%’ to ‘fetchgit’, at %3%") % attr.name % attr.pos);
+ throw EvalError("unsupported argument ‘%s’ to ‘fetchgit’, at %s", attr.name, *attr.pos);
}
if (url.empty())
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
index 81f918d48..802e8ed2e 100644
--- a/src/libexpr/value.hh
+++ b/src/libexpr/value.hh
@@ -256,7 +256,7 @@ size_t valueSize(Value & v);
#if HAVE_BOEHMGC
typedef std::vector<Value *, gc_allocator<Value *> > ValueVector;
-typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<Value *> > ValueMap;
+typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<std::pair<const Symbol, Value *> > > ValueMap;
#else
typedef std::vector<Value *> ValueVector;
typedef std::map<Symbol, Value *> ValueMap;
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 53fa83fe0..c1828aa7d 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -106,8 +106,6 @@ void initNix()
std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf));
#endif
- logger = makeDefaultLogger();
-
/* Initialise OpenSSL locking. */
opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks());
CRYPTO_set_locking_callback(opensslLockCallback);
@@ -167,6 +165,10 @@ struct LegacyArgs : public MixCommonArgs
settings.set("build-fallback", "true");
});
+ mkFlag1('j', "max-jobs", "jobs", "maximum number of parallel builds", [=](std::string s) {
+ settings.set("build-max-jobs", s);
+ });
+
auto intSettingAlias = [&](char shortName, const std::string & longName,
const std::string & description, const std::string & dest) {
mkFlag<unsigned int>(shortName, longName, description, [=](unsigned int n) {
@@ -174,7 +176,6 @@ struct LegacyArgs : public MixCommonArgs
});
};
- intSettingAlias('j', "max-jobs", "maximum number of parallel builds", "build-max-jobs");
intSettingAlias(0, "cores", "maximum number of CPU cores to use inside a build", "build-cores");
intSettingAlias(0, "max-silent-time", "number of seconds of silence before a build is killed", "build-max-silent-time");
intSettingAlias(0, "timeout", "number of seconds before a build is killed", "build-timeout");
@@ -329,11 +330,7 @@ RunPager::~RunPager()
pid.wait();
}
} catch (...) {
- try {
- pid.kill(true);
- } catch (...) {
- ignoreException();
- }
+ ignoreException();
}
}
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 3e07a2aa2..b536c6c00 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -79,10 +79,7 @@ struct BinaryCacheStoreAccessor : public FSAccessor
BinaryCacheStore::BinaryCacheStore(const Params & params)
: Store(params)
- , compression(get(params, "compression", "xz"))
- , writeNARListing(get(params, "write-nar-listing", "0") == "1")
{
- auto secretKeyFile = get(params, "secret-key", "");
if (secretKeyFile != "")
secretKey = std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile)));
@@ -97,7 +94,7 @@ void BinaryCacheStore::init()
auto cacheInfo = getFile(cacheInfoFile);
if (!cacheInfo) {
- upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n");
+ upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info");
} else {
for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) {
size_t colon = line.find(':');
@@ -224,7 +221,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str
}
}
- upsertFile(storePathToHash(info.path) + ".ls.xz", *compress("xz", jsonOut.str()));
+ upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(), "application/json");
}
else {
@@ -250,10 +247,11 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str
narInfo->url = "nar/" + printHash32(narInfo->fileHash) + ".nar"
+ (compression == "xz" ? ".xz" :
compression == "bzip2" ? ".bz2" :
+ compression == "br" ? ".br" :
"");
if (repair || !fileExists(narInfo->url)) {
stats.narWrite++;
- upsertFile(narInfo->url, *narCompressed);
+ upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar");
} else
stats.narWriteAverted++;
@@ -264,7 +262,7 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str
/* Atomically write the NAR info file.*/
if (secretKey) narInfo->sign(*secretKey);
- upsertFile(narInfoFile, narInfo->to_string());
+ upsertFile(narInfoFile, narInfo->to_string(), "text/x-nix-narinfo");
auto hashPart = storePathToHash(narInfo->path);
@@ -382,4 +380,28 @@ ref<FSAccessor> BinaryCacheStore::getFSAccessor()
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
}
+std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path & path)
+{
+ Path drvPath;
+
+ if (isDerivation(path))
+ drvPath = path;
+ else {
+ try {
+ auto info = queryPathInfo(path);
+ // FIXME: add a "Log" field to .narinfo
+ if (info->deriver == "") return nullptr;
+ drvPath = info->deriver;
+ } catch (InvalidPath &) {
+ return nullptr;
+ }
+ }
+
+ auto logPath = "log/" + baseNameOf(drvPath);
+
+ debug("fetching build log from binary cache ‘%s/%s’", getUri(), logPath);
+
+ return getFile(logPath);
+}
+
}
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index a70d50d49..5c2d0acfd 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -13,13 +13,15 @@ struct NarInfo;
class BinaryCacheStore : public Store
{
-private:
+public:
- std::unique_ptr<SecretKey> secretKey;
+ const Setting<std::string> compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
+ const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
+ const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"};
- std::string compression;
+private:
- bool writeNARListing;
+ std::unique_ptr<SecretKey> secretKey;
protected:
@@ -31,7 +33,9 @@ public:
virtual bool fileExists(const std::string & path) = 0;
- virtual void upsertFile(const std::string & path, const std::string & data) = 0;
+ virtual void upsertFile(const std::string & path,
+ const std::string & data,
+ const std::string & mimeType) = 0;
/* Return the contents of the specified file, or null if it
doesn't exist. */
@@ -122,6 +126,8 @@ public:
void addSignatures(const Path & storePath, const StringSet & sigs) override
{ notImpl(); }
+ std::shared_ptr<std::string> getBuildLog(const Path & path) override;
+
};
}
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index 4a7e1a62b..b23447fa0 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -635,7 +635,7 @@ HookInstance::~HookInstance()
{
try {
toHook.writeSide = -1;
- if (pid != -1) pid.kill(true);
+ if (pid != -1) pid.kill();
} catch (...) {
ignoreException();
}
@@ -1430,7 +1430,7 @@ void DerivationGoal::buildDone()
to have terminated. In fact, the builder could also have
simply have closed its end of the pipe, so just to be sure,
kill it. */
- int status = hook ? hook->pid.kill(true) : pid.kill(true);
+ int status = hook ? hook->pid.kill() : pid.kill();
debug(format("builder process for ‘%1%’ finished") % drvPath);
@@ -1573,36 +1573,48 @@ HookReply DerivationGoal::tryBuildHook()
if (!worker.hook)
worker.hook = std::make_unique<HookInstance>();
- /* Tell the hook about system features (beyond the system type)
- required from the build machine. (The hook could parse the
- drv file itself, but this is easier.) */
- Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures"));
- for (auto & i : features) checkStoreName(i); /* !!! abuse */
-
- /* Send the request to the hook. */
- writeLine(worker.hook->toHook.writeSide.get(), (format("%1% %2% %3% %4%")
- % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0")
- % drv->platform % drvPath % concatStringsSep(",", features)).str());
+ try {
- /* Read the first line of input, which should be a word indicating
- whether the hook wishes to perform the build. */
- string reply;
- while (true) {
- string s = readLine(worker.hook->fromHook.readSide.get());
- if (string(s, 0, 2) == "# ") {
- reply = string(s, 2);
- break;
+ /* Tell the hook about system features (beyond the system type)
+ required from the build machine. (The hook could parse the
+ drv file itself, but this is easier.) */
+ Strings features = tokenizeString<Strings>(get(drv->env, "requiredSystemFeatures"));
+ for (auto & i : features) checkStoreName(i); /* !!! abuse */
+
+ /* Send the request to the hook. */
+ writeLine(worker.hook->toHook.writeSide.get(), (format("%1% %2% %3% %4%")
+ % (worker.getNrLocalBuilds() < settings.maxBuildJobs ? "1" : "0")
+ % drv->platform % drvPath % concatStringsSep(",", features)).str());
+
+ /* Read the first line of input, which should be a word indicating
+ whether the hook wishes to perform the build. */
+ string reply;
+ while (true) {
+ string s = readLine(worker.hook->fromHook.readSide.get());
+ if (string(s, 0, 2) == "# ") {
+ reply = string(s, 2);
+ break;
+ }
+ s += "\n";
+ writeToStderr(s);
}
- s += "\n";
- writeToStderr(s);
- }
- debug(format("hook reply is ‘%1%’") % reply);
+ debug(format("hook reply is ‘%1%’") % reply);
+
+ if (reply == "decline" || reply == "postpone")
+ return reply == "decline" ? rpDecline : rpPostpone;
+ else if (reply != "accept")
+ throw Error(format("bad hook reply ‘%1%’") % reply);
- if (reply == "decline" || reply == "postpone")
- return reply == "decline" ? rpDecline : rpPostpone;
- else if (reply != "accept")
- throw Error(format("bad hook reply ‘%1%’") % reply);
+ } catch (SysError & e) {
+ if (e.errNo == EPIPE) {
+ printError("build hook died unexpectedly: %s",
+ chomp(drainFD(worker.hook->fromHook.readSide.get())));
+ worker.hook = 0;
+ return rpDecline;
+ } else
+ throw;
+ }
printMsg(lvlTalkative, format("using hook to build path(s) %1%") % showPaths(missingPaths));
@@ -2358,8 +2370,6 @@ void DerivationGoal::runChild()
ss.push_back("/dev/tty");
ss.push_back("/dev/urandom");
ss.push_back("/dev/zero");
- ss.push_back("/dev/ptmx");
- ss.push_back("/dev/pts");
createSymlink("/proc/self/fd", chrootRootDir + "/dev/fd");
createSymlink("/proc/self/fd/0", chrootRootDir + "/dev/stdin");
createSymlink("/proc/self/fd/1", chrootRootDir + "/dev/stdout");
@@ -2374,10 +2384,11 @@ void DerivationGoal::runChild()
ss.push_back("/etc/nsswitch.conf");
ss.push_back("/etc/services");
ss.push_back("/etc/hosts");
- ss.push_back("/var/run/nscd/socket");
+ if (pathExists("/var/run/nscd/socket"))
+ ss.push_back("/var/run/nscd/socket");
}
- for (auto & i : ss) dirsInChroot[i] = i;
+ for (auto & i : ss) dirsInChroot.emplace(i, i);
/* Bind-mount all the directories from the "host"
filesystem that we want in the chroot
@@ -2415,17 +2426,13 @@ void DerivationGoal::runChild()
fmt("size=%s", settings.sandboxShmSize).c_str()) == -1)
throw SysError("mounting /dev/shm");
-#if 0
- // FIXME: can't figure out how to do this in a user
- // namespace.
-
/* Mount a new devpts on /dev/pts. Note that this
requires the kernel to be compiled with
CONFIG_DEVPTS_MULTIPLE_INSTANCES=y (which is the case
if /dev/ptx/ptmx exists). */
if (pathExists("/dev/pts/ptmx") &&
!pathExists(chrootRootDir + "/dev/ptmx")
- && dirsInChroot.find("/dev/pts") == dirsInChroot.end())
+ && !dirsInChroot.count("/dev/pts"))
{
if (mount("none", (chrootRootDir + "/dev/pts").c_str(), "devpts", 0, "newinstance,mode=0620") == -1)
throw SysError("mounting /dev/pts");
@@ -2435,7 +2442,6 @@ void DerivationGoal::runChild()
Linux versions, it is created with permissions 0. */
chmod_(chrootRootDir + "/dev/pts/ptmx", 0666);
}
-#endif
/* Do the chroot(). */
if (chdir(chrootRootDir.c_str()) == -1)
@@ -2732,6 +2738,8 @@ void DerivationGoal::registerOutputs()
Path path = i.second.path;
if (missingPaths.find(path) == missingPaths.end()) continue;
+ ValidPathInfo info;
+
Path actualPath = path;
if (useChroot) {
actualPath = chrootRootDir + path;
@@ -2834,6 +2842,8 @@ void DerivationGoal::registerOutputs()
format("output path ‘%1%’ has %2% hash ‘%3%’ when ‘%4%’ was expected")
% path % i.second.hashAlgo % printHash16or32(h2) % printHash16or32(h));
}
+
+ info.ca = makeFixedOutputCA(recursive, h2);
}
/* Get rid of all weird permissions. This also checks that
@@ -2933,7 +2943,6 @@ void DerivationGoal::registerOutputs()
worker.markContentsGood(path);
}
- ValidPathInfo info;
info.path = path;
info.narHash = hash.first;
info.narSize = hash.second;
@@ -3012,9 +3021,6 @@ void DerivationGoal::registerOutputs()
}
-string drvsLogDir = "drvs";
-
-
Path DerivationGoal::openLogFile()
{
logSize = 0;
@@ -3024,13 +3030,11 @@ Path DerivationGoal::openLogFile()
string baseName = baseNameOf(drvPath);
/* Create a log file. */
- Path dir = (format("%1%/%2%/%3%/") % worker.store.logDir % drvsLogDir % string(baseName, 0, 2)).str();
+ Path dir = fmt("%s/%s/%s/", worker.store.logDir, worker.store.drvsLogDir, string(baseName, 0, 2));
createDirs(dir);
- Path logFileName = (format("%1%/%2%%3%")
- % dir
- % string(baseName, 2)
- % (settings.compressLog ? ".bz2" : "")).str();
+ Path logFileName = fmt("%s/%s%s", dir, string(baseName, 2),
+ settings.compressLog ? ".bz2" : "");
fdLogFile = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0666);
if (!fdLogFile) throw SysError(format("creating log file ‘%1%’") % logFileName);
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index 79526c594..0c6ceb9f6 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -4,7 +4,7 @@
#include "util.hh"
#include "worker-protocol.hh"
#include "fs-accessor.hh"
-
+#include "istringstream_nocopy.hh"
namespace nix {
@@ -152,7 +152,7 @@ static StringSet parseStrings(std::istream & str, bool arePaths)
static Derivation parseDerivation(const string & s)
{
Derivation drv;
- std::istringstream str(s);
+ istringstream_nocopy str(s);
expect(str, "Derive([");
/* Parse the list of outputs. */
@@ -397,8 +397,8 @@ PathSet BasicDerivation::outputPaths() const
Source & readDerivation(Source & in, Store & store, BasicDerivation & drv)
{
drv.outputs.clear();
- auto nr = readInt(in);
- for (unsigned int n = 0; n < nr; n++) {
+ auto nr = readNum<size_t>(in);
+ for (size_t n = 0; n < nr; n++) {
auto name = readString(in);
DerivationOutput o;
in >> o.path >> o.hashAlgo >> o.hash;
@@ -410,8 +410,8 @@ Source & readDerivation(Source & in, Store & store, BasicDerivation & drv)
in >> drv.platform >> drv.builder;
drv.args = readStrings<Strings>(in);
- nr = readInt(in);
- for (unsigned int n = 0; n < nr; n++) {
+ nr = readNum<size_t>(in);
+ for (size_t n = 0; n < nr; n++) {
auto key = readString(in);
auto value = readString(in);
drv.env[key] = value;
diff --git a/src/libstore/download.cc b/src/libstore/download.cc
index 661ee2ed5..95e6f7bac 100644
--- a/src/libstore/download.cc
+++ b/src/libstore/download.cc
@@ -5,6 +5,11 @@
#include "store-api.hh"
#include "archive.hh"
#include "s3.hh"
+#include "compression.hh"
+
+#ifdef ENABLE_S3
+#include <aws/core/client/ClientConfiguration.h>
+#endif
#include <unistd.h>
#include <fcntl.h>
@@ -34,6 +39,16 @@ std::string resolveUri(const std::string & uri)
return uri;
}
+ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data)
+{
+ if (encoding == "")
+ return data;
+ else if (encoding == "br")
+ return decompress(encoding, *data);
+ else
+ throw Error("unsupported Content-Encoding ‘%s’", encoding);
+}
+
struct CurlDownloader : public Downloader
{
CURLM * curlm = 0;
@@ -67,6 +82,8 @@ struct CurlDownloader : public Downloader
struct curl_slist * requestHeaders = 0;
+ std::string encoding;
+
DownloadItem(CurlDownloader & downloader, const DownloadRequest & request)
: downloader(downloader), request(request)
{
@@ -124,6 +141,7 @@ struct CurlDownloader : public Downloader
auto ss = tokenizeString<vector<string>>(line, " ");
status = ss.size() >= 2 ? ss[1] : "";
result.data = std::make_shared<std::string>();
+ encoding = "";
} else {
auto i = line.find(':');
if (i != string::npos) {
@@ -139,7 +157,8 @@ struct CurlDownloader : public Downloader
debug(format("shutting down on 200 HTTP response with expected ETag"));
return 0;
}
- }
+ } else if (name == "content-encoding")
+ encoding = trim(string(line, i + 1));;
}
}
return realSize;
@@ -224,8 +243,7 @@ struct CurlDownloader : public Downloader
curl_easy_setopt(req, CURLOPT_NOBODY, 1);
if (request.verifyTLS)
- curl_easy_setopt(req, CURLOPT_CAINFO,
- getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt")).c_str());
+ curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
else {
curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0);
curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
@@ -266,14 +284,34 @@ struct CurlDownloader : public Downloader
{
result.cached = httpStatus == 304;
done = true;
- callSuccess(success, failure, const_cast<const DownloadResult &>(result));
+
+ try {
+ result.data = decodeContent(encoding, ref<std::string>(result.data));
+ callSuccess(success, failure, const_cast<const DownloadResult &>(result));
+ } catch (...) {
+ done = true;
+ callFailure(failure, std::current_exception());
+ }
} else {
Error err =
(httpStatus == 404 || code == CURLE_FILE_COULDNT_READ_FILE) ? NotFound :
httpStatus == 403 ? Forbidden :
(httpStatus == 408 || httpStatus == 500 || httpStatus == 503
|| httpStatus == 504 || httpStatus == 522 || httpStatus == 524
- || code == CURLE_COULDNT_RESOLVE_HOST || code == CURLE_RECV_ERROR) ? Transient :
+ || code == CURLE_COULDNT_RESOLVE_HOST
+ || code == CURLE_RECV_ERROR
+
+ // this seems to occur occasionally for retriable reasons, and shows up in an error like this:
+ // curl: (23) Failed writing body (315 != 16366)
+ || code == CURLE_WRITE_ERROR
+
+ // this is a generic SSL failure that in some cases (e.g., certificate error) is permanent but also appears in transient cases, so we consider it retryable
+ || code == CURLE_SSL_CONNECT_ERROR
+#if LIBCURL_VERSION_NUM >= 0x073200
+ || code == CURLE_HTTP2
+ || code == CURLE_HTTP2_STREAM
+#endif
+ ) ? Transient :
Misc;
attempt++;
@@ -491,7 +529,7 @@ struct CurlDownloader : public Downloader
// FIXME: do this on a worker thread
sync2async<DownloadResult>(success, failure, [&]() -> DownloadResult {
#ifdef ENABLE_S3
- S3Helper s3Helper;
+ S3Helper s3Helper(Aws::Region::US_EAST_1); // FIXME: make configurable
auto slash = request.uri.find('/', 5);
if (slash == std::string::npos)
throw nix::Error("bad S3 URI ‘%s’", request.uri);
@@ -612,6 +650,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa
Hash hash = hashString(expectedHash ? expectedHash.type : htSHA256, *res.data);
info.path = store->makeFixedOutputPath(false, hash, name);
info.narHash = hashString(htSHA256, *sink.s);
+ info.ca = makeFixedOutputCA(false, hash);
store->addToStore(info, sink.s, false, true);
storePath = info.path;
}
@@ -640,7 +679,7 @@ Path Downloader::downloadCached(ref<Store> store, const string & url_, bool unpa
Path tmpDir = createTempDir();
AutoDelete autoDelete(tmpDir, true);
// FIXME: this requires GNU tar for decompression.
- runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"}, "");
+ runProgram("tar", true, {"xf", storePath, "-C", tmpDir, "--strip-components", "1"});
unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, false);
}
replaceSymlink(unpackedStorePath, unpackedLink);
diff --git a/src/libstore/download.hh b/src/libstore/download.hh
index bdb5011e7..62f3860b9 100644
--- a/src/libstore/download.hh
+++ b/src/libstore/download.hh
@@ -15,7 +15,7 @@ struct DownloadRequest
bool verifyTLS = true;
enum { yes, no, automatic } showProgress = yes;
bool head = false;
- size_t tries = 1;
+ size_t tries = 5;
unsigned int baseRetryTimeMs = 250;
DownloadRequest(const std::string & uri) : uri(uri) { }
@@ -73,4 +73,7 @@ public:
bool isUri(const string & s);
+/* Decode data according to the Content-Encoding header. */
+ref<std::string> decodeContent(const std::string & encoding, ref<std::string> data);
+
}
diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc
index c5618c826..2b8ab063e 100644
--- a/src/libstore/export-import.cc
+++ b/src/libstore/export-import.cc
@@ -61,39 +61,17 @@ void Store::exportPath(const Path & path, Sink & sink)
hashAndWriteSink << exportMagic << path << info->references << info->deriver << 0;
}
-struct TeeSource : Source
-{
- Source & readSource;
- ref<std::string> data;
- TeeSource(Source & readSource)
- : readSource(readSource)
- , data(make_ref<std::string>())
- {
- }
- size_t read(unsigned char * data, size_t len)
- {
- size_t n = readSource.read(data, len);
- this->data->append((char *) data, n);
- return n;
- }
-};
-
-struct NopSink : ParseSink
-{
-};
-
Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor, bool dontCheckSigs)
{
Paths res;
while (true) {
- unsigned long long n = readLongLong(source);
+ auto n = readNum<uint64_t>(source);
if (n == 0) break;
if (n != 1) throw Error("input doesn't look like something created by ‘nix-store --export’");
/* Extract the NAR from the source. */
- TeeSource tee(source);
- NopSink sink;
- parseDump(sink, tee);
+ TeeSink tee(source);
+ parseDump(tee, tee.source);
uint32_t magic = readInt(source);
if (magic != exportMagic)
@@ -110,14 +88,14 @@ Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor,
info.deriver = readString(source);
if (info.deriver != "") assertStorePath(info.deriver);
- info.narHash = hashString(htSHA256, *tee.data);
- info.narSize = tee.data->size();
+ info.narHash = hashString(htSHA256, *tee.source.data);
+ info.narSize = tee.source.data->size();
// Ignore optional legacy signature.
if (readInt(source) == 1)
readString(source);
- addToStore(info, tee.data, false, dontCheckSigs, accessor);
+ addToStore(info, tee.source.data, false, dontCheckSigs, accessor);
res.push_back(info.path);
}
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index 8e90913cc..0b03d61a7 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -679,7 +679,7 @@ void LocalStore::removeUnusedLinks(const GCState & state)
if (unlink(path.c_str()) == -1)
throw SysError(format("deleting ‘%1%’") % path);
- state.results.bytesFreed += st.st_blocks * 512;
+ state.results.bytesFreed += st.st_blocks * 512ULL;
}
struct stat st;
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 62ed0376d..b9f4fada5 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -2,9 +2,9 @@
#include "util.hh"
#include "archive.hh"
-#include <map>
#include <algorithm>
-#include <unistd.h>
+#include <map>
+#include <thread>
namespace nix {
@@ -53,11 +53,7 @@ Settings::Settings()
keepGoing = false;
tryFallback = false;
maxBuildJobs = 1;
- buildCores = 1;
-#ifdef _SC_NPROCESSORS_ONLN
- long res = sysconf(_SC_NPROCESSORS_ONLN);
- if (res > 0) buildCores = res;
-#endif
+ buildCores = std::max(1U, std::thread::hardware_concurrency());
readOnlyMode = false;
thisSystem = SYSTEM;
maxSilentTime = 0;
@@ -82,8 +78,10 @@ Settings::Settings()
envKeepDerivations = false;
lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";
showTrace = false;
- enableImportNative = false;
+ enableNativeCode = false;
netrcFile = fmt("%s/%s", nixConfDir, "netrc");
+ caFile = getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", "/etc/ssl/certs/ca-certificates.crt"));
+ enableImportFromDerivation = true;
useSandbox = "false"; // TODO: make into an enum
#if __linux__
@@ -98,14 +96,14 @@ Settings::Settings()
runDiffHook = false;
diffHook = "";
enforceDeterminism = true;
- binaryCachePublicKeys = Strings();
+ binaryCachePublicKeys = Strings{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="};
secretKeyFiles = Strings();
binaryCachesParallelConnections = 25;
enableHttp2 = true;
tarballTtl = 60 * 60;
signedBinaryCaches = "";
substituters = Strings();
- binaryCaches = Strings();
+ binaryCaches = nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings();
extraBinaryCaches = Strings();
trustedUsers = Strings({"root"});
allowedUsers = Strings({"*"});
@@ -155,7 +153,15 @@ void Settings::set(const string & name, const string & value)
void Settings::update()
{
_get(tryFallback, "build-fallback");
- _get(maxBuildJobs, "build-max-jobs");
+
+ std::string s = "1";
+ _get(s, "build-max-jobs");
+ if (s == "auto")
+ maxBuildJobs = std::max(1U, std::thread::hardware_concurrency());
+ else
+ if (!string2Int(s, maxBuildJobs))
+ throw Error("configuration setting ‘build-max-jobs’ should be ‘auto’ or an integer");
+
_get(buildCores, "build-cores");
_get(thisSystem, "system");
_get(maxSilentTime, "build-max-silent-time");
@@ -178,13 +184,13 @@ void Settings::update()
_get(envKeepDerivations, "env-keep-derivations");
_get(sshSubstituterHosts, "ssh-substituter-hosts");
_get(useSshSubstituter, "use-ssh-substituter");
- _get(logServers, "log-servers");
- _get(enableImportNative, "allow-unsafe-native-code-during-evaluation");
+ _get(enableNativeCode, "allow-unsafe-native-code-during-evaluation");
_get(useCaseHack, "use-case-hack");
_get(preBuildHook, "pre-build-hook");
_get(keepGoing, "keep-going");
_get(keepFailed, "keep-failed");
_get(netrcFile, "netrc-file");
+ _get(enableImportFromDerivation, "allow-import-from-derivation");
_get(useSandbox, "build-use-sandbox", "build-use-chroot");
_get(sandboxPaths, "build-sandbox-paths", "build-chroot-dirs");
_get(extraSandboxPaths, "build-extra-sandbox-paths", "build-extra-chroot-dirs");
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index d74488a41..d47fdb7c9 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -177,11 +177,8 @@ struct Settings {
/* Whether to show a stack trace if Nix evaluation fails. */
bool showTrace;
- /* A list of URL prefixes that can return Nix build logs. */
- Strings logServers;
-
- /* Whether the importNative primop should be enabled */
- bool enableImportNative;
+ /* Whether native-code enabling primops should be enabled */
+ bool enableNativeCode;
/* Whether to enable sandboxed builds (string until we get an enum for true/false/relaxed) */
string useSandbox;
@@ -260,6 +257,12 @@ struct Settings {
downloads. */
Path netrcFile;
+ /* Path to the SSL CA file used */
+ Path caFile;
+
+ /* Whether we allow import-from-derivation */
+ bool enableImportFromDerivation;
+
private:
StringSet deprecatedOptions;
SettingsMap settings, overrides;
diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc
index 9d31f77c9..37a7d6ace 100644
--- a/src/libstore/http-binary-cache-store.cc
+++ b/src/libstore/http-binary-cache-store.cc
@@ -64,7 +64,9 @@ protected:
}
}
- void upsertFile(const std::string & path, const std::string & data) override
+ void upsertFile(const std::string & path,
+ const std::string & data,
+ const std::string & mimeType) override
{
throw UploadToHTTP("uploading to an HTTP binary cache is not supported");
}
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index b20ff185f..befc560bf 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -4,80 +4,54 @@
#include "serve-protocol.hh"
#include "store-api.hh"
#include "worker-protocol.hh"
+#include "ssh.hh"
namespace nix {
-static std::string uriScheme = "legacy-ssh://";
+static std::string uriScheme = "ssh://";
struct LegacySSHStore : public Store
{
- string host;
+ const Setting<int> maxConnections{this, 1, "max-connections", "maximum number of concurrent SSH connections"};
+ const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"};
+ const Setting<bool> compress{this, false, "compress", "whether to compress the connection"};
struct Connection
{
- Pid sshPid;
- AutoCloseFD out;
- AutoCloseFD in;
+ std::unique_ptr<SSHMaster::Connection> sshConn;
FdSink to;
FdSource from;
};
- AutoDelete tmpDir;
-
- Path socketPath;
-
- Pid sshMaster;
+ std::string host;
ref<Pool<Connection>> connections;
- Path key;
+ SSHMaster master;
- LegacySSHStore(const string & host, const Params & params,
- size_t maxConnections = std::numeric_limits<size_t>::max())
+ LegacySSHStore(const string & host, const Params & params)
: Store(params)
, host(host)
- , tmpDir(createTempDir("", "nix", true, true, 0700))
- , socketPath((Path) tmpDir + "/ssh.sock")
, connections(make_ref<Pool<Connection>>(
- maxConnections,
+ std::max(1, (int) maxConnections),
[this]() { return openConnection(); },
[](const ref<Connection> & r) { return true; }
))
- , key(get(params, "ssh-key", ""))
+ , master(
+ host,
+ sshKey,
+ // Use SSH master only if using more than 1 connection.
+ connections->capacity() > 1,
+ compress)
{
}
ref<Connection> openConnection()
{
- if ((pid_t) sshMaster == -1) {
- sshMaster = startProcess([&]() {
- restoreSignals();
- Strings args{ "ssh", "-M", "-S", socketPath, "-N", "-x", "-a", host };
- if (!key.empty())
- args.insert(args.end(), {"-i", key});
- execvp("ssh", stringsToCharPtrs(args).data());
- throw SysError("starting SSH master connection to host ‘%s’", host);
- });
- }
-
auto conn = make_ref<Connection>();
- Pipe in, out;
- in.create();
- out.create();
- conn->sshPid = startProcess([&]() {
- if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
- throw SysError("duping over STDIN");
- if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
- throw SysError("duping over STDOUT");
- execlp("ssh", "ssh", "-S", socketPath.c_str(), host.c_str(), "nix-store", "--serve", "--write", nullptr);
- throw SysError("executing ‘nix-store --serve’ on remote host ‘%s’", host);
- });
- in.readSide = -1;
- out.writeSide = -1;
- conn->out = std::move(out.readSide);
- conn->in = std::move(in.writeSide);
- conn->to = FdSink(conn->in.get());
- conn->from = FdSource(conn->out.get());
+ conn->sshConn = master.startCommand("nix-store --serve --write");
+ conn->to = FdSink(conn->sshConn->in.get());
+ conn->from = FdSource(conn->sshConn->out.get());
int remoteVersion;
@@ -169,9 +143,9 @@ struct LegacySSHStore : public Store
/* FIXME: inefficient. */
ParseSink parseSink; /* null sink; just parse the NAR */
- SavingSourceAdapter savedNAR(conn->from);
+ TeeSource savedNAR(conn->from);
parseDump(parseSink, savedNAR);
- sink(savedNAR.s);
+ sink(*savedNAR.data);
}
/* Unsupported methods. */
@@ -234,6 +208,41 @@ struct LegacySSHStore : public Store
bool isTrusted() override
{ return true; }
+ void computeFSClosure(const PathSet & paths,
+ PathSet & out, bool flipDirection = false,
+ bool includeOutputs = false, bool includeDerivers = false) override
+ {
+ if (flipDirection || includeDerivers) {
+ Store::computeFSClosure(paths, out, flipDirection, includeOutputs, includeDerivers);
+ return;
+ }
+
+ auto conn(connections->get());
+
+ conn->to
+ << cmdQueryClosure
+ << includeOutputs
+ << paths;
+ conn->to.flush();
+
+ auto res = readStorePaths<PathSet>(*this, conn->from);
+
+ out.insert(res.begin(), res.end());
+ }
+
+ PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override
+ {
+ auto conn(connections->get());
+
+ conn->to
+ << cmdQueryValidPaths
+ << false // lock
+ << maybeSubstitute
+ << paths;
+ conn->to.flush();
+
+ return readStorePaths<PathSet>(*this, conn->from);
+ }
};
static RegisterStoreImplementation regStore([](
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
index 0f377989b..aff22f9fc 100644
--- a/src/libstore/local-binary-cache-store.cc
+++ b/src/libstore/local-binary-cache-store.cc
@@ -30,7 +30,9 @@ protected:
bool fileExists(const std::string & path) override;
- void upsertFile(const std::string & path, const std::string & data) override;
+ void upsertFile(const std::string & path,
+ const std::string & data,
+ const std::string & mimeType) override;
void getFile(const std::string & path,
std::function<void(std::shared_ptr<std::string>)> success,
@@ -83,7 +85,9 @@ bool LocalBinaryCacheStore::fileExists(const std::string & path)
return pathExists(binaryCacheDir + "/" + path);
}
-void LocalBinaryCacheStore::upsertFile(const std::string & path, const std::string & data)
+void LocalBinaryCacheStore::upsertFile(const std::string & path,
+ const std::string & data,
+ const std::string & mimeType)
{
atomicWrite(binaryCacheDir + "/" + path, data);
}
diff --git a/src/libstore/local-fs-store.cc b/src/libstore/local-fs-store.cc
index 4571a2211..bf247903c 100644
--- a/src/libstore/local-fs-store.cc
+++ b/src/libstore/local-fs-store.cc
@@ -2,14 +2,13 @@
#include "fs-accessor.hh"
#include "store-api.hh"
#include "globals.hh"
+#include "compression.hh"
+#include "derivations.hh"
namespace nix {
LocalFSStore::LocalFSStore(const Params & params)
: Store(params)
- , rootDir(get(params, "root"))
- , stateDir(canonPath(get(params, "state", rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir)))
- , logDir(canonPath(get(params, "log", rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir)))
{
}
@@ -84,4 +83,48 @@ void LocalFSStore::narFromPath(const Path & path, Sink & sink)
dumpPath(getRealStoreDir() + std::string(path, storeDir.size()), sink);
}
+const string LocalFSStore::drvsLogDir = "drvs";
+
+
+
+std::shared_ptr<std::string> LocalFSStore::getBuildLog(const Path & path_)
+{
+ auto path(path_);
+
+ assertStorePath(path);
+
+
+ if (!isDerivation(path)) {
+ try {
+ path = queryPathInfo(path)->deriver;
+ } catch (InvalidPath &) {
+ return nullptr;
+ }
+ if (path == "") return nullptr;
+ }
+
+ string baseName = baseNameOf(path);
+
+ for (int j = 0; j < 2; j++) {
+
+ Path logPath =
+ j == 0
+ ? fmt("%s/%s/%s/%s", logDir, drvsLogDir, string(baseName, 0, 2), string(baseName, 2))
+ : fmt("%s/%s/%s", logDir, drvsLogDir, baseName);
+ Path logBz2Path = logPath + ".bz2";
+
+ if (pathExists(logPath))
+ return std::make_shared<std::string>(readFile(logPath));
+
+ else if (pathExists(logBz2Path)) {
+ try {
+ return decompress("bzip2", readFile(logBz2Path));
+ } catch (Error &) { }
+ }
+
+ }
+
+ return nullptr;
+}
+
}
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index afcda6e2b..9111a45f8 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -38,13 +38,14 @@ namespace nix {
LocalStore::LocalStore(const Params & params)
: Store(params)
, LocalFSStore(params)
- , realStoreDir(get(params, "real", rootDir != "" ? rootDir + "/nix/store" : storeDir))
+ , realStoreDir_{this, false, rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
+ "physical path to the Nix store"}
+ , realStoreDir(realStoreDir_)
, dbDir(stateDir + "/db")
, linksDir(realStoreDir + "/.links")
, reservedPath(dbDir + "/reserved")
, schemaPath(dbDir + "/schema")
, trashDir(realStoreDir + "/trash")
- , requireSigs(trim(settings.signedBinaryCaches) != "") // FIXME: rename option
, publicKeys(getDefaultPublicKeys())
{
auto state(_state.lock());
@@ -519,6 +520,8 @@ void LocalStore::checkDerivationOutputs(const Path & drvPath, const Derivation &
uint64_t LocalStore::addValidPath(State & state,
const ValidPathInfo & info, bool checkOutputs)
{
+ assert(info.ca == "" || info.isContentAddressed(*this));
+
state.stmtRegisterValidPath.use()
(info.path)
("sha256:" + printHash(info.narHash))
@@ -667,7 +670,7 @@ bool LocalStore::isValidPathUncached(const Path & path)
}
-PathSet LocalStore::queryValidPaths(const PathSet & paths)
+PathSet LocalStore::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
{
PathSet res;
for (auto & i : paths)
@@ -918,7 +921,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, const ref<std::string> &
info.path % info.narHash.to_string() % h.to_string());
if (requireSigs && !dontCheckSigs && !info.checkSignatures(*this, publicKeys))
- throw Error(format("cannot import path ‘%s’ because it lacks a valid signature") % info.path);
+ throw Error("cannot add path ‘%s’ because it lacks a valid signature", info.path);
addTempRoot(info.path);
@@ -1002,7 +1005,7 @@ Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
info.narHash = hash.first;
info.narSize = hash.second;
info.ultimate = true;
- info.ca = "fixed:" + (recursive ? (std::string) "r:" : "") + h.to_string();
+ info.ca = makeFixedOutputCA(recursive, h);
registerValidPath(info);
}
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 511209d84..f2c40e964 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -21,22 +21,14 @@ namespace nix {
const int nixSchemaVersion = 10;
-extern string drvsLogDir;
-
-
struct Derivation;
struct OptimiseStats
{
- unsigned long filesLinked;
- unsigned long long bytesFreed;
- unsigned long long blocksFreed;
- OptimiseStats()
- {
- filesLinked = 0;
- bytesFreed = blocksFreed = 0;
- }
+ unsigned long filesLinked = 0;
+ unsigned long long bytesFreed = 0;
+ unsigned long long blocksFreed = 0;
};
@@ -75,6 +67,8 @@ private:
public:
+ PathSetting realStoreDir_;
+
const Path realStoreDir;
const Path dbDir;
const Path linksDir;
@@ -84,7 +78,9 @@ public:
private:
- bool requireSigs;
+ Setting<bool> requireSigs{(Store*) this,
+ settings.signedBinaryCaches != "", // FIXME
+ "require-sigs", "whether store paths should have a trusted signature on import"};
PublicKeys publicKeys;
@@ -102,7 +98,7 @@ public:
bool isValidPathUncached(const Path & path) override;
- PathSet queryValidPaths(const PathSet & paths) override;
+ PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override;
PathSet queryAllValidPaths() override;
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
index 13b67b81f..180a936ed 100644
--- a/src/libstore/nar-info-disk-cache.cc
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -106,25 +106,27 @@ public:
"select * from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))");
/* Periodically purge expired entries from the database. */
- auto now = time(0);
-
- SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
- auto queryLastPurge_(queryLastPurge.use());
-
- if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) {
- SQLiteStmt(state->db,
- "delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))")
- .use()
- (now - ttlNegative)
- (now - ttlPositive)
- .exec();
-
- debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db));
-
- SQLiteStmt(state->db,
- "insert or replace into LastPurge(dummy, value) values ('', ?)")
- .use()(now).exec();
- }
+ retrySQLite<void>([&]() {
+ auto now = time(0);
+
+ SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
+ auto queryLastPurge_(queryLastPurge.use());
+
+ if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) {
+ SQLiteStmt(state->db,
+ "delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))")
+ .use()
+ (now - ttlNegative)
+ (now - ttlPositive)
+ .exec();
+
+ debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db));
+
+ SQLiteStmt(state->db,
+ "insert or replace into LastPurge(dummy, value) values ('', ?)")
+ .use()(now).exec();
+ }
+ });
}
Cache & getCache(State & state, const std::string & uri)
@@ -136,114 +138,123 @@ public:
void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
{
- auto state(_state.lock());
+ retrySQLite<void>([&]() {
+ auto state(_state.lock());
- // FIXME: race
+ // FIXME: race
- state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
- assert(sqlite3_changes(state->db) == 1);
- state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+ state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
+ assert(sqlite3_changes(state->db) == 1);
+ state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
+ });
}
bool cacheExists(const std::string & uri,
bool & wantMassQuery, int & priority) override
{
- auto state(_state.lock());
+ return retrySQLite<bool>([&]() {
+ auto state(_state.lock());
- auto i = state->caches.find(uri);
- if (i == state->caches.end()) {
- auto queryCache(state->queryCache.use()(uri));
- if (!queryCache.next()) return false;
- state->caches.emplace(uri,
- Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
- }
+ auto i = state->caches.find(uri);
+ if (i == state->caches.end()) {
+ auto queryCache(state->queryCache.use()(uri));
+ if (!queryCache.next()) return false;
+ state->caches.emplace(uri,
+ Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
+ }
- auto & cache(getCache(*state, uri));
+ auto & cache(getCache(*state, uri));
- wantMassQuery = cache.wantMassQuery;
- priority = cache.priority;
+ wantMassQuery = cache.wantMassQuery;
+ priority = cache.priority;
- return true;
+ return true;
+ });
}
std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
const std::string & uri, const std::string & hashPart) override
{
- auto state(_state.lock());
+ return retrySQLite<std::pair<Outcome, std::shared_ptr<NarInfo>>>(
+ [&]() -> std::pair<Outcome, std::shared_ptr<NarInfo>> {
+ auto state(_state.lock());
+
+ auto & cache(getCache(*state, uri));
- auto & cache(getCache(*state, uri));
-
- auto now = time(0);
-
- auto queryNAR(state->queryNAR.use()
- (cache.id)
- (hashPart)
- (now - ttlNegative)
- (now - ttlPositive));
-
- if (!queryNAR.next())
- return {oUnknown, 0};
-
- if (!queryNAR.getInt(13))
- return {oInvalid, 0};
-
- auto narInfo = make_ref<NarInfo>();
-
- auto namePart = queryNAR.getStr(2);
- narInfo->path = cache.storeDir + "/" +
- hashPart + (namePart.empty() ? "" : "-" + namePart);
- narInfo->url = queryNAR.getStr(3);
- narInfo->compression = queryNAR.getStr(4);
- if (!queryNAR.isNull(5))
- narInfo->fileHash = parseHash(queryNAR.getStr(5));
- narInfo->fileSize = queryNAR.getInt(6);
- narInfo->narHash = parseHash(queryNAR.getStr(7));
- narInfo->narSize = queryNAR.getInt(8);
- for (auto & r : tokenizeString<Strings>(queryNAR.getStr(9), " "))
- narInfo->references.insert(cache.storeDir + "/" + r);
- if (!queryNAR.isNull(10))
- narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(10);
- for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(11), " "))
- narInfo->sigs.insert(sig);
-
- return {oValid, narInfo};
+ auto now = time(0);
+
+ auto queryNAR(state->queryNAR.use()
+ (cache.id)
+ (hashPart)
+ (now - ttlNegative)
+ (now - ttlPositive));
+
+ if (!queryNAR.next())
+ return {oUnknown, 0};
+
+ if (!queryNAR.getInt(13))
+ return {oInvalid, 0};
+
+ auto narInfo = make_ref<NarInfo>();
+
+ auto namePart = queryNAR.getStr(2);
+ narInfo->path = cache.storeDir + "/" +
+ hashPart + (namePart.empty() ? "" : "-" + namePart);
+ narInfo->url = queryNAR.getStr(3);
+ narInfo->compression = queryNAR.getStr(4);
+ if (!queryNAR.isNull(5))
+ narInfo->fileHash = parseHash(queryNAR.getStr(5));
+ narInfo->fileSize = queryNAR.getInt(6);
+ narInfo->narHash = parseHash(queryNAR.getStr(7));
+ narInfo->narSize = queryNAR.getInt(8);
+ for (auto & r : tokenizeString<Strings>(queryNAR.getStr(9), " "))
+ narInfo->references.insert(cache.storeDir + "/" + r);
+ if (!queryNAR.isNull(10))
+ narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(10);
+ for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(11), " "))
+ narInfo->sigs.insert(sig);
+
+ return {oValid, narInfo};
+ });
}
void upsertNarInfo(
const std::string & uri, const std::string & hashPart,
std::shared_ptr<ValidPathInfo> info) override
{
- auto state(_state.lock());
-
- auto & cache(getCache(*state, uri));
-
- if (info) {
-
- auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);
-
- assert(hashPart == storePathToHash(info->path));
-
- state->insertNAR.use()
- (cache.id)
- (hashPart)
- (storePathToName(info->path))
- (narInfo ? narInfo->url : "", narInfo != 0)
- (narInfo ? narInfo->compression : "", narInfo != 0)
- (narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash)
- (narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
- (info->narHash.to_string())
- (info->narSize)
- (concatStringsSep(" ", info->shortRefs()))
- (info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "")
- (concatStringsSep(" ", info->sigs))
- (time(0)).exec();
-
- } else {
- state->insertMissingNAR.use()
- (cache.id)
- (hashPart)
- (time(0)).exec();
- }
+ retrySQLite<void>([&]() {
+ auto state(_state.lock());
+
+ auto & cache(getCache(*state, uri));
+
+ if (info) {
+
+ auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);
+
+ assert(hashPart == storePathToHash(info->path));
+
+ state->insertNAR.use()
+ (cache.id)
+ (hashPart)
+ (storePathToName(info->path))
+ (narInfo ? narInfo->url : "", narInfo != 0)
+ (narInfo ? narInfo->compression : "", narInfo != 0)
+ (narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash)
+ (narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
+ (info->narHash.to_string())
+ (info->narSize)
+ (concatStringsSep(" ", info->shortRefs()))
+ (info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "")
+ (concatStringsSep(" ", info->sigs))
+ (time(0)).exec();
+
+ } else {
+ state->insertMissingNAR.use()
+ (cache.id)
+ (hashPart)
+ (time(0)).exec();
+ }
+ });
}
};
diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc
index 201cac671..d1042c6de 100644
--- a/src/libstore/nar-info.cc
+++ b/src/libstore/nar-info.cc
@@ -59,9 +59,11 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string &
}
}
else if (name == "Deriver") {
- auto p = store.storeDir + "/" + value;
- if (!store.isStorePath(p)) corrupt();
- deriver = p;
+ if (value != "unknown-deriver") {
+ auto p = store.storeDir + "/" + value;
+ if (!store.isStorePath(p)) corrupt();
+ deriver = p;
+ }
}
else if (name == "System")
system = value;
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 42c09ec7e..e1df137e4 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -40,21 +40,34 @@ template PathSet readStorePaths(Store & store, Source & from);
template Paths readStorePaths(Store & store, Source & from);
/* TODO: Separate these store impls into different files, give them better names */
-RemoteStore::RemoteStore(const Params & params, size_t maxConnections)
+RemoteStore::RemoteStore(const Params & params)
: Store(params)
, connections(make_ref<Pool<Connection>>(
- maxConnections,
- [this]() { return openConnection(); },
+ std::max(1, (int) maxConnections),
+ [this]() { return openConnectionWrapper(); },
[](const ref<Connection> & r) { return r->to.good() && r->from.good(); }
))
{
}
-UDSRemoteStore::UDSRemoteStore(const Params & params, size_t maxConnections)
+ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper()
+{
+ if (failed)
+ throw Error("opening a connection to remote store ‘%s’ previously failed", getUri());
+ try {
+ return openConnection();
+ } catch (...) {
+ failed = true;
+ throw;
+ }
+}
+
+
+UDSRemoteStore::UDSRemoteStore(const Params & params)
: Store(params)
, LocalFSStore(params)
- , RemoteStore(params, maxConnections)
+ , RemoteStore(params)
{
}
@@ -108,7 +121,7 @@ void RemoteStore::initConnection(Connection & conn)
unsigned int magic = readInt(conn.from);
if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
- conn.daemonVersion = readInt(conn.from);
+ conn.from >> conn.daemonVersion;
if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
throw Error("Nix daemon protocol version not supported");
if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10)
@@ -129,7 +142,7 @@ void RemoteStore::initConnection(Connection & conn)
conn.processStderr();
}
catch (Error & e) {
- throw Error(format("cannot start daemon worker: %1%") % e.msg());
+ throw Error("cannot open connection to remote store ‘%s’: %s", getUri(), e.what());
}
setOptions(conn);
@@ -170,12 +183,11 @@ bool RemoteStore::isValidPathUncached(const Path & path)
auto conn(connections->get());
conn->to << wopIsValidPath << path;
conn->processStderr();
- unsigned int reply = readInt(conn->from);
- return reply != 0;
+ return readInt(conn->from);
}
-PathSet RemoteStore::queryValidPaths(const PathSet & paths)
+PathSet RemoteStore::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
{
auto conn(connections->get());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
@@ -246,8 +258,8 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
conn->to << wopQuerySubstitutablePathInfos << paths;
conn->processStderr();
- unsigned int count = readInt(conn->from);
- for (unsigned int n = 0; n < count; n++) {
+ size_t count = readNum<size_t>(conn->from);
+ for (size_t n = 0; n < count; n++) {
Path path = readStorePath(*this, conn->from);
SubstitutablePathInfo & info(infos[path]);
info.deriver = readString(conn->from);
@@ -277,7 +289,7 @@ void RemoteStore::queryPathInfoUncached(const Path & path,
throw;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
- bool valid = readInt(conn->from) != 0;
+ bool valid; conn->from >> valid;
if (!valid) throw InvalidPath(format("path ‘%s’ is not valid") % path);
}
auto info = std::make_shared<ValidPathInfo>();
@@ -286,12 +298,11 @@ void RemoteStore::queryPathInfoUncached(const Path & path,
if (info->deriver != "") assertStorePath(info->deriver);
info->narHash = parseHash(htSHA256, readString(conn->from));
info->references = readStorePaths<PathSet>(*this, conn->from);
- info->registrationTime = readInt(conn->from);
- info->narSize = readLongLong(conn->from);
+ conn->from >> info->registrationTime >> info->narSize;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
- info->ultimate = readInt(conn->from) != 0;
+ conn->from >> info->ultimate;
info->sigs = readStrings<StringSet>(conn->from);
- info->ca = readString(conn->from);
+ conn->from >> info->ca;
}
return info;
});
@@ -380,8 +391,9 @@ void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string>
conn->to << wopAddToStoreNar
<< info.path << info.deriver << printHash(info.narHash)
<< info.references << info.registrationTime << info.narSize
- << info.ultimate << info.sigs << *nar << repair << dontCheckSigs;
- // FIXME: don't send nar as a string
+ << info.ultimate << info.sigs << info.ca
+ << repair << dontCheckSigs;
+ conn->to(*nar);
conn->processStderr();
}
}
@@ -515,7 +527,7 @@ Roots RemoteStore::findRoots()
auto conn(connections->get());
conn->to << wopFindRoots;
conn->processStderr();
- unsigned int count = readInt(conn->from);
+ size_t count = readNum<size_t>(conn->from);
Roots result;
while (count--) {
Path link = readString(conn->from);
@@ -563,7 +575,7 @@ bool RemoteStore::verifyStore(bool checkContents, bool repair)
auto conn(connections->get());
conn->to << wopVerifyStore << checkContents << repair;
conn->processStderr();
- return readInt(conn->from) != 0;
+ return readInt(conn->from);
}
@@ -576,6 +588,31 @@ void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs)
}
+void RemoteStore::queryMissing(const PathSet & targets,
+ PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+ unsigned long long & downloadSize, unsigned long long & narSize)
+{
+ {
+ auto conn(connections->get());
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19)
+ // Don't hold the connection handle in the fallback case
+ // to prevent a deadlock.
+ goto fallback;
+ conn->to << wopQueryMissing << targets;
+ conn->processStderr();
+ willBuild = readStorePaths<PathSet>(*this, conn->from);
+ willSubstitute = readStorePaths<PathSet>(*this, conn->from);
+ unknown = readStorePaths<PathSet>(*this, conn->from);
+ conn->from >> downloadSize >> narSize;
+ return;
+ }
+
+ fallback:
+ return Store::queryMissing(targets, willBuild, willSubstitute,
+ unknown, downloadSize, narSize);
+}
+
+
RemoteStore::Connection::~Connection()
{
try {
@@ -599,7 +636,7 @@ void RemoteStore::Connection::processStderr(Sink * sink, Source * source)
}
else if (msg == STDERR_READ) {
if (!source) throw Error("no source");
- size_t len = readInt(from);
+ size_t len = readNum<size_t>(from);
auto buf = std::make_unique<unsigned char[]>(len);
writeString(buf.get(), source->read(buf.get(), len), to);
to.flush();
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index 40f17da30..479cf3a79 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -22,13 +22,16 @@ class RemoteStore : public virtual Store
{
public:
- RemoteStore(const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max());
+ const Setting<int> maxConnections{(Store*) this, 1,
+ "max-connections", "maximum number of concurrent connections to the Nix daemon"};
+
+ RemoteStore(const Params & params);
/* Implementations of abstract store API methods. */
bool isValidPathUncached(const Path & path) override;
- PathSet queryValidPaths(const PathSet & paths) override;
+ PathSet queryValidPaths(const PathSet & paths, bool maybeSubstitute = false) override;
PathSet queryAllValidPaths() override;
@@ -85,6 +88,10 @@ public:
void addSignatures(const Path & storePath, const StringSet & sigs) override;
+ void queryMissing(const PathSet & targets,
+ PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
+ unsigned long long & downloadSize, unsigned long long & narSize) override;
+
protected:
struct Connection
@@ -98,6 +105,8 @@ protected:
void processStderr(Sink * sink = 0, Source * source = 0);
};
+ ref<Connection> openConnectionWrapper();
+
virtual ref<Connection> openConnection() = 0;
void initConnection(Connection & conn);
@@ -106,6 +115,8 @@ protected:
private:
+ std::atomic_bool failed{false};
+
void setOptions(Connection & conn);
};
@@ -113,7 +124,7 @@ class UDSRemoteStore : public LocalFSStore, public RemoteStore
{
public:
- UDSRemoteStore(const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max());
+ UDSRemoteStore(const Params & params);
std::string getUri() override;
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index 041c68c68..245455296 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -5,6 +5,9 @@
#include "nar-info.hh"
#include "nar-info-disk-cache.hh"
#include "globals.hh"
+#include "compression.hh"
+#include "download.hh"
+#include "istringstream_nocopy.hh"
#include <aws/core/Aws.h>
#include <aws/core/client/ClientConfiguration.h>
@@ -52,8 +55,8 @@ static void initAWS()
});
}
-S3Helper::S3Helper()
- : config(makeConfig())
+S3Helper::S3Helper(const string & region)
+ : config(makeConfig(region))
, client(make_ref<Aws::S3::S3Client>(*config))
{
}
@@ -70,13 +73,14 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy
}
};
-ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig()
+ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region)
{
initAWS();
auto res = make_ref<Aws::Client::ClientConfiguration>();
- res->region = Aws::Region::US_EAST_1; // FIXME: make configurable
+ res->region = region;
res->requestTimeoutMs = 600 * 1000;
res->retryStrategy = std::make_shared<RetryStrategy>();
+ res->caFile = settings.caFile;
return res;
}
@@ -103,8 +107,10 @@ S3Helper::DownloadResult S3Helper::getObject(
auto result = checkAws(fmt("AWS error fetching ‘%s’", key),
client->GetObject(request));
- res.data = std::make_shared<std::string>(
- dynamic_cast<std::stringstream &>(result.GetBody()).str());
+ res.data = decodeContent(
+ result.GetContentEncoding(),
+ make_ref<std::string>(
+ dynamic_cast<std::stringstream &>(result.GetBody()).str()));
} catch (S3Error & e) {
if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
@@ -117,19 +123,13 @@ S3Helper::DownloadResult S3Helper::getObject(
return res;
}
-#if __linux__
-
-struct istringstream_nocopy : public std::stringstream
-{
- istringstream_nocopy(const std::string & s)
- {
- rdbuf()->pubsetbuf(
- (char *) s.data(), s.size());
- }
-};
-
struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
{
+ const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
+ const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
+ const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
+ const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
+
std::string bucketName;
Stats stats;
@@ -140,6 +140,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
const Params & params, const std::string & bucketName)
: S3BinaryCacheStore(params)
, bucketName(bucketName)
+ , s3Helper(region)
{
diskCache = getNarInfoDiskCache();
}
@@ -218,13 +219,20 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
return true;
}
- void upsertFile(const std::string & path, const std::string & data) override
+ void uploadFile(const std::string & path, const std::string & data,
+ const std::string & mimeType,
+ const std::string & contentEncoding)
{
auto request =
Aws::S3::Model::PutObjectRequest()
.WithBucket(bucketName)
.WithKey(path);
+ request.SetContentType(mimeType);
+
+ if (contentEncoding != "")
+ request.SetContentEncoding(contentEncoding);
+
auto stream = std::make_shared<istringstream_nocopy>(data);
request.SetBody(stream);
@@ -247,6 +255,19 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
stats.putTimeMs += duration;
}
+ void upsertFile(const std::string & path, const std::string & data,
+ const std::string & mimeType) override
+ {
+ if (narinfoCompression != "" && hasSuffix(path, ".narinfo"))
+ uploadFile(path, *compress(narinfoCompression, data), mimeType, narinfoCompression);
+ else if (lsCompression != "" && hasSuffix(path, ".ls"))
+ uploadFile(path, *compress(lsCompression, data), mimeType, lsCompression);
+ else if (logCompression != "" && hasPrefix(path, "log/"))
+ uploadFile(path, *compress(logCompression, data), mimeType, logCompression);
+ else
+ uploadFile(path, data, mimeType, "");
+ }
+
void getFile(const std::string & path,
std::function<void(std::shared_ptr<std::string>)> success,
std::function<void(std::exception_ptr exc)> failure) override
@@ -313,8 +334,6 @@ static RegisterStoreImplementation regStore([](
return store;
});
-#endif
-
}
#endif
diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh
index 5d5d3475c..08a7fbf96 100644
--- a/src/libstore/s3.hh
+++ b/src/libstore/s3.hh
@@ -14,9 +14,9 @@ struct S3Helper
ref<Aws::Client::ClientConfiguration> config;
ref<Aws::S3::S3Client> client;
- S3Helper();
+ S3Helper(const std::string & region);
- ref<Aws::Client::ClientConfiguration> makeConfig();
+ ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region);
struct DownloadResult
{
diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc
index 0197b091c..a81e62dbd 100644
--- a/src/libstore/sqlite.cc
+++ b/src/libstore/sqlite.cc
@@ -3,36 +3,25 @@
#include <sqlite3.h>
+#include <atomic>
+
namespace nix {
[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f)
{
int err = sqlite3_errcode(db);
+
+ auto path = sqlite3_db_filename(db, nullptr);
+ if (!path) path = "(in-memory)";
+
if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
- if (err == SQLITE_PROTOCOL)
- printError("warning: SQLite database is busy (SQLITE_PROTOCOL)");
- else {
- static bool warned = false;
- if (!warned) {
- printError("warning: SQLite database is busy");
- warned = true;
- }
- }
- /* Sleep for a while since retrying the transaction right away
- is likely to fail again. */
- checkInterrupt();
-#if HAVE_NANOSLEEP
- struct timespec t;
- t.tv_sec = 0;
- t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */
- nanosleep(&t, 0);
-#else
- sleep(1);
-#endif
- throw SQLiteBusy(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
+ throw SQLiteBusy(
+ err == SQLITE_PROTOCOL
+ ? fmt("SQLite database ‘%s’ is busy (SQLITE_PROTOCOL)", path)
+ : fmt("SQLite database ‘%s’ is busy", path));
}
else
- throw SQLiteError(format("%1%: %2%") % f.str() % sqlite3_errmsg(db));
+ throw SQLiteError("%s: %s (in ‘%s’)", f.str(), sqlite3_errstr(err), path);
}
SQLite::SQLite(const Path & path)
@@ -54,24 +43,27 @@ SQLite::~SQLite()
void SQLite::exec(const std::string & stmt)
{
- if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK)
- throwSQLiteError(db, format("executing SQLite statement ‘%s’") % stmt);
+ retrySQLite<void>([&]() {
+ if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK)
+ throwSQLiteError(db, format("executing SQLite statement ‘%s’") % stmt);
+ });
}
-void SQLiteStmt::create(sqlite3 * db, const string & s)
+void SQLiteStmt::create(sqlite3 * db, const string & sql)
{
checkInterrupt();
assert(!stmt);
- if (sqlite3_prepare_v2(db, s.c_str(), -1, &stmt, 0) != SQLITE_OK)
- throwSQLiteError(db, "creating statement");
+ if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, 0) != SQLITE_OK)
+ throwSQLiteError(db, fmt("creating statement ‘%s’", sql));
this->db = db;
+ this->sql = sql;
}
SQLiteStmt::~SQLiteStmt()
{
try {
if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
- throwSQLiteError(db, "finalizing statement");
+ throwSQLiteError(db, fmt("finalizing statement ‘%s’", sql));
} catch (...) {
ignoreException();
}
@@ -128,14 +120,14 @@ void SQLiteStmt::Use::exec()
int r = step();
assert(r != SQLITE_ROW);
if (r != SQLITE_DONE)
- throwSQLiteError(stmt.db, "executing SQLite statement");
+ throwSQLiteError(stmt.db, fmt("executing SQLite statement ‘%s’", stmt.sql));
}
bool SQLiteStmt::Use::next()
{
int r = step();
if (r != SQLITE_DONE && r != SQLITE_ROW)
- throwSQLiteError(stmt.db, "executing SQLite query");
+ throwSQLiteError(stmt.db, fmt("executing SQLite query ‘%s’", stmt.sql));
return r == SQLITE_ROW;
}
@@ -182,4 +174,24 @@ SQLiteTxn::~SQLiteTxn()
}
}
+void handleSQLiteBusy(const SQLiteBusy & e)
+{
+ static std::atomic<time_t> lastWarned{0};
+
+ time_t now = time(0);
+
+ if (now > lastWarned + 10) {
+ lastWarned = now;
+ printError("warning: %s", e.what());
+ }
+
+ /* Sleep for a while since retrying the transaction right away
+ is likely to fail again. */
+ checkInterrupt();
+ struct timespec t;
+ t.tv_sec = 0;
+ t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */
+ nanosleep(&t, 0);
+}
+
}
diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh
index 4d347a2e5..14a7a0dd8 100644
--- a/src/libstore/sqlite.hh
+++ b/src/libstore/sqlite.hh
@@ -30,8 +30,9 @@ struct SQLiteStmt
{
sqlite3 * db = 0;
sqlite3_stmt * stmt = 0;
+ std::string sql;
SQLiteStmt() { }
- SQLiteStmt(sqlite3 * db, const std::string & s) { create(db, s); }
+ SQLiteStmt(sqlite3 * db, const std::string & sql) { create(db, sql); }
void create(sqlite3 * db, const std::string & s);
~SQLiteStmt();
operator sqlite3_stmt * () { return stmt; }
@@ -94,6 +95,8 @@ MakeError(SQLiteBusy, SQLiteError);
[[noreturn]] void throwSQLiteError(sqlite3 * db, const format & f);
+void handleSQLiteBusy(const SQLiteBusy & e);
+
/* Convenience function for retrying a SQLite transaction when the
database is busy. */
template<typename T>
@@ -103,6 +106,7 @@ T retrySQLite(std::function<T()> fun)
try {
return fun();
} catch (SQLiteBusy & e) {
+ handleSQLiteBusy(e);
}
}
}
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
index 6f1862afa..bb536fadf 100644
--- a/src/libstore/ssh-store.cc
+++ b/src/libstore/ssh-store.cc
@@ -4,18 +4,36 @@
#include "archive.hh"
#include "worker-protocol.hh"
#include "pool.hh"
+#include "ssh.hh"
namespace nix {
-static std::string uriScheme = "ssh://";
+static std::string uriScheme = "ssh-ng://";
class SSHStore : public RemoteStore
{
public:
- SSHStore(string host, const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max());
+ const Setting<Path> sshKey{(Store*) this, "", "ssh-key", "path to an SSH private key"};
+ const Setting<bool> compress{(Store*) this, false, "compress", "whether to compress the connection"};
+
+ SSHStore(const std::string & host, const Params & params)
+ : Store(params)
+ , RemoteStore(params)
+ , host(host)
+ , master(
+ host,
+ sshKey,
+ // Use SSH master only if using more than 1 connection.
+ connections->capacity() > 1,
+ compress)
+ {
+ }
- std::string getUri() override;
+ std::string getUri() override
+ {
+ return uriScheme + host;
+ }
void narFromPath(const Path & path, Sink & sink) override;
@@ -25,43 +43,16 @@ private:
struct Connection : RemoteStore::Connection
{
- Pid sshPid;
- AutoCloseFD out;
- AutoCloseFD in;
+ std::unique_ptr<SSHMaster::Connection> sshConn;
};
ref<RemoteStore::Connection> openConnection() override;
- AutoDelete tmpDir;
-
- Path socketPath;
-
- Pid sshMaster;
-
- string host;
-
- Path key;
+ std::string host;
- bool compress;
+ SSHMaster master;
};
-SSHStore::SSHStore(string host, const Params & params, size_t maxConnections)
- : Store(params)
- , RemoteStore(params, maxConnections)
- , tmpDir(createTempDir("", "nix", true, true, 0700))
- , socketPath((Path) tmpDir + "/ssh.sock")
- , host(std::move(host))
- , key(get(params, "ssh-key", ""))
- , compress(get(params, "compress", "") == "true")
-{
- /* open a connection and perform the handshake to verify all is well */
- connections->get();
-}
-
-string SSHStore::getUri()
-{
- return uriScheme + host;
-}
class ForwardSource : public Source
{
@@ -94,35 +85,10 @@ ref<FSAccessor> SSHStore::getFSAccessor()
ref<RemoteStore::Connection> SSHStore::openConnection()
{
- if ((pid_t) sshMaster == -1) {
- sshMaster = startProcess([&]() {
- restoreSignals();
- if (key.empty())
- execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), host.c_str(), NULL);
- else
- execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), "-i", key.c_str(), host.c_str(), NULL);
- throw SysError("starting ssh master");
- });
- }
-
auto conn = make_ref<Connection>();
- Pipe in, out;
- in.create();
- out.create();
- conn->sshPid = startProcess([&]() {
- if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
- throw SysError("duping over STDIN");
- if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
- throw SysError("duping over STDOUT");
- execlp("ssh", "ssh", "-S", socketPath.c_str(), host.c_str(), "nix-daemon", "--stdio", NULL);
- throw SysError("executing nix-daemon --stdio over ssh");
- });
- in.readSide = -1;
- out.writeSide = -1;
- conn->out = std::move(out.readSide);
- conn->in = std::move(in.writeSide);
- conn->to = FdSink(conn->in.get());
- conn->from = FdSource(conn->out.get());
+ conn->sshConn = master.startCommand("nix-daemon --stdio");
+ conn->to = FdSink(conn->sshConn->in.get());
+ conn->from = FdSource(conn->sshConn->out.get());
initConnection(*conn);
return conn;
}
diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc
new file mode 100644
index 000000000..e54f3f4ba
--- /dev/null
+++ b/src/libstore/ssh.cc
@@ -0,0 +1,102 @@
+#include "ssh.hh"
+
+namespace nix {
+
+void SSHMaster::addCommonSSHOpts(Strings & args)
+{
+ for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS")))
+ args.push_back(i);
+ if (!keyFile.empty())
+ args.insert(args.end(), {"-i", keyFile});
+ if (compress)
+ args.push_back("-C");
+}
+
+std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string & command)
+{
+ Path socketPath = startMaster();
+
+ Pipe in, out;
+ in.create();
+ out.create();
+
+ auto conn = std::make_unique<Connection>();
+ conn->sshPid = startProcess([&]() {
+ restoreSignals();
+
+ close(in.writeSide.get());
+ close(out.readSide.get());
+
+ if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
+ throw SysError("duping over stdin");
+ if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+ throw SysError("duping over stdout");
+
+ Strings args = { "ssh", host.c_str(), "-x", "-a" };
+ addCommonSSHOpts(args);
+ if (socketPath != "")
+ args.insert(args.end(), {"-S", socketPath});
+ args.push_back(command);
+ execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
+
+ throw SysError("executing ‘%s’ on ‘%s’", command, host);
+ });
+
+
+ in.readSide = -1;
+ out.writeSide = -1;
+
+ conn->out = std::move(out.readSide);
+ conn->in = std::move(in.writeSide);
+
+ return conn;
+}
+
+Path SSHMaster::startMaster()
+{
+ if (!useMaster) return "";
+
+ auto state(state_.lock());
+
+ if (state->sshMaster != -1) return state->socketPath;
+
+ state->tmpDir = std::make_unique<AutoDelete>(createTempDir("", "nix", true, true, 0700));
+
+ state->socketPath = (Path) *state->tmpDir + "/ssh.sock";
+
+ Pipe out;
+ out.create();
+
+ state->sshMaster = startProcess([&]() {
+ restoreSignals();
+
+ close(out.readSide.get());
+
+ if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+ throw SysError("duping over stdout");
+
+ Strings args =
+ { "ssh", host.c_str(), "-M", "-N", "-S", state->socketPath
+ , "-o", "LocalCommand=echo started"
+ , "-o", "PermitLocalCommand=yes"
+ };
+ addCommonSSHOpts(args);
+ execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
+
+ throw SysError("starting SSH master");
+ });
+
+ out.writeSide = -1;
+
+ std::string reply;
+ try {
+ reply = readLine(out.readSide.get());
+ } catch (EndOfFile & e) { }
+
+ if (reply != "started")
+ throw Error("failed to start SSH master connection to ‘%s’", host);
+
+ return state->socketPath;
+}
+
+}
diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh
new file mode 100644
index 000000000..b4396467e
--- /dev/null
+++ b/src/libstore/ssh.hh
@@ -0,0 +1,49 @@
+#pragma once
+
+#include "util.hh"
+#include "sync.hh"
+
+namespace nix {
+
+class SSHMaster
+{
+private:
+
+ const std::string host;
+ const std::string keyFile;
+ const bool useMaster;
+ const bool compress;
+
+ struct State
+ {
+ Pid sshMaster;
+ std::unique_ptr<AutoDelete> tmpDir;
+ Path socketPath;
+ };
+
+ Sync<State> state_;
+
+ void addCommonSSHOpts(Strings & args);
+
+public:
+
+ SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress)
+ : host(host)
+ , keyFile(keyFile)
+ , useMaster(useMaster)
+ , compress(compress)
+ {
+ }
+
+ struct Connection
+ {
+ Pid sshPid;
+ AutoCloseFD out, in;
+ };
+
+ std::unique_ptr<Connection> startCommand(const std::string & command);
+
+ Path startMaster();
+};
+
+}
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 603424277..514d1c2ff 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -241,7 +241,8 @@ Path Store::computeStorePathForText(const string & name, const string & s,
Store::Store(const Params & params)
- : storeDir(get(params, "store", settings.nixStore))
+ : Config(params)
+ , state({(size_t) pathInfoCacheSize})
{
}
@@ -377,7 +378,7 @@ void Store::queryPathInfo(const Path & storePath,
}
-PathSet Store::queryValidPaths(const PathSet & paths)
+PathSet Store::queryValidPaths(const PathSet & paths, bool maybeSubstitute)
{
struct State
{
@@ -550,6 +551,8 @@ void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
for (auto & path : storePaths)
srcStore->computeFSClosure(path, closure);
+ // FIXME: use copyStorePaths()
+
PathSet valid = dstStore->queryValidPaths(closure);
if (valid.size() == closure.size()) return;
@@ -676,6 +679,12 @@ Strings ValidPathInfo::shortRefs() const
}
+std::string makeFixedOutputCA(bool recursive, const Hash & hash)
+{
+ return "fixed:" + (recursive ? (std::string) "r:" : "") + hash.to_string();
+}
+
+
}
@@ -702,10 +711,17 @@ ref<Store> openStore(const std::string & uri_)
}
uri = uri_.substr(0, q);
}
+ return openStore(uri, params);
+}
+ref<Store> openStore(const std::string & uri, const Store::Params & params)
+{
for (auto fun : *RegisterStoreImplementation::implementations) {
auto store = fun(uri, params);
- if (store) return ref<Store>(store);
+ if (store) {
+ store->warnUnused();
+ return ref<Store>(store);
+ }
}
throw Error(format("don't know how to open Nix store ‘%s’") % uri);
@@ -718,7 +734,7 @@ StoreType getStoreType(const std::string & uri, const std::string & stateDir)
return tDaemon;
} else if (uri == "local") {
return tLocal;
- } else if (uri == "") {
+ } else if (uri == "" || uri == "auto") {
if (access(stateDir.c_str(), R_OK | W_OK) == 0)
return tLocal;
else if (pathExists(settings.nixDaemonSocketFile))
@@ -781,37 +797,25 @@ std::list<ref<Store>> getDefaultSubstituters()
}
-void copyPaths(ref<Store> from, ref<Store> to, const Paths & storePaths, bool substitute)
-{
- if (substitute) {
- /* Filter out .drv files (we don't want to build anything). */
- PathSet paths2;
- for (auto & path : storePaths)
- if (!isDerivation(path)) paths2.insert(path);
- unsigned long long downloadSize, narSize;
- PathSet willBuild, willSubstitute, unknown;
- to->queryMissing(PathSet(paths2.begin(), paths2.end()),
- willBuild, willSubstitute, unknown, downloadSize, narSize);
- /* FIXME: should use ensurePath(), but it only
- does one path at a time. */
- if (!willSubstitute.empty())
- try {
- to->buildPaths(willSubstitute);
- } catch (Error & e) {
- printMsg(lvlError, format("warning: %1%") % e.msg());
- }
- }
+void copyPaths(ref<Store> from, ref<Store> to, const PathSet & storePaths, bool substitute)
+{
+ PathSet valid = to->queryValidPaths(storePaths, substitute);
+
+ PathSet missing;
+ for (auto & path : storePaths)
+ if (!valid.count(path)) missing.insert(path);
std::string copiedLabel = "copied";
- logger->setExpected(copiedLabel, storePaths.size());
+ logger->setExpected(copiedLabel, missing.size());
ThreadPool pool;
processGraph<Path>(pool,
- PathSet(storePaths.begin(), storePaths.end()),
+ PathSet(missing.begin(), missing.end()),
[&](const Path & storePath) {
+ if (to->isValidPath(storePath)) return PathSet();
return from->queryPathInfo(storePath)->references;
},
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index d03e70849..067309c9e 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -6,6 +6,7 @@
#include "lru-cache.hh"
#include "sync.hh"
#include "globals.hh"
+#include "config.hh"
#include <atomic>
#include <limits>
@@ -81,12 +82,7 @@ struct GCResults
/* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the
number of bytes that would be or was freed. */
- unsigned long long bytesFreed;
-
- GCResults()
- {
- bytesFreed = 0;
- }
+ unsigned long long bytesFreed = 0;
};
@@ -128,7 +124,7 @@ struct ValidPathInfo
of an output path of a derivation were actually produced by
that derivation. In the intensional model, we have to trust
that a particular output path was produced by a derivation; the
- path name then implies the contents.)
+ path then implies the contents.)
Ideally, the content-addressability assertion would just be a
Boolean, and the store path would be computed from
@@ -229,19 +225,23 @@ struct BuildResult
};
-class Store : public std::enable_shared_from_this<Store>
+class Store : public std::enable_shared_from_this<Store>, public Config
{
public:
typedef std::map<std::string, std::string> Params;
- const Path storeDir;
+ const PathSetting storeDir_{this, false, settings.nixStore,
+ "store", "path to the Nix store"};
+ const Path storeDir = storeDir_;
+
+ const Setting<int> pathInfoCacheSize{this, 65536, "path-info-cache-size", "size of the in-memory store path information cache"};
protected:
struct State
{
- LRUCache<std::string, std::shared_ptr<ValidPathInfo>> pathInfoCache{64 * 1024};
+ LRUCache<std::string, std::shared_ptr<ValidPathInfo>> pathInfoCache;
};
Sync<State> state;
@@ -324,8 +324,10 @@ protected:
public:
- /* Query which of the given paths is valid. */
- virtual PathSet queryValidPaths(const PathSet & paths);
+ /* Query which of the given paths is valid. Optionally, try to
+ substitute missing paths. */
+ virtual PathSet queryValidPaths(const PathSet & paths,
+ bool maybeSubstitute = false);
/* Query the set of all valid paths. Note that for some store
backends, the name part of store paths may be omitted
@@ -511,7 +513,7 @@ public:
`storePath' is returned; that is, the closures under the
`referrers' relation instead of the `references' relation is
returned. */
- void computeFSClosure(const PathSet & paths,
+ virtual void computeFSClosure(const PathSet & paths,
PathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false);
@@ -522,7 +524,7 @@ public:
/* Given a set of paths that are to be built, return the set of
derivations that will be built, and the set of output paths
that will be substituted. */
- void queryMissing(const PathSet & targets,
+ virtual void queryMissing(const PathSet & targets,
PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
unsigned long long & downloadSize, unsigned long long & narSize);
@@ -566,6 +568,18 @@ public:
if they lack a signature. */
virtual bool isTrusted() { return false; }
+ /* Return the build log of the specified store path, if available,
+ or null otherwise. */
+ virtual std::shared_ptr<std::string> getBuildLog(const Path & path)
+ { return nullptr; }
+
+ /* Hack to allow long-running processes like hydra-queue-runner to
+ occasionally flush their path info cache. */
+ void clearPathInfoCache()
+ {
+ state.lock()->pathInfoCache.clear();
+ }
+
protected:
Stats stats;
@@ -576,9 +590,20 @@ protected:
class LocalFSStore : public virtual Store
{
public:
- const Path rootDir;
- const Path stateDir;
- const Path logDir;
+
+ // FIXME: the (Store*) cast works around a bug in gcc that causes
+ // it to emit the call to the Option constructor. Clang works fine
+ // either way.
+ const PathSetting rootDir{(Store*) this, true, "",
+ "root", "directory prefixed to all other paths"};
+ const PathSetting stateDir{(Store*) this, false,
+ rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir,
+ "state", "directory where Nix will store state"};
+ const PathSetting logDir{(Store*) this, false,
+ rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
+ "log", "directory where Nix will store state"};
+
+ const static string drvsLogDir;
LocalFSStore(const Params & params);
@@ -595,6 +620,8 @@ public:
{
return getRealStoreDir() + "/" + baseNameOf(storePath);
}
+
+ std::shared_ptr<std::string> getBuildLog(const Path & path) override;
};
@@ -642,8 +669,10 @@ void removeTempRoots();
set to true *unless* you're going to collect garbage. */
ref<Store> openStore(const std::string & uri = getEnv("NIX_REMOTE"));
+ref<Store> openStore(const std::string & uri, const Store::Params & params);
-void copyPaths(ref<Store> from, ref<Store> to, const Paths & storePaths, bool substitute = false);
+
+void copyPaths(ref<Store> from, ref<Store> to, const PathSet & storePaths, bool substitute = false);
enum StoreType {
tDaemon,
@@ -687,6 +716,11 @@ ValidPathInfo decodeValidPathInfo(std::istream & str,
bool hashGiven = false);
+/* Compute the content-addressability assertion (ValidPathInfo::ca)
+ for paths created by makeFixedOutputPath() / addToStore(). */
+std::string makeFixedOutputCA(bool recursive, const Hash & hash);
+
+
MakeError(SubstError, Error)
MakeError(BuildError, Error) /* denotes a permanent build failure */
MakeError(InvalidPath, Error)
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index 6a4ed47cc..6c6766b36 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -6,7 +6,7 @@ namespace nix {
#define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f
-#define PROTOCOL_VERSION 0x112
+#define PROTOCOL_VERSION 0x113
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
@@ -47,7 +47,8 @@ typedef enum {
wopBuildDerivation = 36,
wopAddSignatures = 37,
wopNarFromPath = 38,
- wopAddToStoreNar = 39
+ wopAddToStoreNar = 39,
+ wopQueryMissing = 40,
} WorkerOp;
diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh
index d58b91df0..607ebf8b2 100644
--- a/src/libutil/archive.hh
+++ b/src/libutil/archive.hh
@@ -70,6 +70,13 @@ struct ParseSink
virtual void createSymlink(const Path & path, const string & target) { };
};
+struct TeeSink : ParseSink
+{
+ TeeSource source;
+
+ TeeSink(Source & source) : source(source) { }
+};
+
void parseDump(ParseSink & sink, Source & source);
void restorePath(const Path & path, Source & source);
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
index a3bbb5170..b0b1d709f 100644
--- a/src/libutil/compression.cc
+++ b/src/libutil/compression.cc
@@ -18,7 +18,7 @@ static ref<std::string> decompressXZ(const std::string & in)
lzma_ret ret = lzma_stream_decoder(
&strm, UINT64_MAX, LZMA_CONCATENATED);
if (ret != LZMA_OK)
- throw Error("unable to initialise lzma decoder");
+ throw CompressionError("unable to initialise lzma decoder");
Finally free([&]() { lzma_end(&strm); });
@@ -48,7 +48,7 @@ static ref<std::string> decompressXZ(const std::string & in)
return res;
if (ret != LZMA_OK)
- throw Error("error while decompressing xz file");
+ throw CompressionError("error %d while decompressing xz file", ret);
}
}
@@ -59,7 +59,7 @@ static ref<std::string> decompressBzip2(const std::string & in)
int ret = BZ2_bzDecompressInit(&strm, 0, 0);
if (ret != BZ_OK)
- throw Error("unable to initialise bzip2 decoder");
+ throw CompressionError("unable to initialise bzip2 decoder");
Finally free([&]() { BZ2_bzDecompressEnd(&strm); });
@@ -85,10 +85,19 @@ static ref<std::string> decompressBzip2(const std::string & in)
return res;
if (ret != BZ_OK)
- throw Error("error while decompressing bzip2 file");
+ throw CompressionError("error while decompressing bzip2 file");
+
+ if (strm.avail_in == 0)
+ throw CompressionError("bzip2 data ends prematurely");
}
}
+static ref<std::string> decompressBrotli(const std::string & in)
+{
+ // FIXME: use libbrotli
+ return make_ref<std::string>(runProgram(BRO, true, {"-d"}, {in}));
+}
+
ref<std::string> compress(const std::string & method, const std::string & in)
{
StringSink ssink;
@@ -106,6 +115,8 @@ ref<std::string> decompress(const std::string & method, const std::string & in)
return decompressXZ(in);
else if (method == "bzip2")
return decompressBzip2(in);
+ else if (method == "br")
+ return decompressBrotli(in);
else
throw UnknownCompressionMethod(format("unknown compression method ‘%s’") % method);
}
@@ -130,7 +141,7 @@ struct XzSink : CompressionSink
lzma_ret ret = lzma_easy_encoder(
&strm, 6, LZMA_CHECK_CRC64);
if (ret != LZMA_OK)
- throw Error("unable to initialise lzma encoder");
+ throw CompressionError("unable to initialise lzma encoder");
// FIXME: apply the x86 BCJ filter?
strm.next_out = outbuf;
@@ -139,7 +150,6 @@ struct XzSink : CompressionSink
~XzSink()
{
- assert(finished);
lzma_end(&strm);
}
@@ -155,7 +165,7 @@ struct XzSink : CompressionSink
lzma_ret ret = lzma_code(&strm, LZMA_FINISH);
if (ret != LZMA_OK && ret != LZMA_STREAM_END)
- throw Error("error while flushing xz file");
+ throw CompressionError("error while flushing xz file");
if (strm.avail_out == 0 || ret == LZMA_STREAM_END) {
nextSink(outbuf, sizeof(outbuf) - strm.avail_out);
@@ -179,7 +189,7 @@ struct XzSink : CompressionSink
lzma_ret ret = lzma_code(&strm, LZMA_RUN);
if (ret != LZMA_OK)
- throw Error("error while compressing xz file");
+ throw CompressionError("error while compressing xz file");
if (strm.avail_out == 0) {
nextSink(outbuf, sizeof(outbuf));
@@ -202,7 +212,7 @@ struct BzipSink : CompressionSink
memset(&strm, 0, sizeof(strm));
int ret = BZ2_bzCompressInit(&strm, 9, 0, 30);
if (ret != BZ_OK)
- throw Error("unable to initialise bzip2 encoder");
+ throw CompressionError("unable to initialise bzip2 encoder");
strm.next_out = outbuf;
strm.avail_out = sizeof(outbuf);
@@ -210,7 +220,6 @@ struct BzipSink : CompressionSink
~BzipSink()
{
- assert(finished);
BZ2_bzCompressEnd(&strm);
}
@@ -226,7 +235,7 @@ struct BzipSink : CompressionSink
int ret = BZ2_bzCompress(&strm, BZ_FINISH);
if (ret != BZ_FINISH_OK && ret != BZ_STREAM_END)
- throw Error("error while flushing bzip2 file");
+ throw CompressionError("error while flushing bzip2 file");
if (strm.avail_out == 0 || ret == BZ_STREAM_END) {
nextSink((unsigned char *) outbuf, sizeof(outbuf) - strm.avail_out);
@@ -250,7 +259,7 @@ struct BzipSink : CompressionSink
int ret = BZ2_bzCompress(&strm, BZ_RUN);
if (ret != BZ_OK)
- Error("error while compressing bzip2 file");
+ throw CompressionError("error while compressing bzip2 file");
if (strm.avail_out == 0) {
nextSink((unsigned char *) outbuf, sizeof(outbuf));
@@ -261,6 +270,34 @@ struct BzipSink : CompressionSink
}
};
+struct BrotliSink : CompressionSink
+{
+ Sink & nextSink;
+ std::string data;
+
+ BrotliSink(Sink & nextSink) : nextSink(nextSink)
+ {
+ }
+
+ ~BrotliSink()
+ {
+ }
+
+ // FIXME: use libbrotli
+
+ void finish() override
+ {
+ flush();
+ nextSink(runProgram(BRO, true, {}, data));
+ }
+
+ void write(const unsigned char * data, size_t len) override
+ {
+ checkInterrupt();
+ this->data.append((const char *) data, len);
+ }
+};
+
ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink)
{
if (method == "none")
@@ -269,6 +306,8 @@ ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & next
return make_ref<XzSink>(nextSink);
else if (method == "bzip2")
return make_ref<BzipSink>(nextSink);
+ else if (method == "br")
+ return make_ref<BrotliSink>(nextSink);
else
throw UnknownCompressionMethod(format("unknown compression method ‘%s’") % method);
}
diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh
index eacf559d6..e3e6f5a99 100644
--- a/src/libutil/compression.hh
+++ b/src/libutil/compression.hh
@@ -21,4 +21,6 @@ ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & next
MakeError(UnknownCompressionMethod, Error);
+MakeError(CompressionError, Error);
+
}
diff --git a/src/libutil/config.cc b/src/libutil/config.cc
new file mode 100644
index 000000000..2f9f98860
--- /dev/null
+++ b/src/libutil/config.cc
@@ -0,0 +1,112 @@
+#include "config.hh"
+#include "args.hh"
+
+namespace nix {
+
+void Config::set(const std::string & name, const std::string & value)
+{
+ auto i = _settings.find(name);
+ if (i == _settings.end())
+ throw UsageError("unknown setting '%s'", name);
+ i->second.setting->set(value);
+}
+
+void Config::add(AbstractSetting * setting)
+{
+ _settings.emplace(setting->name, Config::SettingData{false, setting});
+ for (auto & alias : setting->aliases)
+ _settings.emplace(alias, Config::SettingData{true, setting});
+
+ bool set = false;
+
+ auto i = initials.find(setting->name);
+ if (i != initials.end()) {
+ setting->set(i->second);
+ initials.erase(i);
+ set = true;
+ }
+
+ for (auto & alias : setting->aliases) {
+ auto i = initials.find(alias);
+ if (i != initials.end()) {
+ if (set)
+ warn("setting '%s' is set, but it's an alias of '%s' which is also set",
+ alias, setting->name);
+ else {
+ setting->set(i->second);
+ initials.erase(i);
+ set = true;
+ }
+ }
+ }
+}
+
+void Config::warnUnused()
+{
+ for (auto & i : initials)
+ warn("unknown setting '%s'", i.first);
+}
+
+std::string Config::dump()
+{
+ std::string res;
+ for (auto & opt : _settings)
+ if (!opt.second.isAlias)
+ res += opt.first + " = " + opt.second.setting->to_string() + "\n";
+ return res;
+}
+
+AbstractSetting::AbstractSetting(
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases)
+ : name(name), description(description), aliases(aliases)
+{
+}
+
+template<> void Setting<std::string>::set(const std::string & str)
+{
+ value = str;
+}
+
+template<> std::string Setting<std::string>::to_string()
+{
+ return value;
+}
+
+template<> void Setting<int>::set(const std::string & str)
+{
+ try {
+ value = std::stoi(str);
+ } catch (...) {
+ throw UsageError("setting '%s' has invalid value '%s'", name, str);
+ }
+}
+
+template<> std::string Setting<int>::to_string()
+{
+ return std::to_string(value);
+}
+
+template<> void Setting<bool>::set(const std::string & str)
+{
+ value = str == "true" || str == "1";
+}
+
+template<> std::string Setting<bool>::to_string()
+{
+ return value ? "true" : "false";
+}
+
+void PathSetting::set(const std::string & str)
+{
+ if (str == "") {
+ if (allowEmpty)
+ value = "";
+ else
+ throw UsageError("setting '%s' cannot be empty", name);
+ } else
+ value = canonPath(str);
+}
+
+}
diff --git a/src/libutil/config.hh b/src/libutil/config.hh
new file mode 100644
index 000000000..fb2d48e9c
--- /dev/null
+++ b/src/libutil/config.hh
@@ -0,0 +1,151 @@
+#include <map>
+#include <set>
+
+#include "types.hh"
+
+#pragma once
+
+namespace nix {
+
+class Args;
+class AbstractSetting;
+
+/* A class to simplify providing configuration settings. The typical
+ use is to inherit Config and add Setting<T> members:
+
+ class MyClass : private Config
+ {
+ Setting<int> foo{this, 123, "foo", "the number of foos to use"};
+ Setting<std::string> bar{this, "blabla", "bar", "the name of the bar"};
+
+ MyClass() : Config(readConfigFile("/etc/my-app.conf"))
+ {
+ std::cout << foo << "\n"; // will print 123 unless overriden
+ }
+ };
+*/
+
+class Config
+{
+ friend class AbstractSetting;
+
+ struct SettingData
+ {
+ bool isAlias = false;
+ AbstractSetting * setting;
+ };
+
+ std::map<std::string, SettingData> _settings;
+
+ StringMap initials;
+
+public:
+
+ Config(const StringMap & initials)
+ : initials(initials)
+ { }
+
+ void set(const std::string & name, const std::string & value);
+
+ void add(AbstractSetting * setting);
+
+ void warnUnused();
+
+ std::string dump();
+};
+
+class AbstractSetting
+{
+ friend class Config;
+
+public:
+
+ const std::string name;
+ const std::string description;
+ const std::set<std::string> aliases;
+
+ int created = 123;
+
+protected:
+
+ AbstractSetting(
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases);
+
+ virtual ~AbstractSetting()
+ {
+ // Check against a gcc miscompilation causing our constructor
+ // not to run.
+ assert(created == 123);
+ }
+
+ virtual void set(const std::string & value) = 0;
+
+ virtual std::string to_string() = 0;
+};
+
+/* A setting of type T. */
+template<typename T>
+class Setting : public AbstractSetting
+{
+protected:
+
+ T value;
+
+public:
+
+ Setting(Config * options,
+ const T & def,
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases = {})
+ : AbstractSetting(name, description, aliases)
+ , value(def)
+ {
+ options->add(this);
+ }
+
+ operator const T &() const { return value; }
+ bool operator ==(const T & v2) const { return value == v2; }
+ bool operator !=(const T & v2) const { return value != v2; }
+ void operator =(const T & v) { value = v; }
+
+ void set(const std::string & str) override;
+
+ std::string to_string() override;
+};
+
+template<typename T>
+std::ostream & operator <<(std::ostream & str, const Setting<T> & opt)
+{
+ str << (const T &) opt;
+ return str;
+}
+
+/* A special setting for Paths. These are automatically canonicalised
+ (e.g. "/foo//bar/" becomes "/foo/bar"). */
+class PathSetting : public Setting<Path>
+{
+ bool allowEmpty;
+
+public:
+
+ PathSetting(Config * options,
+ bool allowEmpty,
+ const Path & def,
+ const std::string & name,
+ const std::string & description,
+ const std::set<std::string> & aliases = {})
+ : Setting<Path>(options, def, name, description, aliases)
+ , allowEmpty(allowEmpty)
+ {
+ set(value);
+ }
+
+ void set(const std::string & str) override;
+
+ Path operator +(const char * p) const { return value + p; }
+};
+
+}
diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc
index f447c80c5..9f4afd93c 100644
--- a/src/libutil/hash.cc
+++ b/src/libutil/hash.cc
@@ -7,12 +7,12 @@
#include "hash.hh"
#include "archive.hh"
#include "util.hh"
+#include "istringstream_nocopy.hh"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-
namespace nix {
@@ -104,7 +104,7 @@ Hash parseHash(HashType ht, const string & s)
string s2(s, i * 2, 2);
if (!isxdigit(s2[0]) || !isxdigit(s2[1]))
throw BadHash(format("invalid hash ‘%1%’") % s);
- std::istringstream str(s2);
+ istringstream_nocopy str(s2);
int n;
str >> std::hex >> n;
hash.hash[i] = n;
diff --git a/src/libutil/istringstream_nocopy.hh b/src/libutil/istringstream_nocopy.hh
new file mode 100644
index 000000000..f7beac578
--- /dev/null
+++ b/src/libutil/istringstream_nocopy.hh
@@ -0,0 +1,92 @@
+/* This file provides a variant of std::istringstream that doesn't
+ copy its string argument. This is useful for large strings. The
+ caller must ensure that the string object is not destroyed while
+ it's referenced by this object. */
+
+#pragma once
+
+#include <string>
+#include <iostream>
+
+template <class CharT, class Traits = std::char_traits<CharT>, class Allocator = std::allocator<CharT>>
+class basic_istringbuf_nocopy : public std::basic_streambuf<CharT, Traits>
+{
+public:
+ typedef std::basic_string<CharT, Traits, Allocator> string_type;
+
+ typedef typename std::basic_streambuf<CharT, Traits>::off_type off_type;
+
+ typedef typename std::basic_streambuf<CharT, Traits>::pos_type pos_type;
+
+ typedef typename std::basic_streambuf<CharT, Traits>::int_type int_type;
+
+ typedef typename std::basic_streambuf<CharT, Traits>::traits_type traits_type;
+
+private:
+ const string_type & s;
+
+ off_type off;
+
+public:
+ basic_istringbuf_nocopy(const string_type & s) : s{s}, off{0}
+ {
+ }
+
+private:
+ pos_type seekoff(off_type off, std::ios_base::seekdir dir, std::ios_base::openmode which)
+ {
+ if (which & std::ios_base::in) {
+ this->off = dir == std::ios_base::beg
+ ? off
+ : (dir == std::ios_base::end
+ ? s.size() + off
+ : this->off + off);
+ }
+ return pos_type(this->off);
+ }
+
+ pos_type seekpos(pos_type pos, std::ios_base::openmode which)
+ {
+ return seekoff(pos, std::ios_base::beg, which);
+ }
+
+ std::streamsize showmanyc()
+ {
+ return s.size() - off;
+ }
+
+ int_type underflow()
+ {
+ if (typename string_type::size_type(off) == s.size())
+ return traits_type::eof();
+ return traits_type::to_int_type(s[off]);
+ }
+
+ int_type uflow()
+ {
+ if (typename string_type::size_type(off) == s.size())
+ return traits_type::eof();
+ return traits_type::to_int_type(s[off++]);
+ }
+
+ int_type pbackfail(int_type ch)
+ {
+ if (off == 0 || (ch != traits_type::eof() && ch != s[off - 1]))
+ return traits_type::eof();
+
+ return traits_type::to_int_type(s[--off]);
+ }
+
+};
+
+template <class CharT, class Traits = std::char_traits<CharT>, class Allocator = std::allocator<CharT>>
+class basic_istringstream_nocopy : public std::basic_iostream<CharT, Traits>
+{
+ typedef basic_istringbuf_nocopy<CharT, Traits, Allocator> buf_type;
+ buf_type buf;
+public:
+ basic_istringstream_nocopy(const typename buf_type::string_type & s) :
+ std::basic_iostream<CharT, Traits>(&buf), buf(s) {};
+};
+
+typedef basic_istringstream_nocopy<char> istringstream_nocopy;
diff --git a/src/libutil/local.mk b/src/libutil/local.mk
index cac5c8795..0721b21c2 100644
--- a/src/libutil/local.mk
+++ b/src/libutil/local.mk
@@ -9,3 +9,5 @@ libutil_SOURCES := $(wildcard $(d)/*.cc)
libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS)
libutil_LIBS = libformat
+
+libutil_CXXFLAGS = -DBRO=\"$(bro)\"
diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc
index d9e8d22d7..afcc2ec58 100644
--- a/src/libutil/logging.cc
+++ b/src/libutil/logging.cc
@@ -3,7 +3,12 @@
namespace nix {
-Logger * logger = 0;
+Logger * logger = makeDefaultLogger();
+
+void Logger::warn(const std::string & msg)
+{
+ log(lvlInfo, ANSI_RED "warning:" ANSI_NORMAL " " + msg);
+}
class SimpleLogger : public Logger
{
@@ -52,7 +57,7 @@ Verbosity verbosity = lvlInfo;
void warnOnce(bool & haveWarned, const FormatOrString & fs)
{
if (!haveWarned) {
- printError(format("warning: %1%") % fs.s);
+ warn(fs.s);
haveWarned = true;
}
}
diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh
index 3f8366479..81aebccdc 100644
--- a/src/libutil/logging.hh
+++ b/src/libutil/logging.hh
@@ -30,6 +30,8 @@ public:
log(lvlInfo, fs);
}
+ virtual void warn(const std::string & msg);
+
virtual void setExpected(const std::string & label, uint64_t value = 1) { }
virtual void setProgress(const std::string & label, uint64_t value = 1) { }
virtual void incExpected(const std::string & label, uint64_t value = 1) { }
@@ -82,6 +84,14 @@ extern Verbosity verbosity; /* suppress msgs > this */
#define debug(args...) printMsg(lvlDebug, args)
#define vomit(args...) printMsg(lvlVomit, args)
+template<typename... Args>
+inline void warn(const std::string & fs, Args... args)
+{
+ boost::format f(fs);
+ formatHelper(f, args...);
+ logger->warn(f.str());
+}
+
void warnOnce(bool & haveWarned, const FormatOrString & fs);
void writeToStderr(const string & s);
diff --git a/src/libutil/lru-cache.hh b/src/libutil/lru-cache.hh
index 35983aa2c..3cb5d5088 100644
--- a/src/libutil/lru-cache.hh
+++ b/src/libutil/lru-cache.hh
@@ -11,7 +11,7 @@ class LRUCache
{
private:
- size_t maxSize;
+ size_t capacity;
// Stupid wrapper to get around circular dependency between Data
// and LRU.
@@ -27,14 +27,16 @@ private:
public:
- LRUCache(size_t maxSize) : maxSize(maxSize) { }
+ LRUCache(size_t capacity) : capacity(capacity) { }
/* Insert or upsert an item in the cache. */
void upsert(const Key & key, const Value & value)
{
+ if (capacity == 0) return;
+
erase(key);
- if (data.size() >= maxSize) {
+ if (data.size() >= capacity) {
/* Retire the oldest item. */
auto oldest = lru.begin();
data.erase(*oldest);
diff --git a/src/libutil/pool.hh b/src/libutil/pool.hh
index f291cd578..20df21948 100644
--- a/src/libutil/pool.hh
+++ b/src/libutil/pool.hh
@@ -137,15 +137,21 @@ public:
} catch (...) {
auto state_(state.lock());
state_->inUse--;
+ wakeup.notify_one();
throw;
}
}
- unsigned int count()
+ size_t count()
{
auto state_(state.lock());
return state_->idle.size() + state_->inUse;
}
+
+ size_t capacity()
+ {
+ return state.lock()->max;
+ }
};
}
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index a68f7a0fa..950e6362a 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -194,39 +194,9 @@ void readPadding(size_t len, Source & source)
}
-unsigned int readInt(Source & source)
-{
- unsigned char buf[8];
- source(buf, sizeof(buf));
- if (buf[4] || buf[5] || buf[6] || buf[7])
- throw SerialisationError("implementation cannot deal with > 32-bit integers");
- return
- buf[0] |
- (buf[1] << 8) |
- (buf[2] << 16) |
- (buf[3] << 24);
-}
-
-
-unsigned long long readLongLong(Source & source)
-{
- unsigned char buf[8];
- source(buf, sizeof(buf));
- return
- ((unsigned long long) buf[0]) |
- ((unsigned long long) buf[1] << 8) |
- ((unsigned long long) buf[2] << 16) |
- ((unsigned long long) buf[3] << 24) |
- ((unsigned long long) buf[4] << 32) |
- ((unsigned long long) buf[5] << 40) |
- ((unsigned long long) buf[6] << 48) |
- ((unsigned long long) buf[7] << 56);
-}
-
-
size_t readString(unsigned char * buf, size_t max, Source & source)
{
- size_t len = readInt(source);
+ auto len = readNum<size_t>(source);
if (len > max) throw Error("string is too long");
source(buf, len);
readPadding(len, source);
@@ -236,11 +206,11 @@ size_t readString(unsigned char * buf, size_t max, Source & source)
string readString(Source & source)
{
- size_t len = readInt(source);
- auto buf = std::make_unique<unsigned char[]>(len);
- source(buf.get(), len);
+ auto len = readNum<size_t>(source);
+ std::string res(len, 0);
+ source((unsigned char*) res.data(), len);
readPadding(len, source);
- return string((char *) buf.get(), len);
+ return res;
}
Source & operator >> (Source & in, string & s)
@@ -250,16 +220,9 @@ Source & operator >> (Source & in, string & s)
}
-Source & operator >> (Source & in, unsigned int & n)
-{
- n = readInt(in);
- return in;
-}
-
-
template<class T> T readStrings(Source & source)
{
- unsigned int count = readInt(source);
+ auto count = readNum<size_t>(source);
T ss;
while (count--)
ss.insert(ss.end(), readString(source));
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index 5646d08c1..2bdee7080 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -140,15 +140,16 @@ struct StringSource : Source
/* Adapter class of a Source that saves all data read to `s'. */
-struct SavingSourceAdapter : Source
+struct TeeSource : Source
{
Source & orig;
- string s;
- SavingSourceAdapter(Source & orig) : orig(orig) { }
+ ref<std::string> data;
+ TeeSource(Source & orig)
+ : orig(orig), data(make_ref<std::string>()) { }
size_t read(unsigned char * data, size_t len)
{
size_t n = orig.read(data, len);
- s.append((const char *) data, n);
+ this->data->append((const char *) data, n);
return n;
}
};
@@ -177,18 +178,64 @@ Sink & operator << (Sink & sink, const Strings & s);
Sink & operator << (Sink & sink, const StringSet & s);
+MakeError(SerialisationError, Error)
+
+
+template<typename T>
+T readNum(Source & source)
+{
+ unsigned char buf[8];
+ source(buf, sizeof(buf));
+
+ uint64_t n =
+ ((unsigned long long) buf[0]) |
+ ((unsigned long long) buf[1] << 8) |
+ ((unsigned long long) buf[2] << 16) |
+ ((unsigned long long) buf[3] << 24) |
+ ((unsigned long long) buf[4] << 32) |
+ ((unsigned long long) buf[5] << 40) |
+ ((unsigned long long) buf[6] << 48) |
+ ((unsigned long long) buf[7] << 56);
+
+ if (n > std::numeric_limits<T>::max())
+ throw SerialisationError("serialised integer %d is too large for type ‘%s’", n, typeid(T).name());
+
+ return n;
+}
+
+
+inline unsigned int readInt(Source & source)
+{
+ return readNum<unsigned int>(source);
+}
+
+
+inline uint64_t readLongLong(Source & source)
+{
+ return readNum<uint64_t>(source);
+}
+
+
void readPadding(size_t len, Source & source);
-unsigned int readInt(Source & source);
-unsigned long long readLongLong(Source & source);
size_t readString(unsigned char * buf, size_t max, Source & source);
string readString(Source & source);
template<class T> T readStrings(Source & source);
Source & operator >> (Source & in, string & s);
-Source & operator >> (Source & in, unsigned int & n);
+template<typename T>
+Source & operator >> (Source & in, T & n)
+{
+ n = readNum<T>(in);
+ return in;
+}
-MakeError(SerialisationError, Error)
+template<typename T>
+Source & operator >> (Source & in, bool & b)
+{
+ b = readNum<uint64_t>(in);
+ return in;
+}
}
diff --git a/src/libutil/sync.hh b/src/libutil/sync.hh
index 2aa074299..611c900e0 100644
--- a/src/libutil/sync.hh
+++ b/src/libutil/sync.hh
@@ -33,6 +33,7 @@ public:
Sync() { }
Sync(const T & data) : data(data) { }
+ Sync(T && data) noexcept : data(std::move(data)) { }
class Lock
{
diff --git a/src/libutil/types.hh b/src/libutil/types.hh
index 97d79af9b..1429c2385 100644
--- a/src/libutil/types.hh
+++ b/src/libutil/types.hh
@@ -7,6 +7,7 @@
#include <list>
#include <set>
#include <memory>
+#include <map>
#include <boost/format.hpp>
@@ -141,6 +142,7 @@ private:
typedef list<string> Strings;
typedef set<string> StringSet;
+typedef std::map<std::string, std::string> StringMap;
/* Paths are just strings. */
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 0a5f796e4..0bd51afd1 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -1,6 +1,7 @@
#include "util.hh"
#include "affinity.hh"
#include "sync.hh"
+#include "finally.hh"
#include <cctype>
#include <cerrno>
@@ -10,6 +11,7 @@
#include <iostream>
#include <sstream>
#include <thread>
+#include <future>
#include <sys/wait.h>
#include <unistd.h>
@@ -94,6 +96,8 @@ Path absPath(Path path, Path dir)
Path canonPath(const Path & path, bool resolveSymlinks)
{
+ assert(path != "");
+
string s;
if (path[0] != '/')
@@ -485,6 +489,7 @@ void readFull(int fd, unsigned char * buf, size_t count)
void writeFull(int fd, const unsigned char * buf, size_t count, bool allowInterrupts)
{
while (count) {
+ if (allowInterrupts) checkInterrupt();
ssize_t res = write(fd, (char *) buf, count);
if (res == -1 && errno != EINTR)
throw SysError("writing to file");
@@ -492,7 +497,6 @@ void writeFull(int fd, const unsigned char * buf, size_t count, bool allowInterr
count -= res;
buf += res;
}
- if (allowInterrupts) checkInterrupt();
}
}
@@ -676,12 +680,11 @@ Pid::operator pid_t()
}
-int Pid::kill(bool quiet)
+int Pid::kill()
{
assert(pid != -1);
- if (!quiet)
- printError(format("killing process %1%") % pid);
+ debug(format("killing process %1%") % pid);
/* Send the requested signal to the child. If it has its own
process group, send the signal to every process in the child
@@ -837,23 +840,21 @@ std::vector<char *> stringsToCharPtrs(const Strings & ss)
string runProgram(Path program, bool searchPath, const Strings & args,
- const string & input)
+ const std::experimental::optional<std::string> & input)
{
checkInterrupt();
/* Create a pipe. */
Pipe out, in;
out.create();
- if (!input.empty()) in.create();
+ if (input) in.create();
/* Fork. */
Pid pid = startProcess([&]() {
if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
throw SysError("dupping stdout");
- if (!input.empty()) {
- if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
- throw SysError("dupping stdin");
- }
+ if (input && dup2(in.readSide.get(), STDIN_FILENO) == -1)
+ throw SysError("dupping stdin");
Strings args_(args);
args_.push_front(program);
@@ -870,11 +871,27 @@ string runProgram(Path program, bool searchPath, const Strings & args,
out.writeSide = -1;
- /* FIXME: This can deadlock if the input is too long. */
- if (!input.empty()) {
+ std::thread writerThread;
+
+ std::promise<void> promise;
+
+ Finally doJoin([&]() {
+ if (writerThread.joinable())
+ writerThread.join();
+ });
+
+
+ if (input) {
in.readSide = -1;
- writeFull(in.writeSide.get(), input);
- in.writeSide = -1;
+ writerThread = std::thread([&]() {
+ try {
+ writeFull(in.writeSide.get(), *input);
+ promise.set_value();
+ } catch (...) {
+ promise.set_exception(std::current_exception());
+ }
+ in.writeSide = -1;
+ });
}
string result = drainFD(out.readSide.get());
@@ -885,6 +902,9 @@ string runProgram(Path program, bool searchPath, const Strings & args,
throw ExecError(status, format("program ‘%1%’ %2%")
% program % statusToString(status));
+ /* Wait for the writer thread to finish. */
+ if (input) promise.get_future().get();
+
return result;
}
@@ -1194,7 +1214,7 @@ static void signalHandlerThread(sigset_t set)
void triggerInterrupt()
{
- _isInterrupted = 1;
+ _isInterrupted = true;
{
auto interruptCallbacks(_interruptCallbacks.lock());
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index 2950f7daa..0e6941e4a 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -13,6 +13,8 @@
#include <limits>
#include <cstdio>
#include <map>
+#include <sstream>
+#include <experimental/optional>
#ifndef HAVE_STRUCT_DIRENT_D_TYPE
#define DT_UNKNOWN 0
@@ -201,7 +203,7 @@ public:
~Pid();
void operator =(pid_t pid);
operator pid_t();
- int kill(bool quiet = false);
+ int kill();
int wait();
void setSeparatePG(bool separatePG);
@@ -231,7 +233,8 @@ pid_t startProcess(std::function<void()> fun, const ProcessOptions & options = P
/* Run a program and return its stdout in a string (i.e., like the
shell backtick operator). */
string runProgram(Path program, bool searchPath = false,
- const Strings & args = Strings(), const string & input = "");
+ const Strings & args = Strings(),
+ const std::experimental::optional<std::string> & input = {});
class ExecError : public Error
{
@@ -448,5 +451,4 @@ struct ReceiveInterrupts
{ }
};
-
}
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
index ee030c57b..b4206033c 100755
--- a/src/nix-build/nix-build.cc
+++ b/src/nix-build/nix-build.cc
@@ -394,7 +394,7 @@ int main(int argc, char ** argv)
auto tmp = getEnv("TMPDIR", getEnv("XDG_RUNTIME_DIR", "/tmp"));
if (pure) {
- std::set<string> keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL"};
+ std::set<string> keepVars{"HOME", "USER", "LOGNAME", "DISPLAY", "PATH", "TERM", "IN_NIX_SHELL", "TZ", "PAGER", "NIX_BUILD_SHELL", "SHLVL"};
decltype(env) newEnv;
for (auto & i : env)
if (keepVars.count(i.first))
@@ -408,7 +408,7 @@ int main(int argc, char ** argv)
env["NIX_STORE"] = store->storeDir;
for (auto & var : drv.env)
- env.emplace(var);
+ env[var.first] = var.second;
restoreAffinity();
@@ -448,15 +448,17 @@ int main(int argc, char ** argv)
auto envPtrs = stringsToCharPtrs(envStrs);
+ auto shell = getEnv("NIX_BUILD_SHELL", "bash");
+
environ = envPtrs.data();
auto argPtrs = stringsToCharPtrs(args);
restoreSignals();
- execvp(getEnv("NIX_BUILD_SHELL", "bash").c_str(), argPtrs.data());
+ execvp(shell.c_str(), argPtrs.data());
- throw SysError("executing shell");
+ throw SysError("executing shell ‘%s’", shell);
}
// Ugly hackery to make "nix-build -A foo.all" produce symlinks
diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc
index 4340443b5..ed43bffbc 100755
--- a/src/nix-copy-closure/nix-copy-closure.cc
+++ b/src/nix-copy-closure/nix-copy-closure.cc
@@ -47,13 +47,17 @@ int main(int argc, char ** argv)
if (sshHost.empty())
throw UsageError("no host name specified");
- auto remoteUri = "legacy-ssh://" + sshHost + (gzip ? "?compress=true" : "");
+ auto remoteUri = "ssh://" + sshHost + (gzip ? "?compress=true" : "");
auto to = toMode ? openStore(remoteUri) : openStore();
auto from = toMode ? openStore() : openStore(remoteUri);
+ PathSet storePaths2;
+ for (auto & path : storePaths)
+ storePaths2.insert(from->followLinksToStorePath(path));
+
PathSet closure;
- from->computeFSClosure(storePaths, closure, false, includeOutputs);
+ from->computeFSClosure(storePaths2, closure, false, includeOutputs);
- copyPaths(from, to, Paths(closure.begin(), closure.end()), useSubstitutes);
+ copyPaths(from, to, closure, useSubstitutes);
});
}
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
index 2fcb5b565..f4285693f 100644
--- a/src/nix-daemon/nix-daemon.cc
+++ b/src/nix-daemon/nix-daemon.cc
@@ -273,10 +273,9 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
}
case wopAddToStore: {
- string baseName = readString(from);
- bool fixed = readInt(from) == 1; /* obsolete */
- bool recursive = readInt(from) == 1;
- string s = readString(from);
+ bool fixed, recursive;
+ std::string s, baseName;
+ from >> baseName >> fixed /* obsolete */ >> recursive >> s;
/* Compatibility hack. */
if (!fixed) {
s = "sha256";
@@ -284,7 +283,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
}
HashType hashAlgo = parseHashType(s);
- SavingSourceAdapter savedNAR(from);
+ TeeSource savedNAR(from);
RetrieveRegularNARSink savedRegular;
if (recursive) {
@@ -298,7 +297,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
startWork();
if (!savedRegular.regular) throw Error("regular file expected");
- Path path = store->addToStoreFromDump(recursive ? savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo);
+ Path path = store->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo);
stopWork();
to << path;
@@ -340,7 +339,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
PathSet drvs = readStorePaths<PathSet>(*store, from);
BuildMode mode = bmNormal;
if (GET_PROTOCOL_MINOR(clientVersion) >= 15) {
- mode = (BuildMode)readInt(from);
+ mode = (BuildMode) readInt(from);
/* Repairing is not atomic, so disallowed for "untrusted"
clients. */
@@ -417,8 +416,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
GCOptions options;
options.action = (GCOptions::GCAction) readInt(from);
options.pathsToDelete = readStorePaths<PathSet>(*store, from);
- options.ignoreLiveness = readInt(from);
- options.maxFreed = readLongLong(from);
+ from >> options.ignoreLiveness >> options.maxFreed;
// obsolete fields
readInt(from);
readInt(from);
@@ -438,8 +436,8 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
}
case wopSetOptions: {
- settings.keepFailed = readInt(from) != 0;
- settings.keepGoing = readInt(from) != 0;
+ from >> settings.keepFailed;
+ from >> settings.keepGoing;
settings.set("build-fallback", readInt(from) ? "true" : "false");
verbosity = (Verbosity) readInt(from);
settings.set("build-max-jobs", std::to_string(readInt(from)));
@@ -539,8 +537,8 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
break;
case wopVerifyStore: {
- bool checkContents = readInt(from) != 0;
- bool repair = readInt(from) != 0;
+ bool checkContents, repair;
+ from >> checkContents >> repair;
startWork();
if (repair && !trusted)
throw Error("you are not privileged to repair paths");
@@ -571,25 +569,37 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
}
case wopAddToStoreNar: {
+ bool repair, dontCheckSigs;
ValidPathInfo info;
info.path = readStorePath(*store, from);
- info.deriver = readString(from);
+ from >> info.deriver;
if (!info.deriver.empty())
store->assertStorePath(info.deriver);
info.narHash = parseHash(htSHA256, readString(from));
info.references = readStorePaths<PathSet>(*store, from);
- info.registrationTime = readInt(from);
- info.narSize = readLongLong(from);
- info.ultimate = readLongLong(from);
+ from >> info.registrationTime >> info.narSize >> info.ultimate;
info.sigs = readStrings<StringSet>(from);
- auto nar = make_ref<std::string>(readString(from));
- auto repair = readInt(from) ? true : false;
- auto dontCheckSigs = readInt(from) ? true : false;
+ from >> info.ca >> repair >> dontCheckSigs;
if (!trusted && dontCheckSigs)
dontCheckSigs = false;
+
+ TeeSink tee(from);
+ parseDump(tee, tee.source);
+
+ startWork();
+ store->addToStore(info, tee.source.data, repair, dontCheckSigs, nullptr);
+ stopWork();
+ break;
+ }
+
+ case wopQueryMissing: {
+ PathSet targets = readStorePaths<PathSet>(*store, from);
startWork();
- store->addToStore(info, nar, repair, dontCheckSigs, nullptr);
+ PathSet willBuild, willSubstitute, unknown;
+ unsigned long long downloadSize, narSize;
+ store->queryMissing(targets, willBuild, willSubstitute, unknown, downloadSize, narSize);
stopWork();
+ to << willBuild << willSubstitute << unknown << downloadSize << narSize;
break;
}
@@ -638,7 +648,10 @@ static void processConnection(bool trusted)
#endif
/* Open the store. */
- auto store = make_ref<LocalStore>(Store::Params()); // FIXME: get params from somewhere
+ Store::Params params; // FIXME: get params from somewhere
+ // Disable caching since the client already does that.
+ params["path-info-cache-size"] = "0";
+ auto store = make_ref<LocalStore>(params);
stopWork();
to.flush();
diff --git a/src/nix-prefetch-url/nix-prefetch-url.cc b/src/nix-prefetch-url/nix-prefetch-url.cc
index acf603025..b3b2fcac7 100644
--- a/src/nix-prefetch-url/nix-prefetch-url.cc
+++ b/src/nix-prefetch-url/nix-prefetch-url.cc
@@ -170,10 +170,10 @@ int main(int argc, char * * argv)
Path unpacked = (Path) tmpDir + "/unpacked";
createDirs(unpacked);
if (hasSuffix(baseNameOf(uri), ".zip"))
- runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked}, "");
+ runProgram("unzip", true, {"-qq", tmpFile, "-d", unpacked});
else
// FIXME: this requires GNU tar for decompression.
- runProgram("tar", true, {"xf", tmpFile, "-C", unpacked}, "");
+ runProgram("tar", true, {"xf", tmpFile, "-C", unpacked});
/* If the archive unpacks to a single file/directory, then use
that as the top-level. */
diff --git a/src/nix-store/local.mk b/src/nix-store/local.mk
index 84ff15b24..ade0b233a 100644
--- a/src/nix-store/local.mk
+++ b/src/nix-store/local.mk
@@ -7,5 +7,3 @@ nix-store_SOURCES := $(wildcard $(d)/*.cc)
nix-store_LIBS = libmain libstore libutil libformat
nix-store_LDFLAGS = -lbz2 -pthread $(SODIUM_LIBS)
-
-nix-store_CXXFLAGS = -DCURL=\"$(curl)\"
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index bb3b430c9..a40cca982 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -9,7 +9,6 @@
#include "util.hh"
#include "worker-protocol.hh"
#include "xmlgraph.hh"
-#include "compression.hh"
#include <iostream>
#include <algorithm>
@@ -482,58 +481,12 @@ static void opReadLog(Strings opFlags, Strings opArgs)
RunPager pager;
- // FIXME: move getting logs into Store.
- auto store2 = std::dynamic_pointer_cast<LocalFSStore>(store);
- if (!store2) throw Error(format("store ‘%s’ does not support reading logs") % store->getUri());
-
for (auto & i : opArgs) {
- Path path = useDeriver(store->followLinksToStorePath(i));
-
- string baseName = baseNameOf(path);
- bool found = false;
-
- for (int j = 0; j < 2; j++) {
-
- Path logPath =
- j == 0
- ? (format("%1%/%2%/%3%/%4%") % store2->logDir % drvsLogDir % string(baseName, 0, 2) % string(baseName, 2)).str()
- : (format("%1%/%2%/%3%") % store2->logDir % drvsLogDir % baseName).str();
- Path logBz2Path = logPath + ".bz2";
-
- if (pathExists(logPath)) {
- /* !!! Make this run in O(1) memory. */
- string log = readFile(logPath);
- writeFull(STDOUT_FILENO, log);
- found = true;
- break;
- }
-
- else if (pathExists(logBz2Path)) {
- std::cout << *decompress("bzip2", readFile(logBz2Path));
- found = true;
- break;
- }
- }
-
- if (!found) {
- for (auto & i : settings.logServers) {
- string prefix = i;
- if (!prefix.empty() && prefix.back() != '/') prefix += '/';
- string url = prefix + baseName;
- try {
- string log = runProgram(CURL, true, {"--fail", "--location", "--silent", "--", url});
- std::cout << "(using build log from " << url << ")" << std::endl;
- std::cout << log;
- found = true;
- break;
- } catch (ExecError & e) {
- /* Ignore errors from curl. FIXME: actually, might be
- nice to print a warning on HTTP status != 404. */
- }
- }
- }
-
- if (!found) throw Error(format("build log of derivation ‘%1%’ is not available") % path);
+ auto path = store->followLinksToStorePath(i);
+ auto log = store->getBuildLog(path);
+ if (!log)
+ throw Error("build log of derivation ‘%s’ is not available", path);
+ std::cout << *log;
}
}
@@ -708,6 +661,9 @@ static void opExport(Strings opFlags, Strings opArgs)
for (auto & i : opFlags)
throw UsageError(format("unknown flag ‘%1%’") % i);
+ for (auto & i : opArgs)
+ i = store->followLinksToStorePath(i);
+
FdSink sink(STDOUT_FILENO);
store->exportPaths(opArgs, sink);
}
@@ -721,7 +677,7 @@ static void opImport(Strings opFlags, Strings opArgs)
if (!opArgs.empty()) throw UsageError("no arguments expected");
FdSource source(STDIN_FILENO);
- Paths paths = store->importPaths(source, 0);
+ Paths paths = store->importPaths(source, nullptr, true);
for (auto & i : paths)
cout << format("%1%\n") % i << std::flush;
@@ -839,7 +795,7 @@ static void opServe(Strings opFlags, Strings opArgs)
settings.maxSilentTime = readInt(in);
settings.buildTimeout = readInt(in);
if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
- settings.maxLogSize = readInt(in);
+ in >> settings.maxLogSize;
if (GET_PROTOCOL_MINOR(clientVersion) >= 3) {
settings.set("build-repeat", std::to_string(readInt(in)));
settings.set("enforce-determinism", readInt(in) != 0 ? "true" : "false");
@@ -926,7 +882,7 @@ static void opServe(Strings opFlags, Strings opArgs)
break;
}
- case cmdBuildPaths: { /* Used by build-remote.pl. */
+ case cmdBuildPaths: {
if (!writeAllowed) throw Error("building paths is not allowed");
PathSet paths = readStorePaths<PathSet>(*store, in);
diff --git a/src/nix/command.cc b/src/nix/command.cc
index 5a8288da9..a1b2c120a 100644
--- a/src/nix/command.cc
+++ b/src/nix/command.cc
@@ -79,9 +79,14 @@ StoreCommand::StoreCommand()
mkFlag(0, "store", "store-uri", "URI of the Nix store to use", &storeUri);
}
+ref<Store> StoreCommand::createStore()
+{
+ return openStore(storeUri);
+}
+
void StoreCommand::run()
{
- run(openStore(storeUri));
+ run(createStore());
}
StorePathsCommand::StorePathsCommand()
diff --git a/src/nix/command.hh b/src/nix/command.hh
index a29cdcf7f..fa6c21abf 100644
--- a/src/nix/command.hh
+++ b/src/nix/command.hh
@@ -33,6 +33,7 @@ struct StoreCommand : virtual Command
std::string storeUri;
StoreCommand();
void run() override;
+ virtual ref<Store> createStore();
virtual void run(ref<Store>) = 0;
};
diff --git a/src/nix/copy.cc b/src/nix/copy.cc
index 976b0d3e0..b2165cb8f 100644
--- a/src/nix/copy.cc
+++ b/src/nix/copy.cc
@@ -38,15 +38,19 @@ struct CmdCopy : StorePathsCommand
};
}
- void run(ref<Store> store, Paths storePaths) override
+ ref<Store> createStore() override
+ {
+ return srcUri.empty() ? StoreCommand::createStore() : openStore(srcUri);
+ }
+
+ void run(ref<Store> srcStore, Paths storePaths) override
{
if (srcUri.empty() && dstUri.empty())
throw UsageError("you must pass ‘--from’ and/or ‘--to’");
- ref<Store> srcStore = srcUri.empty() ? store : openStore(srcUri);
- ref<Store> dstStore = dstUri.empty() ? store : openStore(dstUri);
+ ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
- copyPaths(srcStore, dstStore, storePaths);
+ copyPaths(srcStore, dstStore, PathSet(storePaths.begin(), storePaths.end()));
}
};
diff --git a/src/nix/log.cc b/src/nix/log.cc
new file mode 100644
index 000000000..d8a3830e9
--- /dev/null
+++ b/src/nix/log.cc
@@ -0,0 +1,57 @@
+#include "command.hh"
+#include "common-args.hh"
+#include "installables.hh"
+#include "shared.hh"
+#include "store-api.hh"
+
+using namespace nix;
+
+struct CmdLog : StoreCommand, MixInstallables
+{
+ CmdLog()
+ {
+ }
+
+ std::string name() override
+ {
+ return "log";
+ }
+
+ std::string description() override
+ {
+ return "show the build log of the specified packages or paths";
+ }
+
+ void run(ref<Store> store) override
+ {
+ auto elems = evalInstallables(store);
+
+ PathSet paths;
+
+ for (auto & elem : elems) {
+ if (elem.isDrv)
+ paths.insert(elem.drvPath);
+ else
+ paths.insert(elem.outPaths.begin(), elem.outPaths.end());
+ }
+
+ auto subs = getDefaultSubstituters();
+
+ subs.push_front(store);
+
+ for (auto & path : paths) {
+ bool found = false;
+ for (auto & sub : subs) {
+ auto log = sub->getBuildLog(path);
+ if (!log) continue;
+ std::cout << *log;
+ found = true;
+ break;
+ }
+ if (!found)
+ throw Error("build log of path ‘%s’ is not available", path);
+ }
+ }
+};
+
+static RegisterCommand r1(make_ref<CmdLog>());
diff --git a/src/nix/main.cc b/src/nix/main.cc
index 440ced97d..fdb8f6e3a 100644
--- a/src/nix/main.cc
+++ b/src/nix/main.cc
@@ -42,6 +42,7 @@ void mainWrapped(int argc, char * * argv)
NixArgs args;
args.parseCmdline(argvToStrings(argc, argv));
+ settings.update();
assert(args.command);
diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh
index 4ce428f64..532099d02 100644
--- a/tests/binary-cache.sh
+++ b/tests/binary-cache.sh
@@ -18,7 +18,7 @@ basicTests() {
nix-env --option binary-caches "file://$cacheDir" -f dependencies.nix -qas \* | grep -- "---"
- nix-store --option binary-caches "file://$cacheDir" -r $outPath
+ nix-store --option binary-caches "file://$cacheDir" --option signed-binary-caches '' -r $outPath
[ -x $outPath/program ]
@@ -34,7 +34,7 @@ basicTests() {
x=$(nix-env -f dependencies.nix -qas \* --prebuilt-only)
[ -z "$x" ]
- nix-store --option binary-caches "file://$cacheDir" -r $outPath
+ nix-store --option binary-caches "file://$cacheDir" --option signed-binary-caches '' -r $outPath
nix-store --check-validity $outPath
nix-store -qR $outPath | grep input-2
@@ -63,7 +63,7 @@ mv $nar $nar.good
mkdir -p $TEST_ROOT/empty
nix-store --dump $TEST_ROOT/empty | xz > $nar
-nix-build --option binary-caches "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
+nix-build --option binary-caches "file://$cacheDir" --option signed-binary-caches '' dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
grep -q "hash mismatch" $TEST_ROOT/log
mv $nar.good $nar
@@ -73,7 +73,7 @@ mv $nar.good $nar
clearStore
clearCacheCache
-if nix-store --option binary-caches "file://$cacheDir" --option signed-binary-caches '*' -r $outPath; then
+if nix-store --option binary-caches "file://$cacheDir" -r $outPath; then
echo "unsigned binary cache incorrectly accepted"
exit 1
fi
@@ -99,7 +99,7 @@ clearStore
rm $(grep -l "StorePath:.*dependencies-input-2" $cacheDir/*.narinfo)
-nix-build --option binary-caches "file://$cacheDir" dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
+nix-build --option binary-caches "file://$cacheDir" --option signed-binary-caches '' dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log
grep -q "fetching path" $TEST_ROOT/log
diff --git a/tests/nix-shell.sh b/tests/nix-shell.sh
index 26cc521bb..f0f34a5f8 100644
--- a/tests/nix-shell.sh
+++ b/tests/nix-shell.sh
@@ -4,6 +4,7 @@ clearStore
# Test nix-shell -A
export IMPURE_VAR=foo
+export NIX_BUILD_SHELL=$SHELL
output=$(nix-shell --pure shell.nix -A shellDrv --run \
'echo "$IMPURE_VAR - $VAR_FROM_STDENV_SETUP - $VAR_FROM_NIX"')
diff --git a/tests/optimise-store.sh b/tests/optimise-store.sh
index ea4478693..bd88662bc 100644
--- a/tests/optimise-store.sh
+++ b/tests/optimise-store.sh
@@ -5,14 +5,14 @@ clearStore
outPath1=$(echo 'with import ./config.nix; mkDerivation { name = "foo1"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --option auto-optimise-store true)
outPath2=$(echo 'with import ./config.nix; mkDerivation { name = "foo2"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link --option auto-optimise-store true)
-inode1="$(perl -e "print ((lstat('$outPath1/foo'))[1])")"
-inode2="$(perl -e "print ((lstat('$outPath2/foo'))[1])")"
+inode1="$(stat --format=%i $outPath1/foo)"
+inode2="$(stat --format=%i $outPath2/foo)"
if [ "$inode1" != "$inode2" ]; then
echo "inodes do not match"
exit 1
fi
-nlink="$(perl -e "print ((lstat('$outPath1/foo'))[3])")"
+nlink="$(stat --format=%h $outPath1/foo)"
if [ "$nlink" != 3 ]; then
echo "link count incorrect"
exit 1
@@ -20,7 +20,7 @@ fi
outPath3=$(echo 'with import ./config.nix; mkDerivation { name = "foo3"; builder = builtins.toFile "builder" "mkdir $out; echo hello > $out/foo"; }' | nix-build - --no-out-link)
-inode3="$(perl -e "print ((lstat('$outPath3/foo'))[1])")"
+inode3="$(stat --format=%i $outPath3/foo)"
if [ "$inode1" = "$inode3" ]; then
echo "inodes match unexpectedly"
exit 1
@@ -28,8 +28,8 @@ fi
nix-store --optimise
-inode1="$(perl -e "print ((lstat('$outPath1/foo'))[1])")"
-inode3="$(perl -e "print ((lstat('$outPath3/foo'))[1])")"
+inode1="$(stat --format=%i $outPath1/foo)"
+inode3="$(stat --format=%i $outPath3/foo)"
if [ "$inode1" != "$inode3" ]; then
echo "inodes do not match"
exit 1
diff --git a/tests/repair.sh b/tests/repair.sh
index 782838704..57152d450 100644
--- a/tests/repair.sh
+++ b/tests/repair.sh
@@ -51,7 +51,7 @@ nix copy --recursive --to file://$cacheDir $path
chmod u+w $path2
rm -rf $path2
-nix-store --verify --check-contents --repair --option binary-caches "file://$cacheDir"
+nix-store --verify --check-contents --repair --option binary-caches "file://$cacheDir" --option signed-binary-caches ''
if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then
echo "path not repaired properly" >&2
@@ -69,7 +69,7 @@ if nix-store --verify-path $path2; then
exit 1
fi
-nix-store --repair-path $path2 --option binary-caches "file://$cacheDir"
+nix-store --repair-path $path2 --option binary-caches "file://$cacheDir" --option signed-binary-caches ''
if [ "$(nix-hash $path2)" != "$hash" -o -e $path2/bad ]; then
echo "path not repaired properly" >&2
diff --git a/tests/shell.nix b/tests/shell.nix
index ed4d6fbaa..1a092913b 100644
--- a/tests/shell.nix
+++ b/tests/shell.nix
@@ -34,6 +34,7 @@ rec {
mkdir -p $out/bin
echo 'echo foo' > $out/bin/foo
chmod a+rx $out/bin/foo
+ ln -s ${shell} $out/bin/bash
'';
bar = runCommand "bar" {} ''
diff --git a/tests/timeout.nix b/tests/timeout.nix
index 540fba934..e18d717ef 100644
--- a/tests/timeout.nix
+++ b/tests/timeout.nix
@@ -5,6 +5,7 @@ with import ./config.nix;
infiniteLoop = mkDerivation {
name = "timeout";
buildCommand = ''
+ touch $out
echo "‘timeout’ builder entering an infinite loop"
while true ; do echo -n .; done
'';
@@ -13,6 +14,7 @@ with import ./config.nix;
silent = mkDerivation {
name = "silent";
buildCommand = ''
+ touch $out
sleep 60
'';
};
@@ -20,6 +22,7 @@ with import ./config.nix;
closeLog = mkDerivation {
name = "silent";
buildCommand = ''
+ touch $out
exec > /dev/null 2>&1
sleep 1000000000
'';
diff --git a/tests/timeout.sh b/tests/timeout.sh
index ce1ae7d67..77b227e89 100644
--- a/tests/timeout.sh
+++ b/tests/timeout.sh
@@ -29,3 +29,8 @@ if nix-build timeout.nix -A closeLog; then
echo "build should have failed"
exit 1
fi
+
+if nix build -f timeout.nix silent --option build-max-silent-time 2; then
+ echo "build should have failed"
+ exit 1
+fi