-rw-r--r--  Makefile.config.in | 1
-rw-r--r--  configure.ac | 14
-rwxr-xr-x  dev-shell | 18
-rw-r--r--  doc/manual/expressions/builtins.xml | 29
-rw-r--r--  doc/manual/expressions/language-values.xml | 11
-rw-r--r--  doc/manual/hacking.xml | 4
-rw-r--r--  doc/manual/installation/prerequisites-source.xml | 2
-rw-r--r--  nix.spec.in | 1
-rw-r--r--  release.nix | 8
-rw-r--r--  shell.nix | 34
-rw-r--r--  src/libexpr/lexer.l | 18
-rw-r--r--  src/libstore/binary-cache-store.cc | 7
-rw-r--r--  src/libstore/build.cc | 137
-rw-r--r--  src/libstore/gc.cc | 5
-rw-r--r--  src/libstore/globals.hh | 5
-rw-r--r--  src/libstore/local-store.cc | 3
-rw-r--r--  src/libstore/misc.cc | 12
-rw-r--r--  src/libstore/nar-accessor.cc | 3
-rw-r--r--  src/libstore/optimise-store.cc | 25
-rw-r--r--  src/libstore/pathlocks.cc | 87
-rw-r--r--  src/libstore/remote-fs-accessor.cc | 57
-rw-r--r--  src/libstore/remote-fs-accessor.hh | 29
-rw-r--r--  src/libstore/remote-store.cc | 100
-rw-r--r--  src/libstore/remote-store.hh | 35
-rw-r--r--  src/libstore/s3-binary-cache-store.cc | 13
-rw-r--r--  src/libstore/ssh-store.cc | 130
-rw-r--r--  src/libstore/store-api.cc | 55
-rw-r--r--  src/libstore/store-api.hh | 43
-rw-r--r--  src/libstore/worker-protocol.hh | 4
-rw-r--r--  src/libutil/finally.hh | 2
-rw-r--r--  src/libutil/logging.hh | 1
-rwxr-xr-x  src/nix-channel/nix-channel.cc | 4
-rw-r--r--  src/nix-daemon/nix-daemon.cc | 109
-rw-r--r--  src/nix-store/nix-store.cc | 21
-rw-r--r--  src/nix-store/serve-protocol.hh | 2
-rw-r--r--  src/nix/command.cc | 6
-rw-r--r--  src/nix/copy.cc | 4
-rw-r--r--  src/nix/sigs.cc | 2
-rw-r--r--  src/nix/verify.cc | 2
-rw-r--r--  tests/lang/eval-fail-path-slash.nix | 6
-rw-r--r--  tests/lang/eval-okay-comments.exp | 1
-rw-r--r--  tests/lang/eval-okay-comments.nix | 59
42 files changed, 871 insertions(+), 238 deletions(-)
diff --git a/Makefile.config.in b/Makefile.config.in
index 2db7172b1..a03776d57 100644
--- a/Makefile.config.in
+++ b/Makefile.config.in
@@ -18,7 +18,6 @@ bsddiff_compat_include = @bsddiff_compat_include@
curl = @curl@
datadir = @datadir@
datarootdir = @datarootdir@
-dblatex = @dblatex@
docdir = @docdir@
exec_prefix = @exec_prefix@
includedir = @includedir@
diff --git a/configure.ac b/configure.ac
index 91ed9947a..e6b11be2d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -128,7 +128,6 @@ NEED_PROG(bzip2, bzip2)
NEED_PROG(gzip, gzip)
NEED_PROG(xz, xz)
AC_PATH_PROG(dot, dot)
-AC_PATH_PROG(dblatex, dblatex)
AC_PATH_PROG(pv, pv, pv)
@@ -214,7 +213,7 @@ if test "$gc" = yes; then
fi
-# Check for the required Perl dependencies (DBI, DBD::SQLite and WWW::Curl).
+# Check for the required Perl dependencies (DBI, DBD::SQLite).
perlFlags="-I$perllibdir"
AC_ARG_WITH(dbi, AC_HELP_STRING([--with-dbi=PATH],
@@ -225,10 +224,6 @@ AC_ARG_WITH(dbd-sqlite, AC_HELP_STRING([--with-dbd-sqlite=PATH],
[prefix of the Perl DBD::SQLite library]),
perlFlags="$perlFlags -I$withval")
-AC_ARG_WITH(www-curl, AC_HELP_STRING([--with-www-curl=PATH],
- [prefix of the Perl WWW::Curl library]),
- perlFlags="$perlFlags -I$withval")
-
AC_MSG_CHECKING([whether DBD::SQLite works])
if ! $perl $perlFlags -e 'use DBI; use DBD::SQLite;' 2>&5; then
AC_MSG_RESULT(no)
@@ -236,13 +231,6 @@ if ! $perl $perlFlags -e 'use DBI; use DBD::SQLite;' 2>&5; then
fi
AC_MSG_RESULT(yes)
-AC_MSG_CHECKING([whether WWW::Curl works])
-if ! $perl $perlFlags -e 'use WWW::Curl;' 2>&5; then
- AC_MSG_RESULT(no)
- AC_MSG_FAILURE([The Perl module WWW::Curl is missing.])
-fi
-AC_MSG_RESULT(yes)
-
AC_SUBST(perlFlags)
diff --git a/dev-shell b/dev-shell
deleted file mode 100755
index 5a090ded6..000000000
--- a/dev-shell
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env bash
-if [ -e tests/test-tmp ]; then
- chmod -R u+w tests/test-tmp
- rm -rf tests/test-tmp
-fi
-
-s=$(type -p nix-shell)
-exec $s release.nix -A tarball --command "
- unset http_proxy
- export NIX_REMOTE=$NIX_REMOTE
- export NIX_PATH='$NIX_PATH'
- export NIX_BUILD_SHELL=$(type -p bash)
- export c=\$configureFlags
- exec $s release.nix -A build.$(if [ $(uname -s) = Darwin ]; then echo x86_64-darwin; elif [[ $(uname -m) =~ ^i[3456]86$ ]]; then echo i686-linux; else echo x86_64-linux; fi) --exclude tarball --command '
- configureFlags+=\" \$c --prefix=$(pwd)/inst --sysconfdir=$(pwd)/inst/etc\"
- return
- '" \
- "$@"
diff --git a/doc/manual/expressions/builtins.xml b/doc/manual/expressions/builtins.xml
index 9517f2010..063bc04be 100644
--- a/doc/manual/expressions/builtins.xml
+++ b/doc/manual/expressions/builtins.xml
@@ -210,6 +210,35 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
</varlistentry>
+ <varlistentry><term><function>builtins.match</function>
+ <replaceable>regex</replaceable> <replaceable>str</replaceable></term>
+
+ <listitem><para>Returns a list if
+ <replaceable>regex</replaceable> matches
+ <replaceable>str</replaceable> precisely, otherwise returns <literal>null</literal>.
+ Each item in the list is a regex group.
+
+<programlisting>
+builtins.match "ab" "abc"
+</programlisting>
+
+Evaluates to <literal>null</literal>.
+
+<programlisting>
+builtins.match "abc" "abc"
+</programlisting>
+
+Evaluates to <literal>[ ]</literal>.
+
+<programlisting>
+builtins.match "a(b)(c)" "abc"
+</programlisting>
+
+Evaluates to <literal>[ "b" "c" ]</literal>.
+
+
+ </para></listitem>
+ </varlistentry>
<varlistentry><term><function>builtins.elem</function>
<replaceable>x</replaceable> <replaceable>xs</replaceable></term>
diff --git a/doc/manual/expressions/language-values.xml b/doc/manual/expressions/language-values.xml
index b90baac50..67da688a4 100644
--- a/doc/manual/expressions/language-values.xml
+++ b/doc/manual/expressions/language-values.xml
@@ -167,7 +167,16 @@ stdenv.mkDerivation {
user's home directory. e.g. <filename>~/foo</filename> would be
equivalent to <filename>/home/edolstra/foo</filename> for a user
whose home directory is <filename>/home/edolstra</filename>.
- </para></listitem>
+ </para>
+
+ <para>Paths can also be specified between angle brackets, e.g.
+ <literal>&lt;nixpkgs&gt;</literal>. This means that the directories
+ listed in the environment variable
+ <envar linkend="env-NIX_PATH">NIX_PATH</envar> will be searched
+ for the given file or directory name.
+ </para>
+
+ </listitem>
<listitem><para><emphasis>Booleans</emphasis> with values
<literal>true</literal> and
diff --git a/doc/manual/hacking.xml b/doc/manual/hacking.xml
index 11af0998f..183aed7ad 100644
--- a/doc/manual/hacking.xml
+++ b/doc/manual/hacking.xml
@@ -22,7 +22,7 @@ $ nix-build release.nix -A build.x86_64-linux
environment variables are set up so that those dependencies can be
found:
<screen>
-$ ./dev-shell
+$ nix-shell
</screen>
To build Nix itself in this shell:
<screen>
@@ -30,7 +30,7 @@ To build Nix itself in this shell:
[nix-shell]$ configurePhase
[nix-shell]$ make
</screen>
-To test it:
+To install it in <literal>$(pwd)/inst</literal> and test it:
<screen>
[nix-shell]$ make install
[nix-shell]$ make installcheck
diff --git a/doc/manual/installation/prerequisites-source.xml b/doc/manual/installation/prerequisites-source.xml
index c0065f133..cd6d61356 100644
--- a/doc/manual/installation/prerequisites-source.xml
+++ b/doc/manual/installation/prerequisites-source.xml
@@ -34,7 +34,7 @@
or higher. If your distribution does not provide it, please install
it from <link xlink:href="http://www.sqlite.org/" />.</para></listitem>
- <listitem><para>The Perl DBI, DBD::SQLite, and WWW::Curl libraries, which are
+ <listitem><para>The Perl DBI and DBD::SQLite libraries, which are
available from <link
xlink:href="http://search.cpan.org/">CPAN</link> if your
distribution does not provide them.</para></listitem>
diff --git a/nix.spec.in b/nix.spec.in
index edbc12d8f..401a2dc8a 100644
--- a/nix.spec.in
+++ b/nix.spec.in
@@ -16,7 +16,6 @@ BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
%endif
BuildRequires: perl(DBD::SQLite)
BuildRequires: perl(DBI)
-BuildRequires: perl(WWW::Curl)
BuildRequires: perl(ExtUtils::ParseXS)
Requires: /usr/bin/perl
Requires: curl
diff --git a/release.nix b/release.nix
index d236cdae9..d825dd583 100644
--- a/release.nix
+++ b/release.nix
@@ -25,7 +25,7 @@ let
buildInputs =
[ curl bison flex perl libxml2 libxslt bzip2 xz
- pkgconfig sqlite libsodium
+ pkgconfig sqlite libsodium boehmgc
docbook5 docbook5_xsl
autoconf-archive
] ++ lib.optional (!lib.inNixShell) git;
@@ -33,7 +33,7 @@ let
configureFlags = ''
--with-dbi=${perlPackages.DBI}/${perl.libPrefix}
--with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
- --with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix}
+ --enable-gc
'';
postUnpack = ''
@@ -84,7 +84,6 @@ let
--disable-init-state
--with-dbi=${perlPackages.DBI}/${perl.libPrefix}
--with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
- --with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix}
--enable-gc
--sysconfdir=/etc
'';
@@ -156,7 +155,6 @@ let
--disable-init-state
--with-dbi=${perlPackages.DBI}/${perl.libPrefix}
--with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}
- --with-www-curl=${perlPackages.WWWCurl}/${perl.libPrefix}
'';
dontInstall = false;
@@ -283,7 +281,7 @@ let
src = jobs.tarball;
diskImage = (diskImageFun vmTools.diskImageFuns)
{ extraPackages =
- [ "perl-DBD-SQLite" "perl-devel" "sqlite" "sqlite-devel" "bzip2-devel" "emacs" "perl-WWW-Curl" "libcurl-devel" "openssl-devel" "xz-devel" ]
+ [ "perl-DBD-SQLite" "perl-devel" "sqlite" "sqlite-devel" "bzip2-devel" "emacs" "libcurl-devel" "openssl-devel" "xz-devel" ]
++ extraPackages; };
memSize = 1024;
meta.schedulingPriority = 50;
diff --git a/shell.nix b/shell.nix
new file mode 100644
index 000000000..dd4484105
--- /dev/null
+++ b/shell.nix
@@ -0,0 +1,34 @@
+with import <nixpkgs> {};
+
+stdenv.mkDerivation {
+ name = "nix";
+
+ buildInputs =
+ [ curl bison flex perl libxml2 libxslt bzip2 xz
+ pkgconfig sqlite libsodium boehmgc
+ docbook5 docbook5_xsl
+ autoconf-archive
+ (aws-sdk-cpp.override {
+ apis = ["s3"];
+ customMemoryManagement = false;
+ })
+ autoreconfHook
+ perlPackages.DBDSQLite
+ ];
+
+ configureFlags =
+ [ "--disable-init-state"
+ "--enable-gc"
+ "--with-dbi=${perlPackages.DBI}/${perl.libPrefix}"
+ "--with-dbd-sqlite=${perlPackages.DBDSQLite}/${perl.libPrefix}"
+ ];
+
+ enableParallelBuilding = true;
+
+ installFlags = "sysconfdir=$(out)/etc";
+
+ shellHook =
+ ''
+ configureFlags+=" --prefix=$(pwd)/inst"
+ '';
+}
diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l
index f3660ab43..5b1ff0350 100644
--- a/src/libexpr/lexer.l
+++ b/src/libexpr/lexer.l
@@ -87,8 +87,8 @@ static Expr * unescapeStr(SymbolTable & symbols, const char * s)
ID [a-zA-Z\_][a-zA-Z0-9\_\'\-]*
INT [0-9]+
FLOAT (([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)?
-PATH [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+
-HPATH \~(\/[a-zA-Z0-9\.\_\-\+]+)+
+PATH [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+\/?
+HPATH \~(\/[a-zA-Z0-9\.\_\-\+]+)+\/?
SPATH \<[a-zA-Z0-9\.\_\-\+]+(\/[a-zA-Z0-9\.\_\-\+]+)*\>
URI [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+
@@ -182,14 +182,22 @@ or { return OR_KW; }
<INITIAL,INSIDE_DOLLAR_CURLY>{
-{PATH} { yylval->path = strdup(yytext); return PATH; }
-{HPATH} { yylval->path = strdup(yytext); return HPATH; }
+{PATH} { if (yytext[yyleng-1] == '/')
+ throw ParseError("path ‘%s’ has a trailing slash", yytext);
+ yylval->path = strdup(yytext);
+ return PATH;
+ }
+{HPATH} { if (yytext[yyleng-1] == '/')
+ throw ParseError("path ‘%s’ has a trailing slash", yytext);
+ yylval->path = strdup(yytext);
+ return HPATH;
+ }
{SPATH} { yylval->path = strdup(yytext); return SPATH; }
{URI} { yylval->uri = strdup(yytext); return URI; }
[ \t\r\n]+ /* eat up whitespace */
\#[^\r\n]* /* single-line comments */
-\/\*([^*]|\*[^\/])*\*\/ /* long comments */
+\/\*([^*]|\*+[^*/])*\*+\/ /* long comments */
. return yytext[0];
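
The long-comment rule above previously failed on block comments whose body ends in extra asterisks (e.g. "/* foo **/"), which is what the new "\*+[^*/]" / "\*+\/" form fixes. A minimal standalone check of the two patterns, using std::regex instead of flex purely for illustration (the test strings are made up):

#include <cassert>
#include <regex>
#include <string>

int main()
{
    // Old lexer rule: /\*([^*]|\*[^/])*\*/
    std::regex oldRule(R"(/\*([^*]|\*[^/])*\*/)");
    // New lexer rule: /\*([^*]|\*+[^*/])*\*+/
    std::regex newRule(R"(/\*([^*]|\*+[^*/])*\*+/)");

    std::string ok  = "/* ordinary comment */";
    std::string bad = "/* ends with extra stars **/";

    assert(std::regex_match(ok, oldRule) && std::regex_match(ok, newRule));
    assert(!std::regex_match(bad, oldRule));  // old rule stops matching here
    assert(std::regex_match(bad, newRule));   // new rule accepts it
}
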
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 4125af118..3e07a2aa2 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -6,9 +6,9 @@
#include "globals.hh"
#include "nar-info.hh"
#include "sync.hh"
-#include "worker-protocol.hh"
-#include "nar-accessor.hh"
+#include "remote-fs-accessor.hh"
#include "nar-info-disk-cache.hh"
+#include "nar-accessor.hh"
#include "json.hh"
#include <chrono>
@@ -379,8 +379,7 @@ Path BinaryCacheStore::addTextToStore(const string & name, const string & s,
ref<FSAccessor> BinaryCacheStore::getFSAccessor()
{
- return make_ref<BinaryCacheStoreAccessor>(ref<BinaryCacheStore>(
- std::dynamic_pointer_cast<BinaryCacheStore>(shared_from_this())));
+ return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
}
}
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index b682a8019..c970fbdca 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -17,9 +17,9 @@
#include <sstream>
#include <thread>
#include <future>
+#include <chrono>
#include <limits.h>
-#include <time.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/types.h>
@@ -187,6 +187,9 @@ bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) {
}
+typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;
+
+
/* A mapping used to remember for each child process to what goal it
belongs, and file descriptors for receiving log data and output
path creation commands. */
@@ -197,8 +200,8 @@ struct Child
set<int> fds;
bool respectTimeouts;
bool inBuildSlot;
- time_t lastOutput; /* time we last got output on stdout/stderr */
- time_t timeStarted;
+ steady_time_point lastOutput; /* time we last got output on stdout/stderr */
+ steady_time_point timeStarted;
};
@@ -238,7 +241,7 @@ private:
WeakGoals waitingForAWhile;
/* Last time the goals in `waitingForAWhile' where woken up. */
- time_t lastWokenUp;
+ steady_time_point lastWokenUp;
/* Cache for pathContentsGood(). */
std::map<Path, bool> pathContentsGoodCache;
@@ -1249,8 +1252,7 @@ void DerivationGoal::inputsRealised()
}
/* Second, the input sources. */
- for (auto & i : drv->inputSrcs)
- worker.store.computeFSClosure(i, inputPaths);
+ worker.store.computeFSClosure(drv->inputSrcs, inputPaths);
debug(format("added input paths %1%") % showPaths(inputPaths));
@@ -1270,6 +1272,8 @@ void DerivationGoal::inputsRealised()
build hook. */
state = &DerivationGoal::tryToBuild;
worker.wakeUp(shared_from_this());
+
+ result = BuildResult();
}
@@ -1343,6 +1347,7 @@ void DerivationGoal::tryToBuild()
case rpAccept:
/* Yes, it has started doing so. Wait until we get
EOF from the hook. */
+ result.startTime = time(0); // inexact
state = &DerivationGoal::buildDone;
return;
case rpPostpone:
@@ -1419,6 +1424,9 @@ void DerivationGoal::buildDone()
debug(format("builder process for ‘%1%’ finished") % drvPath);
+ result.timesBuilt++;
+ result.stopTime = time(0);
+
/* So the child is gone now. */
worker.childTerminated(this);
@@ -2102,6 +2110,8 @@ void DerivationGoal::startBuilder()
/* Create a pipe to get the output of the builder. */
builderOut.create();
+ result.startTime = time(0);
+
/* Fork a child to build the package. */
#if __linux__
if (useChroot) {
@@ -2555,15 +2565,18 @@ void DerivationGoal::runChild()
*/
sandboxProfile += "(allow file-read* file-write* process-exec\n";
for (auto & i : dirsInChroot) {
- if (i.first != i.second)
+ if (i.first != i.second.source)
throw Error(format(
"can't map '%1%' to '%2%': mismatched impure paths not supported on Darwin")
- % i.first % i.second);
+ % i.first % i.second.source);
string path = i.first;
struct stat st;
- if (lstat(path.c_str(), &st))
+ if (lstat(path.c_str(), &st)) {
+ if (i.second.optional && errno == ENOENT)
+ continue;
throw SysError(format("getting attributes of path ‘%1%’") % path);
+ }
if (S_ISDIR(st.st_mode))
sandboxProfile += (format("\t(subpath \"%1%\")\n") % path).str();
else
@@ -2674,7 +2687,9 @@ void DerivationGoal::registerOutputs()
outputs to allow hard links between outputs. */
InodesSeen inodesSeen;
- Path checkSuffix = "-check";
+ Path checkSuffix = ".check";
+ bool runDiffHook = settings.get("run-diff-hook", false);
+ bool keepPreviousRound = settings.keepFailed || runDiffHook;
/* Check whether the output paths were created, and grep each
output path to determine what other paths it references. Also make all
@@ -2904,30 +2919,42 @@ void DerivationGoal::registerOutputs()
assert(prevInfos.size() == infos.size());
for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
if (!(*i == *j)) {
+ result.isNonDeterministic = true;
Path prev = i->path + checkSuffix;
- if (pathExists(prev))
- throw NotDeterministic(
- format("output ‘%1%’ of ‘%2%’ differs from ‘%3%’ from previous round")
- % i->path % drvPath % prev);
- else
- throw NotDeterministic(
- format("output ‘%1%’ of ‘%2%’ differs from previous round")
- % i->path % drvPath);
+ bool prevExists = keepPreviousRound && pathExists(prev);
+ auto msg = prevExists
+ ? fmt("output ‘%1%’ of ‘%2%’ differs from ‘%3%’ from previous round", i->path, drvPath, prev)
+ : fmt("output ‘%1%’ of ‘%2%’ differs from previous round", i->path, drvPath);
+
+ auto diffHook = settings.get("diff-hook", std::string(""));
+ if (prevExists && diffHook != "" && runDiffHook) {
+ try {
+ auto diff = runProgram(diffHook, true, {prev, i->path});
+ if (diff != "")
+ printError(chomp(diff));
+ } catch (Error & error) {
+ printError("diff hook execution failed: %s", error.what());
+ }
+ }
+
+ if (settings.get("enforce-determinism", true))
+ throw NotDeterministic(msg);
+
+ printError(msg);
+ curRound = nrRounds; // we know enough, bail out early
}
- abort(); // shouldn't happen
}
- if (settings.keepFailed) {
+ /* If this is the first round of several, then move the output out
+ of the way. */
+ if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && keepPreviousRound) {
for (auto & i : drv->outputs) {
Path prev = i.second.path + checkSuffix;
deletePath(prev);
- if (curRound < nrRounds) {
- Path dst = i.second.path + checkSuffix;
- if (rename(i.second.path.c_str(), dst.c_str()))
- throw SysError(format("renaming ‘%1%’ to ‘%2%’") % i.second.path % dst);
- }
+ Path dst = i.second.path + checkSuffix;
+ if (rename(i.second.path.c_str(), dst.c_str()))
+ throw SysError(format("renaming ‘%1%’ to ‘%2%’") % i.second.path % dst);
}
-
}
if (curRound < nrRounds) {
@@ -2935,6 +2962,15 @@ void DerivationGoal::registerOutputs()
return;
}
+ /* Remove the .check directories if we're done. FIXME: keep them
+ if the result was not deterministic? */
+ if (curRound == nrRounds) {
+ for (auto & i : drv->outputs) {
+ Path prev = i.second.path + checkSuffix;
+ deletePath(prev);
+ }
+ }
+
/* Register each output path as valid, and register the sets of
paths referenced by each of them. If there are cycles in the
outputs, this will fail. */
@@ -3045,7 +3081,8 @@ void DerivationGoal::handleEOF(int fd)
void DerivationGoal::flushLine()
{
- if (settings.verboseBuild)
+ if (settings.verboseBuild &&
+ (settings.printRepeatedBuilds || curRound == 1))
printError(filterANSIEscapes(currentLogLine, true));
else {
logTail.push_back(currentLogLine);
@@ -3387,7 +3424,7 @@ Worker::Worker(LocalStore & store)
if (working) abort();
working = true;
nrLocalBuilds = 0;
- lastWokenUp = 0;
+ lastWokenUp = steady_time_point::min();
permanentFailure = false;
timedOut = false;
}
@@ -3496,7 +3533,7 @@ void Worker::childStarted(GoalPtr goal, const set<int> & fds,
child.goal = goal;
child.goal2 = goal.get();
child.fds = fds;
- child.timeStarted = child.lastOutput = time(0);
+ child.timeStarted = child.lastOutput = steady_time_point::clock::now();
child.inBuildSlot = inBuildSlot;
child.respectTimeouts = respectTimeouts;
children.emplace_back(child);
@@ -3615,35 +3652,38 @@ void Worker::waitForInput()
bool useTimeout = false;
struct timeval timeout;
timeout.tv_usec = 0;
- time_t before = time(0);
+ auto before = steady_time_point::clock::now();
/* If we're monitoring for silence on stdout/stderr, or if there
is a build timeout, then wait for input until the first
deadline for any child. */
- assert(sizeof(time_t) >= sizeof(long));
- time_t nearest = LONG_MAX; // nearest deadline
+ auto nearest = steady_time_point::max(); // nearest deadline
for (auto & i : children) {
if (!i.respectTimeouts) continue;
if (settings.maxSilentTime != 0)
- nearest = std::min(nearest, i.lastOutput + settings.maxSilentTime);
+ nearest = std::min(nearest, i.lastOutput + std::chrono::seconds(settings.maxSilentTime));
if (settings.buildTimeout != 0)
- nearest = std::min(nearest, i.timeStarted + settings.buildTimeout);
+ nearest = std::min(nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout));
}
- if (nearest != LONG_MAX) {
- timeout.tv_sec = std::max((time_t) 1, nearest - before);
+ if (nearest != steady_time_point::max()) {
+ timeout.tv_sec = std::max(1L, (long) std::chrono::duration_cast<std::chrono::seconds>(nearest - before).count());
useTimeout = true;
- printMsg(lvlVomit, format("sleeping %1% seconds") % timeout.tv_sec);
}
/* If we are polling goals that are waiting for a lock, then wake
up after a few seconds at most. */
if (!waitingForAWhile.empty()) {
useTimeout = true;
- if (lastWokenUp == 0)
+ if (lastWokenUp == steady_time_point::min())
printError("waiting for locks or build slots...");
- if (lastWokenUp == 0 || lastWokenUp > before) lastWokenUp = before;
- timeout.tv_sec = std::max((time_t) 1, (time_t) (lastWokenUp + settings.pollInterval - before));
- } else lastWokenUp = 0;
+ if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before;
+ timeout.tv_sec = std::max(1L,
+ (long) std::chrono::duration_cast<std::chrono::seconds>(
+ lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count());
+ } else lastWokenUp = steady_time_point::min();
+
+ if (useTimeout)
+ vomit("sleeping %d seconds", timeout.tv_sec);
/* Use select() to wait for the input side of any logger pipe to
become `available'. Note that `available' (i.e., non-blocking)
@@ -3663,7 +3703,7 @@ void Worker::waitForInput()
throw SysError("waiting for input");
}
- time_t after = time(0);
+ auto after = steady_time_point::clock::now();
/* Process all available file descriptors. */
decltype(children)::iterator i;
@@ -3701,7 +3741,7 @@ void Worker::waitForInput()
if (goal->getExitCode() == Goal::ecBusy &&
settings.maxSilentTime != 0 &&
j->respectTimeouts &&
- after - j->lastOutput >= (time_t) settings.maxSilentTime)
+ after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime))
{
printError(
format("%1% timed out after %2% seconds of silence")
@@ -3712,7 +3752,7 @@ void Worker::waitForInput()
else if (goal->getExitCode() == Goal::ecBusy &&
settings.buildTimeout != 0 &&
j->respectTimeouts &&
- after - j->timeStarted >= (time_t) settings.buildTimeout)
+ after - j->timeStarted >= std::chrono::seconds(settings.buildTimeout))
{
printError(
format("%1% timed out after %2% seconds")
@@ -3721,7 +3761,7 @@ void Worker::waitForInput()
}
}
- if (!waitingForAWhile.empty() && lastWokenUp + (time_t) settings.pollInterval <= after) {
+ if (!waitingForAWhile.empty() && lastWokenUp + std::chrono::seconds(settings.pollInterval) <= after) {
lastWokenUp = after;
for (auto & i : waitingForAWhile) {
GoalPtr goal = i.lock();
@@ -3783,15 +3823,16 @@ void LocalStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
worker.run(goals);
PathSet failed;
- for (auto & i : goals)
- if (i->getExitCode() == Goal::ecFailed) {
+ for (auto & i : goals) {
+ if (i->getExitCode() != Goal::ecSuccess) {
DerivationGoal * i2 = dynamic_cast<DerivationGoal *>(i.get());
if (i2) failed.insert(i2->getDrvPath());
else failed.insert(dynamic_cast<SubstitutionGoal *>(i.get())->getStorePath());
}
+ }
if (!failed.empty())
- throw Error(worker.exitStatus(), "build of %s failed",showPaths(failed));
+ throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
}
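
The timeout handling above replaces raw time_t arithmetic with std::chrono::steady_clock, which is immune to wall-clock adjustments; a plain number is only needed again when building the timeval passed to select(). A small sketch of that conversion pattern (the function and variable names here are illustrative, not the ones in build.cc):

#include <algorithm>
#include <chrono>
#include <sys/select.h>
#include <sys/time.h>

using steady_time_point = std::chrono::time_point<std::chrono::steady_clock>;

/* Convert a steady-clock deadline into a select() timeout, clamped to at
   least one second so we never busy-loop once the deadline has passed. */
static timeval timeoutUntil(steady_time_point deadline)
{
    auto now = std::chrono::steady_clock::now();
    auto secs = std::chrono::duration_cast<std::chrono::seconds>(deadline - now).count();
    timeval tv;
    tv.tv_sec = std::max((long) 1, (long) secs);
    tv.tv_usec = 0;
    return tv;
}
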
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index ae03604fa..f8c4a0723 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -621,6 +621,11 @@ void LocalStore::tryToDelete(GCState & state, const Path & path)
/* Don't delete .chroot directories for derivations that are
currently being built. */
if (isActiveTempFile(state, path, ".chroot")) return;
+
+ /* Don't delete .check directories for derivations that are
+ currently being built, because we may need to run
+ diff-hook. */
+ if (isActiveTempFile(state, path, ".check")) return;
}
PathSet visited;
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 3194193bc..a423b4e5c 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -149,6 +149,11 @@ struct Settings {
before being killed (0 means no limit). */
unsigned long maxLogSize;
+ /* When build-repeat > 0 and verboseBuild == true, whether to
+ print repeated builds (i.e. builds other than the first one) to
+ stderr. Hack to prevent Hydra logs from being polluted. */
+ bool printRepeatedBuilds = true;
+
/* How often (in seconds) to poll for locks. */
unsigned int pollInterval;
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index d3a641fd9..612efde7b 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -37,7 +37,8 @@ namespace nix {
LocalStore::LocalStore(const Params & params)
- : LocalFSStore(params)
+ : Store(params)
+ , LocalFSStore(params)
, realStoreDir(get(params, "real", rootDir != "" ? rootDir + "/nix/store" : storeDir))
, dbDir(stateDir + "/db")
, linksDir(realStoreDir + "/.links")
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
index 0c2c49e55..9a88cdc31 100644
--- a/src/libstore/misc.cc
+++ b/src/libstore/misc.cc
@@ -8,7 +8,7 @@
namespace nix {
-void Store::computeFSClosure(const Path & startPath,
+void Store::computeFSClosure(const PathSet & startPaths,
PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
{
struct State
@@ -85,7 +85,8 @@ void Store::computeFSClosure(const Path & startPath,
});
};
- enqueue(startPath);
+ for (auto & startPath : startPaths)
+ enqueue(startPath);
{
auto state(state_.lock());
@@ -95,6 +96,13 @@ void Store::computeFSClosure(const Path & startPath,
}
+void Store::computeFSClosure(const Path & startPath,
+ PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
+{
+ computeFSClosure(PathSet{startPath}, paths_, flipDirection, includeOutputs, includeDerivers);
+}
+
+
void Store::queryMissing(const PathSet & targets,
PathSet & willBuild_, PathSet & willSubstitute_, PathSet & unknown_,
unsigned long long & downloadSize_, unsigned long long & narSize_)
diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc
index ded19c05d..4cb5de744 100644
--- a/src/libstore/nar-accessor.cc
+++ b/src/libstore/nar-accessor.cc
@@ -52,8 +52,9 @@ struct NarIndexer : ParseSink, StringSource
void preallocateContents(unsigned long long size) override
{
currentStart = string(s, pos, 16);
+ assert(size <= std::numeric_limits<size_t>::max());
members.emplace(currentPath,
- NarMember{FSAccessor::Type::tRegular, isExec, pos, size});
+ NarMember{FSAccessor::Type::tRegular, isExec, pos, (size_t) size});
}
void receiveContents(unsigned char * data, unsigned int len) override
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index 1bf8b7d83..454c8b49d 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -5,6 +5,7 @@
#include "globals.hh"
#include <cstdlib>
+#include <cstring>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -148,10 +149,24 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa
inodeHash.insert(st.st_ino);
return;
}
- if (errno != EEXIST)
- throw SysError(format("cannot link ‘%1%’ to ‘%2%’") % linkPath % path);
- /* Fall through if another process created ‘linkPath’ before
- we did. */
+
+ switch (errno) {
+ case EEXIST:
+ /* Fall through if another process created ‘linkPath’ before
+ we did. */
+ break;
+
+ case ENOSPC:
+ /* On ext4, that probably means the directory index is
+ full. When that happens, it's fine to ignore it: we
+ just effectively disable deduplication of this
+ file. */
+ printInfo("cannot link ‘%s’ to ‘%s’: %s", linkPath, path, strerror(errno));
+ return;
+
+ default:
+ throw SysError("cannot link ‘%1%’ to ‘%2%’", linkPath, path);
+ }
}
/* Yes! We've seen a file with the same contents. Replace the
@@ -195,7 +210,7 @@ void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path, InodeHa
printInfo(format("‘%1%’ has maximum number of links") % linkPath);
return;
}
- throw SysError(format("cannot link ‘%1%’ to ‘%2%’") % tempLink % linkPath);
+ throw SysError("cannot link ‘%1%’ to ‘%2%’", tempLink, linkPath);
}
/* Atomically replace the old file with the new hard link. */
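
The switch above distinguishes the two benign failure modes of link(2) during store deduplication: EEXIST (another process won the race to create the link) and ENOSPC (on ext4, typically a full directory index in .links, so the file simply stays un-deduplicated). A compressed sketch of that pattern outside of Nix (the function name and paths are placeholders):

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <unistd.h>

/* Try to hard-link 'path' into the content-addressed 'linkPath'.
   EEXIST and ENOSPC are non-fatal; anything else is a real error. */
bool tryDedupLink(const char * linkPath, const char * path)
{
    if (link(path, linkPath) == 0)
        return true;

    switch (errno) {
    case EEXIST:
        /* An equivalent link already exists; the caller can reuse it. */
        return true;
    case ENOSPC:
        /* e.g. ext4 directory index full: skip dedup for this file. */
        std::fprintf(stderr, "cannot link '%s' to '%s': %s\n",
            linkPath, path, std::strerror(errno));
        return false;
    default:
        std::perror("link");
        return false;
    }
}
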
diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc
index 8788ee164..fecd63687 100644
--- a/src/libstore/pathlocks.cc
+++ b/src/libstore/pathlocks.cc
@@ -1,5 +1,6 @@
#include "pathlocks.hh"
#include "util.hh"
+#include "sync.hh"
#include <cerrno>
#include <cstdlib>
@@ -72,7 +73,7 @@ bool lockFile(int fd, LockType lockType, bool wait)
close a descriptor, the previous lock will be closed as well. And
there is no way to query whether we already have a lock (F_GETLK
only works on locks held by other processes). */
-static StringSet lockedPaths; /* !!! not thread-safe */
+static Sync<StringSet> lockedPaths_;
PathLocks::PathLocks()
@@ -108,49 +109,60 @@ bool PathLocks::lockPaths(const PathSet & _paths,
debug(format("locking path ‘%1%’") % path);
- if (lockedPaths.find(lockPath) != lockedPaths.end())
- throw Error("deadlock: trying to re-acquire self-held lock");
+ {
+ auto lockedPaths(lockedPaths_.lock());
+ if (lockedPaths->count(lockPath))
+ throw Error("deadlock: trying to re-acquire self-held lock ‘%s’", lockPath);
+ lockedPaths->insert(lockPath);
+ }
+
+ try {
- AutoCloseFD fd;
+ AutoCloseFD fd;
- while (1) {
+ while (1) {
- /* Open/create the lock file. */
- fd = openLockFile(lockPath, true);
+ /* Open/create the lock file. */
+ fd = openLockFile(lockPath, true);
- /* Acquire an exclusive lock. */
- if (!lockFile(fd.get(), ltWrite, false)) {
- if (wait) {
- if (waitMsg != "") printError(waitMsg);
- lockFile(fd.get(), ltWrite, true);
- } else {
- /* Failed to lock this path; release all other
- locks. */
- unlock();
- return false;
+ /* Acquire an exclusive lock. */
+ if (!lockFile(fd.get(), ltWrite, false)) {
+ if (wait) {
+ if (waitMsg != "") printError(waitMsg);
+ lockFile(fd.get(), ltWrite, true);
+ } else {
+ /* Failed to lock this path; release all other
+ locks. */
+ unlock();
+ return false;
+ }
}
+
+ debug(format("lock acquired on ‘%1%’") % lockPath);
+
+ /* Check that the lock file hasn't become stale (i.e.,
+ hasn't been unlinked). */
+ struct stat st;
+ if (fstat(fd.get(), &st) == -1)
+ throw SysError(format("statting lock file ‘%1%’") % lockPath);
+ if (st.st_size != 0)
+ /* This lock file has been unlinked, so we're holding
+ a lock on a deleted file. This means that other
+ processes may create and acquire a lock on
+ `lockPath', and proceed. So we must retry. */
+ debug(format("open lock file ‘%1%’ has become stale") % lockPath);
+ else
+ break;
}
- debug(format("lock acquired on ‘%1%’") % lockPath);
-
- /* Check that the lock file hasn't become stale (i.e.,
- hasn't been unlinked). */
- struct stat st;
- if (fstat(fd.get(), &st) == -1)
- throw SysError(format("statting lock file ‘%1%’") % lockPath);
- if (st.st_size != 0)
- /* This lock file has been unlinked, so we're holding
- a lock on a deleted file. This means that other
- processes may create and acquire a lock on
- `lockPath', and proceed. So we must retry. */
- debug(format("open lock file ‘%1%’ has become stale") % lockPath);
- else
- break;
+ /* Use borrow so that the descriptor isn't closed. */
+ fds.push_back(FDPair(fd.release(), lockPath));
+
+ } catch (...) {
+ lockedPaths_.lock()->erase(lockPath);
+ throw;
}
- /* Use borrow so that the descriptor isn't closed. */
- fds.push_back(FDPair(fd.release(), lockPath));
- lockedPaths.insert(lockPath);
}
return true;
@@ -172,7 +184,8 @@ void PathLocks::unlock()
for (auto & i : fds) {
if (deletePaths) deleteLockFile(i.second, i.first);
- lockedPaths.erase(i.second);
+ lockedPaths_.lock()->erase(i.second);
+
if (close(i.first) == -1)
printError(
format("error (ignored): cannot close lock file on ‘%1%’") % i.second);
@@ -193,7 +206,7 @@ void PathLocks::setDeletion(bool deletePaths)
bool pathIsLockedByMe(const Path & path)
{
Path lockPath = path + ".lock";
- return lockedPaths.find(lockPath) != lockedPaths.end();
+ return lockedPaths_.lock()->count(lockPath);
}
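
lockedPaths_ above uses Nix's Sync<T> wrapper (from sync.hh), whose lock() returns an RAII proxy through which the guarded value is accessed, so the set of held lock paths can no longer be touched without holding its mutex. Sync itself is not part of this diff; the following is only a rough sketch of the idea, not the real implementation:

#include <mutex>
#include <set>
#include <string>

/* Sketch of a Sync<T>-style wrapper: the data is only reachable through
   a proxy object that holds the mutex for its own lifetime. */
template<typename T>
class SyncLike
{
    T data;
    std::mutex mutex;

public:
    class Lock
    {
        SyncLike * s;
        std::unique_lock<std::mutex> guard;
    public:
        Lock(SyncLike * s) : s(s), guard(s->mutex) { }
        T * operator ->() { return &s->data; }
        T & operator *() { return s->data; }
    };

    Lock lock() { return Lock(this); }
};

static SyncLike<std::set<std::string>> lockedPaths;

bool alreadyLocked(const std::string & lockPath)
{
    return lockedPaths.lock()->count(lockPath) > 0;
}
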
diff --git a/src/libstore/remote-fs-accessor.cc b/src/libstore/remote-fs-accessor.cc
new file mode 100644
index 000000000..ca14057c2
--- /dev/null
+++ b/src/libstore/remote-fs-accessor.cc
@@ -0,0 +1,57 @@
+#include "remote-fs-accessor.hh"
+#include "nar-accessor.hh"
+
+namespace nix {
+
+
+RemoteFSAccessor::RemoteFSAccessor(ref<Store> store)
+ : store(store)
+{
+}
+
+std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_)
+{
+ auto path = canonPath(path_);
+
+ auto storePath = store->toStorePath(path);
+ std::string restPath = std::string(path, storePath.size());
+
+ if (!store->isValidPath(storePath))
+ throw InvalidPath(format("path ‘%1%’ is not a valid store path") % storePath);
+
+ auto i = nars.find(storePath);
+ if (i != nars.end()) return {i->second, restPath};
+
+ StringSink sink;
+ store->narFromPath(storePath, sink);
+
+ auto accessor = makeNarAccessor(sink.s);
+ nars.emplace(storePath, accessor);
+ return {accessor, restPath};
+}
+
+FSAccessor::Stat RemoteFSAccessor::stat(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->stat(res.second);
+}
+
+StringSet RemoteFSAccessor::readDirectory(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->readDirectory(res.second);
+}
+
+std::string RemoteFSAccessor::readFile(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->readFile(res.second);
+}
+
+std::string RemoteFSAccessor::readLink(const Path & path)
+{
+ auto res = fetch(path);
+ return res.first->readLink(res.second);
+}
+
+}
diff --git a/src/libstore/remote-fs-accessor.hh b/src/libstore/remote-fs-accessor.hh
new file mode 100644
index 000000000..28f36c829
--- /dev/null
+++ b/src/libstore/remote-fs-accessor.hh
@@ -0,0 +1,29 @@
+#pragma once
+
+#include "fs-accessor.hh"
+#include "ref.hh"
+#include "store-api.hh"
+
+namespace nix {
+
+class RemoteFSAccessor : public FSAccessor
+{
+ ref<Store> store;
+
+ std::map<Path, ref<FSAccessor>> nars;
+
+ std::pair<ref<FSAccessor>, Path> fetch(const Path & path_);
+public:
+
+ RemoteFSAccessor(ref<Store> store);
+
+ Stat stat(const Path & path) override;
+
+ StringSet readDirectory(const Path & path) override;
+
+ std::string readFile(const Path & path) override;
+
+ std::string readLink(const Path & path) override;
+};
+
+}
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 7a041c6e0..77faa2f80 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -38,9 +38,9 @@ template<class T> T readStorePaths(Store & store, Source & from)
template PathSet readStorePaths(Store & store, Source & from);
-
+/* TODO: Separate these store impls into different files, give them better names */
RemoteStore::RemoteStore(const Params & params, size_t maxConnections)
- : LocalFSStore(params)
+ : Store(params)
, connections(make_ref<Pool<Connection>>(
maxConnections,
[this]() { return openConnection(); },
@@ -50,13 +50,21 @@ RemoteStore::RemoteStore(const Params & params, size_t maxConnections)
}
-std::string RemoteStore::getUri()
+UDSRemoteStore::UDSRemoteStore(const Params & params, size_t maxConnections)
+ : Store(params)
+ , LocalFSStore(params)
+ , RemoteStore(params, maxConnections)
+{
+}
+
+
+std::string UDSRemoteStore::getUri()
{
return "daemon";
}
-ref<RemoteStore::Connection> RemoteStore::openConnection()
+ref<RemoteStore::Connection> UDSRemoteStore::openConnection()
{
auto conn = make_ref<Connection>();
@@ -84,46 +92,52 @@ ref<RemoteStore::Connection> RemoteStore::openConnection()
conn->from.fd = conn->fd.get();
conn->to.fd = conn->fd.get();
+ initConnection(*conn);
+
+ return conn;
+}
+
+
+void RemoteStore::initConnection(Connection & conn)
+{
/* Send the magic greeting, check for the reply. */
try {
- conn->to << WORKER_MAGIC_1;
- conn->to.flush();
- unsigned int magic = readInt(conn->from);
+ conn.to << WORKER_MAGIC_1;
+ conn.to.flush();
+ unsigned int magic = readInt(conn.from);
if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
- conn->daemonVersion = readInt(conn->from);
- if (GET_PROTOCOL_MAJOR(conn->daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
+ conn.daemonVersion = readInt(conn.from);
+ if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
throw Error("Nix daemon protocol version not supported");
- if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 10)
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10)
throw Error("the Nix daemon version is too old");
- conn->to << PROTOCOL_VERSION;
+ conn.to << PROTOCOL_VERSION;
- if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 14) {
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 14) {
int cpu = settings.lockCPU ? lockToCurrentCPU() : -1;
if (cpu != -1)
- conn->to << 1 << cpu;
+ conn.to << 1 << cpu;
else
- conn->to << 0;
+ conn.to << 0;
}
- if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 11)
- conn->to << false;
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11)
+ conn.to << false;
- conn->processStderr();
+ conn.processStderr();
}
catch (Error & e) {
throw Error(format("cannot start daemon worker: %1%") % e.msg());
}
setOptions(conn);
-
- return conn;
}
-void RemoteStore::setOptions(ref<Connection> conn)
+void RemoteStore::setOptions(Connection & conn)
{
- conn->to << wopSetOptions
+ conn.to << wopSetOptions
<< settings.keepFailed
<< settings.keepGoing
<< settings.tryFallback
@@ -137,16 +151,16 @@ void RemoteStore::setOptions(ref<Connection> conn)
<< settings.buildCores
<< settings.useSubstitutes;
- if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 12) {
+ if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
Settings::SettingsMap overrides = settings.getOverrides();
if (overrides["ssh-auth-sock"] == "")
overrides["ssh-auth-sock"] = getEnv("SSH_AUTH_SOCK");
- conn->to << overrides.size();
+ conn.to << overrides.size();
for (auto & i : overrides)
- conn->to << i.first << i.second;
+ conn.to << i.first << i.second;
}
- conn->processStderr();
+ conn.processStderr();
}
@@ -335,7 +349,40 @@ Path RemoteStore::queryPathFromHashPart(const string & hashPart)
void RemoteStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
bool repair, bool dontCheckSigs, std::shared_ptr<FSAccessor> accessor)
{
- throw Error("RemoteStore::addToStore() not implemented");
+ auto conn(connections->get());
+
+ if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
+ conn->to << wopImportPaths;
+
+ StringSink sink;
+ sink << 1 // == path follows
+ ;
+ assert(nar->size() % 8 == 0);
+ sink((unsigned char *) nar->data(), nar->size());
+ sink
+ << exportMagic
+ << info.path
+ << info.references
+ << info.deriver
+ << 0 // == no legacy signature
+ << 0 // == no path follows
+ ;
+
+ StringSource source(*sink.s);
+ conn->processStderr(0, &source);
+
+ auto importedPaths = readStorePaths<PathSet>(*this, conn->from);
+ assert(importedPaths.size() <= 1);
+ }
+
+ else {
+ conn->to << wopAddToStoreNar
+ << info.path << info.deriver << printHash(info.narHash)
+ << info.references << info.registrationTime << info.narSize
+ << info.ultimate << info.sigs << *nar << repair << dontCheckSigs;
+ // FIXME: don't send nar as a string
+ conn->processStderr();
+ }
}
@@ -532,7 +579,6 @@ RemoteStore::Connection::~Connection()
{
try {
to.flush();
- fd = -1;
} catch (...) {
ignoreException();
}
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index b14ce4a97..40f17da30 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -18,7 +18,7 @@ template<typename T> class Pool;
/* FIXME: RemoteStore is a misnomer - should be something like
DaemonStore. */
-class RemoteStore : public LocalFSStore
+class RemoteStore : public virtual Store
{
public:
@@ -26,8 +26,6 @@ public:
/* Implementations of abstract store API methods. */
- std::string getUri() override;
-
bool isValidPathUncached(const Path & path) override;
PathSet queryValidPaths(const PathSet & paths) override;
@@ -87,25 +85,46 @@ public:
void addSignatures(const Path & storePath, const StringSet & sigs) override;
-private:
+protected:
struct Connection
{
- AutoCloseFD fd;
FdSink to;
FdSource from;
unsigned int daemonVersion;
- ~Connection();
+ virtual ~Connection();
void processStderr(Sink * sink = 0, Source * source = 0);
};
+ virtual ref<Connection> openConnection() = 0;
+
+ void initConnection(Connection & conn);
+
ref<Pool<Connection>> connections;
- ref<Connection> openConnection();
+private:
+
+ void setOptions(Connection & conn);
+};
+
+class UDSRemoteStore : public LocalFSStore, public RemoteStore
+{
+public:
+
+ UDSRemoteStore(const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max());
+
+ std::string getUri() override;
+
+private:
+
+ struct Connection : RemoteStore::Connection
+ {
+ AutoCloseFD fd;
+ };
- void setOptions(ref<Connection> conn);
+ ref<RemoteStore::Connection> openConnection() override;
};
diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc
index c11f2b06b..5361bd9da 100644
--- a/src/libstore/s3-binary-cache-store.cc
+++ b/src/libstore/s3-binary-cache-store.cc
@@ -1,6 +1,7 @@
#include "config.h"
#if ENABLE_S3
+#if __linux__
#include "s3-binary-cache-store.hh"
#include "nar-info.hh"
@@ -18,6 +19,15 @@
namespace nix {
+struct istringstream_nocopy : public std::stringstream
+{
+ istringstream_nocopy(const std::string & s)
+ {
+ rdbuf()->pubsetbuf(
+ (char *) s.data(), s.size());
+ }
+};
+
struct S3Error : public Error
{
Aws::S3::S3Errors err;
@@ -145,7 +155,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
.WithBucket(bucketName)
.WithKey(path);
- auto stream = std::make_shared<std::stringstream>(data);
+ auto stream = std::make_shared<istringstream_nocopy>(data);
request.SetBody(stream);
@@ -260,3 +270,4 @@ static RegisterStoreImplementation regStore([](
}
#endif
+#endif
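
istringstream_nocopy above avoids copying the (potentially large) upload body into the stream by pointing the stream buffer at the existing string via pubsetbuf. Whether pubsetbuf behaves this way on std::stringbuf is implementation-defined; it does on libstdc++, which is what this Linux-only code path targets. A minimal usage sketch (the payload string is made up):

#include <iostream>
#include <sstream>
#include <string>

/* Same trick as in s3-binary-cache-store.cc: reuse the string's buffer
   instead of copying it into the stringstream. The string must outlive
   the stream and must not be modified while the stream reads from it. */
struct istringstream_nocopy : public std::stringstream
{
    istringstream_nocopy(const std::string & s)
    {
        rdbuf()->pubsetbuf((char *) s.data(), s.size());
    }
};

int main()
{
    std::string payload = "nar-archive-bytes-go-here";
    istringstream_nocopy in(payload);

    std::string word;
    in >> word;
    /* On libstdc++ this prints the payload, read straight out of the
       original string's buffer. */
    std::cout << word << std::endl;
}
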
diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc
new file mode 100644
index 000000000..516648522
--- /dev/null
+++ b/src/libstore/ssh-store.cc
@@ -0,0 +1,130 @@
+#include "store-api.hh"
+#include "remote-store.hh"
+#include "remote-fs-accessor.hh"
+#include "archive.hh"
+#include "worker-protocol.hh"
+#include "pool.hh"
+
+namespace nix {
+
+class SSHStore : public RemoteStore
+{
+public:
+
+ SSHStore(string uri, const Params & params, size_t maxConnections = std::numeric_limits<size_t>::max());
+
+ std::string getUri() override;
+
+ void narFromPath(const Path & path, Sink & sink) override;
+
+ ref<FSAccessor> getFSAccessor() override;
+
+private:
+
+ struct Connection : RemoteStore::Connection
+ {
+ Pid sshPid;
+ AutoCloseFD out;
+ AutoCloseFD in;
+ };
+
+ ref<RemoteStore::Connection> openConnection() override;
+
+ AutoDelete tmpDir;
+
+ Path socketPath;
+
+ Pid sshMaster;
+
+ string uri;
+
+ Path key;
+};
+
+SSHStore::SSHStore(string uri, const Params & params, size_t maxConnections)
+ : Store(params)
+ , RemoteStore(params, maxConnections)
+ , tmpDir(createTempDir("", "nix", true, true, 0700))
+ , socketPath((Path) tmpDir + "/ssh.sock")
+ , uri(std::move(uri))
+ , key(get(params, "ssh-key", ""))
+{
+}
+
+string SSHStore::getUri()
+{
+ return "ssh://" + uri;
+}
+
+class ForwardSource : public Source
+{
+ Source & readSource;
+ Sink & writeSink;
+public:
+ ForwardSource(Source & readSource, Sink & writeSink) : readSource(readSource), writeSink(writeSink) {}
+ size_t read(unsigned char * data, size_t len) override
+ {
+ auto res = readSource.read(data, len);
+ writeSink(data, len);
+ return res;
+ }
+};
+
+void SSHStore::narFromPath(const Path & path, Sink & sink)
+{
+ auto conn(connections->get());
+ conn->to << wopNarFromPath << path;
+ conn->processStderr();
+ ParseSink ps;
+ auto fwd = ForwardSource(conn->from, sink);
+ parseDump(ps, fwd);
+}
+
+ref<FSAccessor> SSHStore::getFSAccessor()
+{
+ return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
+}
+
+ref<RemoteStore::Connection> SSHStore::openConnection()
+{
+ if ((pid_t) sshMaster == -1) {
+ sshMaster = startProcess([&]() {
+ if (key.empty())
+ execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), uri.c_str(), NULL);
+ else
+ execlp("ssh", "ssh", "-N", "-M", "-S", socketPath.c_str(), "-i", key.c_str(), uri.c_str(), NULL);
+ throw SysError("starting ssh master");
+ });
+ }
+
+ auto conn = make_ref<Connection>();
+ Pipe in, out;
+ in.create();
+ out.create();
+ conn->sshPid = startProcess([&]() {
+ if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
+ throw SysError("duping over STDIN");
+ if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
+ throw SysError("duping over STDOUT");
+ execlp("ssh", "ssh", "-S", socketPath.c_str(), uri.c_str(), "nix-daemon", "--stdio", NULL);
+ throw SysError("executing nix-daemon --stdio over ssh");
+ });
+ in.readSide = -1;
+ out.writeSide = -1;
+ conn->out = std::move(out.readSide);
+ conn->in = std::move(in.writeSide);
+ conn->to = FdSink(conn->in.get());
+ conn->from = FdSource(conn->out.get());
+ initConnection(*conn);
+ return conn;
+}
+
+static RegisterStoreImplementation regStore([](
+ const std::string & uri, const Store::Params & params)
+ -> std::shared_ptr<Store>
+{
+ if (std::string(uri, 0, 6) != "ssh://") return 0;
+ return std::make_shared<SSHStore>(uri.substr(6), params);
+});
+
+}
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index f365406cb..37a2d45fe 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -449,19 +449,19 @@ const Store::Stats & Store::getStats()
void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
- const Path & storePath, bool repair)
+ const Path & storePath, bool repair, bool dontCheckSigs)
{
auto info = srcStore->queryPathInfo(storePath);
StringSink sink;
srcStore->narFromPath({storePath}, sink);
- dstStore->addToStore(*info, sink.s, repair);
+ dstStore->addToStore(*info, sink.s, repair, dontCheckSigs);
}
void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
- const PathSet & storePaths, bool repair)
+ const PathSet & storePaths, bool repair, bool dontCheckSigs)
{
PathSet closure;
for (auto & path : storePaths)
@@ -480,7 +480,7 @@ void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
printMsg(lvlDebug, format("copying %1% missing paths") % missing.size());
for (auto & i : missing)
- copyStorePath(srcStore, dstStore, i, repair);
+ copyStorePath(srcStore, dstStore, i, repair, dontCheckSigs);
}
@@ -606,7 +606,7 @@ namespace nix {
RegisterStoreImplementation::Implementations * RegisterStoreImplementation::implementations = 0;
-ref<Store> openStoreAt(const std::string & uri_)
+ref<Store> openStore(const std::string & uri_)
{
auto uri(uri_);
Store::Params params;
@@ -629,9 +629,22 @@ ref<Store> openStoreAt(const std::string & uri_)
}
-ref<Store> openStore()
+StoreType getStoreType(const std::string & uri, const std::string & stateDir)
{
- return openStoreAt(getEnv("NIX_REMOTE"));
+ if (uri == "daemon") {
+ return tDaemon;
+ } else if (uri == "local") {
+ return tLocal;
+ } else if (uri == "") {
+ if (access(stateDir.c_str(), R_OK | W_OK) == 0)
+ return tLocal;
+ else if (pathExists(settings.nixDaemonSocketFile))
+ return tDaemon;
+ else
+ return tLocal;
+ } else {
+ return tOther;
+ }
}
@@ -639,26 +652,14 @@ static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
- enum { mDaemon, mLocal, mAuto } mode;
-
- if (uri == "daemon") mode = mDaemon;
- else if (uri == "local") mode = mLocal;
- else if (uri == "") mode = mAuto;
- else return 0;
-
- if (mode == mAuto) {
- auto stateDir = get(params, "state", settings.nixStateDir);
- if (access(stateDir.c_str(), R_OK | W_OK) == 0)
- mode = mLocal;
- else if (pathExists(settings.nixDaemonSocketFile))
- mode = mDaemon;
- else
- mode = mLocal;
+ switch (getStoreType(uri, get(params, "state", settings.nixStateDir))) {
+ case tDaemon:
+ return std::shared_ptr<Store>(std::make_shared<UDSRemoteStore>(params));
+ case tLocal:
+ return std::shared_ptr<Store>(std::make_shared<LocalStore>(params));
+ default:
+ return nullptr;
}
-
- return mode == mDaemon
- ? std::shared_ptr<Store>(std::make_shared<RemoteStore>(params))
- : std::shared_ptr<Store>(std::make_shared<LocalStore>(params));
});
@@ -679,7 +680,7 @@ std::list<ref<Store>> getDefaultSubstituters()
auto addStore = [&](const std::string & uri) {
if (done.count(uri)) return;
done.insert(uri);
- state->stores.push_back(openStoreAt(uri));
+ state->stores.push_back(openStore(uri));
};
for (auto uri : settings.get("substituters", Strings()))
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 3057106ec..789526cc2 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -5,6 +5,7 @@
#include "crypto.hh"
#include "lru-cache.hh"
#include "sync.hh"
+#include "globals.hh"
#include <atomic>
#include <limits>
@@ -207,7 +208,20 @@ struct BuildResult
NotDeterministic,
} status = MiscFailure;
std::string errorMsg;
- //time_t startTime = 0, stopTime = 0;
+
+ /* How many times this build was performed. */
+ unsigned int timesBuilt = 0;
+
+ /* If timesBuilt > 1, whether some builds did not produce the same
+ result. (Note that 'isNonDeterministic = false' does not mean
+ the build is deterministic, just that we don't have evidence of
+ non-determinism.) */
+ bool isNonDeterministic = false;
+
+ /* The start/stop times of the build (or one of the rounds, if it
+ was repeated). */
+ time_t startTime = 0, stopTime = 0;
+
bool success() {
return status == Built || status == Substituted || status == AlreadyValid;
}
@@ -476,15 +490,19 @@ public:
ensurePath(). */
Derivation derivationFromPath(const Path & drvPath);
- /* Place in `paths' the set of all store paths in the file system
+ /* Place in `out' the set of all store paths in the file system
closure of `storePath'; that is, all paths than can be directly
- or indirectly reached from it. `paths' is not cleared. If
+ or indirectly reached from it. `out' is not cleared. If
`flipDirection' is true, the set of paths that can reach
`storePath' is returned; that is, the closures under the
`referrers' relation instead of the `references' relation is
returned. */
+ void computeFSClosure(const PathSet & paths,
+ PathSet & out, bool flipDirection = false,
+ bool includeOutputs = false, bool includeDerivers = false);
+
void computeFSClosure(const Path & path,
- PathSet & paths, bool flipDirection = false,
+ PathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false);
/* Given a set of paths that are to be built, return the set of
@@ -537,7 +555,7 @@ protected:
};
-class LocalFSStore : public Store
+class LocalFSStore : public virtual Store
{
public:
const Path rootDir;
@@ -576,12 +594,12 @@ void checkStoreName(const string & name);
/* Copy a path from one store to another. */
void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
- const Path & storePath, bool repair = false);
+ const Path & storePath, bool repair = false, bool dontCheckSigs = false);
/* Copy the closure of the specified paths from one store to another. */
void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
- const PathSet & storePaths, bool repair = false);
+ const PathSet & storePaths, bool repair = false, bool dontCheckSigs = false);
/* Remove the temporary roots file for this process. Any temporary
@@ -604,12 +622,17 @@ void removeTempRoots();
If ‘uri’ is empty, it defaults to ‘direct’ or ‘daemon’ depending on
whether the user has write access to the local Nix store/database.
set to true *unless* you're going to collect garbage. */
-ref<Store> openStoreAt(const std::string & uri);
+ref<Store> openStore(const std::string & uri = getEnv("NIX_REMOTE"));
-/* Open the store indicated by the ‘NIX_REMOTE’ environment variable. */
-ref<Store> openStore();
+enum StoreType {
+ tDaemon,
+ tLocal,
+ tOther
+};
+
+StoreType getStoreType(const std::string & uri = getEnv("NIX_REMOTE"), const std::string & stateDir = settings.nixStateDir);
/* Return the default substituter stores, defined by the
‘substituters’ option and various legacy options like
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index f8cd7cc4b..6a4ed47cc 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -6,7 +6,7 @@ namespace nix {
#define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f
-#define PROTOCOL_VERSION 0x111
+#define PROTOCOL_VERSION 0x112
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
@@ -46,6 +46,8 @@ typedef enum {
wopVerifyStore = 35,
wopBuildDerivation = 36,
wopAddSignatures = 37,
+ wopNarFromPath = 38,
+ wopAddToStoreNar = 39
} WorkerOp;
diff --git a/src/libutil/finally.hh b/src/libutil/finally.hh
index 47c64deae..7760cfe9a 100644
--- a/src/libutil/finally.hh
+++ b/src/libutil/finally.hh
@@ -1,5 +1,7 @@
#pragma once
+#include <functional>
+
/* A trivial class to run a function at the end of a scope. */
class Finally
{
diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh
index ba99a81c3..3e6c4b548 100644
--- a/src/libutil/logging.hh
+++ b/src/libutil/logging.hh
@@ -79,6 +79,7 @@ extern Verbosity verbosity; /* suppress msgs > this */
#define printError(args...) printMsg(lvlError, args)
#define printInfo(args...) printMsg(lvlInfo, args)
#define debug(args...) printMsg(lvlDebug, args)
+#define vomit(args...) printMsg(lvlVomit, args)
void warnOnce(bool & haveWarned, const FormatOrString & fs);
diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc
index 5b4c21819..361627823 100755
--- a/src/nix-channel/nix-channel.cc
+++ b/src/nix-channel/nix-channel.cc
@@ -76,10 +76,10 @@ static void update(const StringSet & channelNames)
// Download each channel.
auto exprs = Strings{};
for (const auto & channel : channels) {
- if (!channelNames.empty() && channelNames.find(channel.first) != channelNames.end())
- continue;
auto name = channel.first;
auto url = channel.second;
+ if (!(channelNames.empty() || channelNames.count(name)))
+ continue;
// We want to download the url to a file to see if it's a tarball while also checking if we
// got redirected in the process, so that we can grab the various parts of a nix channel
diff --git a/src/nix-daemon/nix-daemon.cc b/src/nix-daemon/nix-daemon.cc
index 41c847dad..90a730187 100644
--- a/src/nix-daemon/nix-daemon.cc
+++ b/src/nix-daemon/nix-daemon.cc
@@ -22,6 +22,7 @@
#include <errno.h>
#include <pwd.h>
#include <grp.h>
+#include <fcntl.h>
#if __APPLE__ || __FreeBSD__
#include <sys/ucred.h>
@@ -29,6 +30,25 @@
using namespace nix;
+#ifndef __linux__
+#define SPLICE_F_MOVE 0
+static ssize_t splice(int fd_in, void *off_in, int fd_out, void *off_out, size_t len, unsigned int flags)
+{
+ /* We ignore most parameters, we just have them for conformance with the linux syscall */
+ char buf[8192];
+ auto read_count = read(fd_in, buf, sizeof(buf));
+ if (read_count == -1)
+ return read_count;
+ auto write_count = decltype(read_count)(0);
+ while (write_count < read_count) {
+ auto res = write(fd_out, buf + write_count, read_count - write_count);
+ if (res == -1)
+ return res;
+ write_count += res;
+ }
+ return read_count;
+}
+#endif
static FdSource from(STDIN_FILENO);
static FdSink to(STDOUT_FILENO);
@@ -324,7 +344,7 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
case wopImportPaths: {
startWork();
TunnelSource source(from);
- Paths paths = store->importPaths(source, 0);
+ Paths paths = store->importPaths(source, 0, trusted);
stopWork();
to << paths;
break;
@@ -556,6 +576,37 @@ static void performOp(ref<LocalStore> store, bool trusted, unsigned int clientVe
break;
}
+ case wopNarFromPath: {
+ auto path = readStorePath(*store, from);
+ startWork();
+ stopWork();
+ dumpPath(path, to);
+ break;
+ }
+
+ case wopAddToStoreNar: {
+ ValidPathInfo info;
+ info.path = readStorePath(*store, from);
+ info.deriver = readString(from);
+ if (!info.deriver.empty())
+ store->assertStorePath(info.deriver);
+ info.narHash = parseHash(htSHA256, readString(from));
+ info.references = readStorePaths<PathSet>(*store, from);
+ info.registrationTime = readInt(from);
+ info.narSize = readLongLong(from);
+ info.ultimate = readLongLong(from);
+ info.sigs = readStrings<StringSet>(from);
+ auto nar = make_ref<std::string>(readString(from));
+ auto repair = readInt(from) ? true : false;
+ auto dontCheckSigs = readInt(from) ? true : false;
+ if (!trusted && dontCheckSigs)
+ dontCheckSigs = false;
+ startWork();
+ store->addToStore(info, nar, repair, dontCheckSigs, nullptr);
+ stopWork();
+ break;
+ }
+
default:
throw Error(format("invalid operation %1%") % op);
}
@@ -885,6 +936,8 @@ int main(int argc, char * * argv)
return handleExceptions(argv[0], [&]() {
initNix();
+ auto stdio = false;
+
parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) {
if (*arg == "--daemon")
; /* ignored for backwards compatibility */
@@ -892,10 +945,62 @@ int main(int argc, char * * argv)
showManPage("nix-daemon");
else if (*arg == "--version")
printVersion("nix-daemon");
+ else if (*arg == "--stdio")
+ stdio = true;
else return false;
return true;
});
- daemonLoop(argv);
+ if (stdio) {
+ if (getStoreType() == tDaemon) {
+ /* Forward on this connection to the real daemon */
+ auto socketPath = settings.nixDaemonSocketFile;
+ auto s = socket(PF_UNIX, SOCK_STREAM, 0);
+ if (s == -1)
+ throw SysError("creating Unix domain socket");
+
+ auto socketDir = dirOf(socketPath);
+ if (chdir(socketDir.c_str()) == -1)
+ throw SysError(format("changing to socket directory ‘%1%’") % socketDir);
+
+ auto socketName = baseNameOf(socketPath);
+ auto addr = sockaddr_un{};
+ addr.sun_family = AF_UNIX;
+ if (socketName.size() + 1 >= sizeof(addr.sun_path))
+ throw Error(format("socket name %1% is too long") % socketName);
+ strcpy(addr.sun_path, socketName.c_str());
+
+ if (connect(s, (struct sockaddr *) &addr, sizeof(addr)) == -1)
+ throw SysError(format("cannot connect to daemon at %1%") % socketPath);
+
+ auto nfds = (s > STDIN_FILENO ? s : STDIN_FILENO) + 1;
+ while (true) {
+ fd_set fds;
+ FD_ZERO(&fds);
+ FD_SET(s, &fds);
+ FD_SET(STDIN_FILENO, &fds);
+ if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1)
+ throw SysError("waiting for data from client or server");
+ if (FD_ISSET(s, &fds)) {
+ auto res = splice(s, nullptr, STDOUT_FILENO, nullptr, SIZE_MAX, SPLICE_F_MOVE);
+ if (res == -1)
+ throw SysError("splicing data from daemon socket to stdout");
+ else if (res == 0)
+ throw EndOfFile("unexpected EOF from daemon socket");
+ }
+ if (FD_ISSET(STDIN_FILENO, &fds)) {
+ auto res = splice(STDIN_FILENO, nullptr, s, nullptr, SIZE_MAX, SPLICE_F_MOVE);
+ if (res == -1)
+ throw SysError("splicing data from stdin to daemon socket");
+ else if (res == 0)
+ return;
+ }
+ }
+ } else {
+ processConnection(true);
+ }
+ } else {
+ daemonLoop(argv);
+ }
});
}
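In --stdio mode the daemon speaks the worker protocol on stdin/stdout: when the configured store is the daemon itself (tDaemon), the process just proxies bytes between stdio and the daemon socket using select() and splice(); otherwise it serves the connection in-process via processConnection(true). Presumably this is what the new SSHStore (ssh-store.cc in this diff) builds on, e.g. by running nix-daemon --stdio on the remote host and tunnelling the protocol over the SSH channel. A minimal local sketch of launching such a child and obtaining a protocol fd (an illustration only, assuming Nix's SysError from util.hh):

    #include <sys/socket.h>
    #include <unistd.h>

    /* Hypothetical: spawn `nix-daemon --stdio` and return an fd on which
       the caller can speak the worker protocol. */
    static int spawnStdioDaemon()
    {
        int fds[2];
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == -1)
            throw SysError("creating socket pair");
        pid_t pid = fork();
        if (pid == -1)
            throw SysError("forking");
        if (pid == 0) {
            /* Child: wire stdin/stdout to one end of the pair and exec. */
            dup2(fds[1], STDIN_FILENO);
            dup2(fds[1], STDOUT_FILENO);
            close(fds[0]);
            close(fds[1]);
            execlp("nix-daemon", "nix-daemon", "--stdio", (char *) nullptr);
            _exit(1);
        }
        close(fds[1]);
        return fds[0];
    }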
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index a8cb46319..c1e6afef0 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -424,10 +424,9 @@ static void opQuery(Strings opFlags, Strings opArgs)
case qRoots: {
PathSet referrers;
for (auto & i : opArgs) {
- PathSet paths = maybeUseOutputs(store->followLinksToStorePath(i), useOutput, forceRealise);
- for (auto & j : paths)
- store->computeFSClosure(j, referrers, true,
- settings.gcKeepOutputs, settings.gcKeepDerivations);
+ store->computeFSClosure(
+ maybeUseOutputs(store->followLinksToStorePath(i), useOutput, forceRealise),
+ referrers, true, settings.gcKeepOutputs, settings.gcKeepDerivations);
}
Roots roots = store->findRoots();
for (auto & i : roots)
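Here (and in the other call sites changed below) computeFSClosure is handed a whole PathSet at once instead of being invoked once per path, presumably matching the signature change in store-api.hh/store-api.cc earlier in this diff. A usage sketch; the parameter roles are inferred from the positional arguments above, and the start paths are hypothetical:

    PathSet closure;
    store->computeFSClosure(
        PathSet{pathA, pathB},        /* starting points */
        closure,                      /* result set, filled in */
        true,                         /* flip direction: collect referrers */
        settings.gcKeepOutputs,       /* also include outputs */
        settings.gcKeepDerivations);  /* also include derivers */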
@@ -841,6 +840,12 @@ static void opServe(Strings opFlags, Strings opArgs)
settings.buildTimeout = readInt(in);
if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
settings.maxLogSize = readInt(in);
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 3) {
+ settings.set("build-repeat", std::to_string(readInt(in)));
+ settings.set("enforce-determinism", readInt(in) != 0 ? "true" : "false");
+ settings.set("run-diff-hook", "true");
+ }
+ settings.printRepeatedBuilds = false;
};
while (true) {
@@ -956,15 +961,17 @@ static void opServe(Strings opFlags, Strings opArgs)
out << status.status << status.errorMsg;
+ if (GET_PROTOCOL_MINOR(clientVersion) >= 3)
+ out << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime;
+
break;
}
case cmdQueryClosure: {
bool includeOutputs = readInt(in);
- PathSet paths = readStorePaths<PathSet>(*store, in);
PathSet closure;
- for (auto & i : paths)
- store->computeFSClosure(i, closure, false, includeOutputs);
+ store->computeFSClosure(readStorePaths<PathSet>(*store, in),
+ closure, false, includeOutputs);
out << closure;
break;
}
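From serve-protocol minor version 3 on, determinism data flows in both directions: the client may set build-repeat and enforce-determinism (with run-diff-hook forced on for such connections), and the build status gains timesBuilt, isNonDeterministic, startTime and stopTime. A hypothetical reader for those extra status fields, mirroring the writes above; the real counterpart lives in the build-hook code changed earlier in this diff, and the names in, remoteVersion and status are assumptions here:

    /* Read the extra fields only when the peer advertises minor >= 3. */
    if (GET_PROTOCOL_MINOR(remoteVersion) >= 3) {
        status.timesBuilt = readInt(in);
        status.isNonDeterministic = readInt(in);
        status.startTime = readInt(in);
        status.stopTime = readInt(in);
    }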
diff --git a/src/nix-store/serve-protocol.hh b/src/nix-store/serve-protocol.hh
index c4e2a3703..f8cc9a4b6 100644
--- a/src/nix-store/serve-protocol.hh
+++ b/src/nix-store/serve-protocol.hh
@@ -5,7 +5,7 @@ namespace nix {
#define SERVE_MAGIC_1 0x390c9deb
#define SERVE_MAGIC_2 0x5452eecb
-#define SERVE_PROTOCOL_VERSION 0x202
+#define SERVE_PROTOCOL_VERSION 0x203
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
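The bump from 0x202 to 0x203 leaves the major version at 0x200 and raises the minor version to 3, which is exactly what the GET_PROTOCOL_MINOR(clientVersion) >= 3 guards above test. A compile-time check of that arithmetic (illustrative only):

    static_assert(GET_PROTOCOL_MAJOR(SERVE_PROTOCOL_VERSION) == 0x200, "serve protocol major");
    static_assert(GET_PROTOCOL_MINOR(SERVE_PROTOCOL_VERSION) == 3, "serve protocol minor");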
diff --git a/src/nix/command.cc b/src/nix/command.cc
index 37534015b..5a8288da9 100644
--- a/src/nix/command.cc
+++ b/src/nix/command.cc
@@ -81,7 +81,7 @@ StoreCommand::StoreCommand()
void StoreCommand::run()
{
- run(openStoreAt(storeUri));
+ run(openStore(storeUri));
}
StorePathsCommand::StorePathsCommand()
@@ -106,8 +106,8 @@ void StorePathsCommand::run(ref<Store> store)
if (recursive) {
PathSet closure;
- for (auto & storePath : storePaths)
- store->computeFSClosure(storePath, closure, false, false);
+ store->computeFSClosure(PathSet(storePaths.begin(), storePaths.end()),
+ closure, false, false);
storePaths = Paths(closure.begin(), closure.end());
}
}
diff --git a/src/nix/copy.cc b/src/nix/copy.cc
index de306cbf9..e8317dc39 100644
--- a/src/nix/copy.cc
+++ b/src/nix/copy.cc
@@ -43,8 +43,8 @@ struct CmdCopy : StorePathsCommand
if (srcUri.empty() && dstUri.empty())
throw UsageError("you must pass ‘--from’ and/or ‘--to’");
- ref<Store> srcStore = srcUri.empty() ? store : openStoreAt(srcUri);
- ref<Store> dstStore = dstUri.empty() ? store : openStoreAt(dstUri);
+ ref<Store> srcStore = srcUri.empty() ? store : openStore(srcUri);
+ ref<Store> dstStore = dstUri.empty() ? store : openStore(dstUri);
std::string copiedLabel = "copied";
diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc
index 0ff1b9f7c..d8d8c0f53 100644
--- a/src/nix/sigs.cc
+++ b/src/nix/sigs.cc
@@ -35,7 +35,7 @@ struct CmdCopySigs : StorePathsCommand
// FIXME: factor out commonality with MixVerify.
std::vector<ref<Store>> substituters;
for (auto & s : substituterUris)
- substituters.push_back(openStoreAt(s));
+ substituters.push_back(openStore(s));
ThreadPool pool;
diff --git a/src/nix/verify.cc b/src/nix/verify.cc
index 5314a42a4..2f8d02fa0 100644
--- a/src/nix/verify.cc
+++ b/src/nix/verify.cc
@@ -52,7 +52,7 @@ struct CmdVerify : StorePathsCommand
{
std::vector<ref<Store>> substituters;
for (auto & s : substituterUris)
- substituters.push_back(openStoreAt(s));
+ substituters.push_back(openStore(s));
auto publicKeys = getDefaultPublicKeys();
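All of the new-style commands now go through openStore(uri) instead of openStoreAt(uri); together with the store-URI handling in store-api.cc (changed earlier in this diff), one call presumably covers the local store, the daemon, binary caches and the new SSH store alike. The common pattern, as used in sigs.cc and verify.cc above:

    ref<Store> store = openStore(storeUri);
    std::vector<ref<Store>> substituters;
    for (auto & uri : substituterUris)
        substituters.push_back(openStore(uri));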
diff --git a/tests/lang/eval-fail-path-slash.nix b/tests/lang/eval-fail-path-slash.nix
new file mode 100644
index 000000000..530105b32
--- /dev/null
+++ b/tests/lang/eval-fail-path-slash.nix
@@ -0,0 +1,6 @@
+# Trailing slashes in paths are not allowed.
+# This restriction could be lifted sometime,
+# for example if we make '/' a path concatenation operator.
+# See https://github.com/NixOS/nix/issues/1138
+# and http://lists.science.uu.nl/pipermail/nix-dev/2016-June/020829.html
+/nix/store/
diff --git a/tests/lang/eval-okay-comments.exp b/tests/lang/eval-okay-comments.exp
new file mode 100644
index 000000000..7182dc2f9
--- /dev/null
+++ b/tests/lang/eval-okay-comments.exp
@@ -0,0 +1 @@
+"abcdefghijklmnopqrstuvwxyz"
diff --git a/tests/lang/eval-okay-comments.nix b/tests/lang/eval-okay-comments.nix
new file mode 100644
index 000000000..cb2cce218
--- /dev/null
+++ b/tests/lang/eval-okay-comments.nix
@@ -0,0 +1,59 @@
+# A simple comment
+"a"+ # And another
+## A double comment
+"b"+ ## And another
+# Nested # comments #
+"c"+ # and # some # other #
+# An empty line, following here:
+
+"d"+ # and a comment not starting the line !
+
+"e"+
+/* multiline comments */
+"f" +
+/* multiline
+ comments,
+ on
+ multiple
+ lines
+*/
+"g" +
+# Small, tricky comments
+/**/ "h"+ /*/*/ "i"+ /***/ "j"+ /* /*/ "k"+ /*/* /*/ "l"+
+# Comments with an even number of ending '*' used to fail:
+"m"+
+/* */ /* **/ /* ***/ /* ****/ "n"+
+/* */ /** */ /*** */ /**** */ "o"+
+/** **/ /*** ***/ /**** ****/ "p"+
+/* * ** *** **** ***** */ "q"+
+# Random comments
+/* ***** ////// * / * / /* */ "r"+
+# Mixed comments
+/* # */
+"s"+
+# /* #
+"t"+
+# /* # */
+"u"+
+# /*********/
+"v"+
+## */*
+"w"+
+/*
+ * Multiline, decorated comments
+ * # This ain't a nest'd comm'nt
+ */
+"x"+
+''${/** with **/"y"
+ # real
+ /* comments
+ inside ! # */
+
+ # (and empty lines)
+
+}''+ /* And a multiline comment,
+ on the same line,
+ after some spaces
+*/ # followed by a one-line comment
+"z"
+/* EOF */